github.com/npm/cli.git
author     claudiahdz <cghr1990@gmail.com>  2020-02-06 23:25:33 +0300
committer  isaacs <i@izs.me>  2020-05-08 04:11:51 +0300
commit     0a45514d8beba749bebbd973ca9750ee0dc13b40 (patch)
tree       eec0c989c38fcb7395c9fd565f2a44c79369c59e /node_modules/cacache
parent     019e6c41a85edf62727b517e1dfe2d5b9e53ba0e (diff)
cacache@15.0.0
Diffstat (limited to 'node_modules/cacache')
-rw-r--r--  node_modules/cacache/CHANGELOG.md | 112
-rw-r--r--  node_modules/cacache/README.es.md | 628
-rw-r--r--  node_modules/cacache/README.md | 104
-rw-r--r--  node_modules/cacache/en.js | 3
-rw-r--r--  node_modules/cacache/es.js | 3
-rw-r--r--  node_modules/cacache/get.js | 338
-rw-r--r--  node_modules/cacache/index.js | 40
-rw-r--r--  node_modules/cacache/lib/content/path.js | 11
-rw-r--r--  node_modules/cacache/lib/content/read.js | 201
-rw-r--r--  node_modules/cacache/lib/content/rm.js | 17
-rw-r--r--  node_modules/cacache/lib/content/write.js | 232
-rw-r--r--  node_modules/cacache/lib/entry-index.js | 271
-rw-r--r--  node_modules/cacache/lib/memoization.js | 25
-rw-r--r--  node_modules/cacache/lib/util/disposer.js | 30
-rw-r--r--  node_modules/cacache/lib/util/fix-owner.js | 55
-rw-r--r--  node_modules/cacache/lib/util/hash-to-segments.js | 6
-rw-r--r--  node_modules/cacache/lib/util/move-file.js | 73
-rw-r--r--  node_modules/cacache/lib/util/tmp.js | 24
-rw-r--r--  node_modules/cacache/lib/util/y.js | 25
-rw-r--r--  node_modules/cacache/lib/verify.js | 276
-rw-r--r--  node_modules/cacache/locales/en.js | 47
-rw-r--r--  node_modules/cacache/locales/en.json | 7
-rw-r--r--  node_modules/cacache/locales/es.js | 49
-rw-r--r--  node_modules/cacache/locales/es.json | 6
-rw-r--r--  node_modules/cacache/ls.js | 2
l---------  node_modules/cacache/node_modules/.bin/mkdirp | 1
l---------  node_modules/cacache/node_modules/.bin/rimraf | 1
-rw-r--r--  node_modules/cacache/node_modules/fs-minipass/LICENSE | 15
-rw-r--r--  node_modules/cacache/node_modules/fs-minipass/README.md | 70
-rw-r--r--  node_modules/cacache/node_modules/fs-minipass/index.js | 422
-rw-r--r--  node_modules/cacache/node_modules/fs-minipass/package.json | 69
-rw-r--r--  node_modules/cacache/node_modules/minipass/LICENSE | 15
-rw-r--r--  node_modules/cacache/node_modules/minipass/README.md | 606
-rw-r--r--  node_modules/cacache/node_modules/minipass/index.js | 538
-rw-r--r--  node_modules/cacache/node_modules/minipass/package.json | 75
-rw-r--r--  node_modules/cacache/node_modules/minizlib/LICENSE | 26
-rw-r--r--  node_modules/cacache/node_modules/minizlib/README.md | 60
-rw-r--r--  node_modules/cacache/node_modules/minizlib/constants.js | 115
-rw-r--r--  node_modules/cacache/node_modules/minizlib/index.js | 338
-rw-r--r--  node_modules/cacache/node_modules/minizlib/package.json | 75
-rw-r--r--  node_modules/cacache/node_modules/mkdirp/CHANGELOG.md | 15
-rw-r--r--  node_modules/cacache/node_modules/mkdirp/LICENSE | 21
-rwxr-xr-x  node_modules/cacache/node_modules/mkdirp/bin/cmd.js | 68
-rw-r--r--  node_modules/cacache/node_modules/mkdirp/index.js | 31
-rw-r--r--  node_modules/cacache/node_modules/mkdirp/lib/find-made.js | 29
-rw-r--r--  node_modules/cacache/node_modules/mkdirp/lib/mkdirp-manual.js | 64
-rw-r--r--  node_modules/cacache/node_modules/mkdirp/lib/mkdirp-native.js | 39
-rw-r--r--  node_modules/cacache/node_modules/mkdirp/lib/opts-arg.js | 23
-rw-r--r--  node_modules/cacache/node_modules/mkdirp/lib/path-arg.js | 29
-rw-r--r--  node_modules/cacache/node_modules/mkdirp/lib/use-native.js | 10
-rw-r--r--  node_modules/cacache/node_modules/mkdirp/package.json | 76
-rw-r--r--  node_modules/cacache/node_modules/mkdirp/readme.markdown | 266
-rw-r--r--  node_modules/cacache/node_modules/rimraf/LICENSE | 15
-rw-r--r--  node_modules/cacache/node_modules/rimraf/README.md | 101
-rwxr-xr-x  node_modules/cacache/node_modules/rimraf/bin.js | 50
-rw-r--r--  node_modules/cacache/node_modules/rimraf/package.json | 67
-rw-r--r--  node_modules/cacache/node_modules/rimraf/rimraf.js | 372
-rw-r--r--  node_modules/cacache/node_modules/ssri/CHANGELOG.md | 286
-rw-r--r--  node_modules/cacache/node_modules/ssri/LICENSE.md | 16
-rw-r--r--  node_modules/cacache/node_modules/ssri/README.md | 488
-rw-r--r--  node_modules/cacache/node_modules/ssri/index.js | 395
-rw-r--r--  node_modules/cacache/node_modules/ssri/package.json | 89
-rw-r--r--  node_modules/cacache/node_modules/tar/CHANGELOG.md | 66
-rw-r--r--  node_modules/cacache/node_modules/tar/LICENSE | 15
-rw-r--r--  node_modules/cacache/node_modules/tar/README.md | 1031
-rw-r--r--  node_modules/cacache/node_modules/tar/index.js | 18
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/create.js | 105
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/extract.js | 112
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/get-write-flag.js | 20
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/header.js | 288
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/high-level-opt.js | 29
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/large-numbers.js | 97
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/list.js | 128
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/mkdir.js | 206
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/mode-fix.js | 24
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/pack.js | 403
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/parse.js | 483
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/path-reservations.js | 125
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/pax.js | 145
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/read-entry.js | 98
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/replace.js | 219
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/types.js | 44
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/unpack.js | 680
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/update.js | 36
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/warn-mixin.js | 21
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/winchars.js | 23
-rw-r--r--  node_modules/cacache/node_modules/tar/lib/write-entry.js | 436
-rw-r--r--  node_modules/cacache/node_modules/tar/package.json | 81
-rw-r--r--  node_modules/cacache/node_modules/yallist/LICENSE | 15
-rw-r--r--  node_modules/cacache/node_modules/yallist/README.md | 204
-rw-r--r--  node_modules/cacache/node_modules/yallist/iterator.js | 8
-rw-r--r--  node_modules/cacache/node_modules/yallist/package.json | 64
-rw-r--r--  node_modules/cacache/node_modules/yallist/yallist.js | 426
-rw-r--r--  node_modules/cacache/package.json | 95
-rw-r--r--  node_modules/cacache/put.js | 132
-rw-r--r--  node_modules/cacache/rm.js | 7
96 files changed, 10578 insertions, 2867 deletions
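
The headline change in this bump, per the 15.0.0 changelog entry below, is that figgy-pudding was dropped and options are now passed as a plain object with canonical names. A minimal sketch of what that looks like from a caller's side; the cache path, key, and data here are assumed placeholder values, not taken from this diff:

```javascript
const cacache = require('cacache')
const cachePath = '/tmp/my-toy-cache' // assumed location

cacache.put(cachePath, 'my-key', Buffer.from('hello'), {
  metadata: { note: 'arbitrary user metadata' },
  algorithms: ['sha512'], // digest algorithm for the returned integrity string
  memoize: true           // keep the entry in the in-process memoization cache
}).then((integrity) => {
  console.log('stored with integrity', integrity)
  // Subsequent reads can come from memory while memoized.
  return cacache.get(cachePath, 'my-key', { memoize: true })
}).then(({ data, metadata, integrity, size }) => {
  console.log(data.toString(), metadata, integrity, size)
})
```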
diff --git a/node_modules/cacache/CHANGELOG.md b/node_modules/cacache/CHANGELOG.md
index f67fbc8b4..b8540a375 100644
--- a/node_modules/cacache/CHANGELOG.md
+++ b/node_modules/cacache/CHANGELOG.md
@@ -2,6 +2,116 @@
All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.
+## [15.0.0](https://github.com/npm/cacache/compare/v14.0.0...v15.0.0) (2020-02-18)
+
+
+### ⚠ BREAKING CHANGES
+
+* drop figgy-pudding and use canonical option names.
+
+### Features
+
+* remove figgy-pudding ([57d11bc](https://github.com/npm/cacache/commit/57d11bce34f979247d1057d258acc204c4944491))
+
+## [14.0.0](https://github.com/npm/cacache/compare/v13.0.1...v14.0.0) (2020-01-28)
+
+
+### ⚠ BREAKING CHANGES
+
+* **deps:** bumps engines to >= 10
+
+* **deps:** tar v6 and mkdirp v1 ([5a66e7a](https://github.com/npm/cacache/commit/5a66e7a))
+
+### [13.0.1](https://github.com/npm/cacache/compare/v13.0.0...v13.0.1) (2019-09-30)
+
+
+### Bug Fixes
+
+* **fix-owner:** chownr.sync quits on non-root uid ([08801be](https://github.com/npm/cacache/commit/08801be))
+
+## [13.0.0](https://github.com/npm/cacache/compare/v12.0.3...v13.0.0) (2019-09-25)
+
+
+### ⚠ BREAKING CHANGES
+
+* This subtly changes the streaming interface of
+everything in cacache that streams, which is, well, everything in
+cacache. Most users will probably not notice, but any code that
+depended on stream behavior always being deferred until next tick will
+need to adjust.
+
+The mississippi methods 'to', 'from', 'through', and so on, have been
+replaced with their Minipass counterparts, and streaming interaction
+with the file system is done via fs-minipass.
+
+The following modules are of interest here:
+
+- [minipass](http://npm.im/minipass) The core stream library.
+
+- [fs-minipass](http://npm.im/fs-minipass) Note that the 'WriteStream'
+ class from fs-minipass is _not_ a Minipass stream, but rather a plain
+ old EventEmitter that duck types as a Writable.
+
+- [minipass-collect](http://npm.im/minipass-collect) Gather up all the
+ data from a stream. Cacache only uses Collect.PassThrough, which is a
+ basic Minipass passthrough stream which emits a 'collect' event with
+ the completed data just before the 'end' event.
+
+- [minipass-pipeline](http://npm.im/minipass-pipeline) Connect one or
+ more streams into a pipe chain. Errors anywhere in the pipeline are
+ proxied down the chain and then up to the Pipeline object itself.
+ Writes go into the head, reads go to the tail. Used in place of
+ pump() and pumpify().
+
+- [minipass-flush](http://npm.im/minipass-flush) A Minipass passthrough
+ stream that defers its 'end' event until after a flush() method has
+ completed (either calling the supplied callback, or returning a
+ promise.) Use in place of flush-write-stream (aka mississippi.to).
+
+Streams from through2, concat-stream, and the behavior provided by
+end-of-stream are all implemented in Minipass itself.
+
+Features of interest to cacache, which make Minipass a particularly good
+fit:
+
+- All of the 'endish' events are normalized, so we can just listen on
+ 'end' and know that finish, prefinish, and close will be handled as
+ well.
+- Minipass doesn't waste time [containing
+ zalgo](https://blog.izs.me/2013/08/designing-apis-for-asynchrony).
+- Minipass has built-in support for promises that indicate the end or
+ error: stream.promise(), stream.collect(), and stream.concat().
+- With reliable and consistent timing guarantees, much less
+ error-checking logic is required. We can be more confident that an
+ error is being thrown or emitted in the correct place, rather than in
+ a callback which is deferred, resulting in a hung promise or
+ uncaughtException.
+
+The biggest downside of Minipass is that it lacks some of the internal
+characteristics of node-core streams, which many community modules use
+to identify streams. They have no _writableState or _readableState
+objects, or _read or _write methods. As a result, the is-stream module
+(at least, at the time of this commit) doesn't recognize Minipass
+streams as readable or writable streams.
+
+All in all, the changes required of downstream users should be minimal,
+but are unlikely to be zero. Hence the semver major change.
+
+### Features
+
+* replace all streams with Minipass streams ([f4c0962](https://github.com/npm/cacache/commit/f4c0962))
+* **deps:** Add minipass and minipass-pipeline ([a6545a9](https://github.com/npm/cacache/commit/a6545a9))
+* **promise:** converted .resolve to native promise, converted .map and .reduce to native ([220c56d](https://github.com/npm/cacache/commit/220c56d))
+* **promise:** individually promisifing functions as needed ([74b939e](https://github.com/npm/cacache/commit/74b939e))
+* **promise:** moved .reject from bluebird to native promise ([1d56da1](https://github.com/npm/cacache/commit/1d56da1))
+* **promise:** removed .fromNode, removed .join ([9c457a0](https://github.com/npm/cacache/commit/9c457a0))
+* **promise:** removed .map, replaced with p-map. removed .try ([cc3ee05](https://github.com/npm/cacache/commit/cc3ee05))
+* **promise:** removed .tap ([0260f12](https://github.com/npm/cacache/commit/0260f12))
+* **promise:** removed .using/.disposer ([5d832f3](https://github.com/npm/cacache/commit/5d832f3))
+* **promise:** removed bluebird ([c21298c](https://github.com/npm/cacache/commit/c21298c))
+* **promise:** removed bluebird specific .catch calls ([28aeeac](https://github.com/npm/cacache/commit/28aeeac))
+* **promise:** replaced .reduce and .mapSeries ([478f5cb](https://github.com/npm/cacache/commit/478f5cb))
+
### [12.0.3](https://github.com/npm/cacache/compare/v12.0.2...v12.0.3) (2019-08-19)
@@ -65,7 +175,7 @@ chown everything to root.
The solution is for the user to create a folder, make it user-owned, and
use that, rather than relying on cacache to create the root cache folder.
-If we decide to restore the uid/gid opts, and use ownership inferrence
+If we decide to restore the uid/gid opts, and use ownership inference
only when uid/gid are unset, then take care to also make rm take an
option object, and pass it through to entry-index.js.
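
The 13.0.0 notes above describe swapping mississippi's pump()/pipeline() for minipass-pipeline, with filesystem streaming via fs-minipass. A hedged sketch of that pattern on its own; the content path and digest below are made-up placeholders, not values from this repository:

```javascript
const Pipeline = require('minipass-pipeline')
const fsm = require('fs-minipass')
const ssri = require('ssri')

// Chain a file read through an integrity check. Errors from either stage are
// proxied to the Pipeline itself, so a single handler covers the whole chain.
const pipeline = new Pipeline(
  new fsm.ReadStream('/tmp/my-cache/content-v2/sha512/ba/da/55deadbeef'), // assumed path
  ssri.integrityStream({ integrity: 'sha512-BaSe64+HAsh==' })             // assumed digest
)

// Minipass streams expose promise helpers; concat() resolves with the full buffer.
pipeline
  .concat()
  .then((data) => console.log('verified read of %d bytes', data.length))
  .catch((err) => console.error('pipeline failed:', err))
```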
diff --git a/node_modules/cacache/README.es.md b/node_modules/cacache/README.es.md
deleted file mode 100644
index 55007e20d..000000000
--- a/node_modules/cacache/README.es.md
+++ /dev/null
@@ -1,628 +0,0 @@
-# cacache [![npm version](https://img.shields.io/npm/v/cacache.svg)](https://npm.im/cacache) [![license](https://img.shields.io/npm/l/cacache.svg)](https://npm.im/cacache) [![Travis](https://img.shields.io/travis/zkat/cacache.svg)](https://travis-ci.org/zkat/cacache) [![AppVeyor](https://ci.appveyor.com/api/projects/status/github/zkat/cacache?svg=true)](https://ci.appveyor.com/project/zkat/cacache) [![Coverage Status](https://coveralls.io/repos/github/zkat/cacache/badge.svg?branch=latest)](https://coveralls.io/github/zkat/cacache?branch=latest)
-
-[`cacache`](https://github.com/zkat/cacache) es una librería de Node.js para
-manejar caches locales en disco, con acceso tanto con claves únicas como
-direcciones de contenido (hashes/hacheos). Es súper rápida, excelente con el
-acceso concurrente, y jamás te dará datos incorrectos, aún si se corrompen o
-manipulan directamente los ficheros del cache.
-
-El propósito original era reemplazar el caché local de
-[npm](https://npm.im/npm), pero se puede usar por su propia cuenta.
-
-_Traducciones: [English](README.md)_
-
-## Instalación
-
-`$ npm install --save cacache`
-
-## Índice
-
-* [Ejemplo](#ejemplo)
-* [Características](#características)
-* [Cómo Contribuir](#cómo-contribuir)
-* [API](#api)
- * [Usando el API en español](#localized-api)
- * Leer
- * [`ls`](#ls)
- * [`ls.flujo`](#ls-stream)
- * [`saca`](#get-data)
- * [`saca.flujo`](#get-stream)
- * [`saca.info`](#get-info)
- * [`saca.tieneDatos`](#get-hasContent)
- * Escribir
- * [`mete`](#put-data)
- * [`mete.flujo`](#put-stream)
- * [opciones para `mete*`](#put-options)
- * [`rm.todo`](#rm-all)
- * [`rm.entrada`](#rm-entry)
- * [`rm.datos`](#rm-content)
- * Utilidades
- * [`ponLenguaje`](#set-locale)
- * [`limpiaMemoizado`](#clear-memoized)
- * [`tmp.hazdir`](#tmp-mkdir)
- * [`tmp.conTmp`](#with-tmp)
- * Integridad
- * [Subresource Integrity](#integrity)
- * [`verifica`](#verify)
- * [`verifica.ultimaVez`](#verify-last-run)
-
-### Ejemplo
-
-```javascript
-const cacache = require('cacache/es')
-const fs = require('fs')
-
-const tarbol = '/ruta/a/mi-tar.tgz'
-const rutaCache = '/tmp/my-toy-cache'
-const clave = 'mi-clave-única-1234'
-
-// ¡Añádelo al caché! Usa `rutaCache` como raíz del caché.
-cacache.mete(rutaCache, clave, '10293801983029384').then(integrity => {
- console.log(`Saved content to ${rutaCache}.`)
-})
-
-const destino = '/tmp/mytar.tgz'
-
-// Copia el contenido del caché a otro fichero, pero esta vez con flujos.
-cacache.saca.flujo(
- rutaCache, clave
-).pipe(
- fs.createWriteStream(destino)
-).on('finish', () => {
- console.log('extracción completada')
-})
-
-// La misma cosa, pero accesando el contenido directamente, sin tocar el índice.
-cacache.saca.porHacheo(rutaCache, integridad).then(datos => {
- fs.writeFile(destino, datos, err => {
- console.log('datos del tarbol sacados basado en su sha512, y escrito a otro fichero')
- })
-})
-```
-
-### Características
-
-* Extracción por clave o por dirección de contenido (shasum, etc)
-* Usa el estándard de web, [Subresource Integrity](#integrity)
-* Compatible con multiples algoritmos - usa sha1, sha512, etc, en el mismo caché sin problema
-* Entradas con contenido idéntico comparten ficheros
-* Tolerancia de fallas (inmune a corrupción, ficheros parciales, carreras de proceso, etc)
-* Verificación completa de datos cuando (escribiendo y leyendo)
-* Concurrencia rápida, segura y "lockless"
-* Compatible con `stream`s (flujos)
-* Compatible con `Promise`s (promesas)
-* Bastante rápida -- acceso, incluyendo verificación, en microsegundos
-* Almacenaje de metadatos arbitrarios
-* Colección de basura y verificación adicional fuera de banda
-* Cobertura rigurosa de pruebas
-* Probablente hay un "Bloom filter" por ahí en algún lado. Eso le mola a la gente, ¿Verdad? 🤔
-
-### Cómo Contribuir
-
-El equipo de cacache felizmente acepta contribuciones de código y otras maneras de participación. ¡Hay muchas formas diferentes de contribuir! La [Guía de Colaboradores](CONTRIBUTING.md) (en inglés) tiene toda la información que necesitas para cualquier tipo de contribución: todo desde cómo reportar errores hasta cómo someter parches con nuevas características. Con todo y eso, no se preocupe por si lo que haces está exáctamente correcto: no hay ningún problema en hacer preguntas si algo no está claro, o no lo encuentras.
-
-El equipo de cacache tiene miembros hispanohablantes: es completamente aceptable crear `issues` y `pull requests` en español/castellano.
-
-Todos los participantes en este proyecto deben obedecer el [Código de Conducta](CODE_OF_CONDUCT.md) (en inglés), y en general actuar de forma amable y respetuosa mientras participan en esta comunidad.
-
-Por favor refiérase al [Historial de Cambios](CHANGELOG.md) (en inglés) para detalles sobre cambios importantes incluídos en cada versión.
-
-Finalmente, cacache tiene un sistema de localización de lenguaje. Si te interesa añadir lenguajes o mejorar los que existen, mira en el directorio `./locales` para comenzar.
-
-Happy hacking!
-
-### API
-
-#### <a name="localized-api"></a> Usando el API en español
-
-cacache incluye una traducción completa de su API al castellano, con las mismas
-características. Para usar el API como está documentado en este documento, usa
-`require('cacache/es')`
-
-cacache también tiene otros lenguajes: encuéntralos bajo `./locales`, y podrás
-usar el API en ese lenguaje con `require('cacache/<lenguaje>')`
-
-#### <a name="ls"></a> `> cacache.ls(cache) -> Promise<Object>`
-
-Enumera todas las entradas en el caché, dentro de un solo objeto. Cada entrada
-en el objeto tendrá como clave la clave única usada para el índice, el valor
-siendo un objeto de [`saca.info`](#get-info).
-
-##### Ejemplo
-
-```javascript
-cacache.ls(rutaCache).then(console.log)
-// Salida
-{
- 'my-thing': {
- key: 'my-thing',
- integrity: 'sha512-BaSe64/EnCoDED+HAsh=='
- path: '.testcache/content/deadbeef', // unido con `rutaCache`
- time: 12345698490,
- size: 4023948,
- metadata: {
- name: 'blah',
- version: '1.2.3',
- description: 'this was once a package but now it is my-thing'
- }
- },
- 'other-thing': {
- key: 'other-thing',
- integrity: 'sha1-ANothER+hasH=',
- path: '.testcache/content/bada55',
- time: 11992309289,
- size: 111112
- }
-}
-```
-
-#### <a name="ls-stream"></a> `> cacache.ls.flujo(cache) -> Readable`
-
-Enumera todas las entradas en el caché, emitiendo un objeto de
-[`saca.info`](#get-info) por cada evento de `data` en el flujo.
-
-##### Ejemplo
-
-```javascript
-cacache.ls.flujo(rutaCache).on('data', console.log)
-// Salida
-{
- key: 'my-thing',
- integrity: 'sha512-BaSe64HaSh',
- path: '.testcache/content/deadbeef', // unido con `rutaCache`
- time: 12345698490,
- size: 13423,
- metadata: {
- name: 'blah',
- version: '1.2.3',
- description: 'this was once a package but now it is my-thing'
- }
-}
-
-{
- key: 'other-thing',
- integrity: 'whirlpool-WoWSoMuchSupport',
- path: '.testcache/content/bada55',
- time: 11992309289,
- size: 498023984029
-}
-
-{
- ...
-}
-```
-
-#### <a name="get-data"></a> `> cacache.saca(cache, clave, [ops]) -> Promise({data, metadata, integrity})`
-
-Devuelve un objeto con los datos, hacheo de integridad y metadatos identificados
-por la `clave`. La propiedad `data` de este objeto será una instancia de
-`Buffer` con los datos almacenados en el caché. to do with it! cacache just
-won't care.
-
-`integrity` es un `string` de [Subresource Integrity](#integrity). Dígase, un
-`string` que puede ser usado para verificar a la `data`, que tiene como formato
-`<algoritmo>-<hacheo-integridad-base64>`.
-
-So no existe ninguna entrada identificada por `clave`, o se los datos
-almacenados localmente fallan verificación, el `Promise` fallará.
-
-Una sub-función, `saca.porHacheo`, tiene casi el mismo comportamiento, excepto
-que busca entradas usando el hacheo de integridad, sin tocar el índice general.
-Esta versión *sólo* devuelve `data`, sin ningún objeto conteniéndola.
-
-##### Nota
-
-Esta función lee la entrada completa a la memoria antes de devolverla. Si estás
-almacenando datos Muy Grandes, es posible que [`saca.flujo`](#get-stream) sea
-una mejor solución.
-
-##### Ejemplo
-
-```javascript
-// Busca por clave
-cache.saca(rutaCache, 'my-thing').then(console.log)
-// Salida:
-{
- metadata: {
- thingName: 'my'
- },
- integrity: 'sha512-BaSe64HaSh',
- data: Buffer#<deadbeef>,
- size: 9320
-}
-
-// Busca por hacheo
-cache.saca.porHacheo(rutaCache, 'sha512-BaSe64HaSh').then(console.log)
-// Salida:
-Buffer#<deadbeef>
-```
-
-#### <a name="get-stream"></a> `> cacache.saca.flujo(cache, clave, [ops]) -> Readable`
-
-Devuelve un [Readable
-Stream](https://nodejs.org/api/stream.html#stream_readable_streams) de los datos
-almacenados bajo `clave`.
-
-So no existe ninguna entrada identificada por `clave`, o se los datos
-almacenados localmente fallan verificación, el `Promise` fallará.
-
-`metadata` y `integrity` serán emitidos como eventos antes de que el flujo
-cierre.
-
-Una sub-función, `saca.flujo.porHacheo`, tiene casi el mismo comportamiento,
-excepto que busca entradas usando el hacheo de integridad, sin tocar el índice
-general. Esta versión no emite eventos de `metadata` o `integrity`.
-
-##### Ejemplo
-
-```javascript
-// Busca por clave
-cache.saca.flujo(
- rutaCache, 'my-thing'
-).on('metadata', metadata => {
- console.log('metadata:', metadata)
-}).on('integrity', integrity => {
- console.log('integrity:', integrity)
-}).pipe(
- fs.createWriteStream('./x.tgz')
-)
-// Salidas:
-metadata: { ... }
-integrity: 'sha512-SoMeDIGest+64=='
-
-// Busca por hacheo
-cache.saca.flujo.porHacheo(
- rutaCache, 'sha512-SoMeDIGest+64=='
-).pipe(
- fs.createWriteStream('./x.tgz')
-)
-```
-
-#### <a name="get-info"></a> `> cacache.saca.info(cache, clave) -> Promise`
-
-Busca la `clave` en el índice del caché, devolviendo información sobre la
-entrada si existe.
-
-##### Campos
-
-* `key` - Clave de la entrada. Igual al argumento `clave`.
-* `integrity` - [hacheo de Subresource Integrity](#integrity) del contenido al que se refiere esta entrada.
-* `path` - Dirección del fichero de datos almacenados, unida al argumento `cache`.
-* `time` - Hora de creación de la entrada
-* `metadata` - Metadatos asignados a esta entrada por el usuario
-
-##### Ejemplo
-
-```javascript
-cacache.saca.info(rutaCache, 'my-thing').then(console.log)
-
-// Salida
-{
- key: 'my-thing',
- integrity: 'sha256-MUSTVERIFY+ALL/THINGS=='
- path: '.testcache/content/deadbeef',
- time: 12345698490,
- size: 849234,
- metadata: {
- name: 'blah',
- version: '1.2.3',
- description: 'this was once a package but now it is my-thing'
- }
-}
-```
-
-#### <a name="get-hasContent"></a> `> cacache.saca.tieneDatos(cache, integrity) -> Promise`
-
-Busca un [hacheo Subresource Integrity](#integrity) en el caché. Si existe el
-contenido asociado con `integrity`, devuelve un objeto con dos campos: el hacheo
-_específico_ que se usó para la búsqueda, `sri`, y el tamaño total del
-contenido, `size`. Si no existe ningún contenido asociado con `integrity`,
-devuelve `false`.
-
-##### Ejemplo
-
-```javascript
-cacache.saca.tieneDatos(rutaCache, 'sha256-MUSTVERIFY+ALL/THINGS==').then(console.log)
-
-// Salida
-{
- sri: {
- source: 'sha256-MUSTVERIFY+ALL/THINGS==',
- algorithm: 'sha256',
- digest: 'MUSTVERIFY+ALL/THINGS==',
- options: []
- },
- size: 9001
-}
-
-cacache.saca.tieneDatos(rutaCache, 'sha521-NOT+IN/CACHE==').then(console.log)
-
-// Salida
-false
-```
-
-#### <a name="put-data"></a> `> cacache.mete(cache, clave, datos, [ops]) -> Promise`
-
-Inserta `datos` en el caché. El `Promise` devuelto se resuelve con un hacheo
-(generado conforme a [`ops.algorithms`](#optsalgorithms)) después que la entrada
-haya sido escrita en completo.
-
-##### Ejemplo
-
-```javascript
-fetch(
- 'https://registry.npmjs.org/cacache/-/cacache-1.0.0.tgz'
-).then(datos => {
- return cacache.mete(rutaCache, 'registry.npmjs.org|cacache@1.0.0', datos)
-}).then(integridad => {
- console.log('el hacheo de integridad es', integridad)
-})
-```
-
-#### <a name="put-stream"></a> `> cacache.mete.flujo(cache, clave, [ops]) -> Writable`
-
-Devuelve un [Writable
-Stream](https://nodejs.org/api/stream.html#stream_writable_streams) que inserta
-al caché los datos escritos a él. Emite un evento `integrity` con el hacheo del
-contenido escrito, cuando completa.
-
-##### Ejemplo
-
-```javascript
-request.get(
- 'https://registry.npmjs.org/cacache/-/cacache-1.0.0.tgz'
-).pipe(
- cacache.mete.flujo(
- rutaCache, 'registry.npmjs.org|cacache@1.0.0'
- ).on('integrity', d => console.log(`integrity digest is ${d}`))
-)
-```
-
-#### <a name="put-options"></a> `> opciones para cacache.mete`
-
-La funciones `cacache.mete` tienen un número de opciones en común.
-
-##### `ops.metadata`
-
-Metadatos del usuario que se almacenarán con la entrada.
-
-##### `ops.size`
-
-El tamaño declarado de los datos que se van a insertar. Si es proveído, cacache
-verificará que los datos escritos sean de ese tamaño, o si no, fallará con un
-error con código `EBADSIZE`.
-
-##### `ops.integrity`
-
-El hacheo de integridad de los datos siendo escritos.
-
-Si es proveído, y los datos escritos no le corresponden, la operación fallará
-con un error con código `EINTEGRITY`.
-
-`ops.algorithms` no tiene ningún efecto si esta opción está presente.
-
-##### `ops.algorithms`
-
-Por Defecto: `['sha512']`
-
-Algoritmos que se deben usar cuando se calcule el hacheo de [subresource
-integrity](#integrity) para los datos insertados. Puede usar cualquier algoritmo
-enumerado en `crypto.getHashes()`.
-
-Por el momento, sólo se acepta un algoritmo (dígase, un array con exáctamente un
-valor). No tiene ningún efecto si `ops.integrity` también ha sido proveido.
-
-##### `ops.uid`/`ops.gid`
-
-Si están presentes, cacache hará todo lo posible para asegurarse que todos los
-ficheros creados en el proceso de sus operaciones en el caché usen esta
-combinación en particular.
-
-##### `ops.memoize`
-
-Por Defecto: `null`
-
-Si es verdad, cacache tratará de memoizar los datos de la entrada en memoria. La
-próxima vez que el proceso corriente trate de accesar los datos o entrada,
-cacache buscará en memoria antes de buscar en disco.
-
-Si `ops.memoize` es un objeto regular o un objeto como `Map` (es decir, un
-objeto con métodos `get()` y `set()`), este objeto en sí sera usado en vez del
-caché de memoria global. Esto permite tener lógica específica a tu aplicación
-encuanto al almacenaje en memoria de tus datos.
-
-Si quieres asegurarte que los datos se lean del disco en vez de memoria, usa
-`memoize: false` cuando uses funciones de `cacache.saca`.
-
-#### <a name="rm-all"></a> `> cacache.rm.todo(cache) -> Promise`
-
-Borra el caché completo, incluyendo ficheros temporeros, ficheros de datos, y el
-índice del caché.
-
-##### Ejemplo
-
-```javascript
-cacache.rm.todo(rutaCache).then(() => {
- console.log('THE APOCALYPSE IS UPON US 😱')
-})
-```
-
-#### <a name="rm-entry"></a> `> cacache.rm.entrada(cache, clave) -> Promise`
-
-Alias: `cacache.rm`
-
-Borra la entrada `clave` del índuce. El contenido asociado con esta entrada
-seguirá siendo accesible por hacheo usando
-[`saca.flujo.porHacheo`](#get-stream).
-
-Para borrar el contenido en sí, usa [`rm.datos`](#rm-content). Si quieres hacer
-esto de manera más segura (pues ficheros de contenido pueden ser usados por
-multiples entradas), usa [`verifica`](#verify) para borrar huérfanos.
-
-##### Ejemplo
-
-```javascript
-cacache.rm.entrada(rutaCache, 'my-thing').then(() => {
- console.log('I did not like it anyway')
-})
-```
-
-#### <a name="rm-content"></a> `> cacache.rm.datos(cache, integrity) -> Promise`
-
-Borra el contenido identificado por `integrity`. Cualquier entrada que se
-refiera a este contenido quedarán huérfanas y se invalidarán si se tratan de
-accesar, al menos que contenido idéntico sea añadido bajo `integrity`.
-
-##### Ejemplo
-
-```javascript
-cacache.rm.datos(rutaCache, 'sha512-SoMeDIGest/IN+BaSE64==').then(() => {
- console.log('los datos para `mi-cosa` se borraron')
-})
-```
-
-#### <a name="set-locale"></a> `> cacache.ponLenguaje(locale)`
-
-Configura el lenguaje usado para mensajes y errores de cacache. La lista de
-lenguajes disponibles está en el directorio `./locales` del proyecto.
-
-_Te interesa añadir más lenguajes? [Somete un PR](CONTRIBUTING.md)!_
-
-#### <a name="clear-memoized"></a> `> cacache.limpiaMemoizado()`
-
-Completamente reinicializa el caché de memoria interno. Si estás usando tu
-propio objecto con `ops.memoize`, debes hacer esto de manera específica a él.
-
-#### <a name="tmp-mkdir"></a> `> tmp.hazdir(cache, ops) -> Promise<Path>`
-
-Alias: `tmp.mkdir`
-
-Devuelve un directorio único dentro del directorio `tmp` del caché.
-
-Una vez tengas el directorio, es responsabilidad tuya asegurarte que todos los
-ficheros escrito a él sean creados usando los permisos y `uid`/`gid` concordante
-con el caché. Si no, puedes pedirle a cacache que lo haga llamando a
-[`cacache.tmp.fix()`](#tmp-fix). Esta función arreglará todos los permisos en el
-directorio tmp.
-
-Si quieres que cacache limpie el directorio automáticamente cuando termines, usa
-[`cacache.tmp.conTmp()`](#with-tpm).
-
-##### Ejemplo
-
-```javascript
-cacache.tmp.mkdir(cache).then(dir => {
- fs.writeFile(path.join(dir, 'blablabla'), Buffer#<1234>, ...)
-})
-```
-
-#### <a name="with-tmp"></a> `> tmp.conTmp(cache, ops, cb) -> Promise`
-
-Crea un directorio temporero con [`tmp.mkdir()`](#tmp-mkdir) y ejecuta `cb` con
-él como primer argumento. El directorio creado será removido automáticamente
-cuando el valor devolvido por `cb()` se resuelva.
-
-Las mismas advertencias aplican en cuanto a manejando permisos para los ficheros
-dentro del directorio.
-
-##### Ejemplo
-
-```javascript
-cacache.tmp.conTmp(cache, dir => {
- return fs.writeFileAsync(path.join(dir, 'blablabla'), Buffer#<1234>, ...)
-}).then(() => {
- // `dir` no longer exists
-})
-```
-
-#### <a name="integrity"></a> Hacheos de Subresource Integrity
-
-cacache usa strings que siguen la especificación de [Subresource Integrity
-spec](https://developer.mozilla.org/en-US/docs/Web/Security/Subresource_Integrity).
-
-Es decir, donde quiera cacache espera un argumento o opción `integrity`, ese
-string debería usar el formato `<algoritmo>-<hacheo-base64>`.
-
-Una variación importante sobre los hacheos que cacache acepta es que acepta el
-nombre de cualquier algoritmo aceptado por el proceso de Node.js donde se usa.
-Puedes usar `crypto.getHashes()` para ver cuales están disponibles.
-
-##### Generando tus propios hacheos
-
-Si tienes un `shasum`, en general va a estar en formato de string hexadecimal
-(es decir, un `sha1` se vería como algo así:
-`5f5513f8822fdbe5145af33b64d8d970dcf95c6e`).
-
-Para ser compatible con cacache, necesitas convertir esto a su equivalente en
-subresource integrity. Por ejemplo, el hacheo correspondiente al ejemplo
-anterior sería: `sha1-X1UT+IIv2+UUWvM7ZNjZcNz5XG4=`.
-
-Puedes usar código así para generarlo por tu cuenta:
-
-```javascript
-const crypto = require('crypto')
-const algoritmo = 'sha512'
-const datos = 'foobarbaz'
-
-const integrity = (
- algorithm +
- '-' +
- crypto.createHash(algoritmo).update(datos).digest('base64')
-)
-```
-
-También puedes usar [`ssri`](https://npm.im/ssri) para deferir el trabajo a otra
-librería que garantiza que todo esté correcto, pues maneja probablemente todas
-las operaciones que tendrías que hacer con SRIs, incluyendo convirtiendo entre
-hexadecimal y el formato SRI.
-
-#### <a name="verify"></a> `> cacache.verifica(cache, ops) -> Promise`
-
-Examina y arregla tu caché:
-
-* Limpia entradas inválidas, huérfanas y corrompidas
-* Te deja filtrar cuales entradas retener, con tu propio filtro
-* Reclama cualquier ficheros de contenido sin referencias en el índice
-* Verifica integridad de todos los ficheros de contenido y remueve los malos
-* Arregla permisos del caché
-* Remieve el directorio `tmp` en el caché, y todo su contenido.
-
-Cuando termine, devuelve un objeto con varias estadísticas sobre el proceso de
-verificación, por ejemplo la cantidad de espacio de disco reclamado, el número
-de entradas válidas, número de entradas removidas, etc.
-
-##### Opciones
-
-* `ops.uid` - uid para asignarle al caché y su contenido
-* `ops.gid` - gid para asignarle al caché y su contenido
-* `ops.filter` - recibe una entrada como argumento. Devuelve falso para removerla. Nota: es posible que esta función sea invocada con la misma entrada más de una vez.
-
-##### Example
-
-```sh
-echo somegarbage >> $RUTACACHE/content/deadbeef
-```
-
-```javascript
-cacache.verifica(rutaCache).then(stats => {
- // deadbeef collected, because of invalid checksum.
- console.log('cache is much nicer now! stats:', stats)
-})
-```
-
-#### <a name="verify-last-run"></a> `> cacache.verifica.ultimaVez(cache) -> Promise`
-
-Alias: `últimaVez`
-
-Devuelve un `Date` que representa la última vez que `cacache.verifica` fue
-ejecutada en `cache`.
-
-##### Example
-
-```javascript
-cacache.verifica(rutaCache).then(() => {
- cacache.verifica.ultimaVez(rutaCache).then(última => {
- console.log('La última vez que se usó cacache.verifica() fue ' + última)
- })
-})
-```
diff --git a/node_modules/cacache/README.md b/node_modules/cacache/README.md
index 7f8ec5eec..3f70f49a4 100644
--- a/node_modules/cacache/README.md
+++ b/node_modules/cacache/README.md
@@ -12,8 +12,6 @@ when running as `root`.
It was written to be used as [npm](https://npm.im)'s local cache, but can
just as easily be used on its own.
-_Translations: [español](README.es.md)_
-
## Install
`$ npm install --save cacache`
@@ -35,12 +33,10 @@ _Translations: [español](README.es.md)_
* Writing
* [`put`](#put-data)
* [`put.stream`](#put-stream)
- * [`put*` opts](#put-options)
* [`rm.all`](#rm-all)
* [`rm.entry`](#rm-entry)
* [`rm.content`](#rm-content)
* Utilities
- * [`setLocale`](#set-locale)
* [`clearMemoized`](#clear-memoized)
* [`tmp.mkdir`](#tmp-mkdir)
* [`tmp.withTmp`](#with-tmp)
@@ -52,7 +48,7 @@ _Translations: [español](README.es.md)_
### Example
```javascript
-const cacache = require('cacache/en')
+const cacache = require('cacache')
const fs = require('fs')
const tarball = '/path/to/mytar.tgz'
@@ -95,7 +91,7 @@ cacache.get.byDigest(cachePath, integrityHash).then(data => {
* Lockless, high-concurrency cache access
* Streaming support
* Promise support
-* Pretty darn fast -- sub-millisecond reads and writes including verification
+* Fast -- sub-millisecond reads and writes including verification
* Arbitrary metadata storage
* Garbage collection and additional offline verification
* Thorough test coverage
@@ -113,21 +109,6 @@ Happy hacking!
### API
-#### <a name="localized-api"></a> Using localized APIs
-
-cacache includes a complete API in English, with the same features as other
-translations. To use the English API as documented in this README, use
-`require('cacache/en')`. This is also currently the default if you do
-`require('cacache')`, but may change in the future.
-
-cacache also supports other languages! You can find the list of currently
-supported ones by looking in `./locales` in the source directory. You can use
-the API in that language with `require('cacache/<lang>')`.
-
-Want to add support for a new language? Please go ahead! You should be able to
-copy `./locales/en.js` and `./locales/en.json` and fill them in. Translating the
-`README.md` is a bit more work, but also appreciated if you get around to it. 👍🏼
-
#### <a name="ls"></a> `> cacache.ls(cache) -> Promise<Object>`
Lists info for all entries currently in the cache as a single large object. Each
@@ -219,6 +200,8 @@ A sub-function, `get.byDigest` may be used for identical behavior, except lookup
will happen by integrity hash, bypassing the index entirely. This version of the
function *only* returns `data` itself, without any wrapper.
+See: [options](#get-options)
+
##### Note
This function loads the entire cache entry into memory before returning it. If
@@ -260,6 +243,8 @@ A sub-function, `get.stream.byDigest` may be used for identical behavior,
except lookup will happen by integrity hash, bypassing the index entirely. This
version does not emit the `metadata` and `integrity` events at all.
+See: [options](#get-options)
+
##### Example
```javascript
@@ -346,12 +331,33 @@ cacache.get.hasContent(cachePath, 'sha521-NOT+IN/CACHE==').then(console.log)
false
```
+##### <a name="get-options"></a> Options
+
+##### `opts.integrity`
+If present, the pre-calculated digest for the inserted content. If this option
+is provided and does not match the post-insertion digest, insertion will fail
+with an `EINTEGRITY` error.
+
+##### `opts.memoize`
+
+Default: null
+
+If explicitly truthy, cacache will read from memory and memoize data on bulk read. If `false`, cacache will read from disk data. Reader functions by default read from in-memory cache.
+
+##### `opts.size`
+If provided, the data stream will be verified to check that enough data was
+passed through. If there's more or less data than expected, insertion will fail
+with an `EBADSIZE` error.
+
+
#### <a name="put-data"></a> `> cacache.put(cache, key, data, [opts]) -> Promise`
Inserts data passed to it into the cache. The returned Promise resolves with a
digest (generated according to [`opts.algorithms`](#optsalgorithms)) after the
cache entry has been successfully written.
+See: [options](#put-options)
+
##### Example
```javascript
@@ -371,6 +377,8 @@ Stream](https://nodejs.org/api/stream.html#stream_writable_streams) that inserts
data written to it into the cache. Emits an `integrity` event with the digest of
written contents when it succeeds.
+See: [options](#put-options)
+
##### Example
```javascript
@@ -383,9 +391,7 @@ request.get(
)
```
-#### <a name="put-options"></a> `> cacache.put options`
-
-`cacache.put` functions have a number of options in common.
+##### <a name="put-options"></a> Options
##### `opts.metadata`
@@ -400,7 +406,7 @@ with an `EBADSIZE` error.
##### `opts.integrity`
If present, the pre-calculated digest for the inserted content. If this option
-if provided and does not match the post-insertion digest, insertion will fail
+is provided and does not match the post-insertion digest, insertion will fail
with an `EINTEGRITY` error.
`algorithms` has no effect if this option is present.
@@ -433,6 +439,11 @@ cache.
Reading from disk data can be forced by explicitly passing `memoize: false` to
the reader functions, but their default will be to read from memory.
+##### `opts.tmpPrefix`
+Default: null
+
+Prefix to append on the temporary directory name inside the cache's tmp dir.
+
#### <a name="rm-all"></a> `> cacache.rm.all(cache) -> Promise`
Clears the entire cache. Mainly by blowing away the cache directory itself.
@@ -478,14 +489,6 @@ cacache.rm.content(cachePath, 'sha512-SoMeDIGest/IN+BaSE64==').then(() => {
})
```
-#### <a name="set-locale"></a> `> cacache.setLocale(locale)`
-
-Configure the language/locale used for messages and errors coming from cacache.
-The list of available locales is in the `./locales` directory in the project
-root.
-
-_Interested in contributing more languages! [Submit a PR](CONTRIBUTING.md)!_
-
#### <a name="clear-memoized"></a> `> cacache.clearMemoized()`
Completely resets the in-memory entry cache.
@@ -504,6 +507,8 @@ permissions.
If you want automatic cleanup of this directory, use
[`tmp.withTmp()`](#with-tpm)
+See: [options](#tmp-options)
+
##### Example
```javascript
@@ -537,12 +542,14 @@ cacache.tmp.mkdir(cache).then(dir => {
Creates a temporary directory with [`tmp.mkdir()`](#tmp-mkdir) and calls `cb`
with it. The created temporary directory will be removed when the return value
-of `cb()` resolves -- that is, if you return a Promise from `cb()`, the tmp
-directory will be automatically deleted once that promise completes.
+of `cb()` resolves, the tmp directory will be automatically deleted once that
+promise completes.
The same caveats apply when it comes to managing permissions for the tmp dir's
contents.
+See: [options](#tmp-options)
+
##### Example
```javascript
@@ -553,6 +560,13 @@ cacache.tmp.withTmp(cache, dir => {
})
```
+##### <a name="tmp-options"></a> Options
+
+##### `opts.tmpPrefix`
+Default: null
+
+Prefix to append on the temporary directory name inside the cache's tmp dir.
+
#### <a name="integrity"></a> Subresource Integrity Digests
For content verification and addressing, cacache uses strings following the
@@ -608,10 +622,24 @@ When it's done, it'll return an object with various stats about the verification
process, including amount of storage reclaimed, number of valid entries, number
of entries removed, etc.
-##### Options
+##### <a name="verify-options"></a> Options
+
+##### `opts.concurrency`
-* `opts.filter` - receives a formatted entry. Return false to remove it.
- Note: might be called more than once on the same entry.
+Default: 20
+
+Number of concurrently read files in the filesystem while doing clean up.
+
+##### `opts.filter`
+Receives a formatted entry. Return false to remove it.
+Note: might be called more than once on the same entry.
+
+##### `opts.log`
+Custom logger function:
+```
+ log: { silly () {} }
+ log.silly('verify', 'verifying cache at', cache)
+```
##### Example
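
To make the new `verify` options documented in the hunk above concrete, a hedged usage sketch (the cache path and filter logic are illustrative assumptions, not part of this change):

```javascript
const cacache = require('cacache')
const cachePath = '/tmp/my-toy-cache' // assumed location

cacache.verify(cachePath, {
  concurrency: 10,                          // cap concurrent filesystem reads during cleanup
  filter: (entry) => entry.key !== 'stale', // return false to drop an entry; may be called more than once per entry
  log: { silly (...args) { console.log(...args) } } // custom logger hook, used as log.silly(...)
}).then((stats) => {
  console.log('verification stats (reclaimed space, valid/removed entries):', stats)
})
```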
diff --git a/node_modules/cacache/en.js b/node_modules/cacache/en.js
deleted file mode 100644
index a3db581c9..000000000
--- a/node_modules/cacache/en.js
+++ /dev/null
@@ -1,3 +0,0 @@
-'use strict'
-
-module.exports = require('./locales/en.js')
diff --git a/node_modules/cacache/es.js b/node_modules/cacache/es.js
deleted file mode 100644
index 6282363c3..000000000
--- a/node_modules/cacache/es.js
+++ /dev/null
@@ -1,3 +0,0 @@
-'use strict'
-
-module.exports = require('./locales/es.js')
diff --git a/node_modules/cacache/get.js b/node_modules/cacache/get.js
index 008cb83a9..b6bae1e50 100644
--- a/node_modules/cacache/get.js
+++ b/node_modules/cacache/get.js
@@ -1,21 +1,16 @@
'use strict'
-const BB = require('bluebird')
-
-const figgyPudding = require('figgy-pudding')
+const util = require('util')
const fs = require('fs')
const index = require('./lib/entry-index')
const memo = require('./lib/memoization')
-const pipe = require('mississippi').pipe
-const pipeline = require('mississippi').pipeline
const read = require('./lib/content/read')
-const through = require('mississippi').through
-const GetOpts = figgyPudding({
- integrity: {},
- memoize: {},
- size: {}
-})
+const Minipass = require('minipass')
+const Collect = require('minipass-collect')
+const Pipeline = require('minipass-pipeline')
+
+const writeFile = util.promisify(fs.writeFile)
module.exports = function get (cache, key, opts) {
return getData(false, cache, key, opts)
@@ -23,44 +18,53 @@ module.exports = function get (cache, key, opts) {
module.exports.byDigest = function getByDigest (cache, digest, opts) {
return getData(true, cache, digest, opts)
}
-function getData (byDigest, cache, key, opts) {
- opts = GetOpts(opts)
- const memoized = (
- byDigest
- ? memo.get.byDigest(cache, key, opts)
- : memo.get(cache, key, opts)
- )
- if (memoized && opts.memoize !== false) {
- return BB.resolve(byDigest ? memoized : {
- metadata: memoized.entry.metadata,
- data: memoized.data,
- integrity: memoized.entry.integrity,
- size: memoized.entry.size
- })
+
+function getData (byDigest, cache, key, opts = {}) {
+ const { integrity, memoize, size } = opts
+ const memoized = byDigest
+ ? memo.get.byDigest(cache, key, opts)
+ : memo.get(cache, key, opts)
+ if (memoized && memoize !== false) {
+ return Promise.resolve(
+ byDigest
+ ? memoized
+ : {
+ metadata: memoized.entry.metadata,
+ data: memoized.data,
+ integrity: memoized.entry.integrity,
+ size: memoized.entry.size
+ }
+ )
}
- return (
- byDigest ? BB.resolve(null) : index.find(cache, key, opts)
- ).then(entry => {
- if (!entry && !byDigest) {
- throw new index.NotFoundError(cache, key)
- }
- return read(cache, byDigest ? key : entry.integrity, {
- integrity: opts.integrity,
- size: opts.size
- }).then(data => byDigest ? data : {
- metadata: entry.metadata,
- data: data,
- size: entry.size,
- integrity: entry.integrity
- }).then(res => {
- if (opts.memoize && byDigest) {
- memo.put.byDigest(cache, key, res, opts)
- } else if (opts.memoize) {
- memo.put(cache, entry, res.data, opts)
+ return (byDigest ? Promise.resolve(null) : index.find(cache, key, opts)).then(
+ (entry) => {
+ if (!entry && !byDigest) {
+ throw new index.NotFoundError(cache, key)
}
- return res
- })
- })
+ return read(cache, byDigest ? key : entry.integrity, {
+ integrity,
+ size
+ })
+ .then((data) =>
+ byDigest
+ ? data
+ : {
+ data,
+ metadata: entry.metadata,
+ size: entry.size,
+ integrity: entry.integrity
+ }
+ )
+ .then((res) => {
+ if (memoize && byDigest) {
+ memo.put.byDigest(cache, key, res, opts)
+ } else if (memoize) {
+ memo.put(cache, entry, res.data, opts)
+ }
+ return res
+ })
+ }
+ )
}
module.exports.sync = function get (cache, key, opts) {
@@ -69,33 +73,30 @@ module.exports.sync = function get (cache, key, opts) {
module.exports.sync.byDigest = function getByDigest (cache, digest, opts) {
return getDataSync(true, cache, digest, opts)
}
-function getDataSync (byDigest, cache, key, opts) {
- opts = GetOpts(opts)
- const memoized = (
- byDigest
- ? memo.get.byDigest(cache, key, opts)
- : memo.get(cache, key, opts)
- )
- if (memoized && opts.memoize !== false) {
- return byDigest ? memoized : {
- metadata: memoized.entry.metadata,
- data: memoized.data,
- integrity: memoized.entry.integrity,
- size: memoized.entry.size
- }
+
+function getDataSync (byDigest, cache, key, opts = {}) {
+ const { integrity, memoize, size } = opts
+ const memoized = byDigest
+ ? memo.get.byDigest(cache, key, opts)
+ : memo.get(cache, key, opts)
+ if (memoized && memoize !== false) {
+ return byDigest
+ ? memoized
+ : {
+ metadata: memoized.entry.metadata,
+ data: memoized.data,
+ integrity: memoized.entry.integrity,
+ size: memoized.entry.size
+ }
}
const entry = !byDigest && index.find.sync(cache, key, opts)
if (!entry && !byDigest) {
throw new index.NotFoundError(cache, key)
}
- const data = read.sync(
- cache,
- byDigest ? key : entry.integrity,
- {
- integrity: opts.integrity,
- size: opts.size
- }
- )
+ const data = read.sync(cache, byDigest ? key : entry.integrity, {
+ integrity: integrity,
+ size: size
+ })
const res = byDigest
? data
: {
@@ -104,106 +105,100 @@ function getDataSync (byDigest, cache, key, opts) {
size: entry.size,
integrity: entry.integrity
}
- if (opts.memoize && byDigest) {
+ if (memoize && byDigest) {
memo.put.byDigest(cache, key, res, opts)
- } else if (opts.memoize) {
+ } else if (memoize) {
memo.put(cache, entry, res.data, opts)
}
return res
}
module.exports.stream = getStream
-function getStream (cache, key, opts) {
- opts = GetOpts(opts)
- let stream = through()
+
+const getMemoizedStream = (memoized) => {
+ const stream = new Minipass()
+ stream.on('newListener', function (ev, cb) {
+ ev === 'metadata' && cb(memoized.entry.metadata)
+ ev === 'integrity' && cb(memoized.entry.integrity)
+ ev === 'size' && cb(memoized.entry.size)
+ })
+ stream.end(memoized.data)
+ return stream
+}
+
+function getStream (cache, key, opts = {}) {
+ const { memoize, size } = opts
const memoized = memo.get(cache, key, opts)
- if (memoized && opts.memoize !== false) {
- stream.on('newListener', function (ev, cb) {
- ev === 'metadata' && cb(memoized.entry.metadata)
- ev === 'integrity' && cb(memoized.entry.integrity)
- ev === 'size' && cb(memoized.entry.size)
- })
- stream.write(memoized.data, () => stream.end())
- return stream
+ if (memoized && memoize !== false) {
+ return getMemoizedStream(memoized)
}
- index.find(cache, key).then(entry => {
- if (!entry) {
- return stream.emit(
- 'error', new index.NotFoundError(cache, key)
- )
- }
- let memoStream
- if (opts.memoize) {
- let memoData = []
- let memoLength = 0
- memoStream = through((c, en, cb) => {
- memoData && memoData.push(c)
- memoLength += c.length
- cb(null, c, en)
- }, cb => {
- memoData && memo.put(cache, entry, Buffer.concat(memoData, memoLength), opts)
- cb()
+
+ const stream = new Pipeline()
+ index
+ .find(cache, key)
+ .then((entry) => {
+ if (!entry) {
+ throw new index.NotFoundError(cache, key)
+ }
+ stream.emit('metadata', entry.metadata)
+ stream.emit('integrity', entry.integrity)
+ stream.emit('size', entry.size)
+ stream.on('newListener', function (ev, cb) {
+ ev === 'metadata' && cb(entry.metadata)
+ ev === 'integrity' && cb(entry.integrity)
+ ev === 'size' && cb(entry.size)
})
- } else {
- memoStream = through()
- }
- stream.emit('metadata', entry.metadata)
- stream.emit('integrity', entry.integrity)
- stream.emit('size', entry.size)
- stream.on('newListener', function (ev, cb) {
- ev === 'metadata' && cb(entry.metadata)
- ev === 'integrity' && cb(entry.integrity)
- ev === 'size' && cb(entry.size)
+
+ const src = read.readStream(
+ cache,
+ entry.integrity,
+ { ...opts, size: typeof size !== 'number' ? entry.size : size }
+ )
+
+ if (memoize) {
+ const memoStream = new Collect.PassThrough()
+ memoStream.on('collect', data => memo.put(cache, entry, data, opts))
+ stream.unshift(memoStream)
+ }
+ stream.unshift(src)
})
- pipe(
- read.readStream(cache, entry.integrity, opts.concat({
- size: opts.size == null ? entry.size : opts.size
- })),
- memoStream,
- stream
- )
- }).catch(err => stream.emit('error', err))
+ .catch((err) => stream.emit('error', err))
+
return stream
}
module.exports.stream.byDigest = getStreamDigest
-function getStreamDigest (cache, integrity, opts) {
- opts = GetOpts(opts)
+
+function getStreamDigest (cache, integrity, opts = {}) {
+ const { memoize } = opts
const memoized = memo.get.byDigest(cache, integrity, opts)
- if (memoized && opts.memoize !== false) {
- const stream = through()
- stream.write(memoized, () => stream.end())
+ if (memoized && memoize !== false) {
+ const stream = new Minipass()
+ stream.end(memoized)
return stream
} else {
- let stream = read.readStream(cache, integrity, opts)
- if (opts.memoize) {
- let memoData = []
- let memoLength = 0
- const memoStream = through((c, en, cb) => {
- memoData && memoData.push(c)
- memoLength += c.length
- cb(null, c, en)
- }, cb => {
- memoData && memo.put.byDigest(
- cache,
- integrity,
- Buffer.concat(memoData, memoLength),
- opts
- )
- cb()
- })
- stream = pipeline(stream, memoStream)
+ const stream = read.readStream(cache, integrity, opts)
+ if (!memoize) {
+ return stream
}
- return stream
+ const memoStream = new Collect.PassThrough()
+ memoStream.on('collect', data => memo.put.byDigest(
+ cache,
+ integrity,
+ data,
+ opts
+ ))
+ return new Pipeline(stream, memoStream)
}
}
module.exports.info = info
-function info (cache, key, opts) {
- opts = GetOpts(opts)
+
+function info (cache, key, opts = {}) {
+ const { memoize } = opts
const memoized = memo.get(cache, key, opts)
- if (memoized && opts.memoize !== false) {
- return BB.resolve(memoized.entry)
+ if (memoized && memoize !== false) {
+ return Promise.resolve(memoized.entry)
} else {
return index.find(cache, key)
}
@@ -211,37 +206,50 @@ function info (cache, key, opts) {
module.exports.hasContent = read.hasContent
-module.exports.copy = function cp (cache, key, dest, opts) {
+function cp (cache, key, dest, opts) {
return copy(false, cache, key, dest, opts)
}
-module.exports.copy.byDigest = function cpDigest (cache, digest, dest, opts) {
+
+module.exports.copy = cp
+
+function cpDigest (cache, digest, dest, opts) {
return copy(true, cache, digest, dest, opts)
}
-function copy (byDigest, cache, key, dest, opts) {
- opts = GetOpts(opts)
+
+module.exports.copy.byDigest = cpDigest
+
+function copy (byDigest, cache, key, dest, opts = {}) {
if (read.copy) {
- return (
- byDigest ? BB.resolve(null) : index.find(cache, key, opts)
- ).then(entry => {
+ return (byDigest
+ ? Promise.resolve(null)
+ : index.find(cache, key, opts)
+ ).then((entry) => {
if (!entry && !byDigest) {
throw new index.NotFoundError(cache, key)
}
- return read.copy(
- cache, byDigest ? key : entry.integrity, dest, opts
- ).then(() => byDigest ? key : {
- metadata: entry.metadata,
- size: entry.size,
- integrity: entry.integrity
- })
+ return read
+ .copy(cache, byDigest ? key : entry.integrity, dest, opts)
+ .then(() => {
+ return byDigest
+ ? key
+ : {
+ metadata: entry.metadata,
+ size: entry.size,
+ integrity: entry.integrity
+ }
+ })
})
- } else {
- return getData(byDigest, cache, key, opts).then(res => {
- return fs.writeFileAsync(dest, byDigest ? res : res.data)
- .then(() => byDigest ? key : {
+ }
+
+ return getData(byDigest, cache, key, opts).then((res) => {
+ return writeFile(dest, byDigest ? res : res.data).then(() => {
+ return byDigest
+ ? key
+ : {
metadata: res.metadata,
size: res.size,
integrity: res.integrity
- })
+ }
})
- }
+ })
}
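
A hedged usage sketch of the rewritten get paths above: plain-object opts, native Promises, and Minipass-based streams. The cache path, key, and output file are assumed placeholders:

```javascript
const cacache = require('cacache')
const fs = require('fs')
const cachePath = '/tmp/my-toy-cache'

// Bulk read; memoize keeps the entry in the in-process memory cache for later reads.
cacache.get(cachePath, 'my-key', { memoize: true })
  .then(({ data, metadata, integrity, size }) => {
    console.log(integrity, size, metadata, data.length)
  })
  .catch((err) => console.error('lookup failed:', err.message)) // e.g. a NotFoundError

// Streaming read; metadata/integrity/size are surfaced as events on the stream.
cacache.get.stream(cachePath, 'my-key')
  .on('integrity', (i) => console.log('integrity:', i))
  .on('error', (err) => console.error(err))
  .pipe(fs.createWriteStream('/tmp/out.bin'))
```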
diff --git a/node_modules/cacache/index.js b/node_modules/cacache/index.js
index a3db581c9..08ba14835 100644
--- a/node_modules/cacache/index.js
+++ b/node_modules/cacache/index.js
@@ -1,3 +1,41 @@
'use strict'
-module.exports = require('./locales/en.js')
+const ls = require('./ls.js')
+const get = require('./get.js')
+const put = require('./put.js')
+const rm = require('./rm.js')
+const verify = require('./verify.js')
+const { clearMemoized } = require('./lib/memoization.js')
+const tmp = require('./lib/util/tmp.js')
+
+module.exports.ls = ls
+module.exports.ls.stream = ls.stream
+
+module.exports.get = get
+module.exports.get.byDigest = get.byDigest
+module.exports.get.sync = get.sync
+module.exports.get.sync.byDigest = get.sync.byDigest
+module.exports.get.stream = get.stream
+module.exports.get.stream.byDigest = get.stream.byDigest
+module.exports.get.copy = get.copy
+module.exports.get.copy.byDigest = get.copy.byDigest
+module.exports.get.info = get.info
+module.exports.get.hasContent = get.hasContent
+module.exports.get.hasContent.sync = get.hasContent.sync
+
+module.exports.put = put
+module.exports.put.stream = put.stream
+
+module.exports.rm = rm.entry
+module.exports.rm.all = rm.all
+module.exports.rm.entry = module.exports.rm
+module.exports.rm.content = rm.content
+
+module.exports.clearMemoized = clearMemoized
+
+module.exports.tmp = {}
+module.exports.tmp.mkdir = tmp.mkdir
+module.exports.tmp.withTmp = tmp.withTmp
+
+module.exports.verify = verify
+module.exports.verify.lastRun = verify.lastRun
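
The new root index.js above replaces the locale shims, so `require('cacache')` wires up the whole surface directly (matching the README hunk earlier that drops `require('cacache/en')`). A small sketch of the resulting export shape:

```javascript
const cacache = require('cacache')

// All sub-APIs hang off the single export; no more require('cacache/en') or ('cacache/es').
console.log(typeof cacache.get.stream.byDigest) // 'function'
console.log(typeof cacache.rm.entry)            // 'function' (same function object as cacache.rm)
console.log(typeof cacache.tmp.withTmp)         // 'function'
console.log(typeof cacache.verify.lastRun)      // 'function'
```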
diff --git a/node_modules/cacache/lib/content/path.js b/node_modules/cacache/lib/content/path.js
index c67c28061..ad5a76a4f 100644
--- a/node_modules/cacache/lib/content/path.js
+++ b/node_modules/cacache/lib/content/path.js
@@ -11,16 +11,19 @@ const ssri = require('ssri')
// ~/.my-cache/content-v2/sha512/ba/da/55deadbeefc0ffee
//
module.exports = contentPath
+
function contentPath (cache, integrity) {
const sri = ssri.parse(integrity, { single: true })
// contentPath is the *strongest* algo given
- return path.join.apply(path, [
+ return path.join(
contentDir(cache),
- sri.algorithm
- ].concat(hashToSegments(sri.hexDigest())))
+ sri.algorithm,
+ ...hashToSegments(sri.hexDigest())
+ )
}
-module.exports._contentDir = contentDir
+module.exports.contentDir = contentDir
+
function contentDir (cache) {
return path.join(cache, `content-v${contentVer}`)
}
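
The path.js tweak above only swaps `path.join.apply` for spread syntax; the on-disk layout is the one sketched in the file's own comment (`content-v2/<algo>/ba/da/55deadbeef...`). An illustrative reconstruction, with a fabricated digest and the hex-segmenting inlined as an assumption (2 chars / 2 chars / remainder) rather than pulled from hash-to-segments.js:

```javascript
const path = require('path')
const ssri = require('ssri')

// Fabricated digest purely for illustration: base64 '3q2+7w==' is hex 'deadbeef'.
const sri = ssri.parse('sha512-3q2+7w==', { single: true })
const hex = sri.hexDigest()

const cpath = path.join(
  '/tmp/my-toy-cache', // assumed cache root
  'content-v2',
  sri.algorithm,
  hex.slice(0, 2),
  hex.slice(2, 4),
  hex.slice(4)
)
console.log(cpath) // /tmp/my-toy-cache/content-v2/sha512/de/ad/beef
```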
diff --git a/node_modules/cacache/lib/content/read.js b/node_modules/cacache/lib/content/read.js
index 7929524f8..7cc16482d 100644
--- a/node_modules/cacache/lib/content/read.js
+++ b/node_modules/cacache/lib/content/read.js
@@ -1,104 +1,125 @@
'use strict'
-const BB = require('bluebird')
+const util = require('util')
-const contentPath = require('./path')
-const figgyPudding = require('figgy-pudding')
-const fs = require('graceful-fs')
-const PassThrough = require('stream').PassThrough
-const pipe = BB.promisify(require('mississippi').pipe)
+const fs = require('fs')
+const fsm = require('fs-minipass')
const ssri = require('ssri')
-const Y = require('../util/y.js')
-
-const lstatAsync = BB.promisify(fs.lstat)
-const readFileAsync = BB.promisify(fs.readFile)
+const contentPath = require('./path')
+const Pipeline = require('minipass-pipeline')
-const ReadOpts = figgyPudding({
- size: {}
-})
+const lstat = util.promisify(fs.lstat)
+const readFile = util.promisify(fs.readFile)
module.exports = read
-function read (cache, integrity, opts) {
- opts = ReadOpts(opts)
+
+const MAX_SINGLE_READ_SIZE = 64 * 1024 * 1024
+function read (cache, integrity, opts = {}) {
+ const { size } = opts
return withContentSri(cache, integrity, (cpath, sri) => {
- return readFileAsync(cpath, null).then(data => {
- if (typeof opts.size === 'number' && opts.size !== data.length) {
- throw sizeError(opts.size, data.length)
- } else if (ssri.checkData(data, sri)) {
- return data
- } else {
+ // get size
+ return lstat(cpath).then(stat => ({ stat, cpath, sri }))
+ }).then(({ stat, cpath, sri }) => {
+ if (typeof size === 'number' && stat.size !== size) {
+ throw sizeError(size, stat.size)
+ }
+ if (stat.size > MAX_SINGLE_READ_SIZE) {
+ return readPipeline(cpath, stat.size, sri, new Pipeline()).concat()
+ }
+
+ return readFile(cpath, null).then((data) => {
+ if (!ssri.checkData(data, sri)) {
throw integrityError(sri, cpath)
}
+ return data
})
})
}
+const readPipeline = (cpath, size, sri, stream) => {
+ stream.push(
+ new fsm.ReadStream(cpath, {
+ size,
+ readSize: MAX_SINGLE_READ_SIZE
+ }),
+ ssri.integrityStream({
+ integrity: sri,
+ size
+ })
+ )
+ return stream
+}
+
module.exports.sync = readSync
-function readSync (cache, integrity, opts) {
- opts = ReadOpts(opts)
+
+function readSync (cache, integrity, opts = {}) {
+ const { size } = opts
return withContentSriSync(cache, integrity, (cpath, sri) => {
const data = fs.readFileSync(cpath)
- if (typeof opts.size === 'number' && opts.size !== data.length) {
- throw sizeError(opts.size, data.length)
- } else if (ssri.checkData(data, sri)) {
+ if (typeof size === 'number' && size !== data.length) {
+ throw sizeError(size, data.length)
+ }
+
+ if (ssri.checkData(data, sri)) {
return data
- } else {
- throw integrityError(sri, cpath)
}
+
+ throw integrityError(sri, cpath)
})
}
module.exports.stream = readStream
module.exports.readStream = readStream
-function readStream (cache, integrity, opts) {
- opts = ReadOpts(opts)
- const stream = new PassThrough()
+
+function readStream (cache, integrity, opts = {}) {
+ const { size } = opts
+ const stream = new Pipeline()
withContentSri(cache, integrity, (cpath, sri) => {
- return lstatAsync(cpath).then(stat => ({ cpath, sri, stat }))
- }).then(({ cpath, sri, stat }) => {
- return pipe(
- fs.createReadStream(cpath),
- ssri.integrityStream({
- integrity: sri,
- size: opts.size
- }),
- stream
- )
- }).catch(err => {
- stream.emit('error', err)
- })
+ // just lstat to ensure it exists
+ return lstat(cpath).then((stat) => ({ stat, cpath, sri }))
+ }).then(({ stat, cpath, sri }) => {
+ if (typeof size === 'number' && size !== stat.size) {
+ return stream.emit('error', sizeError(size, stat.size))
+ }
+ readPipeline(cpath, stat.size, sri, stream)
+ }, er => stream.emit('error', er))
+
return stream
}
-let copyFileAsync
+let copyFile
if (fs.copyFile) {
module.exports.copy = copy
module.exports.copy.sync = copySync
- copyFileAsync = BB.promisify(fs.copyFile)
+ copyFile = util.promisify(fs.copyFile)
}
-function copy (cache, integrity, dest, opts) {
- opts = ReadOpts(opts)
+function copy (cache, integrity, dest) {
return withContentSri(cache, integrity, (cpath, sri) => {
- return copyFileAsync(cpath, dest)
+ return copyFile(cpath, dest)
})
}
-function copySync (cache, integrity, dest, opts) {
- opts = ReadOpts(opts)
+function copySync (cache, integrity, dest) {
return withContentSriSync(cache, integrity, (cpath, sri) => {
return fs.copyFileSync(cpath, dest)
})
}
module.exports.hasContent = hasContent
+
function hasContent (cache, integrity) {
- if (!integrity) { return BB.resolve(false) }
+ if (!integrity) {
+ return Promise.resolve(false)
+ }
return withContentSri(cache, integrity, (cpath, sri) => {
- return lstatAsync(cpath).then(stat => ({ size: stat.size, sri, stat }))
- }).catch(err => {
- if (err.code === 'ENOENT') { return false }
+ return lstat(cpath).then((stat) => ({ size: stat.size, sri, stat }))
+ }).catch((err) => {
+ if (err.code === 'ENOENT') {
+ return false
+ }
if (err.code === 'EPERM') {
+ /* istanbul ignore else */
if (process.platform !== 'win32') {
throw err
} else {
@@ -109,15 +130,21 @@ function hasContent (cache, integrity) {
}
module.exports.hasContent.sync = hasContentSync
+
function hasContentSync (cache, integrity) {
- if (!integrity) { return false }
+ if (!integrity) {
+ return false
+ }
return withContentSriSync(cache, integrity, (cpath, sri) => {
try {
const stat = fs.lstatSync(cpath)
return { size: stat.size, sri, stat }
} catch (err) {
- if (err.code === 'ENOENT') { return false }
+ if (err.code === 'ENOENT') {
+ return false
+ }
if (err.code === 'EPERM') {
+ /* istanbul ignore else */
if (process.platform !== 'win32') {
throw err
} else {
@@ -129,30 +156,58 @@ function hasContentSync (cache, integrity) {
}
function withContentSri (cache, integrity, fn) {
- return BB.try(() => {
+ const tryFn = () => {
const sri = ssri.parse(integrity)
// If `integrity` has multiple entries, pick the first digest
// with available local data.
const algo = sri.pickAlgorithm()
const digests = sri[algo]
+
if (digests.length <= 1) {
const cpath = contentPath(cache, digests[0])
return fn(cpath, digests[0])
} else {
- return BB.any(sri[sri.pickAlgorithm()].map(meta => {
- return withContentSri(cache, meta, fn)
- }, { concurrency: 1 }))
- .catch(err => {
- if ([].some.call(err, e => e.code === 'ENOENT')) {
- throw Object.assign(
- new Error('No matching content found for ' + sri.toString()),
- { code: 'ENOENT' }
- )
- } else {
- throw err[0]
+ // Can't use Promise.race here because a generic error can arrive before an ENOENT error, or before a valid result
+ return Promise
+ .all(digests.map((meta) => {
+ return withContentSri(cache, meta, fn)
+ .catch((err) => {
+ if (err.code === 'ENOENT') {
+ return Object.assign(
+ new Error('No matching content found for ' + sri.toString()),
+ { code: 'ENOENT' }
+ )
+ }
+ return err
+ })
+ }))
+ .then((results) => {
+ // Return the first non-error result, if any
+ const result = results.find((r) => !(r instanceof Error))
+ if (result) {
+ return result
+ }
+
+ // Throw the No matching content found error
+ const enoentError = results.find((r) => r.code === 'ENOENT')
+ if (enoentError) {
+ throw enoentError
}
+
+ // Throw generic error
+ throw results.find((r) => r instanceof Error)
})
}
+ }
+
+ return new Promise((resolve, reject) => {
+ try {
+ tryFn()
+ .then(resolve)
+ .catch(reject)
+ } catch (err) {
+ reject(err)
+ }
})
}
@@ -167,19 +222,19 @@ function withContentSriSync (cache, integrity, fn) {
return fn(cpath, digests[0])
} else {
let lastErr = null
- for (const meta of sri[sri.pickAlgorithm()]) {
+ for (const meta of digests) {
try {
return withContentSriSync(cache, meta, fn)
} catch (err) {
lastErr = err
}
}
- if (lastErr) { throw lastErr }
+ throw lastErr
}
}
function sizeError (expected, found) {
- var err = new Error(Y`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`)
+ const err = new Error(`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`)
err.expected = expected
err.found = found
err.code = 'EBADSIZE'
@@ -187,7 +242,7 @@ function sizeError (expected, found) {
}
function integrityError (sri, path) {
- var err = new Error(Y`Integrity verification failed for ${sri} (${path})`)
+ const err = new Error(`Integrity verification failed for ${sri} (${path})`)
err.code = 'EINTEGRITY'
err.sri = sri
err.path = path
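
The Promise.all block above replaces Bluebird's BB.any: every candidate digest is tried, rejections are captured as values, the first non-error result wins, and an ENOENT failure is preferred over generic errors when nothing succeeds. A minimal standalone sketch of that pattern (the helper name is made up for illustration):

function firstSuccessful (promises) {
  // capture rejections as values so Promise.all never short-circuits
  return Promise.all(promises.map((p) => p.catch((err) => err)))
    .then((results) => {
      const ok = results.find((r) => !(r instanceof Error))
      if (ok !== undefined) {
        return ok
      }
      // prefer a "not found" failure, otherwise surface the first generic error
      throw results.find((r) => r.code === 'ENOENT') || results[0]
    })
}
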
diff --git a/node_modules/cacache/lib/content/rm.js b/node_modules/cacache/lib/content/rm.js
index 12cf15823..50612364e 100644
--- a/node_modules/cacache/lib/content/rm.js
+++ b/node_modules/cacache/lib/content/rm.js
@@ -1,19 +1,18 @@
'use strict'
-const BB = require('bluebird')
+const util = require('util')
const contentPath = require('./path')
-const hasContent = require('./read').hasContent
-const rimraf = BB.promisify(require('rimraf'))
+const { hasContent } = require('./read')
+const rimraf = util.promisify(require('rimraf'))
module.exports = rm
+
function rm (cache, integrity) {
- return hasContent(cache, integrity).then(content => {
- if (content) {
- const sri = content.sri
- if (sri) {
- return rimraf(contentPath(cache, sri)).then(() => true)
- }
+ return hasContent(cache, integrity).then((content) => {
+ // ~pretty~ sure we can't end up with a content lacking sri, but be safe
+ if (content && content.sri) {
+ return rimraf(contentPath(cache, content.sri)).then(() => true)
} else {
return false
}
diff --git a/node_modules/cacache/lib/content/write.js b/node_modules/cacache/lib/content/write.js
index 4d96a3cff..e8f3e3534 100644
--- a/node_modules/cacache/lib/content/write.js
+++ b/node_modules/cacache/lib/content/write.js
@@ -1,152 +1,172 @@
'use strict'
-const BB = require('bluebird')
+const util = require('util')
const contentPath = require('./path')
const fixOwner = require('../util/fix-owner')
-const fs = require('graceful-fs')
+const fs = require('fs')
const moveFile = require('../util/move-file')
-const PassThrough = require('stream').PassThrough
+const Minipass = require('minipass')
+const Pipeline = require('minipass-pipeline')
+const Flush = require('minipass-flush')
const path = require('path')
-const pipe = BB.promisify(require('mississippi').pipe)
-const rimraf = BB.promisify(require('rimraf'))
+const rimraf = util.promisify(require('rimraf'))
const ssri = require('ssri')
-const to = require('mississippi').to
const uniqueFilename = require('unique-filename')
-const Y = require('../util/y.js')
+const { disposer } = require('./../util/disposer')
+const fsm = require('fs-minipass')
-const writeFileAsync = BB.promisify(fs.writeFile)
+const writeFile = util.promisify(fs.writeFile)
module.exports = write
-function write (cache, data, opts) {
- opts = opts || {}
- if (opts.algorithms && opts.algorithms.length > 1) {
- throw new Error(
- Y`opts.algorithms only supports a single algorithm for now`
- )
+
+function write (cache, data, opts = {}) {
+ const { algorithms, size, integrity } = opts
+ if (algorithms && algorithms.length > 1) {
+ throw new Error('opts.algorithms only supports a single algorithm for now')
}
- if (typeof opts.size === 'number' && data.length !== opts.size) {
- return BB.reject(sizeError(opts.size, data.length))
+ if (typeof size === 'number' && data.length !== size) {
+ return Promise.reject(sizeError(size, data.length))
}
- const sri = ssri.fromData(data, {
- algorithms: opts.algorithms
- })
- if (opts.integrity && !ssri.checkData(data, opts.integrity, opts)) {
- return BB.reject(checksumError(opts.integrity, sri))
+ const sri = ssri.fromData(data, algorithms ? { algorithms } : {})
+ if (integrity && !ssri.checkData(data, integrity, opts)) {
+ return Promise.reject(checksumError(integrity, sri))
}
- return BB.using(makeTmp(cache, opts), tmp => (
- writeFileAsync(
- tmp.target, data, { flag: 'wx' }
- ).then(() => (
- moveToDestination(tmp, cache, sri, opts)
- ))
- )).then(() => ({ integrity: sri, size: data.length }))
+
+ return disposer(makeTmp(cache, opts), makeTmpDisposer,
+ (tmp) => {
+ return writeFile(tmp.target, data, { flag: 'wx' })
+ .then(() => moveToDestination(tmp, cache, sri, opts))
+ })
+ .then(() => ({ integrity: sri, size: data.length }))
}
module.exports.stream = writeStream
-function writeStream (cache, opts) {
- opts = opts || {}
- const inputStream = new PassThrough()
- let inputErr = false
- function errCheck () {
- if (inputErr) { throw inputErr }
+
+// Writes are proxied to the internal 'inputStream' that is handed to handleContent();
+// 'end' is deferred until that content has been handled.
+class CacacheWriteStream extends Flush {
+ constructor (cache, opts) {
+ super()
+ this.opts = opts
+ this.cache = cache
+ this.inputStream = new Minipass()
+ this.inputStream.on('error', er => this.emit('error', er))
+ this.inputStream.on('drain', () => this.emit('drain'))
+ this.handleContentP = null
}
- let allDone
- const ret = to((c, n, cb) => {
- if (!allDone) {
- allDone = handleContent(inputStream, cache, opts, errCheck)
+ write (chunk, encoding, cb) {
+ if (!this.handleContentP) {
+ this.handleContentP = handleContent(
+ this.inputStream,
+ this.cache,
+ this.opts
+ )
}
- inputStream.write(c, n, cb)
- }, cb => {
- inputStream.end(() => {
- if (!allDone) {
- const e = new Error(Y`Cache input stream was empty`)
+ return this.inputStream.write(chunk, encoding, cb)
+ }
+
+ flush (cb) {
+ this.inputStream.end(() => {
+ if (!this.handleContentP) {
+ const e = new Error('Cache input stream was empty')
e.code = 'ENODATA'
- return ret.emit('error', e)
+ // empty streams are probably emitting end right away.
+ // defer this one tick by rejecting a promise on it.
+ return Promise.reject(e).catch(cb)
}
- allDone.then(res => {
- res.integrity && ret.emit('integrity', res.integrity)
- res.size !== null && ret.emit('size', res.size)
- cb()
- }, e => {
- ret.emit('error', e)
- })
+ this.handleContentP.then(
+ (res) => {
+ res.integrity && this.emit('integrity', res.integrity)
+ res.size !== null && this.emit('size', res.size)
+ cb()
+ },
+ (er) => cb(er)
+ )
})
- })
- ret.once('error', e => {
- inputErr = e
- })
- return ret
+ }
}
-function handleContent (inputStream, cache, opts, errCheck) {
- return BB.using(makeTmp(cache, opts), tmp => {
- errCheck()
- return pipeToTmp(
- inputStream, cache, tmp.target, opts, errCheck
- ).then(res => {
- return moveToDestination(
- tmp, cache, res.integrity, opts, errCheck
- ).then(() => res)
- })
+function writeStream (cache, opts = {}) {
+ return new CacacheWriteStream(cache, opts)
+}
+
+function handleContent (inputStream, cache, opts) {
+ return disposer(makeTmp(cache, opts), makeTmpDisposer, (tmp) => {
+ return pipeToTmp(inputStream, cache, tmp.target, opts)
+ .then((res) => {
+ return moveToDestination(
+ tmp,
+ cache,
+ res.integrity,
+ opts
+ ).then(() => res)
+ })
})
}
-function pipeToTmp (inputStream, cache, tmpTarget, opts, errCheck) {
- return BB.resolve().then(() => {
- let integrity
- let size
- const hashStream = ssri.integrityStream({
- integrity: opts.integrity,
- algorithms: opts.algorithms,
- size: opts.size
- }).on('integrity', s => {
- integrity = s
- }).on('size', s => {
- size = s
- })
- const outStream = fs.createWriteStream(tmpTarget, {
- flags: 'wx'
- })
- errCheck()
- return pipe(inputStream, hashStream, outStream).then(() => {
- return { integrity, size }
- }).catch(err => {
- return rimraf(tmpTarget).then(() => { throw err })
- })
+function pipeToTmp (inputStream, cache, tmpTarget, opts) {
+ let integrity
+ let size
+ const hashStream = ssri.integrityStream({
+ integrity: opts.integrity,
+ algorithms: opts.algorithms,
+ size: opts.size
})
+ hashStream.on('integrity', i => { integrity = i })
+ hashStream.on('size', s => { size = s })
+
+ const outStream = new fsm.WriteStream(tmpTarget, {
+ flags: 'wx'
+ })
+
+ // NB: this can throw if the hashStream has a problem with
+ // it even after the data has been fully written, but pipeToTmp is only
+ // called in promise contexts where that is handled.
+ const pipeline = new Pipeline(
+ inputStream,
+ hashStream,
+ outStream
+ )
+
+ return pipeline.promise()
+ .then(() => ({ integrity, size }))
+ .catch(er => rimraf(tmpTarget).then(() => { throw er }))
}
function makeTmp (cache, opts) {
const tmpTarget = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix)
- return fixOwner.mkdirfix(
- cache, path.dirname(tmpTarget)
- ).then(() => ({
+ return fixOwner.mkdirfix(cache, path.dirname(tmpTarget)).then(() => ({
target: tmpTarget,
moved: false
- })).disposer(tmp => (!tmp.moved && rimraf(tmp.target)))
+ }))
+}
+
+function makeTmpDisposer (tmp) {
+ if (tmp.moved) {
+ return Promise.resolve()
+ }
+ return rimraf(tmp.target)
}
-function moveToDestination (tmp, cache, sri, opts, errCheck) {
- errCheck && errCheck()
+function moveToDestination (tmp, cache, sri, opts) {
const destination = contentPath(cache, sri)
const destDir = path.dirname(destination)
- return fixOwner.mkdirfix(
- cache, destDir
- ).then(() => {
- errCheck && errCheck()
- return moveFile(tmp.target, destination)
- }).then(() => {
- errCheck && errCheck()
- tmp.moved = true
- return fixOwner.chownr(cache, destination)
- })
+ return fixOwner
+ .mkdirfix(cache, destDir)
+ .then(() => {
+ return moveFile(tmp.target, destination)
+ })
+ .then(() => {
+ tmp.moved = true
+ return fixOwner.chownr(cache, destination)
+ })
}
function sizeError (expected, found) {
- var err = new Error(Y`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`)
+ const err = new Error(`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`)
err.expected = expected
err.found = found
err.code = 'EBADSIZE'
@@ -154,7 +174,7 @@ function sizeError (expected, found) {
}
function checksumError (expected, found) {
- var err = new Error(Y`Integrity check failed:
+ const err = new Error(`Integrity check failed:
Wanted: ${expected}
Found: ${found}`)
err.code = 'EINTEGRITY'
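
CacacheWriteStream defers the real work until the first write() and surfaces the result through the 'integrity' and 'size' events emitted from flush(). A hedged sketch of how a caller (ultimately put.stream) consumes the stream returned by the writeStream factory defined above; the cache path and input file are hypothetical:

const fs = require('fs')
const ws = writeStream('/tmp/my-cache', {})     // factory defined above
ws.on('integrity', (sri) => console.log('stored as', sri.toString()))
ws.on('size', (bytes) => console.log('wrote', bytes, 'bytes'))
ws.on('error', (err) => console.error('write failed', err))
fs.createReadStream('package.tgz').pipe(ws)     // hypothetical input file
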
diff --git a/node_modules/cacache/lib/entry-index.js b/node_modules/cacache/lib/entry-index.js
index dee1824b1..58b205bfe 100644
--- a/node_modules/cacache/lib/entry-index.js
+++ b/node_modules/cacache/lib/entry-index.js
@@ -1,94 +1,87 @@
'use strict'
-const BB = require('bluebird')
+const util = require('util')
-const contentPath = require('./content/path')
const crypto = require('crypto')
-const figgyPudding = require('figgy-pudding')
-const fixOwner = require('./util/fix-owner')
-const fs = require('graceful-fs')
-const hashToSegments = require('./util/hash-to-segments')
-const ms = require('mississippi')
+const fs = require('fs')
+const Minipass = require('minipass')
const path = require('path')
const ssri = require('ssri')
-const Y = require('./util/y.js')
-
+const contentPath = require('./content/path')
+const fixOwner = require('./util/fix-owner')
+const hashToSegments = require('./util/hash-to-segments')
const indexV = require('../package.json')['cache-version'].index
-const appendFileAsync = BB.promisify(fs.appendFile)
-const readFileAsync = BB.promisify(fs.readFile)
-const readdirAsync = BB.promisify(fs.readdir)
-const concat = ms.concat
-const from = ms.from
+const appendFile = util.promisify(fs.appendFile)
+const readFile = util.promisify(fs.readFile)
+const readdir = util.promisify(fs.readdir)
module.exports.NotFoundError = class NotFoundError extends Error {
constructor (cache, key) {
- super(Y`No cache entry for \`${key}\` found in \`${cache}\``)
+ super(`No cache entry for ${key} found in ${cache}`)
this.code = 'ENOENT'
this.cache = cache
this.key = key
}
}
-const IndexOpts = figgyPudding({
- metadata: {},
- size: {}
-})
-
module.exports.insert = insert
-function insert (cache, key, integrity, opts) {
- opts = IndexOpts(opts)
+
+function insert (cache, key, integrity, opts = {}) {
+ const { metadata, size } = opts
const bucket = bucketPath(cache, key)
const entry = {
key,
integrity: integrity && ssri.stringify(integrity),
time: Date.now(),
- size: opts.size,
- metadata: opts.metadata
+ size,
+ metadata
}
- return fixOwner.mkdirfix(
- cache, path.dirname(bucket)
- ).then(() => {
- const stringified = JSON.stringify(entry)
- // NOTE - Cleverness ahoy!
- //
- // This works because it's tremendously unlikely for an entry to corrupt
- // another while still preserving the string length of the JSON in
- // question. So, we just slap the length in there and verify it on read.
- //
- // Thanks to @isaacs for the whiteboarding session that ended up with this.
- return appendFileAsync(
- bucket, `\n${hashEntry(stringified)}\t${stringified}`
- )
- }).then(
- () => fixOwner.chownr(cache, bucket)
- ).catch({ code: 'ENOENT' }, () => {
- // There's a class of race conditions that happen when things get deleted
- // during fixOwner, or between the two mkdirfix/chownr calls.
- //
- // It's perfectly fine to just not bother in those cases and lie
- // that the index entry was written. Because it's a cache.
- }).then(() => {
- return formatEntry(cache, entry)
- })
+ return fixOwner
+ .mkdirfix(cache, path.dirname(bucket))
+ .then(() => {
+ const stringified = JSON.stringify(entry)
+ // NOTE - Cleverness ahoy!
+ //
+ // This works because it's tremendously unlikely for an entry to corrupt
+ // another while still preserving the string length of the JSON in
+ // question. So, we just slap the length in there and verify it on read.
+ //
+ // Thanks to @isaacs for the whiteboarding session that ended up with this.
+ return appendFile(bucket, `\n${hashEntry(stringified)}\t${stringified}`)
+ })
+ .then(() => fixOwner.chownr(cache, bucket))
+ .catch((err) => {
+ if (err.code === 'ENOENT') {
+ return undefined
+ }
+ throw err
+ // There's a class of race conditions that happen when things get deleted
+ // during fixOwner, or between the two mkdirfix/chownr calls.
+ //
+ // It's perfectly fine to just not bother in those cases and lie
+ // that the index entry was written. Because it's a cache.
+ })
+ .then(() => {
+ return formatEntry(cache, entry)
+ })
}
module.exports.insert.sync = insertSync
-function insertSync (cache, key, integrity, opts) {
- opts = IndexOpts(opts)
+
+function insertSync (cache, key, integrity, opts = {}) {
+ const { metadata, size } = opts
const bucket = bucketPath(cache, key)
const entry = {
key,
integrity: integrity && ssri.stringify(integrity),
time: Date.now(),
- size: opts.size,
- metadata: opts.metadata
+ size,
+ metadata
}
fixOwner.mkdirfix.sync(cache, path.dirname(bucket))
const stringified = JSON.stringify(entry)
- fs.appendFileSync(
- bucket, `\n${hashEntry(stringified)}\t${stringified}`
- )
+ fs.appendFileSync(bucket, `\n${hashEntry(stringified)}\t${stringified}`)
try {
fixOwner.chownr.sync(cache, bucket)
} catch (err) {
@@ -100,26 +93,30 @@ function insertSync (cache, key, integrity, opts) {
}
module.exports.find = find
+
function find (cache, key) {
const bucket = bucketPath(cache, key)
- return bucketEntries(bucket).then(entries => {
- return entries.reduce((latest, next) => {
- if (next && next.key === key) {
- return formatEntry(cache, next)
+ return bucketEntries(bucket)
+ .then((entries) => {
+ return entries.reduce((latest, next) => {
+ if (next && next.key === key) {
+ return formatEntry(cache, next)
+ } else {
+ return latest
+ }
+ }, null)
+ })
+ .catch((err) => {
+ if (err.code === 'ENOENT') {
+ return null
} else {
- return latest
+ throw err
}
- }, null)
- }).catch(err => {
- if (err.code === 'ENOENT') {
- return null
- } else {
- throw err
- }
- })
+ })
}
module.exports.find.sync = findSync
+
function findSync (cache, key) {
const bucket = bucketPath(cache, key)
try {
@@ -140,70 +137,80 @@ function findSync (cache, key) {
}
module.exports.delete = del
+
function del (cache, key, opts) {
return insert(cache, key, null, opts)
}
module.exports.delete.sync = delSync
+
function delSync (cache, key, opts) {
return insertSync(cache, key, null, opts)
}
module.exports.lsStream = lsStream
+
function lsStream (cache) {
const indexDir = bucketDir(cache)
- const stream = from.obj()
-
- // "/cachename/*"
- readdirOrEmpty(indexDir).map(bucket => {
- const bucketPath = path.join(indexDir, bucket)
-
- // "/cachename/<bucket 0xFF>/*"
- return readdirOrEmpty(bucketPath).map(subbucket => {
- const subbucketPath = path.join(bucketPath, subbucket)
-
- // "/cachename/<bucket 0xFF>/<bucket 0xFF>/*"
- return readdirOrEmpty(subbucketPath).map(entry => {
- const getKeyToEntry = bucketEntries(
- path.join(subbucketPath, entry)
- ).reduce((acc, entry) => {
- acc.set(entry.key, entry)
- return acc
- }, new Map())
-
- return getKeyToEntry.then(reduced => {
- for (let entry of reduced.values()) {
- const formatted = formatEntry(cache, entry)
- formatted && stream.push(formatted)
- }
- }).catch({ code: 'ENOENT' }, nop)
- })
+ const stream = new Minipass({ objectMode: true })
+
+ readdirOrEmpty(indexDir).then(buckets => Promise.all(
+ buckets.map(bucket => {
+ const bucketPath = path.join(indexDir, bucket)
+ return readdirOrEmpty(bucketPath).then(subbuckets => Promise.all(
+ subbuckets.map(subbucket => {
+ const subbucketPath = path.join(bucketPath, subbucket)
+
+ // "/cachename/<bucket 0xFF>/<bucket 0xFF>./*"
+ return readdirOrEmpty(subbucketPath).then(entries => Promise.all(
+ entries.map(entry => {
+ const entryPath = path.join(subbucketPath, entry)
+ return bucketEntries(entryPath).then(entries =>
+ // using a Map here prevents duplicate keys from
+ // showing up twice; later entries overwrite earlier ones
+ entries.reduce((acc, entry) => {
+ acc.set(entry.key, entry)
+ return acc
+ }, new Map())
+ ).then(reduced => {
+ // reduced is a map of key => entry
+ for (const entry of reduced.values()) {
+ const formatted = formatEntry(cache, entry)
+ if (formatted) {
+ stream.write(formatted)
+ }
+ }
+ }).catch(err => {
+ if (err.code === 'ENOENT') { return undefined }
+ throw err
+ })
+ })
+ ))
+ })
+ ))
})
- }).then(() => {
- stream.push(null)
- }, err => {
- stream.emit('error', err)
- })
+ ))
+ .then(
+ () => stream.end(),
+ err => stream.emit('error', err)
+ )
return stream
}
module.exports.ls = ls
+
function ls (cache) {
- return BB.fromNode(cb => {
- lsStream(cache).on('error', cb).pipe(concat(entries => {
- cb(null, entries.reduce((acc, xs) => {
- acc[xs.key] = xs
- return acc
- }, {}))
- }))
- })
+ return lsStream(cache).collect().then(entries =>
+ entries.reduce((acc, xs) => {
+ acc[xs.key] = xs
+ return acc
+ }, {})
+ )
}
function bucketEntries (bucket, filter) {
- return readFileAsync(
- bucket, 'utf8'
- ).then(data => _bucketEntries(data, filter))
+ return readFile(bucket, 'utf8').then((data) => _bucketEntries(data, filter))
}
function bucketEntriesSync (bucket, filter) {
@@ -212,9 +219,11 @@ function bucketEntriesSync (bucket, filter) {
}
function _bucketEntries (data, filter) {
- let entries = []
- data.split('\n').forEach(entry => {
- if (!entry) { return }
+ const entries = []
+ data.split('\n').forEach((entry) => {
+ if (!entry) {
+ return
+ }
const pieces = entry.split('\t')
if (!pieces[1] || hashEntry(pieces[1]) !== pieces[0]) {
// Hash is no good! Corruption or malice? Doesn't matter!
@@ -235,25 +244,30 @@ function _bucketEntries (data, filter) {
return entries
}
-module.exports._bucketDir = bucketDir
+module.exports.bucketDir = bucketDir
+
function bucketDir (cache) {
return path.join(cache, `index-v${indexV}`)
}
-module.exports._bucketPath = bucketPath
+module.exports.bucketPath = bucketPath
+
function bucketPath (cache, key) {
const hashed = hashKey(key)
- return path.join.apply(path, [bucketDir(cache)].concat(
- hashToSegments(hashed)
- ))
+ return path.join.apply(
+ path,
+ [bucketDir(cache)].concat(hashToSegments(hashed))
+ )
}
-module.exports._hashKey = hashKey
+module.exports.hashKey = hashKey
+
function hashKey (key) {
return hash(key, 'sha256')
}
-module.exports._hashEntry = hashEntry
+module.exports.hashEntry = hashEntry
+
function hashEntry (str) {
return hash(str, 'sha1')
}
@@ -267,7 +281,9 @@ function hash (str, digest) {
function formatEntry (cache, entry) {
// Treat null digests as deletions. They'll shadow any previous entries.
- if (!entry.integrity) { return null }
+ if (!entry.integrity) {
+ return null
+ }
return {
key: entry.key,
integrity: entry.integrity,
@@ -279,10 +295,11 @@ function formatEntry (cache, entry) {
}
function readdirOrEmpty (dir) {
- return readdirAsync(dir)
- .catch({ code: 'ENOENT' }, () => [])
- .catch({ code: 'ENOTDIR' }, () => [])
-}
+ return readdir(dir).catch((err) => {
+ if (err.code === 'ENOENT' || err.code === 'ENOTDIR') {
+ return []
+ }
-function nop () {
+ throw err
+ })
}
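
The "Cleverness ahoy!" note describes the append-only bucket format: each line is the SHA-1 of the stringified JSON entry, a tab, then the JSON itself, and readers silently drop any line whose hash doesn't match. A minimal sketch of that read-side check, assuming hashEntry produces a hex SHA-1 digest and that getBucketLine is a hypothetical stand-in for one non-empty line of a bucket file:

const crypto = require('crypto')
const entries = []
const line = getBucketLine()                     // hypothetical: one non-empty bucket line
const [foundHash, json] = line.split('\t')
const expected = crypto.createHash('sha1').update(json).digest('hex')
if (foundHash === expected) {
  entries.push(JSON.parse(json))                 // e.g. { key, integrity, time, size, metadata }
}
// otherwise: corruption or a torn write -- the line is simply ignored
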
diff --git a/node_modules/cacache/lib/memoization.js b/node_modules/cacache/lib/memoization.js
index 92179c7ac..185141d8e 100644
--- a/node_modules/cacache/lib/memoization.js
+++ b/node_modules/cacache/lib/memoization.js
@@ -5,19 +5,14 @@ const LRU = require('lru-cache')
const MAX_SIZE = 50 * 1024 * 1024 // 50MB
const MAX_AGE = 3 * 60 * 1000
-let MEMOIZED = new LRU({
+const MEMOIZED = new LRU({
max: MAX_SIZE,
maxAge: MAX_AGE,
- length: (entry, key) => {
- if (key.startsWith('key:')) {
- return entry.data.length
- } else if (key.startsWith('digest:')) {
- return entry.length
- }
- }
+ length: (entry, key) => key.startsWith('key:') ? entry.data.length : entry.length
})
module.exports.clearMemoized = clearMemoized
+
function clearMemoized () {
const old = {}
MEMOIZED.forEach((v, k) => {
@@ -28,22 +23,26 @@ function clearMemoized () {
}
module.exports.put = put
+
function put (cache, entry, data, opts) {
pickMem(opts).set(`key:${cache}:${entry.key}`, { entry, data })
putDigest(cache, entry.integrity, data, opts)
}
module.exports.put.byDigest = putDigest
+
function putDigest (cache, integrity, data, opts) {
pickMem(opts).set(`digest:${cache}:${integrity}`, data)
}
module.exports.get = get
+
function get (cache, key, opts) {
return pickMem(opts).get(`key:${cache}:${key}`)
}
module.exports.get.byDigest = getDigest
+
function getDigest (cache, integrity, opts) {
return pickMem(opts).get(`digest:${cache}:${integrity}`)
}
@@ -52,8 +51,14 @@ class ObjProxy {
constructor (obj) {
this.obj = obj
}
- get (key) { return this.obj[key] }
- set (key, val) { this.obj[key] = val }
+
+ get (key) {
+ return this.obj[key]
+ }
+
+ set (key, val) {
+ this.obj[key] = val
+ }
}
function pickMem (opts) {
diff --git a/node_modules/cacache/lib/util/disposer.js b/node_modules/cacache/lib/util/disposer.js
new file mode 100644
index 000000000..8a24ad2f2
--- /dev/null
+++ b/node_modules/cacache/lib/util/disposer.js
@@ -0,0 +1,30 @@
+'use strict'
+
+module.exports.disposer = disposer
+
+function disposer (creatorFn, disposerFn, fn) {
+ const runDisposer = (resource, result, shouldThrow = false) => {
+ return disposerFn(resource)
+ .then(
+ // disposer resolved, do something with original fn's promise
+ () => {
+ if (shouldThrow) {
+ throw result
+ }
+ return result
+ },
+ // Disposer fn failed; rethrow so the returned promise rejects
+ (err) => {
+ throw err
+ // Or process.exit?
+ })
+ }
+
+ return creatorFn
+ .then((resource) => {
+ // fn(resource) can throw, so wrap in a promise here
+ return Promise.resolve().then(() => fn(resource))
+ .then((result) => runDisposer(resource, result))
+ .catch((err) => runDisposer(resource, err, true))
+ })
+}
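
disposer() stands in for Bluebird's resource management (BB.using plus .disposer()) that write.js and tmp.js used before. A hedged sketch of the calling convention; the three callbacks here are hypothetical:

const { disposer } = require('./disposer')

disposer(
  acquireResource(),                // creatorFn: a promise for the resource
  (res) => releaseResource(res),    // disposerFn: always runs, success or failure
  (res) => useResource(res)         // fn: the actual work
).then((result) => {
  // the result (or rejection) from useResource is passed through after cleanup
})
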
diff --git a/node_modules/cacache/lib/util/fix-owner.js b/node_modules/cacache/lib/util/fix-owner.js
index f5c33db5f..9afa638a8 100644
--- a/node_modules/cacache/lib/util/fix-owner.js
+++ b/node_modules/cacache/lib/util/fix-owner.js
@@ -1,9 +1,9 @@
'use strict'
-const BB = require('bluebird')
+const util = require('util')
-const chownr = BB.promisify(require('chownr'))
-const mkdirp = BB.promisify(require('mkdirp'))
+const chownr = util.promisify(require('chownr'))
+const mkdirp = require('mkdirp')
const inflight = require('promise-inflight')
const inferOwner = require('infer-owner')
@@ -32,19 +32,20 @@ const getSelf = () => {
}
module.exports.chownr = fixOwner
+
function fixOwner (cache, filepath) {
if (!process.getuid) {
// This platform doesn't need ownership fixing
- return BB.resolve()
+ return Promise.resolve()
}
getSelf()
if (self.uid !== 0) {
// almost certainly can't chown anyway
- return BB.resolve()
+ return Promise.resolve()
}
- return BB.resolve(inferOwner(cache)).then(owner => {
+ return Promise.resolve(inferOwner(cache)).then((owner) => {
const { uid, gid } = owner
// No need to override if it's already what we used.
@@ -52,18 +53,23 @@ function fixOwner (cache, filepath) {
return
}
- return inflight(
- 'fixOwner: fixing ownership on ' + filepath,
- () => chownr(
+ return inflight('fixOwner: fixing ownership on ' + filepath, () =>
+ chownr(
filepath,
typeof uid === 'number' ? uid : self.uid,
typeof gid === 'number' ? gid : self.gid
- ).catch({ code: 'ENOENT' }, () => null)
+ ).catch((err) => {
+ if (err.code === 'ENOENT') {
+ return null
+ }
+ throw err
+ })
)
})
}
module.exports.chownr.sync = fixOwnerSync
+
function fixOwnerSync (cache, filepath) {
if (!process.getuid) {
// This platform doesn't need ownership fixing
@@ -71,6 +77,11 @@ function fixOwnerSync (cache, filepath) {
}
const { uid, gid } = inferOwner.sync(cache)
getSelf()
+ if (self.uid !== 0) {
+ // almost certainly can't chown anyway
+ return
+ }
+
if (self.uid === uid && self.gid === gid) {
// No need to override if it's already what we used.
return
@@ -91,24 +102,30 @@ function fixOwnerSync (cache, filepath) {
}
module.exports.mkdirfix = mkdirfix
+
function mkdirfix (cache, p, cb) {
// we have to infer the owner _before_ making the directory, even though
// we aren't going to use the results, since the cache itself might not
// exist yet. If we mkdirp it, then our current uid/gid will be assumed
// to be correct if it creates the cache folder in the process.
- return BB.resolve(inferOwner(cache)).then(() => {
- return mkdirp(p).then(made => {
- if (made) {
- return fixOwner(cache, made).then(() => made)
- }
- }).catch({ code: 'EEXIST' }, () => {
- // There's a race in mkdirp!
- return fixOwner(cache, p).then(() => null)
- })
+ return Promise.resolve(inferOwner(cache)).then(() => {
+ return mkdirp(p)
+ .then((made) => {
+ if (made) {
+ return fixOwner(cache, made).then(() => made)
+ }
+ })
+ .catch((err) => {
+ if (err.code === 'EEXIST') {
+ return fixOwner(cache, p).then(() => null)
+ }
+ throw err
+ })
})
}
module.exports.mkdirfix.sync = mkdirfixSync
+
function mkdirfixSync (cache, p) {
try {
inferOwner.sync(cache)
diff --git a/node_modules/cacache/lib/util/hash-to-segments.js b/node_modules/cacache/lib/util/hash-to-segments.js
index 192be2a6d..445599b50 100644
--- a/node_modules/cacache/lib/util/hash-to-segments.js
+++ b/node_modules/cacache/lib/util/hash-to-segments.js
@@ -3,9 +3,5 @@
module.exports = hashToSegments
function hashToSegments (hash) {
- return [
- hash.slice(0, 2),
- hash.slice(2, 4),
- hash.slice(4)
- ]
+ return [hash.slice(0, 2), hash.slice(2, 4), hash.slice(4)]
}
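
A quick worked example, using the digest from the content-path comment earlier in this diff:

hashToSegments('bada55deadbeefc0ffee')
// => ['ba', 'da', '55deadbeefc0ffee']
// which yields the two-level directory fan-out used by both content and index paths
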
diff --git a/node_modules/cacache/lib/util/move-file.js b/node_modules/cacache/lib/util/move-file.js
index b43744b3d..1b1d81067 100644
--- a/node_modules/cacache/lib/util/move-file.js
+++ b/node_modules/cacache/lib/util/move-file.js
@@ -1,14 +1,19 @@
'use strict'
-const fs = require('graceful-fs')
-const BB = require('bluebird')
-const chmod = BB.promisify(fs.chmod)
-const unlink = BB.promisify(fs.unlink)
-let move
-let pinflight
+const fs = require('fs')
+const util = require('util')
+const chmod = util.promisify(fs.chmod)
+const unlink = util.promisify(fs.unlink)
+const stat = util.promisify(fs.stat)
+const move = require('move-concurrently')
+const pinflight = require('promise-inflight')
module.exports = moveFile
+
function moveFile (src, dest) {
+ const isWindows = global.__CACACHE_TEST_FAKE_WINDOWS__ ||
+ process.platform === 'win32'
+
// This isn't quite an fs.rename -- the assumption is that
// if `dest` already exists, and we get certain errors while
// trying to move it, we should just not bother.
@@ -18,34 +23,46 @@ function moveFile (src, dest) {
// content their own way.
//
// Note that, as the name suggests, this strictly only supports file moves.
- return BB.fromNode(cb => {
- fs.link(src, dest, err => {
+ return new Promise((resolve, reject) => {
+ fs.link(src, dest, (err) => {
if (err) {
- if (err.code === 'EEXIST' || err.code === 'EBUSY') {
+ if (isWindows && err.code === 'EPERM') {
+ // XXX This is a really weird way to handle this situation, as it
+ // results in the src file being deleted even though the dest
+ // might not exist. Since we pretty much always write files to
+ // deterministic locations based on content hash, this is likely
+ // ok (or at worst, just ends in a future cache miss). But it would
+ // be worth investigating at some time in the future if this is
+ // really what we want to do here.
+ return resolve()
+ } else if (err.code === 'EEXIST' || err.code === 'EBUSY') {
// file already exists, so whatever
- } else if (err.code === 'EPERM' && process.platform === 'win32') {
- // file handle stayed open even past graceful-fs limits
+ return resolve()
} else {
- return cb(err)
+ return reject(err)
}
+ } else {
+ return resolve()
}
- return cb()
})
- }).then(() => {
- // content should never change for any reason, so make it read-only
- return BB.join(unlink(src), process.platform !== 'win32' && chmod(dest, '0444'))
- }).catch(() => {
- if (!pinflight) { pinflight = require('promise-inflight') }
- return pinflight('cacache-move-file:' + dest, () => {
- return BB.promisify(fs.stat)(dest).catch(err => {
- if (err.code !== 'ENOENT') {
- // Something else is wrong here. Bail bail bail
- throw err
- }
- // file doesn't already exist! let's try a rename -> copy fallback
- if (!move) { move = require('move-concurrently') }
- return move(src, dest, { BB, fs })
+ })
+ .then(() => {
+ // content should never change for any reason, so make it read-only
+ return Promise.all([
+ unlink(src),
+ !isWindows && chmod(dest, '0444')
+ ])
+ })
+ .catch(() => {
+ return pinflight('cacache-move-file:' + dest, () => {
+ return stat(dest).catch((err) => {
+ if (err.code !== 'ENOENT') {
+ // Something else is wrong here. Bail bail bail
+ throw err
+ }
+ // file doesn't already exist! let's try a rename -> copy fallback
+ return move(src, dest, { Promise, fs })
+ })
})
})
- })
}
diff --git a/node_modules/cacache/lib/util/tmp.js b/node_modules/cacache/lib/util/tmp.js
index 78494b8ea..fbcd2ab13 100644
--- a/node_modules/cacache/lib/util/tmp.js
+++ b/node_modules/cacache/lib/util/tmp.js
@@ -1,37 +1,35 @@
'use strict'
-const BB = require('bluebird')
+const util = require('util')
-const figgyPudding = require('figgy-pudding')
const fixOwner = require('./fix-owner')
const path = require('path')
-const rimraf = BB.promisify(require('rimraf'))
+const rimraf = util.promisify(require('rimraf'))
const uniqueFilename = require('unique-filename')
-
-const TmpOpts = figgyPudding({
- tmpPrefix: {}
-})
+const { disposer } = require('./disposer')
module.exports.mkdir = mktmpdir
-function mktmpdir (cache, opts) {
- opts = TmpOpts(opts)
- const tmpTarget = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix)
+
+function mktmpdir (cache, opts = {}) {
+ const { tmpPrefix } = opts
+ const tmpTarget = uniqueFilename(path.join(cache, 'tmp'), tmpPrefix)
return fixOwner.mkdirfix(cache, tmpTarget).then(() => {
return tmpTarget
})
}
module.exports.withTmp = withTmp
+
function withTmp (cache, opts, cb) {
if (!cb) {
cb = opts
- opts = null
+ opts = {}
}
- opts = TmpOpts(opts)
- return BB.using(mktmpdir(cache, opts).disposer(rimraf), cb)
+ return disposer(mktmpdir(cache, opts), rimraf, cb)
}
module.exports.fix = fixtmpdir
+
function fixtmpdir (cache) {
return fixOwner(cache, path.join(cache, 'tmp'))
}
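
withTmp now leans on the same disposer helper: make a unique directory under <cache>/tmp, run the callback, and always rimraf the directory when the returned promise settles. A hedged usage sketch (the cache path, prefix, and callback are hypothetical):

withTmp('/tmp/my-cache', { tmpPrefix: 'unpack' }, (dir) => {
  // dir is something like /tmp/my-cache/tmp/unpack-<unique>, removed afterwards
  return doWorkIn(dir)              // hypothetical work returning a promise
})
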
diff --git a/node_modules/cacache/lib/util/y.js b/node_modules/cacache/lib/util/y.js
deleted file mode 100644
index d62bedacb..000000000
--- a/node_modules/cacache/lib/util/y.js
+++ /dev/null
@@ -1,25 +0,0 @@
-'use strict'
-
-const path = require('path')
-const y18n = require('y18n')({
- directory: path.join(__dirname, '../../locales'),
- locale: 'en',
- updateFiles: process.env.CACACHE_UPDATE_LOCALE_FILES === 'true'
-})
-
-module.exports = yTag
-function yTag (parts) {
- let str = ''
- parts.forEach((part, i) => {
- const arg = arguments[i + 1]
- str += part
- if (arg) {
- str += '%s'
- }
- })
- return y18n.__.apply(null, [str].concat([].slice.call(arguments, 1)))
-}
-
-module.exports.setLocale = locale => {
- y18n.setLocale(locale)
-}
diff --git a/node_modules/cacache/lib/verify.js b/node_modules/cacache/lib/verify.js
index 617d38db1..5a011a3f1 100644
--- a/node_modules/cacache/lib/verify.js
+++ b/node_modules/cacache/lib/verify.js
@@ -1,35 +1,39 @@
'use strict'
-const BB = require('bluebird')
+const util = require('util')
+const pMap = require('p-map')
const contentPath = require('./content/path')
-const figgyPudding = require('figgy-pudding')
-const finished = BB.promisify(require('mississippi').finished)
const fixOwner = require('./util/fix-owner')
-const fs = require('graceful-fs')
-const glob = BB.promisify(require('glob'))
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const glob = util.promisify(require('glob'))
const index = require('./entry-index')
const path = require('path')
-const rimraf = BB.promisify(require('rimraf'))
+const rimraf = util.promisify(require('rimraf'))
const ssri = require('ssri')
-BB.promisifyAll(fs)
+const hasOwnProperty = (obj, key) =>
+ Object.prototype.hasOwnProperty.call(obj, key)
-const VerifyOpts = figgyPudding({
- concurrency: {
- default: 20
- },
- filter: {},
- log: {
- default: { silly () {} }
- }
+const stat = util.promisify(fs.stat)
+const truncate = util.promisify(fs.truncate)
+const writeFile = util.promisify(fs.writeFile)
+const readFile = util.promisify(fs.readFile)
+
+const verifyOpts = (opts) => ({
+ concurrency: 20,
+ log: { silly () {} },
+ ...opts
})
module.exports = verify
+
function verify (cache, opts) {
- opts = VerifyOpts(opts)
+ opts = verifyOpts(opts)
opts.log.silly('verify', 'verifying cache at', cache)
- return BB.reduce([
+
+ const steps = [
markStartTime,
fixPerms,
garbageCollect,
@@ -37,38 +41,57 @@ function verify (cache, opts) {
cleanTmp,
writeVerifile,
markEndTime
- ], (stats, step, i) => {
- const label = step.name || `step #${i}`
- const start = new Date()
- return BB.resolve(step(cache, opts)).then(s => {
- s && Object.keys(s).forEach(k => {
- stats[k] = s[k]
+ ]
+
+ return steps
+ .reduce((promise, step, i) => {
+ const label = step.name
+ const start = new Date()
+ return promise.then((stats) => {
+ return step(cache, opts).then((s) => {
+ s &&
+ Object.keys(s).forEach((k) => {
+ stats[k] = s[k]
+ })
+ const end = new Date()
+ if (!stats.runTime) {
+ stats.runTime = {}
+ }
+ stats.runTime[label] = end - start
+ return Promise.resolve(stats)
+ })
})
- const end = new Date()
- if (!stats.runTime) { stats.runTime = {} }
- stats.runTime[label] = end - start
+ }, Promise.resolve({}))
+ .then((stats) => {
+ stats.runTime.total = stats.endTime - stats.startTime
+ opts.log.silly(
+ 'verify',
+ 'verification finished for',
+ cache,
+ 'in',
+ `${stats.runTime.total}ms`
+ )
return stats
})
- }, {}).tap(stats => {
- stats.runTime.total = stats.endTime - stats.startTime
- opts.log.silly('verify', 'verification finished for', cache, 'in', `${stats.runTime.total}ms`)
- })
}
function markStartTime (cache, opts) {
- return { startTime: new Date() }
+ return Promise.resolve({ startTime: new Date() })
}
function markEndTime (cache, opts) {
- return { endTime: new Date() }
+ return Promise.resolve({ endTime: new Date() })
}
function fixPerms (cache, opts) {
opts.log.silly('verify', 'fixing cache permissions')
- return fixOwner.mkdirfix(cache, cache).then(() => {
- // TODO - fix file permissions too
- return fixOwner.chownr(cache, cache)
- }).then(() => null)
+ return fixOwner
+ .mkdirfix(cache, cache)
+ .then(() => {
+ // TODO - fix file permissions too
+ return fixOwner.chownr(cache, cache)
+ })
+ .then(() => null)
}
// Implements a naive mark-and-sweep tracing garbage collector.
@@ -84,85 +107,105 @@ function garbageCollect (cache, opts) {
opts.log.silly('verify', 'garbage collecting content')
const indexStream = index.lsStream(cache)
const liveContent = new Set()
- indexStream.on('data', entry => {
- if (opts.filter && !opts.filter(entry)) { return }
+ indexStream.on('data', (entry) => {
+ if (opts.filter && !opts.filter(entry)) {
+ return
+ }
liveContent.add(entry.integrity.toString())
})
- return finished(indexStream).then(() => {
- const contentDir = contentPath._contentDir(cache)
+ return new Promise((resolve, reject) => {
+ indexStream.on('end', resolve).on('error', reject)
+ }).then(() => {
+ const contentDir = contentPath.contentDir(cache)
return glob(path.join(contentDir, '**'), {
follow: false,
nodir: true,
nosort: true
- }).then(files => {
- return BB.resolve({
+ }).then((files) => {
+ return Promise.resolve({
verifiedContent: 0,
reclaimedCount: 0,
reclaimedSize: 0,
badContentCount: 0,
keptSize: 0
- }).tap((stats) => BB.map(files, (f) => {
- const split = f.split(/[/\\]/)
- const digest = split.slice(split.length - 3).join('')
- const algo = split[split.length - 4]
- const integrity = ssri.fromHex(digest, algo)
- if (liveContent.has(integrity.toString())) {
- return verifyContent(f, integrity).then(info => {
- if (!info.valid) {
- stats.reclaimedCount++
- stats.badContentCount++
- stats.reclaimedSize += info.size
+ }).then((stats) =>
+ pMap(
+ files,
+ (f) => {
+ const split = f.split(/[/\\]/)
+ const digest = split.slice(split.length - 3).join('')
+ const algo = split[split.length - 4]
+ const integrity = ssri.fromHex(digest, algo)
+ if (liveContent.has(integrity.toString())) {
+ return verifyContent(f, integrity).then((info) => {
+ if (!info.valid) {
+ stats.reclaimedCount++
+ stats.badContentCount++
+ stats.reclaimedSize += info.size
+ } else {
+ stats.verifiedContent++
+ stats.keptSize += info.size
+ }
+ return stats
+ })
} else {
- stats.verifiedContent++
- stats.keptSize += info.size
+ // No entries refer to this content. We can delete.
+ stats.reclaimedCount++
+ return stat(f).then((s) => {
+ return rimraf(f).then(() => {
+ stats.reclaimedSize += s.size
+ return stats
+ })
+ })
}
- return stats
- })
- } else {
- // No entries refer to this content. We can delete.
- stats.reclaimedCount++
- return fs.statAsync(f).then(s => {
- return rimraf(f).then(() => {
- stats.reclaimedSize += s.size
- return stats
- })
- })
- }
- }, { concurrency: opts.concurrency }))
+ },
+ { concurrency: opts.concurrency }
+ ).then(() => stats)
+ )
})
})
}
function verifyContent (filepath, sri) {
- return fs.statAsync(filepath).then(stat => {
- const contentInfo = {
- size: stat.size,
- valid: true
- }
- return ssri.checkStream(
- fs.createReadStream(filepath),
- sri
- ).catch(err => {
- if (err.code !== 'EINTEGRITY') { throw err }
- return rimraf(filepath).then(() => {
- contentInfo.valid = false
- })
- }).then(() => contentInfo)
- }).catch({ code: 'ENOENT' }, () => ({ size: 0, valid: false }))
+ return stat(filepath)
+ .then((s) => {
+ const contentInfo = {
+ size: s.size,
+ valid: true
+ }
+ return ssri
+ .checkStream(new fsm.ReadStream(filepath), sri)
+ .catch((err) => {
+ if (err.code !== 'EINTEGRITY') {
+ throw err
+ }
+ return rimraf(filepath).then(() => {
+ contentInfo.valid = false
+ })
+ })
+ .then(() => contentInfo)
+ })
+ .catch((err) => {
+ if (err.code === 'ENOENT') {
+ return { size: 0, valid: false }
+ }
+ throw err
+ })
}
function rebuildIndex (cache, opts) {
opts.log.silly('verify', 'rebuilding index')
- return index.ls(cache).then(entries => {
+ return index.ls(cache).then((entries) => {
const stats = {
missingContent: 0,
rejectedEntries: 0,
totalEntries: 0
}
const buckets = {}
- for (let k in entries) {
- if (entries.hasOwnProperty(k)) {
- const hashed = index._hashKey(k)
+ for (const k in entries) {
+ /* istanbul ignore else */
+ if (hasOwnProperty(entries, k)) {
+ const hashed = index.hashKey(k)
const entry = entries[k]
const excluded = opts.filter && !opts.filter(entry)
excluded && stats.rejectedEntries++
@@ -172,35 +215,51 @@ function rebuildIndex (cache, opts) {
// skip
} else if (excluded) {
buckets[hashed] = []
- buckets[hashed]._path = index._bucketPath(cache, k)
+ buckets[hashed]._path = index.bucketPath(cache, k)
} else {
buckets[hashed] = [entry]
- buckets[hashed]._path = index._bucketPath(cache, k)
+ buckets[hashed]._path = index.bucketPath(cache, k)
}
}
}
- return BB.map(Object.keys(buckets), key => {
- return rebuildBucket(cache, buckets[key], stats, opts)
- }, { concurrency: opts.concurrency }).then(() => stats)
+ return pMap(
+ Object.keys(buckets),
+ (key) => {
+ return rebuildBucket(cache, buckets[key], stats, opts)
+ },
+ { concurrency: opts.concurrency }
+ ).then(() => stats)
})
}
function rebuildBucket (cache, bucket, stats, opts) {
- return fs.truncateAsync(bucket._path).then(() => {
+ return truncate(bucket._path).then(() => {
// This needs to be serialized because cacache explicitly
// lets very racy bucket conflicts clobber each other.
- return BB.mapSeries(bucket, entry => {
- const content = contentPath(cache, entry.integrity)
- return fs.statAsync(content).then(() => {
- return index.insert(cache, entry.key, entry.integrity, {
- metadata: entry.metadata,
- size: entry.size
- }).then(() => { stats.totalEntries++ })
- }).catch({ code: 'ENOENT' }, () => {
- stats.rejectedEntries++
- stats.missingContent++
+ return bucket.reduce((promise, entry) => {
+ return promise.then(() => {
+ const content = contentPath(cache, entry.integrity)
+ return stat(content)
+ .then(() => {
+ return index
+ .insert(cache, entry.key, entry.integrity, {
+ metadata: entry.metadata,
+ size: entry.size
+ })
+ .then(() => {
+ stats.totalEntries++
+ })
+ })
+ .catch((err) => {
+ if (err.code === 'ENOENT') {
+ stats.rejectedEntries++
+ stats.missingContent++
+ return
+ }
+ throw err
+ })
})
- })
+ }, Promise.resolve())
})
}
@@ -213,15 +272,16 @@ function writeVerifile (cache, opts) {
const verifile = path.join(cache, '_lastverified')
opts.log.silly('verify', 'writing verifile to ' + verifile)
try {
- return fs.writeFileAsync(verifile, '' + (+(new Date())))
+ return writeFile(verifile, '' + +new Date())
} finally {
fixOwner.chownr.sync(cache, verifile)
}
}
module.exports.lastRun = lastRun
+
function lastRun (cache) {
- return fs.readFileAsync(
- path.join(cache, '_lastverified'), 'utf8'
- ).then(data => new Date(+data))
+ return readFile(path.join(cache, '_lastverified'), 'utf8').then(
+ (data) => new Date(+data)
+ )
}
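
verify() now chains its steps with a plain promise reduce and resolves with the accumulated stats object. A hedged sketch of driving it through the public API; the cache path is hypothetical, and the stats fields shown are the ones populated above:

const cacache = require('cacache')

cacache.verify('/tmp/my-cache')
  .then((stats) => {
    // e.g. stats.verifiedContent, stats.reclaimedCount, stats.reclaimedSize,
    // stats.totalEntries, stats.runTime.total
    console.log(stats)
    return cacache.verify.lastRun('/tmp/my-cache')
  })
  .then((when) => console.log('last verified:', when))
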
diff --git a/node_modules/cacache/locales/en.js b/node_modules/cacache/locales/en.js
deleted file mode 100644
index 1715fdb53..000000000
--- a/node_modules/cacache/locales/en.js
+++ /dev/null
@@ -1,47 +0,0 @@
-'use strict'
-
-const ls = require('../ls.js')
-const get = require('../get.js')
-const put = require('../put.js')
-const rm = require('../rm.js')
-const verify = require('../verify.js')
-const setLocale = require('../lib/util/y.js').setLocale
-const clearMemoized = require('../lib/memoization.js').clearMemoized
-const tmp = require('../lib/util/tmp.js')
-
-setLocale('en')
-
-const x = module.exports
-
-x.ls = cache => ls(cache)
-x.ls.stream = cache => ls.stream(cache)
-
-x.get = (cache, key, opts) => get(cache, key, opts)
-x.get.byDigest = (cache, hash, opts) => get.byDigest(cache, hash, opts)
-x.get.sync = (cache, key, opts) => get.sync(cache, key, opts)
-x.get.sync.byDigest = (cache, key, opts) => get.sync.byDigest(cache, key, opts)
-x.get.stream = (cache, key, opts) => get.stream(cache, key, opts)
-x.get.stream.byDigest = (cache, hash, opts) => get.stream.byDigest(cache, hash, opts)
-x.get.copy = (cache, key, dest, opts) => get.copy(cache, key, dest, opts)
-x.get.copy.byDigest = (cache, hash, dest, opts) => get.copy.byDigest(cache, hash, dest, opts)
-x.get.info = (cache, key) => get.info(cache, key)
-x.get.hasContent = (cache, hash) => get.hasContent(cache, hash)
-x.get.hasContent.sync = (cache, hash) => get.hasContent.sync(cache, hash)
-
-x.put = (cache, key, data, opts) => put(cache, key, data, opts)
-x.put.stream = (cache, key, opts) => put.stream(cache, key, opts)
-
-x.rm = (cache, key) => rm.entry(cache, key)
-x.rm.all = cache => rm.all(cache)
-x.rm.entry = x.rm
-x.rm.content = (cache, hash) => rm.content(cache, hash)
-
-x.setLocale = lang => setLocale(lang)
-x.clearMemoized = () => clearMemoized()
-
-x.tmp = {}
-x.tmp.mkdir = (cache, opts) => tmp.mkdir(cache, opts)
-x.tmp.withTmp = (cache, opts, cb) => tmp.withTmp(cache, opts, cb)
-
-x.verify = (cache, opts) => verify(cache, opts)
-x.verify.lastRun = cache => verify.lastRun(cache)
diff --git a/node_modules/cacache/locales/en.json b/node_modules/cacache/locales/en.json
deleted file mode 100644
index 4f1452884..000000000
--- a/node_modules/cacache/locales/en.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "No cache entry for `%s` found in `%s`": "No cache entry for %s found in %s",
- "Integrity verification failed for %s (%s)": "Integrity verification failed for %s (%s)",
- "Bad data size: expected inserted data to be %s bytes, but got %s instead": "Bad data size: expected inserted data to be %s bytes, but got %s instead",
- "Cache input stream was empty": "Cache input stream was empty",
- "Integrity check failed:\n Wanted: %s\n Found: %s": "Integrity check failed:\n Wanted: %s\n Found: %s"
-} \ No newline at end of file
diff --git a/node_modules/cacache/locales/es.js b/node_modules/cacache/locales/es.js
deleted file mode 100644
index ac4e4cfe7..000000000
--- a/node_modules/cacache/locales/es.js
+++ /dev/null
@@ -1,49 +0,0 @@
-'use strict'
-
-const ls = require('../ls.js')
-const get = require('../get.js')
-const put = require('../put.js')
-const rm = require('../rm.js')
-const verify = require('../verify.js')
-const setLocale = require('../lib/util/y.js').setLocale
-const clearMemoized = require('../lib/memoization.js').clearMemoized
-const tmp = require('../lib/util/tmp.js')
-
-setLocale('es')
-
-const x = module.exports
-
-x.ls = cache => ls(cache)
-x.ls.flujo = cache => ls.stream(cache)
-
-x.saca = (cache, clave, ops) => get(cache, clave, ops)
-x.saca.porHacheo = (cache, hacheo, ops) => get.byDigest(cache, hacheo, ops)
-x.saca.sinc = (cache, clave, ops) => get.sync(cache, clave, ops)
-x.saca.sinc.porHacheo = (cache, hacheo, ops) => get.sync.byDigest(cache, hacheo, ops)
-x.saca.flujo = (cache, clave, ops) => get.stream(cache, clave, ops)
-x.saca.flujo.porHacheo = (cache, hacheo, ops) => get.stream.byDigest(cache, hacheo, ops)
-x.sava.copia = (cache, clave, destino, opts) => get.copy(cache, clave, destino, opts)
-x.sava.copia.porHacheo = (cache, hacheo, destino, opts) => get.copy.byDigest(cache, hacheo, destino, opts)
-x.saca.info = (cache, clave) => get.info(cache, clave)
-x.saca.tieneDatos = (cache, hacheo) => get.hasContent(cache, hacheo)
-x.saca.tieneDatos.sinc = (cache, hacheo) => get.hasContent.sync(cache, hacheo)
-
-x.mete = (cache, clave, datos, ops) => put(cache, clave, datos, ops)
-x.mete.flujo = (cache, clave, ops) => put.stream(cache, clave, ops)
-
-x.rm = (cache, clave) => rm.entry(cache, clave)
-x.rm.todo = cache => rm.all(cache)
-x.rm.entrada = x.rm
-x.rm.datos = (cache, hacheo) => rm.content(cache, hacheo)
-
-x.ponLenguaje = lang => setLocale(lang)
-x.limpiaMemoizado = () => clearMemoized()
-
-x.tmp = {}
-x.tmp.mkdir = (cache, ops) => tmp.mkdir(cache, ops)
-x.tmp.hazdir = x.tmp.mkdir
-x.tmp.conTmp = (cache, ops, cb) => tmp.withTmp(cache, ops, cb)
-
-x.verifica = (cache, ops) => verify(cache, ops)
-x.verifica.ultimaVez = cache => verify.lastRun(cache)
-x.verifica.últimaVez = x.verifica.ultimaVez
diff --git a/node_modules/cacache/locales/es.json b/node_modules/cacache/locales/es.json
deleted file mode 100644
index a91d76225..000000000
--- a/node_modules/cacache/locales/es.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "No cache entry for `%s` found in `%s`": "No existe ninguna entrada para «%s» en «%s»",
- "Integrity verification failed for %s (%s)": "Verificación de integridad falló para «%s» (%s)",
- "Bad data size: expected inserted data to be %s bytes, but got %s instead": "Tamaño incorrecto de datos: los datos insertados debieron haber sido %s octetos, pero fueron %s",
- "Cache input stream was empty": "El stream de entrada al caché estaba vacío"
-}
diff --git a/node_modules/cacache/ls.js b/node_modules/cacache/ls.js
index 9f49b388a..6006c99e3 100644
--- a/node_modules/cacache/ls.js
+++ b/node_modules/cacache/ls.js
@@ -1,6 +1,6 @@
'use strict'
-var index = require('./lib/entry-index')
+const index = require('./lib/entry-index')
module.exports = index.ls
module.exports.stream = index.lsStream
diff --git a/node_modules/cacache/node_modules/.bin/mkdirp b/node_modules/cacache/node_modules/.bin/mkdirp
new file mode 120000
index 000000000..017896ceb
--- /dev/null
+++ b/node_modules/cacache/node_modules/.bin/mkdirp
@@ -0,0 +1 @@
+../mkdirp/bin/cmd.js \ No newline at end of file
diff --git a/node_modules/cacache/node_modules/.bin/rimraf b/node_modules/cacache/node_modules/.bin/rimraf
new file mode 120000
index 000000000..4cd49a49d
--- /dev/null
+++ b/node_modules/cacache/node_modules/.bin/rimraf
@@ -0,0 +1 @@
+../rimraf/bin.js \ No newline at end of file
diff --git a/node_modules/cacache/node_modules/fs-minipass/LICENSE b/node_modules/cacache/node_modules/fs-minipass/LICENSE
new file mode 100644
index 000000000..19129e315
--- /dev/null
+++ b/node_modules/cacache/node_modules/fs-minipass/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/cacache/node_modules/fs-minipass/README.md b/node_modules/cacache/node_modules/fs-minipass/README.md
new file mode 100644
index 000000000..1e61241cf
--- /dev/null
+++ b/node_modules/cacache/node_modules/fs-minipass/README.md
@@ -0,0 +1,70 @@
+# fs-minipass
+
+Filesystem streams based on [minipass](http://npm.im/minipass).
+
+4 classes are exported:
+
+- ReadStream
+- ReadStreamSync
+- WriteStream
+- WriteStreamSync
+
+When using `ReadStreamSync`, all of the data is made available
+immediately upon consuming the stream. Nothing is buffered in memory
+when the stream is constructed. If the stream is piped to a writer,
+then it will synchronously `read()` and emit data into the writer as
+fast as the writer can consume it. (That is, it will respect
+backpressure.) If you call `stream.read()` then it will read the
+entire file and return the contents.
+
+When using `WriteStreamSync`, every write is flushed to the file
+synchronously. If your writes all come in a single tick, then it'll
+write it all out in a single tick. It's as synchronous as you are.
+
+The async versions work much like their node builtin counterparts,
+with the exception of introducing significantly less Stream machinery
+overhead.
+
+## USAGE
+
+It's just streams: you pipe them, read() them, or write() to them.
+
+```js
+const fsm = require('fs-minipass')
+const readStream = new fsm.ReadStream('file.txt')
+const writeStream = new fsm.WriteStream('output.txt')
+writeStream.write('some file header or whatever\n')
+readStream.pipe(writeStream)
+```
+
+## ReadStream(path, options)
+
+Path string is required, but somewhat irrelevant if an open file
+descriptor is passed in as an option.
+
+Options:
+
+- `fd` Pass in a numeric file descriptor, if the file is already open.
+- `readSize` The size of reads to do, defaults to 16MB
+- `size` The size of the file, if known. Prevents zero-byte read()
+ call at the end.
+- `autoClose` Set to `false` to prevent the file descriptor from being
+ closed when the file is done being read.
+
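+For example, a minimal sketch (the `file.txt` path and the 1024-byte size
+are placeholders for illustration, not from the original docs) of reading a
+file whose size is already known:
+
+```js
+const fsm = require('fs-minipass')
+
+// size is known ahead of time, so the trailing zero-byte read() is skipped
+const rs = new fsm.ReadStream('file.txt', {
+  readSize: 64 * 1024, // 64KiB reads instead of the 16MB default
+  size: 1024,
+  autoClose: false     // keep the fd open; close it yourself later
+})
+rs.on('data', chunk => console.log('read %d bytes', chunk.length))
+```
+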
+## WriteStream(path, options)
+
+Path string is required, but somewhat irrelevant if an open file
+descriptor is passed in as an option.
+
+Options:
+
+- `fd` Pass in a numeric file descriptor, if the file is already open.
+- `mode` The mode to create the file with. Defaults to `0o666`.
+- `start` The position in the file to start writing.  If not
+ specified, then the file will start writing at position zero, and be
+ truncated by default.
+- `autoClose` Set to `false` to prevent the file descriptor from being
+ closed when the stream is ended.
+- `flags` Flags to use when opening the file. Irrelevant if `fd` is
+ passed in, since file won't be opened in that case. Defaults to
+  `'r+'` if a `start` position is specified, or `'w'` otherwise.
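+
+As a rough sketch (the `output.txt` path is just an illustration), writing
+into the middle of an existing file without truncating it:
+
+```js
+const fsm = require('fs-minipass')
+
+// with a start position and no explicit flags, the file is opened 'r+'
+const ws = new fsm.WriteStream('output.txt', { start: 100 })
+ws.write('patched region')
+ws.end()
+ws.on('close', () => console.log('done writing'))
+```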
diff --git a/node_modules/cacache/node_modules/fs-minipass/index.js b/node_modules/cacache/node_modules/fs-minipass/index.js
new file mode 100644
index 000000000..9b0779c80
--- /dev/null
+++ b/node_modules/cacache/node_modules/fs-minipass/index.js
@@ -0,0 +1,422 @@
+'use strict'
+const MiniPass = require('minipass')
+const EE = require('events').EventEmitter
+const fs = require('fs')
+
+let writev = fs.writev
+/* istanbul ignore next */
+if (!writev) {
+ // This entire block can be removed if support for earlier than Node.js
+ // 12.9.0 is not needed.
+ const binding = process.binding('fs')
+ const FSReqWrap = binding.FSReqWrap || binding.FSReqCallback
+
+ writev = (fd, iovec, pos, cb) => {
+ const done = (er, bw) => cb(er, bw, iovec)
+ const req = new FSReqWrap()
+ req.oncomplete = done
+ binding.writeBuffers(fd, iovec, pos, req)
+ }
+}
+
+const _autoClose = Symbol('_autoClose')
+const _close = Symbol('_close')
+const _ended = Symbol('_ended')
+const _fd = Symbol('_fd')
+const _finished = Symbol('_finished')
+const _flags = Symbol('_flags')
+const _flush = Symbol('_flush')
+const _handleChunk = Symbol('_handleChunk')
+const _makeBuf = Symbol('_makeBuf')
+const _mode = Symbol('_mode')
+const _needDrain = Symbol('_needDrain')
+const _onerror = Symbol('_onerror')
+const _onopen = Symbol('_onopen')
+const _onread = Symbol('_onread')
+const _onwrite = Symbol('_onwrite')
+const _open = Symbol('_open')
+const _path = Symbol('_path')
+const _pos = Symbol('_pos')
+const _queue = Symbol('_queue')
+const _read = Symbol('_read')
+const _readSize = Symbol('_readSize')
+const _reading = Symbol('_reading')
+const _remain = Symbol('_remain')
+const _size = Symbol('_size')
+const _write = Symbol('_write')
+const _writing = Symbol('_writing')
+const _defaultFlag = Symbol('_defaultFlag')
+const _errored = Symbol('_errored')
+
+class ReadStream extends MiniPass {
+ constructor (path, opt) {
+ opt = opt || {}
+ super(opt)
+
+ this.readable = true
+ this.writable = false
+
+ if (typeof path !== 'string')
+ throw new TypeError('path must be a string')
+
+ this[_errored] = false
+ this[_fd] = typeof opt.fd === 'number' ? opt.fd : null
+ this[_path] = path
+ this[_readSize] = opt.readSize || 16*1024*1024
+ this[_reading] = false
+ this[_size] = typeof opt.size === 'number' ? opt.size : Infinity
+ this[_remain] = this[_size]
+ this[_autoClose] = typeof opt.autoClose === 'boolean' ?
+ opt.autoClose : true
+
+ if (typeof this[_fd] === 'number')
+ this[_read]()
+ else
+ this[_open]()
+ }
+
+ get fd () { return this[_fd] }
+ get path () { return this[_path] }
+
+ write () {
+ throw new TypeError('this is a readable stream')
+ }
+
+ end () {
+ throw new TypeError('this is a readable stream')
+ }
+
+ [_open] () {
+ fs.open(this[_path], 'r', (er, fd) => this[_onopen](er, fd))
+ }
+
+ [_onopen] (er, fd) {
+ if (er)
+ this[_onerror](er)
+ else {
+ this[_fd] = fd
+ this.emit('open', fd)
+ this[_read]()
+ }
+ }
+
+ [_makeBuf] () {
+ return Buffer.allocUnsafe(Math.min(this[_readSize], this[_remain]))
+ }
+
+ [_read] () {
+ if (!this[_reading]) {
+ this[_reading] = true
+ const buf = this[_makeBuf]()
+ /* istanbul ignore if */
+ if (buf.length === 0)
+ return process.nextTick(() => this[_onread](null, 0, buf))
+ fs.read(this[_fd], buf, 0, buf.length, null, (er, br, buf) =>
+ this[_onread](er, br, buf))
+ }
+ }
+
+ [_onread] (er, br, buf) {
+ this[_reading] = false
+ if (er)
+ this[_onerror](er)
+ else if (this[_handleChunk](br, buf))
+ this[_read]()
+ }
+
+ [_close] () {
+ if (this[_autoClose] && typeof this[_fd] === 'number') {
+ const fd = this[_fd]
+ this[_fd] = null
+ fs.close(fd, er => er ? this.emit('error', er) : this.emit('close'))
+ }
+ }
+
+ [_onerror] (er) {
+ this[_reading] = true
+ this[_close]()
+ this.emit('error', er)
+ }
+
+ [_handleChunk] (br, buf) {
+ let ret = false
+ // no effect if infinite
+ this[_remain] -= br
+ if (br > 0)
+ ret = super.write(br < buf.length ? buf.slice(0, br) : buf)
+
+ if (br === 0 || this[_remain] <= 0) {
+ ret = false
+ this[_close]()
+ super.end()
+ }
+
+ return ret
+ }
+
+ emit (ev, data) {
+ switch (ev) {
+ case 'prefinish':
+ case 'finish':
+ break
+
+ case 'drain':
+ if (typeof this[_fd] === 'number')
+ this[_read]()
+ break
+
+ case 'error':
+ if (this[_errored])
+ return
+ this[_errored] = true
+ return super.emit(ev, data)
+
+ default:
+ return super.emit(ev, data)
+ }
+ }
+}
+
+class ReadStreamSync extends ReadStream {
+ [_open] () {
+ let threw = true
+ try {
+ this[_onopen](null, fs.openSync(this[_path], 'r'))
+ threw = false
+ } finally {
+ if (threw)
+ this[_close]()
+ }
+ }
+
+ [_read] () {
+ let threw = true
+ try {
+ if (!this[_reading]) {
+ this[_reading] = true
+ do {
+ const buf = this[_makeBuf]()
+ /* istanbul ignore next */
+ const br = buf.length === 0 ? 0
+ : fs.readSync(this[_fd], buf, 0, buf.length, null)
+ if (!this[_handleChunk](br, buf))
+ break
+ } while (true)
+ this[_reading] = false
+ }
+ threw = false
+ } finally {
+ if (threw)
+ this[_close]()
+ }
+ }
+
+ [_close] () {
+ if (this[_autoClose] && typeof this[_fd] === 'number') {
+ const fd = this[_fd]
+ this[_fd] = null
+ fs.closeSync(fd)
+ this.emit('close')
+ }
+ }
+}
+
+class WriteStream extends EE {
+ constructor (path, opt) {
+ opt = opt || {}
+ super(opt)
+ this.readable = false
+ this.writable = true
+ this[_errored] = false
+ this[_writing] = false
+ this[_ended] = false
+ this[_needDrain] = false
+ this[_queue] = []
+ this[_path] = path
+ this[_fd] = typeof opt.fd === 'number' ? opt.fd : null
+ this[_mode] = opt.mode === undefined ? 0o666 : opt.mode
+ this[_pos] = typeof opt.start === 'number' ? opt.start : null
+ this[_autoClose] = typeof opt.autoClose === 'boolean' ?
+ opt.autoClose : true
+
+ // truncating makes no sense when writing into the middle
+ const defaultFlag = this[_pos] !== null ? 'r+' : 'w'
+ this[_defaultFlag] = opt.flags === undefined
+ this[_flags] = this[_defaultFlag] ? defaultFlag : opt.flags
+
+ if (this[_fd] === null)
+ this[_open]()
+ }
+
+ emit (ev, data) {
+ if (ev === 'error') {
+ if (this[_errored])
+ return
+ this[_errored] = true
+ }
+ return super.emit(ev, data)
+ }
+
+
+ get fd () { return this[_fd] }
+ get path () { return this[_path] }
+
+ [_onerror] (er) {
+ this[_close]()
+ this[_writing] = true
+ this.emit('error', er)
+ }
+
+ [_open] () {
+ fs.open(this[_path], this[_flags], this[_mode],
+ (er, fd) => this[_onopen](er, fd))
+ }
+
+ [_onopen] (er, fd) {
+ if (this[_defaultFlag] &&
+ this[_flags] === 'r+' &&
+ er && er.code === 'ENOENT') {
+ this[_flags] = 'w'
+ this[_open]()
+ } else if (er)
+ this[_onerror](er)
+ else {
+ this[_fd] = fd
+ this.emit('open', fd)
+ this[_flush]()
+ }
+ }
+
+ end (buf, enc) {
+ if (buf)
+ this.write(buf, enc)
+
+ this[_ended] = true
+
+ // synthetic after-write logic, where drain/finish live
+ if (!this[_writing] && !this[_queue].length &&
+ typeof this[_fd] === 'number')
+ this[_onwrite](null, 0)
+ return this
+ }
+
+ write (buf, enc) {
+ if (typeof buf === 'string')
+ buf = Buffer.from(buf, enc)
+
+ if (this[_ended]) {
+ this.emit('error', new Error('write() after end()'))
+ return false
+ }
+
+ if (this[_fd] === null || this[_writing] || this[_queue].length) {
+ this[_queue].push(buf)
+ this[_needDrain] = true
+ return false
+ }
+
+ this[_writing] = true
+ this[_write](buf)
+ return true
+ }
+
+ [_write] (buf) {
+ fs.write(this[_fd], buf, 0, buf.length, this[_pos], (er, bw) =>
+ this[_onwrite](er, bw))
+ }
+
+ [_onwrite] (er, bw) {
+ if (er)
+ this[_onerror](er)
+ else {
+ if (this[_pos] !== null)
+ this[_pos] += bw
+ if (this[_queue].length)
+ this[_flush]()
+ else {
+ this[_writing] = false
+
+ if (this[_ended] && !this[_finished]) {
+ this[_finished] = true
+ this[_close]()
+ this.emit('finish')
+ } else if (this[_needDrain]) {
+ this[_needDrain] = false
+ this.emit('drain')
+ }
+ }
+ }
+ }
+
+ [_flush] () {
+ if (this[_queue].length === 0) {
+ if (this[_ended])
+ this[_onwrite](null, 0)
+ } else if (this[_queue].length === 1)
+ this[_write](this[_queue].pop())
+ else {
+ const iovec = this[_queue]
+ this[_queue] = []
+ writev(this[_fd], iovec, this[_pos],
+ (er, bw) => this[_onwrite](er, bw))
+ }
+ }
+
+ [_close] () {
+ if (this[_autoClose] && typeof this[_fd] === 'number') {
+ const fd = this[_fd]
+ this[_fd] = null
+ fs.close(fd, er => er ? this.emit('error', er) : this.emit('close'))
+ }
+ }
+}
+
+class WriteStreamSync extends WriteStream {
+ [_open] () {
+ let fd
+ // only wrap in a try{} block if we know we'll retry, to avoid
+ // the rethrow obscuring the error's source frame in most cases.
+ if (this[_defaultFlag] && this[_flags] === 'r+') {
+ try {
+ fd = fs.openSync(this[_path], this[_flags], this[_mode])
+ } catch (er) {
+ if (er.code === 'ENOENT') {
+ this[_flags] = 'w'
+ return this[_open]()
+ } else
+ throw er
+ }
+ } else
+ fd = fs.openSync(this[_path], this[_flags], this[_mode])
+
+ this[_onopen](null, fd)
+ }
+
+ [_close] () {
+ if (this[_autoClose] && typeof this[_fd] === 'number') {
+ const fd = this[_fd]
+ this[_fd] = null
+ fs.closeSync(fd)
+ this.emit('close')
+ }
+ }
+
+ [_write] (buf) {
+ // throw the original, but try to close if it fails
+ let threw = true
+ try {
+ this[_onwrite](null,
+ fs.writeSync(this[_fd], buf, 0, buf.length, this[_pos]))
+ threw = false
+ } finally {
+ if (threw)
+ try { this[_close]() } catch (_) {}
+ }
+ }
+}
+
+exports.ReadStream = ReadStream
+exports.ReadStreamSync = ReadStreamSync
+
+exports.WriteStream = WriteStream
+exports.WriteStreamSync = WriteStreamSync
diff --git a/node_modules/cacache/node_modules/fs-minipass/package.json b/node_modules/cacache/node_modules/fs-minipass/package.json
new file mode 100644
index 000000000..c36a7b5da
--- /dev/null
+++ b/node_modules/cacache/node_modules/fs-minipass/package.json
@@ -0,0 +1,69 @@
+{
+ "_from": "fs-minipass@^2.0.0",
+ "_id": "fs-minipass@2.1.0",
+ "_inBundle": false,
+ "_integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==",
+ "_location": "/cacache/fs-minipass",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "fs-minipass@^2.0.0",
+ "name": "fs-minipass",
+ "escapedName": "fs-minipass",
+ "rawSpec": "^2.0.0",
+ "saveSpec": null,
+ "fetchSpec": "^2.0.0"
+ },
+ "_requiredBy": [
+ "/cacache",
+ "/cacache/tar"
+ ],
+ "_resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz",
+ "_shasum": "7f5036fdbf12c63c169190cbe4199c852271f9fb",
+ "_spec": "fs-minipass@^2.0.0",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/cacache",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/npm/fs-minipass/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {
+ "minipass": "^3.0.0"
+ },
+ "deprecated": false,
+ "description": "fs read and write streams based on minipass",
+ "devDependencies": {
+ "mutate-fs": "^2.0.1",
+ "tap": "^14.6.4"
+ },
+ "engines": {
+ "node": ">= 8"
+ },
+ "files": [
+ "index.js"
+ ],
+ "homepage": "https://github.com/npm/fs-minipass#readme",
+ "keywords": [],
+ "license": "ISC",
+ "main": "index.js",
+ "name": "fs-minipass",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/npm/fs-minipass.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --follow-tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap"
+ },
+ "tap": {
+ "check-coverage": true
+ },
+ "version": "2.1.0"
+}
diff --git a/node_modules/cacache/node_modules/minipass/LICENSE b/node_modules/cacache/node_modules/minipass/LICENSE
new file mode 100644
index 000000000..20a476254
--- /dev/null
+++ b/node_modules/cacache/node_modules/minipass/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) npm, Inc. and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/cacache/node_modules/minipass/README.md b/node_modules/cacache/node_modules/minipass/README.md
new file mode 100644
index 000000000..32ace2fb9
--- /dev/null
+++ b/node_modules/cacache/node_modules/minipass/README.md
@@ -0,0 +1,606 @@
+# minipass
+
+A _very_ minimal implementation of a [PassThrough
+stream](https://nodejs.org/api/stream.html#stream_class_stream_passthrough)
+
+[It's very
+fast](https://docs.google.com/spreadsheets/d/1oObKSrVwLX_7Ut4Z6g3fZW-AX1j1-k6w-cDsrkaSbHM/edit#gid=0)
+for objects, strings, and buffers.
+
+Supports pipe()ing (including multi-pipe() and backpressure transmission),
+buffering data until either a `data` event handler or `pipe()` is added (so
+you don't lose the first chunk), and most other cases where PassThrough is
+a good idea.
+
+There is a `read()` method, but it's much more efficient to consume data
+from this stream via `'data'` events or by calling `pipe()` into some other
+stream. Calling `read()` requires the buffer to be flattened in some
+cases, which requires copying memory.
+
+There is also no `unpipe()` method. Once you start piping, there is no
+stopping it!
+
+If you set `objectMode: true` in the options, then whatever is written will
+be emitted. Otherwise, it'll do a minimal amount of Buffer copying to
+ensure proper Streams semantics when `read(n)` is called.
+
+`objectMode` can also be set by doing `stream.objectMode = true`, or by
+writing any non-string/non-buffer data. `objectMode` cannot be set to
+false once it is set.
+
+This is not a `through` or `through2` stream. It doesn't transform the
+data, it just passes it right through. If you want to transform the data,
+extend the class, and override the `write()` method. Once you're done
+transforming the data however you want, call `super.write()` with the
+transform output.
+
+For some examples of streams that extend Minipass in various ways, check
+out:
+
+- [minizlib](http://npm.im/minizlib)
+- [fs-minipass](http://npm.im/fs-minipass)
+- [tar](http://npm.im/tar)
+- [minipass-collect](http://npm.im/minipass-collect)
+- [minipass-flush](http://npm.im/minipass-flush)
+- [minipass-pipeline](http://npm.im/minipass-pipeline)
+- [tap](http://npm.im/tap)
+- [tap-parser](http://npm.im/tap-parser)
+- [treport](http://npm.im/treport)
+- [minipass-fetch](http://npm.im/minipass-fetch)
+
+## Differences from Node.js Streams
+
+There are several things that make Minipass streams different from (and in
+some ways superior to) Node.js core streams.
+
+Please read these caveats if you are familiar with node-core streams and
+intend to use Minipass streams in your programs.
+
+### Timing
+
+Minipass streams are designed to support synchronous use-cases. Thus, data
+is emitted as soon as it is available, always. It is buffered until read,
+but no longer. Another way to look at it is that Minipass streams are
+exactly as synchronous as the logic that writes into them.
+
+This can be surprising if your code relies on `PassThrough.write()` always
+providing data on the next tick rather than the current one, or being able
+to call `resume()` and not have the entire buffer disappear immediately.
+
+However, without this synchronicity guarantee, there would be no way for
+Minipass to achieve the speeds it does, or support the synchronous use
+cases that it does. Simply put, waiting takes time.
+
+This non-deferring approach makes Minipass streams much easier to reason
+about, especially in the context of Promises and other flow-control
+mechanisms.
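+
+As a small illustration (not part of the original README), a write with a
+`'data'` listener already attached is delivered in the same tick:
+
+```js
+const Minipass = require('minipass')
+const mp = new Minipass({ encoding: 'utf8' })
+
+mp.on('data', chunk => console.log('got:', chunk))
+mp.write('hello') // logs 'got: hello' synchronously, before write() returns
+```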
+
+### No High/Low Water Marks
+
+Node.js core streams will optimistically fill up a buffer, returning `true`
+on all writes until the limit is hit, even if the data has nowhere to go.
+Then, they will not attempt to draw more data in until the buffer size dips
+below a minimum value.
+
+Minipass streams are much simpler. The `write()` method will return `true`
+if the data has somewhere to go (which is to say, given the timing
+guarantees, that the data is already there by the time `write()` returns).
+
+If the data has nowhere to go, then `write()` returns false, and the data
+sits in a buffer, to be drained out immediately as soon as anyone consumes
+it.
+
+### Hazards of Buffering (or: Why Minipass Is So Fast)
+
+Since data written to a Minipass stream is immediately written all the way
+through the pipeline, and `write()` always returns true/false based on
+whether the data was fully flushed, backpressure is communicated
+immediately to the upstream caller. This minimizes buffering.
+
+Consider this case:
+
+```js
+const {PassThrough} = require('stream')
+const p1 = new PassThrough({ highWaterMark: 1024 })
+const p2 = new PassThrough({ highWaterMark: 1024 })
+const p3 = new PassThrough({ highWaterMark: 1024 })
+const p4 = new PassThrough({ highWaterMark: 1024 })
+
+p1.pipe(p2).pipe(p3).pipe(p4)
+p4.on('data', () => console.log('made it through'))
+
+// this returns false and buffers, then writes to p2 on next tick (1)
+// p2 returns false and buffers, pausing p1, then writes to p3 on next tick (2)
+// p3 returns false and buffers, pausing p2, then writes to p4 on next tick (3)
+// p4 returns false and buffers, pausing p3, then emits 'data' and 'drain'
+// on next tick (4)
+// p3 sees p4's 'drain' event, and calls resume(), emitting 'resume' and
+// 'drain' on next tick (5)
+// p2 sees p3's 'drain', calls resume(), emits 'resume' and 'drain' on next tick (6)
+// p1 sees p2's 'drain', calls resume(), emits 'resume' and 'drain' on next
+// tick (7)
+
+p1.write(Buffer.alloc(2048)) // returns false
+```
+
+Along the way, the data was buffered and deferred at each stage, and
+multiple event deferrals happened, for an unblocked pipeline where it was
+perfectly safe to write all the way through!
+
+Furthermore, setting a `highWaterMark` of `1024` might lead someone reading
+the code to think an advisory maximum of 1KiB is being set for the
+pipeline. However, the actual advisory buffering level is the _sum_ of
+`highWaterMark` values, since each one has its own bucket.
+
+Consider the Minipass case:
+
+```js
+const m1 = new Minipass()
+const m2 = new Minipass()
+const m3 = new Minipass()
+const m4 = new Minipass()
+
+m1.pipe(m2).pipe(m3).pipe(m4)
+m4.on('data', () => console.log('made it through'))
+
+// m1 is flowing, so it writes the data to m2 immediately
+// m2 is flowing, so it writes the data to m3 immediately
+// m3 is flowing, so it writes the data to m4 immediately
+// m4 is flowing, so it fires the 'data' event immediately, returns true
+// m4's write returned true, so m3 is still flowing, returns true
+// m3's write returned true, so m2 is still flowing, returns true
+// m2's write returned true, so m1 is still flowing, returns true
+// No event deferrals or buffering along the way!
+
+m1.write(Buffer.alloc(2048)) // returns true
+```
+
+It is extremely unlikely that you _don't_ want to buffer any data written,
+or _ever_ buffer data that can be flushed all the way through. Neither
+node-core streams nor Minipass ever fail to buffer written data, but
+node-core streams do a lot of unnecessary buffering and pausing.
+
+As always, the faster implementation is the one that does less stuff and
+waits less time to do it.
+
+### Immediately emit `end` for empty streams (when not paused)
+
+If a stream is not paused, and `end()` is called before writing any data
+into it, then it will emit `end` immediately.
+
+If you have logic that occurs on the `end` event which you don't want to
+potentially happen immediately (for example, closing file descriptors,
+moving on to the next entry in an archive parse stream, etc.) then be sure
+to call `stream.pause()` on creation, and then `stream.resume()` once you
+are ready to respond to the `end` event.
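+
+A minimal sketch of that pattern (not from the original README):
+
+```js
+const Minipass = require('minipass')
+const mp = new Minipass()
+mp.pause() // prevent an immediate 'end' while we wire things up
+
+mp.on('end', () => console.log('safe to clean up now'))
+mp.end()    // no data written; 'end' is deferred because we are paused
+mp.resume() // now the 'end' event fires
+```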
+
+### Emit `end` When Asked
+
+One hazard of immediately emitting `'end'` is that you may not yet have had
+a chance to add a listener. In order to avoid this hazard, Minipass
+streams safely re-emit the `'end'` event if a new listener is added after
+`'end'` has been emitted.
+
+Ie, if you do `stream.on('end', someFunction)`, and the stream has already
+emitted `end`, then it will call the handler right away. (You can think of
+this somewhat like attaching a new `.then(fn)` to a previously-resolved
+Promise.)
+
+To avoid calling handlers that do not expect multiple `end` events, all
+listeners are removed from the `'end'` event whenever it is emitted.
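+
+For example (a sketch, not from the original text), a listener attached
+after the fact still fires, and fires only once:
+
+```js
+const Minipass = require('minipass')
+const mp = new Minipass()
+mp.end() // empty, unpaused stream: 'end' is emitted immediately
+
+// added late, but still called right away, and only once
+mp.on('end', () => console.log('ended'))
+```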
+
+### Impact of "immediate flow" on Tee-streams
+
+A "tee stream" is a stream piping to multiple destinations:
+
+```js
+const tee = new Minipass()
+tee.pipe(dest1)
+tee.pipe(dest2)
+tee.write('foo') // goes to both destinations
+```
+
+Since Minipass streams _immediately_ process any pending data through the
+pipeline when a new pipe destination is added, this can have surprising
+effects, especially when a stream comes in from some other function and may
+or may not have data in its buffer.
+
+```js
+// WARNING! WILL LOSE DATA!
+const src = new Minipass()
+src.write('foo')
+src.pipe(dest1) // 'foo' chunk flows to dest1 immediately, and is gone
+src.pipe(dest2) // gets nothing!
+```
+
+The solution is to create a dedicated tee-stream junction that pipes to
+both locations, and then pipe to _that_ instead.
+
+```js
+// Safe example: tee to both places
+const src = new Minipass()
+src.write('foo')
+const tee = new Minipass()
+tee.pipe(dest1)
+tee.pipe(dest2)
+src.pipe(tee) // tee gets 'foo', pipes to both locations
+```
+
+The same caveat applies to `on('data')` event listeners. The first one
+added will _immediately_ receive all of the data, leaving nothing for the
+second:
+
+```js
+// WARNING! WILL LOSE DATA!
+const src = new Minipass()
+src.write('foo')
+src.on('data', handler1) // receives 'foo' right away
+src.on('data', handler2) // nothing to see here!
+```
+
+A dedicated tee-stream can be used in this case as well:
+
+```js
+// Safe example: tee to both data handlers
+const src = new Minipass()
+src.write('foo')
+const tee = new Minipass()
+tee.on('data', handler1)
+tee.on('data', handler2)
+src.pipe(tee)
+```
+
+## USAGE
+
+It's a stream! Use it like a stream and it'll most likely do what you
+want.
+
+```js
+const Minipass = require('minipass')
+const mp = new Minipass(options) // optional: { encoding, objectMode }
+mp.write('foo')
+mp.pipe(someOtherStream)
+mp.end('bar')
+```
+
+### OPTIONS
+
+* `encoding` How would you like the data coming _out_ of the stream to be
+ encoded? Accepts any values that can be passed to `Buffer.toString()`.
+* `objectMode` Emit data exactly as it comes in. This will be flipped on
+ by default if you write() something other than a string or Buffer at any
+ point. Setting `objectMode: true` will prevent setting any encoding
+ value.
+
+### API
+
+Implements the user-facing portions of Node.js's `Readable` and `Writable`
+streams.
+
+### Methods
+
+* `write(chunk, [encoding], [callback])` - Put data in. (Note that, in the
+ base Minipass class, the same data will come out.) Returns `false` if
+ the stream will buffer the next write, or true if it's still in "flowing"
+ mode.
+* `end([chunk, [encoding]], [callback])` - Signal that you have no more
+ data to write. This will queue an `end` event to be fired when all the
+ data has been consumed.
+* `setEncoding(encoding)` - Set the encoding for data coming out of the
+  stream.  This can only be done once.
+* `pause()` - No more data for a while, please. This also prevents `end`
+ from being emitted for empty streams until the stream is resumed.
+* `resume()` - Resume the stream. If there's data in the buffer, it is all
+ discarded. Any buffered events are immediately emitted.
+* `pipe(dest)` - Send all output to the stream provided. There is no way
+ to unpipe. When data is emitted, it is immediately written to any and
+ all pipe destinations.
+* `on(ev, fn)`, `emit(ev, fn)` - Minipass streams are EventEmitters. Some
+ events are given special treatment, however. (See below under "events".)
+* `promise()` - Returns a Promise that resolves when the stream emits
+ `end`, or rejects if the stream emits `error`.
+* `collect()` - Return a Promise that resolves on `end` with an array
+ containing each chunk of data that was emitted, or rejects if the stream
+ emits `error`. Note that this consumes the stream data.
+* `concat()` - Same as `collect()`, but concatenates the data into a single
+ Buffer object. Will reject the returned promise if the stream is in
+ objectMode, or if it goes into objectMode by the end of the data.
+* `read(n)` - Consume `n` bytes of data out of the buffer. If `n` is not
+ provided, then consume all of it. If `n` bytes are not available, then
+ it returns null. **Note** consuming streams in this way is less
+ efficient, and can lead to unnecessary Buffer copying.
+* `destroy([er])` - Destroy the stream. If an error is provided, then an
+ `'error'` event is emitted. If the stream has a `close()` method, and
+ has not emitted a `'close'` event yet, then `stream.close()` will be
+ called. Any Promises returned by `.promise()`, `.collect()` or
+ `.concat()` will be rejected. After being destroyed, writing to the
+ stream will emit an error. No more data will be emitted if the stream is
+ destroyed, even if it was previously buffered.
+
+### Properties
+
+* `bufferLength` Read-only. Total number of bytes buffered, or in the case
+ of objectMode, the total number of objects.
+* `encoding` The encoding that has been set. (Setting this is equivalent
+ to calling `setEncoding(enc)` and has the same prohibition against
+ setting multiple times.)
+* `flowing` Read-only. Boolean indicating whether a chunk written to the
+ stream will be immediately emitted.
+* `emittedEnd` Read-only. Boolean indicating whether the end-ish events
+ (ie, `end`, `prefinish`, `finish`) have been emitted. Note that
+  listening on any end-ish event will immediately re-emit it if it has
+ already been emitted.
+* `writable` Whether the stream is writable. Default `true`. Set to
+  `false` when `end()` is called.
+* `readable` Whether the stream is readable. Default `true`.
+* `buffer` A [yallist](http://npm.im/yallist) linked list of chunks written
+ to the stream that have not yet been emitted. (It's probably a bad idea
+ to mess with this.)
+* `pipes` A [yallist](http://npm.im/yallist) linked list of streams that
+ this stream is piping into. (It's probably a bad idea to mess with
+ this.)
+* `destroyed` A getter that indicates whether the stream was destroyed.
+* `paused` True if the stream has been explicitly paused, otherwise false.
+* `objectMode` Indicates whether the stream is in `objectMode`. Once set
+ to `true`, it cannot be set to `false`.
+
+### Events
+
+* `data` Emitted when there's data to read. Argument is the data to read.
+ This is never emitted while not flowing. If a listener is attached, that
+ will resume the stream.
+* `end` Emitted when there's no more data to read. This will be emitted
+ immediately for empty streams when `end()` is called. If a listener is
+ attached, and `end` was already emitted, then it will be emitted again.
+ All listeners are removed when `end` is emitted.
+* `prefinish` An end-ish event that follows the same logic as `end` and is
+ emitted in the same conditions where `end` is emitted. Emitted after
+ `'end'`.
+* `finish` An end-ish event that follows the same logic as `end` and is
+ emitted in the same conditions where `end` is emitted. Emitted after
+ `'prefinish'`.
+* `close` An indication that an underlying resource has been released.
+ Minipass does not emit this event, but will defer it until after `end`
+ has been emitted, since it throws off some stream libraries otherwise.
+* `drain` Emitted when the internal buffer empties, and it is again
+ suitable to `write()` into the stream.
+* `readable` Emitted when data is buffered and ready to be read by a
+ consumer.
+* `resume` Emitted when stream changes state from buffering to flowing
+ mode. (Ie, when `resume` is called, `pipe` is called, or a `data` event
+ listener is added.)
+
+### Static Methods
+
+* `Minipass.isStream(stream)` Returns `true` if the argument is a stream,
+ and false otherwise. To be considered a stream, the object must be
+ either an instance of Minipass, or an EventEmitter that has either a
+ `pipe()` method, or both `write()` and `end()` methods. (Pretty much any
+ stream in node-land will return `true` for this.)
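+
+A quick sketch of what that check accepts (the sample objects are purely
+illustrative):
+
+```js
+const Minipass = require('minipass')
+
+Minipass.isStream(new Minipass())              // true
+Minipass.isStream(process.stdout)              // true, a node-core stream
+Minipass.isStream(new (require('events'))())   // false: no pipe, write, or end
+Minipass.isStream({ write () {}, end () {} })  // false: not an EventEmitter
+```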
+
+## EXAMPLES
+
+Here are some examples of things you can do with Minipass streams.
+
+### simple "are you done yet" promise
+
+```js
+mp.promise().then(() => {
+ // stream is finished
+}, er => {
+ // stream emitted an error
+})
+```
+
+### collecting
+
+```js
+mp.collect().then(all => {
+ // all is an array of all the data emitted
+  // encoding is supported in this case, so
+  // the result will be a collection of strings if
+  // an encoding is specified, or buffers/objects if not.
+ //
+ // In an async function, you may do
+ // const data = await stream.collect()
+})
+```
+
+### collecting into a single blob
+
+This is a bit slower because it concatenates the data into one chunk for
+you, but if you're going to do it yourself anyway, it's convenient this
+way:
+
+```js
+mp.concat().then(onebigchunk => {
+ // onebigchunk is a string if the stream
+ // had an encoding set, or a buffer otherwise.
+})
+```
+
+### iteration
+
+You can iterate over streams synchronously or asynchronously in platforms
+that support it.
+
+Synchronous iteration will end when the currently available data is
+consumed, even if the `end` event has not been reached. In string and
+buffer mode, the data is concatenated, so unless multiple writes are
+occurring in the same tick as the `read()`, sync iteration loops will
+generally only have a single iteration.
+
+To consume chunks in this way exactly as they have been written, with no
+flattening, create the stream with the `{ objectMode: true }` option.
+
+```js
+const mp = new Minipass({ objectMode: true })
+mp.write('a')
+mp.write('b')
+for (let letter of mp) {
+ console.log(letter) // a, b
+}
+mp.write('c')
+mp.write('d')
+for (let letter of mp) {
+ console.log(letter) // c, d
+}
+mp.write('e')
+mp.end()
+for (let letter of mp) {
+ console.log(letter) // e
+}
+for (let letter of mp) {
+ console.log(letter) // nothing
+}
+```
+
+Asynchronous iteration will continue until the end event is reached,
+consuming all of the data.
+
+```js
+const mp = new Minipass({ encoding: 'utf8' })
+
+// some source of some data
+let i = 5
+const inter = setInterval(() => {
+ if (i --> 0)
+ mp.write(Buffer.from('foo\n', 'utf8'))
+ else {
+ mp.end()
+ clearInterval(inter)
+ }
+}, 100)
+
+// consume the data with asynchronous iteration
+async function consume () {
+ for await (let chunk of mp) {
+ console.log(chunk)
+ }
+ return 'ok'
+}
+
+consume().then(res => console.log(res))
+// logs `foo\n` 5 times, and then `ok`
+```
+
+### subclass that `console.log()`s everything written into it
+
+```js
+class Logger extends Minipass {
+ write (chunk, encoding, callback) {
+ console.log('WRITE', chunk, encoding)
+ return super.write(chunk, encoding, callback)
+ }
+ end (chunk, encoding, callback) {
+ console.log('END', chunk, encoding)
+ return super.end(chunk, encoding, callback)
+ }
+}
+
+someSource.pipe(new Logger()).pipe(someDest)
+```
+
+### same thing, but using an inline anonymous class
+
+```js
+// js classes are fun
+someSource
+ .pipe(new (class extends Minipass {
+ emit (ev, ...data) {
+ // let's also log events, because debugging some weird thing
+ console.log('EMIT', ev)
+ return super.emit(ev, ...data)
+ }
+ write (chunk, encoding, callback) {
+ console.log('WRITE', chunk, encoding)
+ return super.write(chunk, encoding, callback)
+ }
+ end (chunk, encoding, callback) {
+ console.log('END', chunk, encoding)
+ return super.end(chunk, encoding, callback)
+ }
+ }))
+ .pipe(someDest)
+```
+
+### subclass that defers 'end' for some reason
+
+```js
+class SlowEnd extends Minipass {
+ emit (ev, ...args) {
+ if (ev === 'end') {
+ console.log('going to end, hold on a sec')
+ setTimeout(() => {
+ console.log('ok, ready to end now')
+ super.emit('end', ...args)
+ }, 100)
+ } else {
+ return super.emit(ev, ...args)
+ }
+ }
+}
+```
+
+### transform that creates newline-delimited JSON
+
+```js
+class NDJSONEncode extends Minipass {
+ write (obj, cb) {
+ try {
+ // JSON.stringify can throw, emit an error on that
+ return super.write(JSON.stringify(obj) + '\n', 'utf8', cb)
+ } catch (er) {
+ this.emit('error', er)
+ }
+ }
+ end (obj, cb) {
+ if (typeof obj === 'function') {
+ cb = obj
+ obj = undefined
+ }
+ if (obj !== undefined) {
+ this.write(obj)
+ }
+ return super.end(cb)
+ }
+}
+```
+
+### transform that parses newline-delimited JSON
+
+```js
+class NDJSONDecode extends Minipass {
+ constructor (options) {
+ // always be in object mode, as far as Minipass is concerned
+ super({ objectMode: true })
+ this._jsonBuffer = ''
+ }
+  write (chunk, encoding, cb) {
+    if (typeof chunk === 'string' &&
+        typeof encoding === 'string' &&
+        encoding !== 'utf8') {
+      chunk = Buffer.from(chunk, encoding).toString()
+    } else if (Buffer.isBuffer(chunk)) {
+      chunk = chunk.toString()
+    }
+    if (typeof encoding === 'function') {
+      cb = encoding
+    }
+    const jsonData = (this._jsonBuffer + chunk).split('\n')
+    this._jsonBuffer = jsonData.pop()
+    for (let i = 0; i < jsonData.length; i++) {
+      let parsed
+      try {
+        // JSON.parse can throw; emit an error on bad input
+        parsed = JSON.parse(jsonData[i])
+      } catch (er) {
+        this.emit('error', er)
+        continue
+      }
+      super.write(parsed)
+    }
+    if (cb)
+      cb()
+  }
+}
+```
diff --git a/node_modules/cacache/node_modules/minipass/index.js b/node_modules/cacache/node_modules/minipass/index.js
new file mode 100644
index 000000000..55ea0f3dd
--- /dev/null
+++ b/node_modules/cacache/node_modules/minipass/index.js
@@ -0,0 +1,538 @@
+'use strict'
+const EE = require('events')
+const Stream = require('stream')
+const Yallist = require('yallist')
+const SD = require('string_decoder').StringDecoder
+
+const EOF = Symbol('EOF')
+const MAYBE_EMIT_END = Symbol('maybeEmitEnd')
+const EMITTED_END = Symbol('emittedEnd')
+const EMITTING_END = Symbol('emittingEnd')
+const CLOSED = Symbol('closed')
+const READ = Symbol('read')
+const FLUSH = Symbol('flush')
+const FLUSHCHUNK = Symbol('flushChunk')
+const ENCODING = Symbol('encoding')
+const DECODER = Symbol('decoder')
+const FLOWING = Symbol('flowing')
+const PAUSED = Symbol('paused')
+const RESUME = Symbol('resume')
+const BUFFERLENGTH = Symbol('bufferLength')
+const BUFFERPUSH = Symbol('bufferPush')
+const BUFFERSHIFT = Symbol('bufferShift')
+const OBJECTMODE = Symbol('objectMode')
+const DESTROYED = Symbol('destroyed')
+
+// TODO remove when Node v8 support drops
+const doIter = global._MP_NO_ITERATOR_SYMBOLS_ !== '1'
+const ASYNCITERATOR = doIter && Symbol.asyncIterator
+ || Symbol('asyncIterator not implemented')
+const ITERATOR = doIter && Symbol.iterator
+ || Symbol('iterator not implemented')
+
+// events that mean 'the stream is over'
+// these are treated specially, and re-emitted
+// if they are listened for after emitting.
+const isEndish = ev =>
+ ev === 'end' ||
+ ev === 'finish' ||
+ ev === 'prefinish'
+
+const isArrayBuffer = b => b instanceof ArrayBuffer ||
+ typeof b === 'object' &&
+ b.constructor &&
+ b.constructor.name === 'ArrayBuffer' &&
+ b.byteLength >= 0
+
+const isArrayBufferView = b => !Buffer.isBuffer(b) && ArrayBuffer.isView(b)
+
+module.exports = class Minipass extends Stream {
+ constructor (options) {
+ super()
+ this[FLOWING] = false
+ // whether we're explicitly paused
+ this[PAUSED] = false
+ this.pipes = new Yallist()
+ this.buffer = new Yallist()
+ this[OBJECTMODE] = options && options.objectMode || false
+ if (this[OBJECTMODE])
+ this[ENCODING] = null
+ else
+ this[ENCODING] = options && options.encoding || null
+ if (this[ENCODING] === 'buffer')
+ this[ENCODING] = null
+ this[DECODER] = this[ENCODING] ? new SD(this[ENCODING]) : null
+ this[EOF] = false
+ this[EMITTED_END] = false
+ this[EMITTING_END] = false
+ this[CLOSED] = false
+ this.writable = true
+ this.readable = true
+ this[BUFFERLENGTH] = 0
+ this[DESTROYED] = false
+ }
+
+ get bufferLength () { return this[BUFFERLENGTH] }
+
+ get encoding () { return this[ENCODING] }
+ set encoding (enc) {
+ if (this[OBJECTMODE])
+ throw new Error('cannot set encoding in objectMode')
+
+ if (this[ENCODING] && enc !== this[ENCODING] &&
+ (this[DECODER] && this[DECODER].lastNeed || this[BUFFERLENGTH]))
+ throw new Error('cannot change encoding')
+
+ if (this[ENCODING] !== enc) {
+ this[DECODER] = enc ? new SD(enc) : null
+ if (this.buffer.length)
+ this.buffer = this.buffer.map(chunk => this[DECODER].write(chunk))
+ }
+
+ this[ENCODING] = enc
+ }
+
+ setEncoding (enc) {
+ this.encoding = enc
+ }
+
+ get objectMode () { return this[OBJECTMODE] }
+ set objectMode (ॐ ) { this[OBJECTMODE] = this[OBJECTMODE] || !!ॐ }
+
+ write (chunk, encoding, cb) {
+ if (this[EOF])
+ throw new Error('write after end')
+
+ if (this[DESTROYED]) {
+ this.emit('error', Object.assign(
+ new Error('Cannot call write after a stream was destroyed'),
+ { code: 'ERR_STREAM_DESTROYED' }
+ ))
+ return true
+ }
+
+ if (typeof encoding === 'function')
+ cb = encoding, encoding = 'utf8'
+
+ if (!encoding)
+ encoding = 'utf8'
+
+ // convert array buffers and typed array views into buffers
+ // at some point in the future, we may want to do the opposite!
+ // leave strings and buffers as-is
+ // anything else switches us into object mode
+ if (!this[OBJECTMODE] && !Buffer.isBuffer(chunk)) {
+ if (isArrayBufferView(chunk))
+ chunk = Buffer.from(chunk.buffer, chunk.byteOffset, chunk.byteLength)
+ else if (isArrayBuffer(chunk))
+ chunk = Buffer.from(chunk)
+ else if (typeof chunk !== 'string')
+ // use the setter so we throw if we have encoding set
+ this.objectMode = true
+ }
+
+ // this ensures at this point that the chunk is a buffer or string
+ // don't buffer it up or send it to the decoder
+ if (!this.objectMode && !chunk.length) {
+ const ret = this.flowing
+ if (this[BUFFERLENGTH] !== 0)
+ this.emit('readable')
+ if (cb)
+ cb()
+ return ret
+ }
+
+ // fast-path writing strings of same encoding to a stream with
+ // an empty buffer, skipping the buffer/decoder dance
+ if (typeof chunk === 'string' && !this[OBJECTMODE] &&
+ // unless it is a string already ready for us to use
+ !(encoding === this[ENCODING] && !this[DECODER].lastNeed)) {
+ chunk = Buffer.from(chunk, encoding)
+ }
+
+ if (Buffer.isBuffer(chunk) && this[ENCODING])
+ chunk = this[DECODER].write(chunk)
+
+ try {
+ return this.flowing
+ ? (this.emit('data', chunk), this.flowing)
+ : (this[BUFFERPUSH](chunk), false)
+ } finally {
+ if (this[BUFFERLENGTH] !== 0)
+ this.emit('readable')
+ if (cb)
+ cb()
+ }
+ }
+
+ read (n) {
+ if (this[DESTROYED])
+ return null
+
+ try {
+ if (this[BUFFERLENGTH] === 0 || n === 0 || n > this[BUFFERLENGTH])
+ return null
+
+ if (this[OBJECTMODE])
+ n = null
+
+ if (this.buffer.length > 1 && !this[OBJECTMODE]) {
+ if (this.encoding)
+ this.buffer = new Yallist([
+ Array.from(this.buffer).join('')
+ ])
+ else
+ this.buffer = new Yallist([
+ Buffer.concat(Array.from(this.buffer), this[BUFFERLENGTH])
+ ])
+ }
+
+ return this[READ](n || null, this.buffer.head.value)
+ } finally {
+ this[MAYBE_EMIT_END]()
+ }
+ }
+
+ [READ] (n, chunk) {
+ if (n === chunk.length || n === null)
+ this[BUFFERSHIFT]()
+ else {
+ this.buffer.head.value = chunk.slice(n)
+ chunk = chunk.slice(0, n)
+ this[BUFFERLENGTH] -= n
+ }
+
+ this.emit('data', chunk)
+
+ if (!this.buffer.length && !this[EOF])
+ this.emit('drain')
+
+ return chunk
+ }
+
+ end (chunk, encoding, cb) {
+ if (typeof chunk === 'function')
+ cb = chunk, chunk = null
+ if (typeof encoding === 'function')
+ cb = encoding, encoding = 'utf8'
+ if (chunk)
+ this.write(chunk, encoding)
+ if (cb)
+ this.once('end', cb)
+ this[EOF] = true
+ this.writable = false
+
+ // if we haven't written anything, then go ahead and emit,
+ // even if we're not reading.
+ // we'll re-emit if a new 'end' listener is added anyway.
+ // This makes MP more suitable to write-only use cases.
+ if (this.flowing || !this[PAUSED])
+ this[MAYBE_EMIT_END]()
+ return this
+ }
+
+ // don't let the internal resume be overwritten
+ [RESUME] () {
+ if (this[DESTROYED])
+ return
+
+ this[PAUSED] = false
+ this[FLOWING] = true
+ this.emit('resume')
+ if (this.buffer.length)
+ this[FLUSH]()
+ else if (this[EOF])
+ this[MAYBE_EMIT_END]()
+ else
+ this.emit('drain')
+ }
+
+ resume () {
+ return this[RESUME]()
+ }
+
+ pause () {
+ this[FLOWING] = false
+ this[PAUSED] = true
+ }
+
+ get destroyed () {
+ return this[DESTROYED]
+ }
+
+ get flowing () {
+ return this[FLOWING]
+ }
+
+ get paused () {
+ return this[PAUSED]
+ }
+
+ [BUFFERPUSH] (chunk) {
+ if (this[OBJECTMODE])
+ this[BUFFERLENGTH] += 1
+ else
+ this[BUFFERLENGTH] += chunk.length
+ return this.buffer.push(chunk)
+ }
+
+ [BUFFERSHIFT] () {
+ if (this.buffer.length) {
+ if (this[OBJECTMODE])
+ this[BUFFERLENGTH] -= 1
+ else
+ this[BUFFERLENGTH] -= this.buffer.head.value.length
+ }
+ return this.buffer.shift()
+ }
+
+ [FLUSH] () {
+ do {} while (this[FLUSHCHUNK](this[BUFFERSHIFT]()))
+
+ if (!this.buffer.length && !this[EOF])
+ this.emit('drain')
+ }
+
+ [FLUSHCHUNK] (chunk) {
+ return chunk ? (this.emit('data', chunk), this.flowing) : false
+ }
+
+ pipe (dest, opts) {
+ if (this[DESTROYED])
+ return
+
+ const ended = this[EMITTED_END]
+ opts = opts || {}
+ if (dest === process.stdout || dest === process.stderr)
+ opts.end = false
+ else
+ opts.end = opts.end !== false
+
+ const p = { dest: dest, opts: opts, ondrain: _ => this[RESUME]() }
+ this.pipes.push(p)
+
+ dest.on('drain', p.ondrain)
+ this[RESUME]()
+ // piping an ended stream ends immediately
+ if (ended && p.opts.end)
+ p.dest.end()
+ return dest
+ }
+
+ addListener (ev, fn) {
+ return this.on(ev, fn)
+ }
+
+ on (ev, fn) {
+ try {
+ return super.on(ev, fn)
+ } finally {
+ if (ev === 'data' && !this.pipes.length && !this.flowing)
+ this[RESUME]()
+ else if (isEndish(ev) && this[EMITTED_END]) {
+ super.emit(ev)
+ this.removeAllListeners(ev)
+ }
+ }
+ }
+
+ get emittedEnd () {
+ return this[EMITTED_END]
+ }
+
+ [MAYBE_EMIT_END] () {
+ if (!this[EMITTING_END] &&
+ !this[EMITTED_END] &&
+ !this[DESTROYED] &&
+ this.buffer.length === 0 &&
+ this[EOF]) {
+ this[EMITTING_END] = true
+ this.emit('end')
+ this.emit('prefinish')
+ this.emit('finish')
+ if (this[CLOSED])
+ this.emit('close')
+ this[EMITTING_END] = false
+ }
+ }
+
+ emit (ev, data) {
+ // error and close are only events allowed after calling destroy()
+ if (ev !== 'error' && ev !== 'close' && ev !== DESTROYED && this[DESTROYED])
+ return
+ else if (ev === 'data') {
+ if (!data)
+ return
+
+ if (this.pipes.length)
+ this.pipes.forEach(p =>
+ p.dest.write(data) === false && this.pause())
+ } else if (ev === 'end') {
+ // only actual end gets this treatment
+ if (this[EMITTED_END] === true)
+ return
+
+ this[EMITTED_END] = true
+ this.readable = false
+
+ if (this[DECODER]) {
+ data = this[DECODER].end()
+ if (data) {
+ this.pipes.forEach(p => p.dest.write(data))
+ super.emit('data', data)
+ }
+ }
+
+ this.pipes.forEach(p => {
+ p.dest.removeListener('drain', p.ondrain)
+ if (p.opts.end)
+ p.dest.end()
+ })
+ } else if (ev === 'close') {
+ this[CLOSED] = true
+ // don't emit close before 'end' and 'finish'
+ if (!this[EMITTED_END] && !this[DESTROYED])
+ return
+ }
+
+ // TODO: replace with a spread operator when Node v4 support drops
+ const args = new Array(arguments.length)
+ args[0] = ev
+ args[1] = data
+ if (arguments.length > 2) {
+ for (let i = 2; i < arguments.length; i++) {
+ args[i] = arguments[i]
+ }
+ }
+
+ try {
+ return super.emit.apply(this, args)
+ } finally {
+ if (!isEndish(ev))
+ this[MAYBE_EMIT_END]()
+ else
+ this.removeAllListeners(ev)
+ }
+ }
+
+ // const all = await stream.collect()
+ collect () {
+ const buf = []
+ if (!this[OBJECTMODE])
+ buf.dataLength = 0
+ // set the promise first, in case an error is raised
+ // by triggering the flow here.
+ const p = this.promise()
+ this.on('data', c => {
+ buf.push(c)
+ if (!this[OBJECTMODE])
+ buf.dataLength += c.length
+ })
+ return p.then(() => buf)
+ }
+
+ // const data = await stream.concat()
+ concat () {
+ return this[OBJECTMODE]
+ ? Promise.reject(new Error('cannot concat in objectMode'))
+ : this.collect().then(buf =>
+ this[OBJECTMODE]
+ ? Promise.reject(new Error('cannot concat in objectMode'))
+ : this[ENCODING] ? buf.join('') : Buffer.concat(buf, buf.dataLength))
+ }
+
+ // stream.promise().then(() => done, er => emitted error)
+ promise () {
+ return new Promise((resolve, reject) => {
+ this.on(DESTROYED, () => reject(new Error('stream destroyed')))
+ this.on('end', () => resolve())
+ this.on('error', er => reject(er))
+ })
+ }
+
+ // for await (let chunk of stream)
+ [ASYNCITERATOR] () {
+ const next = () => {
+ const res = this.read()
+ if (res !== null)
+ return Promise.resolve({ done: false, value: res })
+
+ if (this[EOF])
+ return Promise.resolve({ done: true })
+
+ let resolve = null
+ let reject = null
+ const onerr = er => {
+ this.removeListener('data', ondata)
+ this.removeListener('end', onend)
+ reject(er)
+ }
+ const ondata = value => {
+ this.removeListener('error', onerr)
+ this.removeListener('end', onend)
+ this.pause()
+ resolve({ value: value, done: !!this[EOF] })
+ }
+ const onend = () => {
+ this.removeListener('error', onerr)
+ this.removeListener('data', ondata)
+ resolve({ done: true })
+ }
+ const ondestroy = () => onerr(new Error('stream destroyed'))
+ return new Promise((res, rej) => {
+ reject = rej
+ resolve = res
+ this.once(DESTROYED, ondestroy)
+ this.once('error', onerr)
+ this.once('end', onend)
+ this.once('data', ondata)
+ })
+ }
+
+ return { next }
+ }
+
+ // for (let chunk of stream)
+ [ITERATOR] () {
+ const next = () => {
+ const value = this.read()
+ const done = value === null
+ return { value, done }
+ }
+ return { next }
+ }
+
+ destroy (er) {
+ if (this[DESTROYED]) {
+ if (er)
+ this.emit('error', er)
+ else
+ this.emit(DESTROYED)
+ return this
+ }
+
+ this[DESTROYED] = true
+
+ // throw away all buffered data, it's never coming out
+ this.buffer = new Yallist()
+ this[BUFFERLENGTH] = 0
+
+ if (typeof this.close === 'function' && !this[CLOSED])
+ this.close()
+
+ if (er)
+ this.emit('error', er)
+ else // if no error to emit, still reject pending promises
+ this.emit(DESTROYED)
+
+ return this
+ }
+
+ static isStream (s) {
+ return !!s && (s instanceof Minipass || s instanceof Stream ||
+ s instanceof EE && (
+ typeof s.pipe === 'function' || // readable
+ (typeof s.write === 'function' && typeof s.end === 'function') // writable
+ ))
+ }
+}
diff --git a/node_modules/cacache/node_modules/minipass/package.json b/node_modules/cacache/node_modules/minipass/package.json
new file mode 100644
index 000000000..c1bb24213
--- /dev/null
+++ b/node_modules/cacache/node_modules/minipass/package.json
@@ -0,0 +1,75 @@
+{
+ "_from": "minipass@^3.0.0",
+ "_id": "minipass@3.1.1",
+ "_inBundle": false,
+ "_integrity": "sha512-UFqVihv6PQgwj8/yTGvl9kPz7xIAY+R5z6XYjRInD3Gk3qx6QGSD6zEcpeG4Dy/lQnv1J6zv8ejV90hyYIKf3w==",
+ "_location": "/cacache/minipass",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "minipass@^3.0.0",
+ "name": "minipass",
+ "escapedName": "minipass",
+ "rawSpec": "^3.0.0",
+ "saveSpec": null,
+ "fetchSpec": "^3.0.0"
+ },
+ "_requiredBy": [
+ "/cacache",
+ "/cacache/fs-minipass",
+ "/cacache/minizlib",
+ "/cacache/tar"
+ ],
+ "_resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.1.tgz",
+ "_shasum": "7607ce778472a185ad6d89082aa2070f79cedcd5",
+ "_spec": "minipass@^3.0.0",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/cacache",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/isaacs/minipass/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {
+ "yallist": "^4.0.0"
+ },
+ "deprecated": false,
+ "description": "minimal implementation of a PassThrough stream",
+ "devDependencies": {
+ "end-of-stream": "^1.4.0",
+ "tap": "^14.6.5",
+ "through2": "^2.0.3"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "files": [
+ "index.js"
+ ],
+ "homepage": "https://github.com/isaacs/minipass#readme",
+ "keywords": [
+ "passthrough",
+ "stream"
+ ],
+ "license": "ISC",
+ "main": "index.js",
+ "name": "minipass",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/isaacs/minipass.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --follow-tags",
+ "postversion": "npm publish --tag=next",
+ "preversion": "npm test",
+ "test": "tap"
+ },
+ "tap": {
+ "check-coverage": true
+ },
+ "version": "3.1.1"
+}
diff --git a/node_modules/cacache/node_modules/minizlib/LICENSE b/node_modules/cacache/node_modules/minizlib/LICENSE
new file mode 100644
index 000000000..ffce7383f
--- /dev/null
+++ b/node_modules/cacache/node_modules/minizlib/LICENSE
@@ -0,0 +1,26 @@
+Minizlib was created by Isaac Z. Schlueter.
+It is a derivative work of the Node.js project.
+
+"""
+Copyright Isaac Z. Schlueter and Contributors
+Copyright Node.js contributors. All rights reserved.
+Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+"""
diff --git a/node_modules/cacache/node_modules/minizlib/README.md b/node_modules/cacache/node_modules/minizlib/README.md
new file mode 100644
index 000000000..80e067ab3
--- /dev/null
+++ b/node_modules/cacache/node_modules/minizlib/README.md
@@ -0,0 +1,60 @@
+# minizlib
+
+A fast zlib stream built on [minipass](http://npm.im/minipass) and
+Node.js's zlib binding.
+
+This module was created to serve the needs of
+[node-tar](http://npm.im/tar) and
+[minipass-fetch](http://npm.im/minipass-fetch).
+
+Brotli is supported in versions of node with a Brotli binding.
+
+## How does this differ from the streams in `require('zlib')`?
+
+First, there are no convenience methods to compress or decompress a
+buffer. If you want those, use the built-in `zlib` module. This is
+only streams. That being said, Minipass streams make it fairly easy to
+use them as one-liners: `new zlib.Deflate().end(data).read()` will return
+the deflate compressed result.
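+
+A quick sketch of that one-liner pattern (the sample buffer is made up for
+illustration):
+
+```js
+const zlib = require('minizlib')
+const data = Buffer.from('hello, hello, hello, hello')
+
+// compress and decompress synchronously, without any callbacks
+const deflated = new zlib.Deflate().end(data).read()
+const inflated = new zlib.Inflate().end(deflated).read()
+console.log(inflated.toString()) // 'hello, hello, hello, hello'
+```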
+
+This module compresses and decompresses the data as fast as you feed
+it in. It is synchronous, and runs on the main process thread. Zlib
+and Brotli operations can be high CPU, but they're very fast, and doing it
+this way means much less bookkeeping and artificial deferral.
+
+Node's built-in zlib streams are built on top of `stream.Transform`.
+They do the maximally safe thing with respect to consistent
+asynchrony, buffering, and backpressure.
+
+See [Minipass](http://npm.im/minipass) for more on the differences between
+Node.js core streams and Minipass streams, and the convenience methods
+provided by that class.
+
+## Classes
+
+- Deflate
+- Inflate
+- Gzip
+- Gunzip
+- DeflateRaw
+- InflateRaw
+- Unzip
+- BrotliCompress (Node v10 and higher)
+- BrotliDecompress (Node v10 and higher)
+
+## USAGE
+
+```js
+const zlib = require('minizlib')
+const input = sourceOfCompressedData()
+const decode = new zlib.BrotliDecompress()
+const output = whereToWriteTheDecodedData()
+input.pipe(decode).pipe(output)
+```
+
+## REPRODUCIBLE BUILDS
+
+To create reproducible gzip compressed files across different operating
+systems, set `portable: true` in the options. This causes minizlib to set
+the `OS` indicator in byte 9 of the extended gzip header to `0xFF` for
+'unknown'.
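+
+For instance (a sketch based on the option described above):
+
+```js
+const zlib = require('minizlib')
+
+// identical input produces byte-identical gzip output across platforms
+const gzip = new zlib.Gzip({ portable: true })
+const compressed = gzip.end(Buffer.from('same bytes everywhere')).read()
+```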
diff --git a/node_modules/cacache/node_modules/minizlib/constants.js b/node_modules/cacache/node_modules/minizlib/constants.js
new file mode 100644
index 000000000..641ebc731
--- /dev/null
+++ b/node_modules/cacache/node_modules/minizlib/constants.js
@@ -0,0 +1,115 @@
+// Update with any zlib constants that are added or changed in the future.
+// Node v6 didn't export this, so we just hard code the version and rely
+// on all the other hard-coded values from zlib v4736. When node v6
+// support drops, we can just export the realZlibConstants object.
+const realZlibConstants = require('zlib').constants ||
+ /* istanbul ignore next */ { ZLIB_VERNUM: 4736 }
+
+module.exports = Object.freeze(Object.assign(Object.create(null), {
+ Z_NO_FLUSH: 0,
+ Z_PARTIAL_FLUSH: 1,
+ Z_SYNC_FLUSH: 2,
+ Z_FULL_FLUSH: 3,
+ Z_FINISH: 4,
+ Z_BLOCK: 5,
+ Z_OK: 0,
+ Z_STREAM_END: 1,
+ Z_NEED_DICT: 2,
+ Z_ERRNO: -1,
+ Z_STREAM_ERROR: -2,
+ Z_DATA_ERROR: -3,
+ Z_MEM_ERROR: -4,
+ Z_BUF_ERROR: -5,
+ Z_VERSION_ERROR: -6,
+ Z_NO_COMPRESSION: 0,
+ Z_BEST_SPEED: 1,
+ Z_BEST_COMPRESSION: 9,
+ Z_DEFAULT_COMPRESSION: -1,
+ Z_FILTERED: 1,
+ Z_HUFFMAN_ONLY: 2,
+ Z_RLE: 3,
+ Z_FIXED: 4,
+ Z_DEFAULT_STRATEGY: 0,
+ DEFLATE: 1,
+ INFLATE: 2,
+ GZIP: 3,
+ GUNZIP: 4,
+ DEFLATERAW: 5,
+ INFLATERAW: 6,
+ UNZIP: 7,
+ BROTLI_DECODE: 8,
+ BROTLI_ENCODE: 9,
+ Z_MIN_WINDOWBITS: 8,
+ Z_MAX_WINDOWBITS: 15,
+ Z_DEFAULT_WINDOWBITS: 15,
+ Z_MIN_CHUNK: 64,
+ Z_MAX_CHUNK: Infinity,
+ Z_DEFAULT_CHUNK: 16384,
+ Z_MIN_MEMLEVEL: 1,
+ Z_MAX_MEMLEVEL: 9,
+ Z_DEFAULT_MEMLEVEL: 8,
+ Z_MIN_LEVEL: -1,
+ Z_MAX_LEVEL: 9,
+ Z_DEFAULT_LEVEL: -1,
+ BROTLI_OPERATION_PROCESS: 0,
+ BROTLI_OPERATION_FLUSH: 1,
+ BROTLI_OPERATION_FINISH: 2,
+ BROTLI_OPERATION_EMIT_METADATA: 3,
+ BROTLI_MODE_GENERIC: 0,
+ BROTLI_MODE_TEXT: 1,
+ BROTLI_MODE_FONT: 2,
+ BROTLI_DEFAULT_MODE: 0,
+ BROTLI_MIN_QUALITY: 0,
+ BROTLI_MAX_QUALITY: 11,
+ BROTLI_DEFAULT_QUALITY: 11,
+ BROTLI_MIN_WINDOW_BITS: 10,
+ BROTLI_MAX_WINDOW_BITS: 24,
+ BROTLI_LARGE_MAX_WINDOW_BITS: 30,
+ BROTLI_DEFAULT_WINDOW: 22,
+ BROTLI_MIN_INPUT_BLOCK_BITS: 16,
+ BROTLI_MAX_INPUT_BLOCK_BITS: 24,
+ BROTLI_PARAM_MODE: 0,
+ BROTLI_PARAM_QUALITY: 1,
+ BROTLI_PARAM_LGWIN: 2,
+ BROTLI_PARAM_LGBLOCK: 3,
+ BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING: 4,
+ BROTLI_PARAM_SIZE_HINT: 5,
+ BROTLI_PARAM_LARGE_WINDOW: 6,
+ BROTLI_PARAM_NPOSTFIX: 7,
+ BROTLI_PARAM_NDIRECT: 8,
+ BROTLI_DECODER_RESULT_ERROR: 0,
+ BROTLI_DECODER_RESULT_SUCCESS: 1,
+ BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT: 2,
+ BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT: 3,
+ BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION: 0,
+ BROTLI_DECODER_PARAM_LARGE_WINDOW: 1,
+ BROTLI_DECODER_NO_ERROR: 0,
+ BROTLI_DECODER_SUCCESS: 1,
+ BROTLI_DECODER_NEEDS_MORE_INPUT: 2,
+ BROTLI_DECODER_NEEDS_MORE_OUTPUT: 3,
+ BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_NIBBLE: -1,
+ BROTLI_DECODER_ERROR_FORMAT_RESERVED: -2,
+ BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_META_NIBBLE: -3,
+ BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_ALPHABET: -4,
+ BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_SAME: -5,
+ BROTLI_DECODER_ERROR_FORMAT_CL_SPACE: -6,
+ BROTLI_DECODER_ERROR_FORMAT_HUFFMAN_SPACE: -7,
+ BROTLI_DECODER_ERROR_FORMAT_CONTEXT_MAP_REPEAT: -8,
+ BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_1: -9,
+ BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_2: -10,
+ BROTLI_DECODER_ERROR_FORMAT_TRANSFORM: -11,
+ BROTLI_DECODER_ERROR_FORMAT_DICTIONARY: -12,
+ BROTLI_DECODER_ERROR_FORMAT_WINDOW_BITS: -13,
+ BROTLI_DECODER_ERROR_FORMAT_PADDING_1: -14,
+ BROTLI_DECODER_ERROR_FORMAT_PADDING_2: -15,
+ BROTLI_DECODER_ERROR_FORMAT_DISTANCE: -16,
+ BROTLI_DECODER_ERROR_DICTIONARY_NOT_SET: -19,
+ BROTLI_DECODER_ERROR_INVALID_ARGUMENTS: -20,
+ BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MODES: -21,
+ BROTLI_DECODER_ERROR_ALLOC_TREE_GROUPS: -22,
+ BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MAP: -25,
+ BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_1: -26,
+ BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_2: -27,
+ BROTLI_DECODER_ERROR_ALLOC_BLOCK_TYPE_TREES: -30,
+ BROTLI_DECODER_ERROR_UNREACHABLE: -31,
+}, realZlibConstants))
diff --git a/node_modules/cacache/node_modules/minizlib/index.js b/node_modules/cacache/node_modules/minizlib/index.js
new file mode 100644
index 000000000..c84bda1b5
--- /dev/null
+++ b/node_modules/cacache/node_modules/minizlib/index.js
@@ -0,0 +1,338 @@
+'use strict'
+
+const assert = require('assert')
+const Buffer = require('buffer').Buffer
+const realZlib = require('zlib')
+
+const constants = exports.constants = require('./constants.js')
+const Minipass = require('minipass')
+
+const OriginalBufferConcat = Buffer.concat
+
+const _superWrite = Symbol('_superWrite')
+class ZlibError extends Error {
+ constructor (err) {
+ super('zlib: ' + err.message)
+ this.code = err.code
+ this.errno = err.errno
+ /* istanbul ignore if */
+ if (!this.code)
+ this.code = 'ZLIB_ERROR'
+
+ this.message = 'zlib: ' + err.message
+ Error.captureStackTrace(this, this.constructor)
+ }
+
+ get name () {
+ return 'ZlibError'
+ }
+}
+
+// the Zlib class they all inherit from
+// This thing manages the queue of requests, and returns
+// true or false if there is anything in the queue when
+// you call the .write() method.
+const _opts = Symbol('opts')
+const _flushFlag = Symbol('flushFlag')
+const _finishFlushFlag = Symbol('finishFlushFlag')
+const _fullFlushFlag = Symbol('fullFlushFlag')
+const _handle = Symbol('handle')
+const _onError = Symbol('onError')
+const _sawError = Symbol('sawError')
+const _level = Symbol('level')
+const _strategy = Symbol('strategy')
+const _ended = Symbol('ended')
+const _defaultFullFlush = Symbol('_defaultFullFlush')
+
+class ZlibBase extends Minipass {
+ constructor (opts, mode) {
+ if (!opts || typeof opts !== 'object')
+ throw new TypeError('invalid options for ZlibBase constructor')
+
+ super(opts)
+ this[_ended] = false
+ this[_opts] = opts
+
+ this[_flushFlag] = opts.flush
+ this[_finishFlushFlag] = opts.finishFlush
+ // this will throw if any options are invalid for the class selected
+ try {
+ this[_handle] = new realZlib[mode](opts)
+ } catch (er) {
+ // make sure that all errors get decorated properly
+ throw new ZlibError(er)
+ }
+
+ this[_onError] = (err) => {
+ this[_sawError] = true
+ // there is no way to cleanly recover.
+ // continuing only obscures problems.
+ this.close()
+ this.emit('error', err)
+ }
+
+ this[_handle].on('error', er => this[_onError](new ZlibError(er)))
+    this.once('end', () => this.close())
+ }
+
+ close () {
+ if (this[_handle]) {
+ this[_handle].close()
+ this[_handle] = null
+ this.emit('close')
+ }
+ }
+
+ reset () {
+ if (!this[_sawError]) {
+ assert(this[_handle], 'zlib binding closed')
+ return this[_handle].reset()
+ }
+ }
+
+ flush (flushFlag) {
+ if (this.ended)
+ return
+
+ if (typeof flushFlag !== 'number')
+ flushFlag = this[_fullFlushFlag]
+ this.write(Object.assign(Buffer.alloc(0), { [_flushFlag]: flushFlag }))
+ }
+
+ end (chunk, encoding, cb) {
+ if (chunk)
+ this.write(chunk, encoding)
+ this.flush(this[_finishFlushFlag])
+ this[_ended] = true
+ return super.end(null, null, cb)
+ }
+
+ get ended () {
+ return this[_ended]
+ }
+
+ write (chunk, encoding, cb) {
+ // process the chunk using the sync process
+ // then super.write() all the outputted chunks
+ if (typeof encoding === 'function')
+ cb = encoding, encoding = 'utf8'
+
+ if (typeof chunk === 'string')
+ chunk = Buffer.from(chunk, encoding)
+
+ if (this[_sawError])
+ return
+ assert(this[_handle], 'zlib binding closed')
+
+ // _processChunk tries to .close() the native handle after it's done, so we
+ // intercept that by temporarily making it a no-op.
+ const nativeHandle = this[_handle]._handle
+ const originalNativeClose = nativeHandle.close
+ nativeHandle.close = () => {}
+ const originalClose = this[_handle].close
+ this[_handle].close = () => {}
+ // It also calls `Buffer.concat()` at the end, which may be convenient
+ // for some, but which we are not interested in as it slows us down.
+ Buffer.concat = (args) => args
+ let result
+ try {
+ const flushFlag = typeof chunk[_flushFlag] === 'number'
+ ? chunk[_flushFlag] : this[_flushFlag]
+ result = this[_handle]._processChunk(chunk, flushFlag)
+ // if we don't throw, reset it back how it was
+ Buffer.concat = OriginalBufferConcat
+ } catch (err) {
+ // or if we do, put Buffer.concat() back before we emit error
+ // Error events call into user code, which may call Buffer.concat()
+ Buffer.concat = OriginalBufferConcat
+ this[_onError](new ZlibError(err))
+ } finally {
+ if (this[_handle]) {
+ // Core zlib resets `_handle` to null after attempting to close the
+ // native handle. Our no-op handler prevented actual closure, but we
+ // need to restore the `._handle` property.
+ this[_handle]._handle = nativeHandle
+ nativeHandle.close = originalNativeClose
+ this[_handle].close = originalClose
+ // `_processChunk()` adds an 'error' listener. If we don't remove it
+ // after each call, these handlers start piling up.
+ this[_handle].removeAllListeners('error')
+ }
+ }
+
+ let writeReturn
+ if (result) {
+ if (Array.isArray(result) && result.length > 0) {
+ // The first buffer is always `handle._outBuffer`, which would be
+ // re-used for later invocations; so, we always have to copy that one.
+ writeReturn = this[_superWrite](Buffer.from(result[0]))
+ for (let i = 1; i < result.length; i++) {
+ writeReturn = this[_superWrite](result[i])
+ }
+ } else {
+ writeReturn = this[_superWrite](Buffer.from(result))
+ }
+ }
+
+ if (cb)
+ cb()
+ return writeReturn
+ }
+
+ [_superWrite] (data) {
+ return super.write(data)
+ }
+}
+
+class Zlib extends ZlibBase {
+ constructor (opts, mode) {
+ opts = opts || {}
+
+ opts.flush = opts.flush || constants.Z_NO_FLUSH
+ opts.finishFlush = opts.finishFlush || constants.Z_FINISH
+ super(opts, mode)
+
+ this[_fullFlushFlag] = constants.Z_FULL_FLUSH
+ this[_level] = opts.level
+ this[_strategy] = opts.strategy
+ }
+
+ params (level, strategy) {
+ if (this[_sawError])
+ return
+
+ if (!this[_handle])
+ throw new Error('cannot switch params when binding is closed')
+
+ // no way to test this without also not supporting params at all
+ /* istanbul ignore if */
+ if (!this[_handle].params)
+ throw new Error('not supported in this implementation')
+
+ if (this[_level] !== level || this[_strategy] !== strategy) {
+ this.flush(constants.Z_SYNC_FLUSH)
+ assert(this[_handle], 'zlib binding closed')
+ // .params() calls .flush(), but the latter is always async in the
+ // core zlib. We override .flush() temporarily to intercept that and
+ // flush synchronously.
+ const origFlush = this[_handle].flush
+ this[_handle].flush = (flushFlag, cb) => {
+ this.flush(flushFlag)
+ cb()
+ }
+ try {
+ this[_handle].params(level, strategy)
+ } finally {
+ this[_handle].flush = origFlush
+ }
+ /* istanbul ignore else */
+ if (this[_handle]) {
+ this[_level] = level
+ this[_strategy] = strategy
+ }
+ }
+ }
+}
+
+// minimal 2-byte header
+class Deflate extends Zlib {
+ constructor (opts) {
+ super(opts, 'Deflate')
+ }
+}
+
+class Inflate extends Zlib {
+ constructor (opts) {
+ super(opts, 'Inflate')
+ }
+}
+
+// gzip - bigger header, same deflate compression
+const _portable = Symbol('_portable')
+class Gzip extends Zlib {
+ constructor (opts) {
+ super(opts, 'Gzip')
+ this[_portable] = opts && !!opts.portable
+ }
+
+ [_superWrite] (data) {
+ if (!this[_portable])
+ return super[_superWrite](data)
+
+ // we'll always get the header emitted in one first chunk
+ // overwrite the OS indicator byte with 0xFF
+ this[_portable] = false
+ data[9] = 255
+ return super[_superWrite](data)
+ }
+}
+
+class Gunzip extends Zlib {
+ constructor (opts) {
+ super(opts, 'Gunzip')
+ }
+}
+
+// raw - no header
+class DeflateRaw extends Zlib {
+ constructor (opts) {
+ super(opts, 'DeflateRaw')
+ }
+}
+
+class InflateRaw extends Zlib {
+ constructor (opts) {
+ super(opts, 'InflateRaw')
+ }
+}
+
+// auto-detect header.
+class Unzip extends Zlib {
+ constructor (opts) {
+ super(opts, 'Unzip')
+ }
+}
+
+class Brotli extends ZlibBase {
+ constructor (opts, mode) {
+ opts = opts || {}
+
+ opts.flush = opts.flush || constants.BROTLI_OPERATION_PROCESS
+ opts.finishFlush = opts.finishFlush || constants.BROTLI_OPERATION_FINISH
+
+ super(opts, mode)
+
+ this[_fullFlushFlag] = constants.BROTLI_OPERATION_FLUSH
+ }
+}
+
+class BrotliCompress extends Brotli {
+ constructor (opts) {
+ super(opts, 'BrotliCompress')
+ }
+}
+
+class BrotliDecompress extends Brotli {
+ constructor (opts) {
+ super(opts, 'BrotliDecompress')
+ }
+}
+
+exports.Deflate = Deflate
+exports.Inflate = Inflate
+exports.Gzip = Gzip
+exports.Gunzip = Gunzip
+exports.DeflateRaw = DeflateRaw
+exports.InflateRaw = InflateRaw
+exports.Unzip = Unzip
+/* istanbul ignore else */
+if (typeof realZlib.BrotliCompress === 'function') {
+ exports.BrotliCompress = BrotliCompress
+ exports.BrotliDecompress = BrotliDecompress
+} else {
+ exports.BrotliCompress = exports.BrotliDecompress = class {
+ constructor () {
+ throw new Error('Brotli is not supported in this version of Node.js')
+ }
+ }
+}
diff --git a/node_modules/cacache/node_modules/minizlib/package.json b/node_modules/cacache/node_modules/minizlib/package.json
new file mode 100644
index 000000000..427b73647
--- /dev/null
+++ b/node_modules/cacache/node_modules/minizlib/package.json
@@ -0,0 +1,75 @@
+{
+ "_from": "minizlib@^2.1.0",
+ "_id": "minizlib@2.1.0",
+ "_inBundle": false,
+ "_integrity": "sha512-EzTZN/fjSvifSX0SlqUERCN39o6T40AMarPbv0MrarSFtIITCBh7bi+dU8nxGFHuqs9jdIAeoYoKuQAAASsPPA==",
+ "_location": "/cacache/minizlib",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "minizlib@^2.1.0",
+ "name": "minizlib",
+ "escapedName": "minizlib",
+ "rawSpec": "^2.1.0",
+ "saveSpec": null,
+ "fetchSpec": "^2.1.0"
+ },
+ "_requiredBy": [
+ "/cacache/tar"
+ ],
+ "_resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.0.tgz",
+ "_shasum": "fd52c645301ef09a63a2c209697c294c6ce02cf3",
+ "_spec": "minizlib@^2.1.0",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/cacache/node_modules/tar",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/isaacs/minizlib/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {
+ "minipass": "^3.0.0",
+ "yallist": "^4.0.0"
+ },
+ "deprecated": false,
+ "description": "A small fast zlib stream built on [minipass](http://npm.im/minipass) and Node.js's zlib binding.",
+ "devDependencies": {
+ "tap": "^14.6.9"
+ },
+ "engines": {
+ "node": ">= 8"
+ },
+ "files": [
+ "index.js",
+ "constants.js"
+ ],
+ "homepage": "https://github.com/isaacs/minizlib#readme",
+ "keywords": [
+ "zlib",
+ "gzip",
+ "gunzip",
+ "deflate",
+ "inflate",
+ "compression",
+ "zip",
+ "unzip"
+ ],
+ "license": "MIT",
+ "main": "index.js",
+ "name": "minizlib",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/isaacs/minizlib.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --all; git push origin --tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap test/*.js --100 -J"
+ },
+ "version": "2.1.0"
+}
diff --git a/node_modules/cacache/node_modules/mkdirp/CHANGELOG.md b/node_modules/cacache/node_modules/mkdirp/CHANGELOG.md
new file mode 100644
index 000000000..81458380b
--- /dev/null
+++ b/node_modules/cacache/node_modules/mkdirp/CHANGELOG.md
@@ -0,0 +1,15 @@
+# Changers Lorgs!
+
+## 1.0
+
+Full rewrite. Essentially a brand new module.
+
+- Return a promise instead of taking a callback.
+- Use native `fs.mkdir(path, { recursive: true })` when available.
+- Drop support for outdated Node.js versions. (Technically still works on
+ Node.js v8, but only 10 and above are officially supported.)
+
+## 0.x
+
+Original and most widely used recursive directory creation implementation
+in JavaScript, dating back to 2010.
diff --git a/node_modules/cacache/node_modules/mkdirp/LICENSE b/node_modules/cacache/node_modules/mkdirp/LICENSE
new file mode 100644
index 000000000..13fcd15f0
--- /dev/null
+++ b/node_modules/cacache/node_modules/mkdirp/LICENSE
@@ -0,0 +1,21 @@
+Copyright James Halliday (mail@substack.net) and Isaac Z. Schlueter (i@izs.me)
+
+This project is free software released under the MIT license:
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/node_modules/cacache/node_modules/mkdirp/bin/cmd.js b/node_modules/cacache/node_modules/mkdirp/bin/cmd.js
new file mode 100755
index 000000000..6e0aa8dc4
--- /dev/null
+++ b/node_modules/cacache/node_modules/mkdirp/bin/cmd.js
@@ -0,0 +1,68 @@
+#!/usr/bin/env node
+
+const usage = () => `
+usage: mkdirp [DIR1,DIR2..] {OPTIONS}
+
+ Create each supplied directory including any necessary parent directories
+ that don't yet exist.
+
+ If the directory already exists, do nothing.
+
+OPTIONS are:
+
+ -m<mode> If a directory needs to be created, set the mode as an octal
+ --mode=<mode> permission string.
+
+ -v --version Print the mkdirp version number
+
+ -h --help Print this helpful banner
+
+ -p --print Print the first directories created for each path provided
+
+ --manual Use manual implementation, even if native is available
+`
+
+const dirs = []
+const opts = {}
+let print = false
+let dashdash = false
+let manual = false
+for (const arg of process.argv.slice(2)) {
+ if (dashdash)
+ dirs.push(arg)
+ else if (arg === '--')
+ dashdash = true
+ else if (arg === '--manual')
+ manual = true
+ else if (/^-h/.test(arg) || /^--help/.test(arg)) {
+ console.log(usage())
+ process.exit(0)
+ } else if (arg === '-v' || arg === '--version') {
+ console.log(require('../package.json').version)
+ process.exit(0)
+ } else if (arg === '-p' || arg === '--print') {
+ print = true
+ } else if (/^-m/.test(arg) || /^--mode=/.test(arg)) {
+ const mode = parseInt(arg.replace(/^(-m|--mode=)/, ''), 8)
+ if (isNaN(mode)) {
+ console.error(`invalid mode argument: ${arg}\nMust be an octal number.`)
+ process.exit(1)
+ }
+ opts.mode = mode
+ } else
+ dirs.push(arg)
+}
+
+const mkdirp = require('../')
+const impl = manual ? mkdirp.manual : mkdirp
+if (dirs.length === 0)
+ console.error(usage())
+
+Promise.all(dirs.map(dir => impl(dir, opts)))
+ .then(made => print ? made.forEach(m => m && console.log(m)) : null)
+ .catch(er => {
+ console.error(er.message)
+ if (er.code)
+ console.error(' code: ' + er.code)
+ process.exit(1)
+ })
diff --git a/node_modules/cacache/node_modules/mkdirp/index.js b/node_modules/cacache/node_modules/mkdirp/index.js
new file mode 100644
index 000000000..ad7a16c9f
--- /dev/null
+++ b/node_modules/cacache/node_modules/mkdirp/index.js
@@ -0,0 +1,31 @@
+const optsArg = require('./lib/opts-arg.js')
+const pathArg = require('./lib/path-arg.js')
+
+const {mkdirpNative, mkdirpNativeSync} = require('./lib/mkdirp-native.js')
+const {mkdirpManual, mkdirpManualSync} = require('./lib/mkdirp-manual.js')
+const {useNative, useNativeSync} = require('./lib/use-native.js')
+
+
+const mkdirp = (path, opts) => {
+ path = pathArg(path)
+ opts = optsArg(opts)
+ return useNative(opts)
+ ? mkdirpNative(path, opts)
+ : mkdirpManual(path, opts)
+}
+
+const mkdirpSync = (path, opts) => {
+ path = pathArg(path)
+ opts = optsArg(opts)
+ return useNativeSync(opts)
+ ? mkdirpNativeSync(path, opts)
+ : mkdirpManualSync(path, opts)
+}
+
+mkdirp.sync = mkdirpSync
+mkdirp.native = (path, opts) => mkdirpNative(pathArg(path), optsArg(opts))
+mkdirp.manual = (path, opts) => mkdirpManual(pathArg(path), optsArg(opts))
+mkdirp.nativeSync = (path, opts) => mkdirpNativeSync(pathArg(path), optsArg(opts))
+mkdirp.manualSync = (path, opts) => mkdirpManualSync(pathArg(path), optsArg(opts))
+
+module.exports = mkdirp
diff --git a/node_modules/cacache/node_modules/mkdirp/lib/find-made.js b/node_modules/cacache/node_modules/mkdirp/lib/find-made.js
new file mode 100644
index 000000000..022e492c0
--- /dev/null
+++ b/node_modules/cacache/node_modules/mkdirp/lib/find-made.js
@@ -0,0 +1,29 @@
+const {dirname} = require('path')
+
+const findMade = (opts, parent, path = undefined) => {
+ // we never want the 'made' return value to be a root directory
+ if (path === parent)
+ return Promise.resolve()
+
+ return opts.statAsync(parent).then(
+ st => st.isDirectory() ? path : undefined, // will fail later
+ er => er.code === 'ENOENT'
+ ? findMade(opts, dirname(parent), parent)
+ : undefined
+ )
+}
+
+const findMadeSync = (opts, parent, path = undefined) => {
+ if (path === parent)
+ return undefined
+
+ try {
+ return opts.statSync(parent).isDirectory() ? path : undefined
+ } catch (er) {
+ return er.code === 'ENOENT'
+ ? findMadeSync(opts, dirname(parent), parent)
+ : undefined
+ }
+}
+
+module.exports = {findMade, findMadeSync}
diff --git a/node_modules/cacache/node_modules/mkdirp/lib/mkdirp-manual.js b/node_modules/cacache/node_modules/mkdirp/lib/mkdirp-manual.js
new file mode 100644
index 000000000..2eb18cd64
--- /dev/null
+++ b/node_modules/cacache/node_modules/mkdirp/lib/mkdirp-manual.js
@@ -0,0 +1,64 @@
+const {dirname} = require('path')
+
+const mkdirpManual = (path, opts, made) => {
+ opts.recursive = false
+ const parent = dirname(path)
+ if (parent === path) {
+ return opts.mkdirAsync(path, opts).catch(er => {
+ // swallowed by recursive implementation on posix systems
+ // any other error is a failure
+ if (er.code !== 'EISDIR')
+ throw er
+ })
+ }
+
+ return opts.mkdirAsync(path, opts).then(() => made || path, er => {
+ if (er.code === 'ENOENT')
+ return mkdirpManual(parent, opts)
+ .then(made => mkdirpManual(path, opts, made))
+ if (er.code !== 'EEXIST' && er.code !== 'EROFS')
+ throw er
+ return opts.statAsync(path).then(st => {
+ if (st.isDirectory())
+ return made
+ else
+ throw er
+ }, () => { throw er })
+ })
+}
+
+const mkdirpManualSync = (path, opts, made) => {
+ const parent = dirname(path)
+ opts.recursive = false
+
+ if (parent === path) {
+ try {
+ return opts.mkdirSync(path, opts)
+ } catch (er) {
+ // swallowed by recursive implementation on posix systems
+ // any other error is a failure
+ if (er.code !== 'EISDIR')
+ throw er
+ else
+ return
+ }
+ }
+
+ try {
+ opts.mkdirSync(path, opts)
+ return made || path
+ } catch (er) {
+ if (er.code === 'ENOENT')
+ return mkdirpManualSync(path, opts, mkdirpManualSync(parent, opts, made))
+ if (er.code !== 'EEXIST' && er.code !== 'EROFS')
+ throw er
+ try {
+ if (!opts.statSync(path).isDirectory())
+ throw er
+ } catch (_) {
+ throw er
+ }
+ }
+}
+
+module.exports = {mkdirpManual, mkdirpManualSync}
diff --git a/node_modules/cacache/node_modules/mkdirp/lib/mkdirp-native.js b/node_modules/cacache/node_modules/mkdirp/lib/mkdirp-native.js
new file mode 100644
index 000000000..c7a6b6980
--- /dev/null
+++ b/node_modules/cacache/node_modules/mkdirp/lib/mkdirp-native.js
@@ -0,0 +1,39 @@
+const {dirname} = require('path')
+const {findMade, findMadeSync} = require('./find-made.js')
+const {mkdirpManual, mkdirpManualSync} = require('./mkdirp-manual.js')
+
+const mkdirpNative = (path, opts) => {
+ opts.recursive = true
+ const parent = dirname(path)
+ if (parent === path)
+ return opts.mkdirAsync(path, opts)
+
+ return findMade(opts, path).then(made =>
+ opts.mkdirAsync(path, opts).then(() => made)
+ .catch(er => {
+ if (er.code === 'ENOENT')
+ return mkdirpManual(path, opts)
+ else
+ throw er
+ }))
+}
+
+const mkdirpNativeSync = (path, opts) => {
+ opts.recursive = true
+ const parent = dirname(path)
+ if (parent === path)
+ return opts.mkdirSync(path, opts)
+
+ const made = findMadeSync(opts, path)
+ try {
+ opts.mkdirSync(path, opts)
+ return made
+ } catch (er) {
+ if (er.code === 'ENOENT')
+ return mkdirpManualSync(path, opts)
+ else
+ throw er
+ }
+}
+
+module.exports = {mkdirpNative, mkdirpNativeSync}
diff --git a/node_modules/cacache/node_modules/mkdirp/lib/opts-arg.js b/node_modules/cacache/node_modules/mkdirp/lib/opts-arg.js
new file mode 100644
index 000000000..488bd44c3
--- /dev/null
+++ b/node_modules/cacache/node_modules/mkdirp/lib/opts-arg.js
@@ -0,0 +1,23 @@
+const { promisify } = require('util')
+const fs = require('fs')
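+// Normalize the opts argument: accept nothing, a number/string mode, or an
+// options object; attach any fs overrides; and promisify mkdir/stat.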
+const optsArg = opts => {
+ if (!opts)
+ opts = { mode: 0o777 & (~process.umask()), fs }
+ else if (typeof opts === 'object')
+ opts = { mode: 0o777 & (~process.umask()), fs, ...opts }
+ else if (typeof opts === 'number')
+ opts = { mode: opts, fs }
+ else if (typeof opts === 'string')
+ opts = { mode: parseInt(opts, 8), fs }
+ else
+ throw new TypeError('invalid options argument')
+
+ opts.mkdir = opts.mkdir || opts.fs.mkdir || fs.mkdir
+ opts.mkdirAsync = promisify(opts.mkdir)
+ opts.stat = opts.stat || opts.fs.stat || fs.stat
+ opts.statAsync = promisify(opts.stat)
+ opts.statSync = opts.statSync || opts.fs.statSync || fs.statSync
+ opts.mkdirSync = opts.mkdirSync || opts.fs.mkdirSync || fs.mkdirSync
+ return opts
+}
+module.exports = optsArg
diff --git a/node_modules/cacache/node_modules/mkdirp/lib/path-arg.js b/node_modules/cacache/node_modules/mkdirp/lib/path-arg.js
new file mode 100644
index 000000000..cc07de5a6
--- /dev/null
+++ b/node_modules/cacache/node_modules/mkdirp/lib/path-arg.js
@@ -0,0 +1,29 @@
+const platform = process.env.__TESTING_MKDIRP_PLATFORM__ || process.platform
+const { resolve, parse } = require('path')
+const pathArg = path => {
+ if (/\0/.test(path)) {
+ // simulate same failure that node raises
+ throw Object.assign(
+ new TypeError('path must be a string without null bytes'),
+ {
+ path,
+ code: 'ERR_INVALID_ARG_VALUE',
+ }
+ )
+ }
+
+ path = resolve(path)
+ if (platform === 'win32') {
+ const badWinChars = /[*|"<>?:]/
+ const {root} = parse(path)
+ if (badWinChars.test(path.substr(root.length))) {
+ throw Object.assign(new Error('Illegal characters in path.'), {
+ path,
+ code: 'EINVAL',
+ })
+ }
+ }
+
+ return path
+}
+module.exports = pathArg
diff --git a/node_modules/cacache/node_modules/mkdirp/lib/use-native.js b/node_modules/cacache/node_modules/mkdirp/lib/use-native.js
new file mode 100644
index 000000000..079361de1
--- /dev/null
+++ b/node_modules/cacache/node_modules/mkdirp/lib/use-native.js
@@ -0,0 +1,10 @@
+const fs = require('fs')
+
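+// fs.mkdir learned { recursive: true } in Node.js 10.12.0, so the native
+// implementation is only usable on 10.12 and above.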
+const version = process.env.__TESTING_MKDIRP_NODE_VERSION__ || process.version
+const versArr = version.replace(/^v/, '').split('.')
+const hasNative = +versArr[0] > 10 || +versArr[0] === 10 && +versArr[1] >= 12
+
+const useNative = !hasNative ? () => false : opts => opts.mkdir === fs.mkdir
+const useNativeSync = !hasNative ? () => false : opts => opts.mkdirSync === fs.mkdirSync
+
+module.exports = {useNative, useNativeSync}
diff --git a/node_modules/cacache/node_modules/mkdirp/package.json b/node_modules/cacache/node_modules/mkdirp/package.json
new file mode 100644
index 000000000..1050f8218
--- /dev/null
+++ b/node_modules/cacache/node_modules/mkdirp/package.json
@@ -0,0 +1,76 @@
+{
+ "_from": "mkdirp@^1.0.3",
+ "_id": "mkdirp@1.0.3",
+ "_inBundle": false,
+ "_integrity": "sha512-6uCP4Qc0sWsgMLy1EOqqS/3rjDHOEnsStVr/4vtAIK2Y5i2kA7lFFejYrpIyiN9w0pYf4ckeCYT9f1r1P9KX5g==",
+ "_location": "/cacache/mkdirp",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "mkdirp@^1.0.3",
+ "name": "mkdirp",
+ "escapedName": "mkdirp",
+ "rawSpec": "^1.0.3",
+ "saveSpec": null,
+ "fetchSpec": "^1.0.3"
+ },
+ "_requiredBy": [
+ "/cacache",
+ "/cacache/tar"
+ ],
+ "_resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.3.tgz",
+ "_shasum": "4cf2e30ad45959dddea53ad97d518b6c8205e1ea",
+ "_spec": "mkdirp@^1.0.3",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/cacache",
+ "bin": {
+ "mkdirp": "bin/cmd.js"
+ },
+ "bugs": {
+ "url": "https://github.com/isaacs/node-mkdirp/issues"
+ },
+ "bundleDependencies": false,
+ "deprecated": false,
+ "description": "Recursively mkdir, like `mkdir -p`",
+ "devDependencies": {
+ "require-inject": "^1.4.4",
+ "tap": "^14.10.6"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "files": [
+ "bin",
+ "lib",
+ "index.js"
+ ],
+ "homepage": "https://github.com/isaacs/node-mkdirp#readme",
+ "keywords": [
+ "mkdir",
+ "directory",
+ "make dir",
+ "make",
+ "dir",
+ "recursive",
+ "native"
+ ],
+ "license": "MIT",
+ "main": "index.js",
+ "name": "mkdirp",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/isaacs/node-mkdirp.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --follow-tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "snap": "tap",
+ "test": "tap"
+ },
+ "tap": {
+ "check-coverage": true,
+ "coverage-map": "map.js"
+ },
+ "version": "1.0.3"
+}
diff --git a/node_modules/cacache/node_modules/mkdirp/readme.markdown b/node_modules/cacache/node_modules/mkdirp/readme.markdown
new file mode 100644
index 000000000..827de5905
--- /dev/null
+++ b/node_modules/cacache/node_modules/mkdirp/readme.markdown
@@ -0,0 +1,266 @@
+# mkdirp
+
+Like `mkdir -p`, but in Node.js!
+
+Now with a modern API and no\* bugs!
+
+<small>\* may contain some bugs</small>
+
+# example
+
+## pow.js
+
+```js
+const mkdirp = require('mkdirp')
+
+// return value is a Promise resolving to the first directory created
+mkdirp('/tmp/foo/bar/baz').then(made =>
+ console.log(`made directories, starting with ${made}`))
+```
+
+Output (where `/tmp/foo` already exists)
+
+```
+made directories, starting with /tmp/foo/bar
+```
+
+Or, if you don't have time to wait around for promises:
+
+```js
+const mkdirp = require('mkdirp')
+
+// return value is the first directory created
+const made = mkdirp.sync('/tmp/foo/bar/baz')
+console.log(`made directories, starting with ${made}`)
+```
+
+And now /tmp/foo/bar/baz exists, huzzah!
+
+# methods
+
+```js
+const mkdirp = require('mkdirp')
+```
+
+## mkdirp(dir, [opts]) -> Promise<String | undefined>
+
+Create a new directory and any necessary subdirectories at `dir` with octal
+permission string `opts.mode`. If `opts` is a string or number, it will be
+treated as the `opts.mode`.
+
+If `opts.mode` isn't specified, it defaults to `0o777 &
+(~process.umask())`.
+
+Promise resolves to first directory `made` that had to be created, or
+`undefined` if everything already exists. Promise rejects if any errors
+are encountered. Note that, in the case of promise rejection, some
+directories _may_ have been created, as recursive directory creation is not
+an atomic operation.
+
+You can optionally pass in an alternate `fs` implementation by passing in
+`opts.fs`. Your implementation should have `opts.fs.mkdir(path, opts, cb)`
+and `opts.fs.stat(path, cb)`.
+
+You can also override just one or the other of `mkdir` and `stat` by
+passing in `opts.stat` or `opts.mkdir`, or providing an `fs` option that
+only overrides one of these.
+
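+For example, a rough sketch of overriding the file system implementation
+(`myFs` here is a stand-in for whatever fs-compatible object you have):
+
+```js
+const mkdirp = require('mkdirp')
+
+// myFs is assumed to provide mkdir(path, opts, cb) and stat(path, cb)
+mkdirp('/tmp/foo/bar/baz', { fs: myFs, mode: 0o775 })
+  .then(made => console.log(`made directories, starting with ${made}`))
+```
+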
+## mkdirp.sync(dir, [opts]) -> String | undefined
+
+Synchronously create a new directory and any necessary subdirectories at
+`dir` with octal permission string `opts.mode`. If `opts` is a string or
+number, it will be treated as the `opts.mode`.
+
+If `opts.mode` isn't specified, it defaults to `0o777 &
+(~process.umask())`.
+
+Returns the first directory that had to be created, or undefined if
+everything already exists.
+
+You can optionally pass in an alternate `fs` implementation by passing in
+`opts.fs`. Your implementation should have `opts.fs.mkdirSync(path, mode)`
+and `opts.fs.statSync(path)`.
+
+You can also override just one or the other of `mkdirSync` and `statSync`
+by passing in `opts.statSync` or `opts.mkdirSync`, or providing an `fs`
+option that only overrides one of these.
+
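+A rough sketch (the directory path here is just an example):
+
+```js
+const mkdirp = require('mkdirp')
+
+// a number or string opts is treated as the mode
+const made = mkdirp.sync('/tmp/foo/bar/baz', 0o700)
+console.log(made) // first directory created, or undefined if nothing was made
+```
+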
+## mkdirp.manual, mkdirp.manualSync
+
+Use the manual implementation (not the native one). This is the default
+when the native implementation is not available or the stat/mkdir
+implementation is overridden.
+
+## mkdirp.native, mkdirp.nativeSync
+
+Use the native implementation (not the manual one). This is the default
+when the native implementation is available and stat/mkdir are not
+overridden.
+
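+For instance, a quick sketch of forcing one implementation or the other:
+
+```js
+const mkdirp = require('mkdirp')
+
+// always walk the tree ourselves, even if native recursive mkdir exists
+mkdirp.manual('/tmp/a/b/c').then(made => console.log(made))
+
+// always hand the whole job to fs.mkdirSync with { recursive: true }
+console.log(mkdirp.nativeSync('/tmp/a/b/c'))
+```
+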
+# implementation
+
+On Node.js v10.12.0 and above, use the native `fs.mkdir(p,
+{recursive:true})` option, unless `fs.mkdir`/`fs.mkdirSync` has been
+overridden by an option.
+
+## native implementation
+
+- If the path is a root directory, then pass it to the underlying
+ implementation and return the result/error. (In this case, it'll either
+ succeed or fail, but we aren't actually creating any dirs.)
+- Walk up the path statting each directory, to find the first path that
+ will be created, `made`.
+- Call `fs.mkdir(path, { recursive: true })` (or `fs.mkdirSync`)
+- If error, raise it to the caller.
+- Return `made`.
+
+## manual implementation
+
+- Call underlying `fs.mkdir` implementation, with `recursive: false`
+- If error:
+ - If path is a root directory, raise to the caller and do not handle it
+ - If ENOENT, mkdirp parent dir, store result as `made`
+ - stat(path)
+ - If error, raise original `mkdir` error
+ - If directory, return `made`
+ - Else, raise original `mkdir` error
+- else
+ - return `undefined` if a root dir, or `made` if set, or `path`
+
+## windows vs unix caveat
+
+On Windows file systems, attempts to create a root directory (i.e., a drive
+letter or root UNC path) will fail. If the root directory exists, then it
+will fail with `EPERM`. If the root directory does not exist, then it will
+fail with `ENOENT`.
+
+On posix file systems, attempts to create a root directory (in recursive
+mode) will succeed silently, as it is treated like just another directory
+that already exists. (In non-recursive mode, of course, it fails with
+`EEXIST`.)
+
+In order to preserve this system-specific behavior (and because it's not as
+if we can create the parent of a root directory anyway), attempts to create
+a root directory are passed directly to the `fs` implementation, and any
+errors encountered are not handled.
+
+## native error caveat
+
+The native implementation (as of at least Node.js v13.4.0) does not provide
+appropriate errors in some cases (see
+[nodejs/node#31481](https://github.com/nodejs/node/issues/31481) and
+[nodejs/node#28015](https://github.com/nodejs/node/issues/28015)).
+
+In order to work around this issue, the native implementation will fall
+back to the manual implementation if an `ENOENT` error is encountered.
+
+# choosing a recursive mkdir implementation
+
+There are a few to choose from! Use the one that suits your needs best :D
+
+## use `fs.mkdir(path, {recursive: true}, cb)` if:
+
+- You wish to optimize performance even at the expense of other factors.
+- You don't need to know the first dir created.
+- You are ok with getting `ENOENT` as the error when some other problem is
+ the actual cause.
+- You can limit your platforms to Node.js v10.12 and above.
+- You're ok with using callbacks instead of promises.
+- You don't need/want a CLI.
+- You don't need to override the `fs` methods in use.
+
+## use this module (mkdirp 1.x) if:
+
+- You need to know the first directory that was created.
+- You wish to use the native implementation if available, but fall back
+ when it's not.
+- You prefer promise-returning APIs to callback-taking APIs.
+- You want more useful error messages than the native recursive mkdir
+ provides (at least as of Node.js v13.4), and are ok with re-trying on
+ `ENOENT` to achieve this.
+- You need (or at least, are ok with) a CLI.
+- You need to override the `fs` methods in use.
+
+## use [`make-dir`](http://npm.im/make-dir) if:
+
+- You do not need to know the first dir created (and wish to save a few
+ `stat` calls when using the native implementation for this reason).
+- You wish to use the native implementation if available, but fall back
+ when it's not.
+- You prefer promise-returning APIs to callback-taking APIs.
+- You are ok with occasionally getting `ENOENT` errors for failures that
+ are actually related to something other than a missing file system entry.
+- You don't need/want a CLI.
+- You need to override the `fs` methods in use.
+
+## use mkdirp 0.x if:
+
+- You need to know the first directory that was created.
+- You need (or at least, are ok with) a CLI.
+- You need to override the `fs` methods in use.
+- You're ok with using callbacks instead of promises.
+- You are not running on Windows, where the root-level ENOENT errors can
+ lead to infinite regress.
+- You think vinyl just sounds warmer and richer for some weird reason.
+- You are supporting truly ancient Node.js versions, before even the advent
+ of a `Promise` language primitive. (Please don't. You deserve better.)
+
+# cli
+
+This package also ships with a `mkdirp` command.
+
+```
+$ mkdirp -h
+
+usage: mkdirp [DIR1,DIR2..] {OPTIONS}
+
+ Create each supplied directory including any necessary parent directories
+ that don't yet exist.
+
+ If the directory already exists, do nothing.
+
+OPTIONS are:
+
+ -m<mode> If a directory needs to be created, set the mode as an octal
+ --mode=<mode> permission string.
+
+ -v --version Print the mkdirp version number
+
+ -h --help Print this helpful banner
+
+ -p --print Print the first directories created for each path provided
+
+ --manual Use manual implementation, even if native is available
+```
+
+# install
+
+With [npm](http://npmjs.org) do:
+
+```
+npm install mkdirp
+```
+
+to get the library locally, or
+
+```
+npm install -g mkdirp
+```
+
+to get the command everywhere, or
+
+```
+npx mkdirp ...
+```
+
+to run the command without installing it globally.
+
+# platform support
+
+This module works on node v8, but only v10 and above are officially
+supported, as Node v8 reached its LTS end of life 2020-01-01, which is in
+the past, as of this writing.
+
+# license
+
+MIT
diff --git a/node_modules/cacache/node_modules/rimraf/LICENSE b/node_modules/cacache/node_modules/rimraf/LICENSE
new file mode 100644
index 000000000..19129e315
--- /dev/null
+++ b/node_modules/cacache/node_modules/rimraf/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/cacache/node_modules/rimraf/README.md b/node_modules/cacache/node_modules/rimraf/README.md
new file mode 100644
index 000000000..423b8cf85
--- /dev/null
+++ b/node_modules/cacache/node_modules/rimraf/README.md
@@ -0,0 +1,101 @@
+[![Build Status](https://travis-ci.org/isaacs/rimraf.svg?branch=master)](https://travis-ci.org/isaacs/rimraf) [![Dependency Status](https://david-dm.org/isaacs/rimraf.svg)](https://david-dm.org/isaacs/rimraf) [![devDependency Status](https://david-dm.org/isaacs/rimraf/dev-status.svg)](https://david-dm.org/isaacs/rimraf#info=devDependencies)
+
+The [UNIX command](http://en.wikipedia.org/wiki/Rm_(Unix)) `rm -rf` for node.
+
+Install with `npm install rimraf`, or just drop rimraf.js somewhere.
+
+## API
+
+`rimraf(f, [opts], callback)`
+
+The first parameter will be interpreted as a globbing pattern for files. If you
+want to disable globbing you can do so with `opts.disableGlob` (defaults to
+`false`). This might be handy, for instance, if you have filenames that contain
+globbing wildcard characters.
+
+The callback will be called with an error if there is one. Certain
+errors are handled for you:
+
+* Windows: `EBUSY` and `ENOTEMPTY` - rimraf will back off and retry up to
+  `opts.maxBusyTries` times before giving up, waiting 100ms longer on each
+  successive attempt. The default `maxBusyTries` is 3.
+* `ENOENT` - If the file doesn't exist, rimraf will return
+ successfully, since your desired outcome is already the case.
+* `EMFILE` - Since `readdir` requires opening a file descriptor, it's
+ possible to hit `EMFILE` if too many file descriptors are in use.
+ In the sync case, there's nothing to be done for this. But in the
+ async case, rimraf will gradually back off with timeouts up to
+ `opts.emfileWait` ms, which defaults to 1000.
+
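+For example, a minimal sketch of typical usage (the path is made up):
+
+```javascript
+var rimraf = require('rimraf')
+
+rimraf('/tmp/some-build-output', function (er) {
+  if (er) throw er
+  // the path and everything under it is gone (or never existed)
+})
+```
+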
+## options
+
+* unlink, chmod, stat, lstat, rmdir, readdir,
+ unlinkSync, chmodSync, statSync, lstatSync, rmdirSync, readdirSync
+
+ In order to use a custom file system library, you can override
+ specific fs functions on the options object.
+
+ If any of these functions are present on the options object, then
+ the supplied function will be used instead of the default fs
+ method.
+
+ Sync methods are only relevant for `rimraf.sync()`, of course.
+
+ For example:
+
+ ```javascript
+ var myCustomFS = require('some-custom-fs')
+
+ rimraf('some-thing', myCustomFS, callback)
+ ```
+
+* maxBusyTries
+
+ If an `EBUSY`, `ENOTEMPTY`, or `EPERM` error code is encountered
+ on Windows systems, then rimraf will retry with a linear backoff
+ wait of 100ms longer on each try. The default maxBusyTries is 3.
+
+ Only relevant for async usage.
+
+* emfileWait
+
+ If an `EMFILE` error is encountered, then rimraf will retry
+ repeatedly with a linear backoff of 1ms longer on each try, until
+ the timeout counter hits this max. The default limit is 1000.
+
+ If you repeatedly encounter `EMFILE` errors, then consider using
+ [graceful-fs](http://npm.im/graceful-fs) in your program.
+
+ Only relevant for async usage.
+
+* glob
+
+ Set to `false` to disable [glob](http://npm.im/glob) pattern
+ matching.
+
+ Set to an object to pass options to the glob module. The default
+ glob options are `{ nosort: true, silent: true }`.
+
+ Glob version 6 is used in this module.
+
+ Relevant for both sync and async usage.
+
+* disableGlob
+
+ Set to any non-falsey value to disable globbing entirely.
+ (Equivalent to setting `glob: false`.)
+
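+Putting a couple of the options above together (the path and retry limit
+here are just examples):
+
+```javascript
+var rimraf = require('rimraf')
+
+// treat the argument literally (no glob expansion) and retry EBUSY longer
+rimraf('weird [dir] name', { disableGlob: true, maxBusyTries: 10 }, function (er) {
+  if (er) throw er
+})
+```
+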
+## rimraf.sync
+
+It can remove stuff synchronously, too. But that's not so good. Use
+the async API. It's better.
+
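+A tiny sketch, if you must (the path is made up):
+
+```javascript
+var rimraf = require('rimraf')
+rimraf.sync('/tmp/some-build-output')
+```
+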
+## CLI
+
+If installed with `npm install rimraf -g` it can be used as a global
+command `rimraf <path> [<path> ...]` which is useful for cross platform support.
+
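+For example (the paths are made up):
+
+```
+rimraf build dist coverage
+```
+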
+## mkdirp
+
+If you need to create a directory recursively, check out
+[mkdirp](https://github.com/substack/node-mkdirp).
diff --git a/node_modules/cacache/node_modules/rimraf/bin.js b/node_modules/cacache/node_modules/rimraf/bin.js
new file mode 100755
index 000000000..0d1e17be7
--- /dev/null
+++ b/node_modules/cacache/node_modules/rimraf/bin.js
@@ -0,0 +1,50 @@
+#!/usr/bin/env node
+
+var rimraf = require('./')
+
+var help = false
+var dashdash = false
+var noglob = false
+var args = process.argv.slice(2).filter(function(arg) {
+ if (dashdash)
+ return !!arg
+ else if (arg === '--')
+ dashdash = true
+ else if (arg === '--no-glob' || arg === '-G')
+ noglob = true
+ else if (arg === '--glob' || arg === '-g')
+ noglob = false
+ else if (arg.match(/^(-+|\/)(h(elp)?|\?)$/))
+ help = true
+ else
+ return !!arg
+})
+
+if (help || args.length === 0) {
+ // If they didn't ask for help, then this is not a "success"
+ var log = help ? console.log : console.error
+ log('Usage: rimraf <path> [<path> ...]')
+ log('')
+ log(' Deletes all files and folders at "path" recursively.')
+ log('')
+ log('Options:')
+ log('')
+ log(' -h, --help Display this usage info')
+ log(' -G, --no-glob Do not expand glob patterns in arguments')
+ log(' -g, --glob Expand glob patterns in arguments (default)')
+ process.exit(help ? 0 : 1)
+} else
+ go(0)
+
+function go (n) {
+ if (n >= args.length)
+ return
+ var options = {}
+ if (noglob)
+ options = { glob: false }
+ rimraf(args[n], options, function (er) {
+ if (er)
+ throw er
+ go(n+1)
+ })
+}
diff --git a/node_modules/cacache/node_modules/rimraf/package.json b/node_modules/cacache/node_modules/rimraf/package.json
new file mode 100644
index 000000000..3c36faa09
--- /dev/null
+++ b/node_modules/cacache/node_modules/rimraf/package.json
@@ -0,0 +1,67 @@
+{
+ "_from": "rimraf@^2.7.1",
+ "_id": "rimraf@2.7.1",
+ "_inBundle": false,
+ "_integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==",
+ "_location": "/cacache/rimraf",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "rimraf@^2.7.1",
+ "name": "rimraf",
+ "escapedName": "rimraf",
+ "rawSpec": "^2.7.1",
+ "saveSpec": null,
+ "fetchSpec": "^2.7.1"
+ },
+ "_requiredBy": [
+ "/cacache"
+ ],
+ "_resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz",
+ "_shasum": "35797f13a7fdadc566142c29d4f07ccad483e3ec",
+ "_spec": "rimraf@^2.7.1",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/cacache",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bin": {
+ "rimraf": "bin.js"
+ },
+ "bugs": {
+ "url": "https://github.com/isaacs/rimraf/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {
+ "glob": "^7.1.3"
+ },
+ "deprecated": false,
+ "description": "A deep deletion module for node (like `rm -rf`)",
+ "devDependencies": {
+ "mkdirp": "^0.5.1",
+ "tap": "^12.1.1"
+ },
+ "files": [
+ "LICENSE",
+ "README.md",
+ "bin.js",
+ "rimraf.js"
+ ],
+ "homepage": "https://github.com/isaacs/rimraf#readme",
+ "license": "ISC",
+ "main": "rimraf.js",
+ "name": "rimraf",
+ "repository": {
+ "type": "git",
+ "url": "git://github.com/isaacs/rimraf.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --all; git push origin --tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap test/*.js"
+ },
+ "version": "2.7.1"
+}
diff --git a/node_modules/cacache/node_modules/rimraf/rimraf.js b/node_modules/cacache/node_modules/rimraf/rimraf.js
new file mode 100644
index 000000000..a90ad029f
--- /dev/null
+++ b/node_modules/cacache/node_modules/rimraf/rimraf.js
@@ -0,0 +1,372 @@
+module.exports = rimraf
+rimraf.sync = rimrafSync
+
+var assert = require("assert")
+var path = require("path")
+var fs = require("fs")
+var glob = undefined
+try {
+ glob = require("glob")
+} catch (_err) {
+ // treat glob as optional.
+}
+var _0666 = parseInt('666', 8)
+
+var defaultGlobOpts = {
+ nosort: true,
+ silent: true
+}
+
+// for EMFILE handling
+var timeout = 0
+
+var isWindows = (process.platform === "win32")
+
+function defaults (options) {
+ var methods = [
+ 'unlink',
+ 'chmod',
+ 'stat',
+ 'lstat',
+ 'rmdir',
+ 'readdir'
+ ]
+ methods.forEach(function(m) {
+ options[m] = options[m] || fs[m]
+ m = m + 'Sync'
+ options[m] = options[m] || fs[m]
+ })
+
+ options.maxBusyTries = options.maxBusyTries || 3
+ options.emfileWait = options.emfileWait || 1000
+ if (options.glob === false) {
+ options.disableGlob = true
+ }
+ if (options.disableGlob !== true && glob === undefined) {
+ throw Error('glob dependency not found, set `options.disableGlob = true` if intentional')
+ }
+ options.disableGlob = options.disableGlob || false
+ options.glob = options.glob || defaultGlobOpts
+}
+
+function rimraf (p, options, cb) {
+ if (typeof options === 'function') {
+ cb = options
+ options = {}
+ }
+
+ assert(p, 'rimraf: missing path')
+ assert.equal(typeof p, 'string', 'rimraf: path should be a string')
+ assert.equal(typeof cb, 'function', 'rimraf: callback function required')
+ assert(options, 'rimraf: invalid options argument provided')
+ assert.equal(typeof options, 'object', 'rimraf: options should be object')
+
+ defaults(options)
+
+ var busyTries = 0
+ var errState = null
+ var n = 0
+
+ if (options.disableGlob || !glob.hasMagic(p))
+ return afterGlob(null, [p])
+
+ options.lstat(p, function (er, stat) {
+ if (!er)
+ return afterGlob(null, [p])
+
+ glob(p, options.glob, afterGlob)
+ })
+
+ function next (er) {
+ errState = errState || er
+ if (--n === 0)
+ cb(errState)
+ }
+
+ function afterGlob (er, results) {
+ if (er)
+ return cb(er)
+
+ n = results.length
+ if (n === 0)
+ return cb()
+
+ results.forEach(function (p) {
+ rimraf_(p, options, function CB (er) {
+ if (er) {
+ if ((er.code === "EBUSY" || er.code === "ENOTEMPTY" || er.code === "EPERM") &&
+ busyTries < options.maxBusyTries) {
+ busyTries ++
+ var time = busyTries * 100
+ // try again, with the same exact callback as this one.
+ return setTimeout(function () {
+ rimraf_(p, options, CB)
+ }, time)
+ }
+
+ // this one won't happen if graceful-fs is used.
+ if (er.code === "EMFILE" && timeout < options.emfileWait) {
+ return setTimeout(function () {
+ rimraf_(p, options, CB)
+ }, timeout ++)
+ }
+
+ // already gone
+ if (er.code === "ENOENT") er = null
+ }
+
+ timeout = 0
+ next(er)
+ })
+ })
+ }
+}
+
+// Two possible strategies.
+// 1. Assume it's a file. unlink it, then do the dir stuff on EPERM or EISDIR
+// 2. Assume it's a directory. readdir, then do the file stuff on ENOTDIR
+//
+// Both result in an extra syscall when you guess wrong. However, there
+// are likely far more normal files in the world than directories. This
+// is based on the assumption that the average number of files per
+// directory is >= 1.
+//
+// If anyone ever complains about this, then I guess the strategy could
+// be made configurable somehow. But until then, YAGNI.
+function rimraf_ (p, options, cb) {
+ assert(p)
+ assert(options)
+ assert(typeof cb === 'function')
+
+ // sunos lets the root user unlink directories, which is... weird.
+ // so we have to lstat here and make sure it's not a dir.
+ options.lstat(p, function (er, st) {
+ if (er && er.code === "ENOENT")
+ return cb(null)
+
+ // Windows can EPERM on stat. Life is suffering.
+ if (er && er.code === "EPERM" && isWindows)
+ fixWinEPERM(p, options, er, cb)
+
+ if (st && st.isDirectory())
+ return rmdir(p, options, er, cb)
+
+ options.unlink(p, function (er) {
+ if (er) {
+ if (er.code === "ENOENT")
+ return cb(null)
+ if (er.code === "EPERM")
+ return (isWindows)
+ ? fixWinEPERM(p, options, er, cb)
+ : rmdir(p, options, er, cb)
+ if (er.code === "EISDIR")
+ return rmdir(p, options, er, cb)
+ }
+ return cb(er)
+ })
+ })
+}
+
+function fixWinEPERM (p, options, er, cb) {
+ assert(p)
+ assert(options)
+ assert(typeof cb === 'function')
+ if (er)
+ assert(er instanceof Error)
+
+ options.chmod(p, _0666, function (er2) {
+ if (er2)
+ cb(er2.code === "ENOENT" ? null : er)
+ else
+ options.stat(p, function(er3, stats) {
+ if (er3)
+ cb(er3.code === "ENOENT" ? null : er)
+ else if (stats.isDirectory())
+ rmdir(p, options, er, cb)
+ else
+ options.unlink(p, cb)
+ })
+ })
+}
+
+function fixWinEPERMSync (p, options, er) {
+ assert(p)
+ assert(options)
+ if (er)
+ assert(er instanceof Error)
+
+ try {
+ options.chmodSync(p, _0666)
+ } catch (er2) {
+ if (er2.code === "ENOENT")
+ return
+ else
+ throw er
+ }
+
+ try {
+ var stats = options.statSync(p)
+ } catch (er3) {
+ if (er3.code === "ENOENT")
+ return
+ else
+ throw er
+ }
+
+ if (stats.isDirectory())
+ rmdirSync(p, options, er)
+ else
+ options.unlinkSync(p)
+}
+
+function rmdir (p, options, originalEr, cb) {
+ assert(p)
+ assert(options)
+ if (originalEr)
+ assert(originalEr instanceof Error)
+ assert(typeof cb === 'function')
+
+ // try to rmdir first, and only readdir on ENOTEMPTY or EEXIST (SunOS)
+ // if we guessed wrong, and it's not a directory, then
+ // raise the original error.
+ options.rmdir(p, function (er) {
+ if (er && (er.code === "ENOTEMPTY" || er.code === "EEXIST" || er.code === "EPERM"))
+ rmkids(p, options, cb)
+ else if (er && er.code === "ENOTDIR")
+ cb(originalEr)
+ else
+ cb(er)
+ })
+}
+
+function rmkids(p, options, cb) {
+ assert(p)
+ assert(options)
+ assert(typeof cb === 'function')
+
+ options.readdir(p, function (er, files) {
+ if (er)
+ return cb(er)
+ var n = files.length
+ if (n === 0)
+ return options.rmdir(p, cb)
+ var errState
+ files.forEach(function (f) {
+ rimraf(path.join(p, f), options, function (er) {
+ if (errState)
+ return
+ if (er)
+ return cb(errState = er)
+ if (--n === 0)
+ options.rmdir(p, cb)
+ })
+ })
+ })
+}
+
+// this looks simpler, and is strictly *faster*, but will
+// tie up the JavaScript thread and fail on excessively
+// deep directory trees.
+function rimrafSync (p, options) {
+ options = options || {}
+ defaults(options)
+
+ assert(p, 'rimraf: missing path')
+ assert.equal(typeof p, 'string', 'rimraf: path should be a string')
+ assert(options, 'rimraf: missing options')
+ assert.equal(typeof options, 'object', 'rimraf: options should be object')
+
+ var results
+
+ if (options.disableGlob || !glob.hasMagic(p)) {
+ results = [p]
+ } else {
+ try {
+ options.lstatSync(p)
+ results = [p]
+ } catch (er) {
+ results = glob.sync(p, options.glob)
+ }
+ }
+
+ if (!results.length)
+ return
+
+ for (var i = 0; i < results.length; i++) {
+ var p = results[i]
+
+ try {
+ var st = options.lstatSync(p)
+ } catch (er) {
+ if (er.code === "ENOENT")
+ return
+
+ // Windows can EPERM on stat. Life is suffering.
+ if (er.code === "EPERM" && isWindows)
+ fixWinEPERMSync(p, options, er)
+ }
+
+ try {
+ // sunos lets the root user unlink directories, which is... weird.
+ if (st && st.isDirectory())
+ rmdirSync(p, options, null)
+ else
+ options.unlinkSync(p)
+ } catch (er) {
+ if (er.code === "ENOENT")
+ return
+ if (er.code === "EPERM")
+ return isWindows ? fixWinEPERMSync(p, options, er) : rmdirSync(p, options, er)
+ if (er.code !== "EISDIR")
+ throw er
+
+ rmdirSync(p, options, er)
+ }
+ }
+}
+
+function rmdirSync (p, options, originalEr) {
+ assert(p)
+ assert(options)
+ if (originalEr)
+ assert(originalEr instanceof Error)
+
+ try {
+ options.rmdirSync(p)
+ } catch (er) {
+ if (er.code === "ENOENT")
+ return
+ if (er.code === "ENOTDIR")
+ throw originalEr
+ if (er.code === "ENOTEMPTY" || er.code === "EEXIST" || er.code === "EPERM")
+ rmkidsSync(p, options)
+ }
+}
+
+function rmkidsSync (p, options) {
+ assert(p)
+ assert(options)
+ options.readdirSync(p).forEach(function (f) {
+ rimrafSync(path.join(p, f), options)
+ })
+
+ // We only end up here once we got ENOTEMPTY at least once, and
+ // at this point, we are guaranteed to have removed all the kids.
+ // So, we know that it won't be ENOENT or ENOTDIR or anything else.
+ // try really hard to delete stuff on windows, because it has a
+ // PROFOUNDLY annoying habit of not closing handles promptly when
+ // files are deleted, resulting in spurious ENOTEMPTY errors.
+ var retries = isWindows ? 100 : 1
+ var i = 0
+ do {
+ var threw = true
+ try {
+ var ret = options.rmdirSync(p, options)
+ threw = false
+ return ret
+ } finally {
+ if (++i < retries && threw)
+ continue
+ }
+ } while (true)
+}
diff --git a/node_modules/cacache/node_modules/ssri/CHANGELOG.md b/node_modules/cacache/node_modules/ssri/CHANGELOG.md
deleted file mode 100644
index d4c589790..000000000
--- a/node_modules/cacache/node_modules/ssri/CHANGELOG.md
+++ /dev/null
@@ -1,286 +0,0 @@
-# Change Log
-
-All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.
-
-<a name="6.0.1"></a>
-## [6.0.1](https://github.com/zkat/ssri/compare/v6.0.0...v6.0.1) (2018-08-27)
-
-
-### Bug Fixes
-
-* **opts:** use figgy-pudding to specify consumed opts ([cf86553](https://github.com/zkat/ssri/commit/cf86553))
-
-
-
-<a name="6.0.0"></a>
-# [6.0.0](https://github.com/zkat/ssri/compare/v5.3.0...v6.0.0) (2018-04-09)
-
-
-### Bug Fixes
-
-* **docs:** minor typo ([b71ef17](https://github.com/zkat/ssri/commit/b71ef17))
-
-
-### meta
-
-* drop support for node@4 ([d9bf359](https://github.com/zkat/ssri/commit/d9bf359))
-
-
-### BREAKING CHANGES
-
-* node@4 is no longer supported
-
-
-
-<a name="5.3.0"></a>
-# [5.3.0](https://github.com/zkat/ssri/compare/v5.2.4...v5.3.0) (2018-03-13)
-
-
-### Features
-
-* **checkData:** optionally throw when checkData fails ([bf26b84](https://github.com/zkat/ssri/commit/bf26b84))
-
-
-
-<a name="5.2.4"></a>
-## [5.2.4](https://github.com/zkat/ssri/compare/v5.2.3...v5.2.4) (2018-02-16)
-
-
-
-<a name="5.2.3"></a>
-## [5.2.3](https://github.com/zkat/ssri/compare/v5.2.2...v5.2.3) (2018-02-16)
-
-
-### Bug Fixes
-
-* **hashes:** filter hash priority list by available hashes ([2fa30b8](https://github.com/zkat/ssri/commit/2fa30b8))
-* **integrityStream:** dedupe algorithms to generate ([d56c654](https://github.com/zkat/ssri/commit/d56c654))
-
-
-
-<a name="5.2.2"></a>
-## [5.2.2](https://github.com/zkat/ssri/compare/v5.2.1...v5.2.2) (2018-02-14)
-
-
-### Bug Fixes
-
-* **security:** tweak strict SRI regex ([#10](https://github.com/zkat/ssri/issues/10)) ([d0ebcdc](https://github.com/zkat/ssri/commit/d0ebcdc))
-
-
-
-<a name="5.2.1"></a>
-## [5.2.1](https://github.com/zkat/ssri/compare/v5.2.0...v5.2.1) (2018-02-06)
-
-
-
-<a name="5.2.0"></a>
-# [5.2.0](https://github.com/zkat/ssri/compare/v5.1.0...v5.2.0) (2018-02-06)
-
-
-### Features
-
-* **match:** add integrity.match() ([3c49cc4](https://github.com/zkat/ssri/commit/3c49cc4))
-
-
-
-<a name="5.1.0"></a>
-# [5.1.0](https://github.com/zkat/ssri/compare/v5.0.0...v5.1.0) (2018-01-18)
-
-
-### Bug Fixes
-
-* **checkStream:** integrityStream now takes opts.integrity algos into account ([d262910](https://github.com/zkat/ssri/commit/d262910))
-
-
-### Features
-
-* **sha3:** do some guesswork about upcoming sha3 ([7fdd9df](https://github.com/zkat/ssri/commit/7fdd9df))
-
-
-
-<a name="5.0.0"></a>
-# [5.0.0](https://github.com/zkat/ssri/compare/v4.1.6...v5.0.0) (2017-10-23)
-
-
-### Features
-
-* **license:** relicense to ISC (#9) ([c82983a](https://github.com/zkat/ssri/commit/c82983a))
-
-
-### BREAKING CHANGES
-
-* **license:** the license has been changed from CC0-1.0 to ISC.
-
-
-
-<a name="4.1.6"></a>
-## [4.1.6](https://github.com/zkat/ssri/compare/v4.1.5...v4.1.6) (2017-06-07)
-
-
-### Bug Fixes
-
-* **checkStream:** make sure to pass all opts through ([0b1bcbe](https://github.com/zkat/ssri/commit/0b1bcbe))
-
-
-
-<a name="4.1.5"></a>
-## [4.1.5](https://github.com/zkat/ssri/compare/v4.1.4...v4.1.5) (2017-06-05)
-
-
-### Bug Fixes
-
-* **integrityStream:** stop crashing if opts.algorithms and opts.integrity have an algo mismatch ([fb1293e](https://github.com/zkat/ssri/commit/fb1293e))
-
-
-
-<a name="4.1.4"></a>
-## [4.1.4](https://github.com/zkat/ssri/compare/v4.1.3...v4.1.4) (2017-05-31)
-
-
-### Bug Fixes
-
-* **node:** older versions of node@4 do not support base64 buffer string parsing ([513df4e](https://github.com/zkat/ssri/commit/513df4e))
-
-
-
-<a name="4.1.3"></a>
-## [4.1.3](https://github.com/zkat/ssri/compare/v4.1.2...v4.1.3) (2017-05-24)
-
-
-### Bug Fixes
-
-* **check:** handle various bad hash corner cases better ([c2c262b](https://github.com/zkat/ssri/commit/c2c262b))
-
-
-
-<a name="4.1.2"></a>
-## [4.1.2](https://github.com/zkat/ssri/compare/v4.1.1...v4.1.2) (2017-04-18)
-
-
-### Bug Fixes
-
-* **stream:** _flush can be called multiple times. use on("end") ([b1c4805](https://github.com/zkat/ssri/commit/b1c4805))
-
-
-
-<a name="4.1.1"></a>
-## [4.1.1](https://github.com/zkat/ssri/compare/v4.1.0...v4.1.1) (2017-04-12)
-
-
-### Bug Fixes
-
-* **pickAlgorithm:** error if pickAlgorithm() is used in an empty Integrity ([fab470e](https://github.com/zkat/ssri/commit/fab470e))
-
-
-
-<a name="4.1.0"></a>
-# [4.1.0](https://github.com/zkat/ssri/compare/v4.0.0...v4.1.0) (2017-04-07)
-
-
-### Features
-
-* adding ssri.create for a crypto style interface (#2) ([96f52ad](https://github.com/zkat/ssri/commit/96f52ad))
-
-
-
-<a name="4.0.0"></a>
-# [4.0.0](https://github.com/zkat/ssri/compare/v3.0.2...v4.0.0) (2017-04-03)
-
-
-### Bug Fixes
-
-* **integrity:** should have changed the error code before. oops ([8381afa](https://github.com/zkat/ssri/commit/8381afa))
-
-
-### BREAKING CHANGES
-
-* **integrity:** EBADCHECKSUM -> EINTEGRITY for verification errors
-
-
-
-<a name="3.0.2"></a>
-## [3.0.2](https://github.com/zkat/ssri/compare/v3.0.1...v3.0.2) (2017-04-03)
-
-
-
-<a name="3.0.1"></a>
-## [3.0.1](https://github.com/zkat/ssri/compare/v3.0.0...v3.0.1) (2017-04-03)
-
-
-### Bug Fixes
-
-* **package.json:** really should have these in the keywords because search ([a6ac6d0](https://github.com/zkat/ssri/commit/a6ac6d0))
-
-
-
-<a name="3.0.0"></a>
-# [3.0.0](https://github.com/zkat/ssri/compare/v2.0.0...v3.0.0) (2017-04-03)
-
-
-### Bug Fixes
-
-* **hashes:** IntegrityMetadata -> Hash ([d04aa1f](https://github.com/zkat/ssri/commit/d04aa1f))
-
-
-### Features
-
-* **check:** return IntegrityMetadata on check success ([2301e74](https://github.com/zkat/ssri/commit/2301e74))
-* **fromHex:** ssri.fromHex to make it easier to generate them from hex values ([049b89e](https://github.com/zkat/ssri/commit/049b89e))
-* **hex:** utility function for getting hex version of digest ([a9f021c](https://github.com/zkat/ssri/commit/a9f021c))
-* **hexDigest:** added hexDigest method to Integrity objects too ([85208ba](https://github.com/zkat/ssri/commit/85208ba))
-* **integrity:** add .isIntegrity and .isIntegrityMetadata ([1b29e6f](https://github.com/zkat/ssri/commit/1b29e6f))
-* **integrityStream:** new stream that can both generate and check streamed data ([fd23e1b](https://github.com/zkat/ssri/commit/fd23e1b))
-* **parse:** allow parsing straight into a single IntegrityMetadata object ([c8ddf48](https://github.com/zkat/ssri/commit/c8ddf48))
-* **pickAlgorithm:** Integrity#pickAlgorithm() added ([b97a796](https://github.com/zkat/ssri/commit/b97a796))
-* **size:** calculate and update stream sizes ([02ed1ad](https://github.com/zkat/ssri/commit/02ed1ad))
-
-
-### BREAKING CHANGES
-
-* **hashes:** `.isIntegrityMetadata` is now `.isHash`. Also, any references to `IntegrityMetadata` now refer to `Hash`.
-* **integrityStream:** createCheckerStream has been removed and replaced with a general-purpose integrityStream.
-
-To convert existing createCheckerStream code, move the `sri` argument into `opts.integrity` in integrityStream. All other options should be the same.
-* **check:** `checkData`, `checkStream`, and `createCheckerStream` now yield a whole IntegrityMetadata instance representing the first successful hash match.
-
-
-
-<a name="2.0.0"></a>
-# [2.0.0](https://github.com/zkat/ssri/compare/v1.0.0...v2.0.0) (2017-03-24)
-
-
-### Bug Fixes
-
-* **strict-mode:** make regexes more rigid ([122a32c](https://github.com/zkat/ssri/commit/122a32c))
-
-
-### Features
-
-* **api:** added serialize alias for unparse ([999b421](https://github.com/zkat/ssri/commit/999b421))
-* **concat:** add Integrity#concat() ([cae12c7](https://github.com/zkat/ssri/commit/cae12c7))
-* **pickAlgo:** pick the strongest algorithm provided, by default ([58c18f7](https://github.com/zkat/ssri/commit/58c18f7))
-* **strict-mode:** strict SRI support ([3f0b64c](https://github.com/zkat/ssri/commit/3f0b64c))
-* **stringify:** replaced unparse/serialize with stringify ([4acad30](https://github.com/zkat/ssri/commit/4acad30))
-* **verification:** add opts.pickAlgorithm ([f72e658](https://github.com/zkat/ssri/commit/f72e658))
-
-
-### BREAKING CHANGES
-
-* **pickAlgo:** ssri will prioritize specific hashes now
-* **stringify:** serialize and unparse have been removed. Use ssri.stringify instead.
-* **strict-mode:** functions that accepted an optional `sep` argument now expect `opts.sep`.
-
-
-
-<a name="1.0.0"></a>
-# 1.0.0 (2017-03-23)
-
-
-### Features
-
-* **api:** implemented initial api ([4fbb16b](https://github.com/zkat/ssri/commit/4fbb16b))
-
-
-### BREAKING CHANGES
-
-* **api:** Initial API established.
diff --git a/node_modules/cacache/node_modules/ssri/LICENSE.md b/node_modules/cacache/node_modules/ssri/LICENSE.md
deleted file mode 100644
index 8d28acf86..000000000
--- a/node_modules/cacache/node_modules/ssri/LICENSE.md
+++ /dev/null
@@ -1,16 +0,0 @@
-ISC License
-
-Copyright (c) npm, Inc.
-
-Permission to use, copy, modify, and/or distribute this software for
-any purpose with or without fee is hereby granted, provided that the
-above copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS
-ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
-COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
-CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
-USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/cacache/node_modules/ssri/README.md b/node_modules/cacache/node_modules/ssri/README.md
deleted file mode 100644
index c250961bd..000000000
--- a/node_modules/cacache/node_modules/ssri/README.md
+++ /dev/null
@@ -1,488 +0,0 @@
-# ssri [![npm version](https://img.shields.io/npm/v/ssri.svg)](https://npm.im/ssri) [![license](https://img.shields.io/npm/l/ssri.svg)](https://npm.im/ssri) [![Travis](https://img.shields.io/travis/zkat/ssri.svg)](https://travis-ci.org/zkat/ssri) [![AppVeyor](https://ci.appveyor.com/api/projects/status/github/zkat/ssri?svg=true)](https://ci.appveyor.com/project/zkat/ssri) [![Coverage Status](https://coveralls.io/repos/github/zkat/ssri/badge.svg?branch=latest)](https://coveralls.io/github/zkat/ssri?branch=latest)
-
-[`ssri`](https://github.com/zkat/ssri), short for Standard Subresource
-Integrity, is a Node.js utility for parsing, manipulating, serializing,
-generating, and verifying [Subresource
-Integrity](https://w3c.github.io/webappsec/specs/subresourceintegrity/) hashes.
-
-## Install
-
-`$ npm install --save ssri`
-
-## Table of Contents
-
-* [Example](#example)
-* [Features](#features)
-* [Contributing](#contributing)
-* [API](#api)
- * Parsing & Serializing
- * [`parse`](#parse)
- * [`stringify`](#stringify)
- * [`Integrity#concat`](#integrity-concat)
- * [`Integrity#toString`](#integrity-to-string)
- * [`Integrity#toJSON`](#integrity-to-json)
- * [`Integrity#match`](#integrity-match)
- * [`Integrity#pickAlgorithm`](#integrity-pick-algorithm)
- * [`Integrity#hexDigest`](#integrity-hex-digest)
- * Integrity Generation
- * [`fromHex`](#from-hex)
- * [`fromData`](#from-data)
- * [`fromStream`](#from-stream)
- * [`create`](#create)
- * Integrity Verification
- * [`checkData`](#check-data)
- * [`checkStream`](#check-stream)
- * [`integrityStream`](#integrity-stream)
-
-### Example
-
-```javascript
-const ssri = require('ssri')
-
-const integrity = 'sha512-9KhgCRIx/AmzC8xqYJTZRrnO8OW2Pxyl2DIMZSBOr0oDvtEFyht3xpp71j/r/pAe1DM+JI/A+line3jUBgzQ7A==?foo'
-
-// Parsing and serializing
-const parsed = ssri.parse(integrity)
-ssri.stringify(parsed) // === integrity (works on non-Integrity objects)
-parsed.toString() // === integrity
-
-// Async stream functions
-ssri.checkStream(fs.createReadStream('./my-file'), integrity).then(...)
-ssri.fromStream(fs.createReadStream('./my-file')).then(sri => {
- sri.toString() === integrity
-})
-fs.createReadStream('./my-file').pipe(ssri.integrityStream({integrity}))
-
-// Sync data functions
-ssri.fromData(fs.readFileSync('./my-file')) // === parsed
-ssri.checkData(fs.readFileSync('./my-file'), integrity) // => 'sha512'
-```
-
-### Features
-
-* Parses and stringifies SRI strings.
-* Generates SRI strings from raw data or Streams.
-* Strict standard compliance.
-* `?foo` metadata option support.
-* Multiple entries for the same algorithm.
-* Object-based integrity hash manipulation.
-* Small footprint: no dependencies, concise implementation.
-* Full test coverage.
-* Customizable algorithm picker.
-
-### Contributing
-
-The ssri team enthusiastically welcomes contributions and project participation!
-There's a bunch of things you can do if you want to contribute! The [Contributor
-Guide](CONTRIBUTING.md) has all the information you need for everything from
-reporting bugs to contributing entire new features. Please don't hesitate to
-jump in if you'd like to, or even ask us questions if something isn't clear.
-
-### API
-
-#### <a name="parse"></a> `> ssri.parse(sri, [opts]) -> Integrity`
-
-Parses `sri` into an `Integrity` data structure. `sri` can be an integrity
-string, a `Hash`-like object with `digest` and `algorithm` fields and an optional
-`options` field, or an `Integrity`-like object. The resulting object will be an
-`Integrity` instance that has this shape:
-
-```javascript
-{
- 'sha1': [{algorithm: 'sha1', digest: 'deadbeef', options: []}],
- 'sha512': [
- {algorithm: 'sha512', digest: 'c0ffee', options: []},
- {algorithm: 'sha512', digest: 'bad1dea', options: ['foo']}
- ],
-}
-```
-
-If `opts.single` is truthy, a single `Hash` object will be returned. That is, a
-single object that looks like `{algorithm, digest, options}`, as opposed to a
-larger object with multiple of these.
-
-If `opts.strict` is truthy, the resulting object will be filtered such that
-it strictly follows the Subresource Integrity spec, throwing away any entries
-with any invalid components. This also means a restricted set of algorithms
-will be used -- the spec limits them to `sha256`, `sha384`, and `sha512`.
-
-Strict mode is recommended if the integrity strings are intended for use in
-browsers, or in other situations where strict adherence to the spec is needed.
-
-##### Example
-
-```javascript
-ssri.parse('sha512-9KhgCRIx/AmzC8xqYJTZRrnO8OW2Pxyl2DIMZSBOr0oDvtEFyht3xpp71j/r/pAe1DM+JI/A+line3jUBgzQ7A==?foo') // -> Integrity object
-```
-
-#### <a name="stringify"></a> `> ssri.stringify(sri, [opts]) -> String`
-
-This function is identical to [`Integrity#toString()`](#integrity-to-string),
-except it can be used on _any_ object that [`parse`](#parse) can handle -- that
-is, a string, a `Hash`-like, or an `Integrity`-like.
-
-The `opts.sep` option defines the string to use when joining multiple entries
-together. To be spec-compliant, this _must_ be whitespace. The default is a
-single space (`' '`).
-
-If `opts.strict` is true, the integrity string will be created using strict
-parsing rules. See [`ssri.parse`](#parse).
-
-##### Example
-
-```javascript
-// Useful for cleaning up input SRI strings:
-ssri.stringify('\n\rsha512-foo\n\t\tsha384-bar')
-// -> 'sha512-foo sha384-bar'
-
-// Hash-like: only a single entry.
-ssri.stringify({
- algorithm: 'sha512',
- digest:'9KhgCRIx/AmzC8xqYJTZRrnO8OW2Pxyl2DIMZSBOr0oDvtEFyht3xpp71j/r/pAe1DM+JI/A+line3jUBgzQ7A==',
- options: ['foo']
-})
-// ->
-// 'sha512-9KhgCRIx/AmzC8xqYJTZRrnO8OW2Pxyl2DIMZSBOr0oDvtEFyht3xpp71j/r/pAe1DM+JI/A+line3jUBgzQ7A==?foo'
-
-// Integrity-like: full multi-entry syntax. Similar to output of `ssri.parse`
-ssri.stringify({
- 'sha512': [
- {
- algorithm: 'sha512',
- digest:'9KhgCRIx/AmzC8xqYJTZRrnO8OW2Pxyl2DIMZSBOr0oDvtEFyht3xpp71j/r/pAe1DM+JI/A+line3jUBgzQ7A==',
- options: ['foo']
- }
- ]
-})
-// ->
-// 'sha512-9KhgCRIx/AmzC8xqYJTZRrnO8OW2Pxyl2DIMZSBOr0oDvtEFyht3xpp71j/r/pAe1DM+JI/A+line3jUBgzQ7A==?foo'
-```
-
-#### <a name="integrity-concat"></a> `> Integrity#concat(otherIntegrity, [opts]) -> Integrity`
-
-Concatenates an `Integrity` object with another IntegrityLike, or an integrity
-string.
-
-This is functionally equivalent to concatenating the string format of both
-integrity arguments, and calling [`ssri.parse`](#ssri-parse) on the new string.
-
-If `opts.strict` is true, the new `Integrity` will be created using strict
-parsing rules. See [`ssri.parse`](#parse).
-
-##### Example
-
-```javascript
-// This will combine the integrity checks for two different versions of
-// your index.js file so you can use a single integrity string and serve
-// either of these to clients, from a single `<script>` tag.
-const desktopIntegrity = ssri.fromData(fs.readFileSync('./index.desktop.js'))
-const mobileIntegrity = ssri.fromData(fs.readFileSync('./index.mobile.js'))
-
-// Note that browsers (and ssri) will succeed as long as ONE of the entries
-// for the *prioritized* algorithm succeeds. That is, in order for this fallback
-// to work, both desktop and mobile *must* use the same `algorithm` values.
-desktopIntegrity.concat(mobileIntegrity)
-```
-
-#### <a name="integrity-to-string"></a> `> Integrity#toString([opts]) -> String`
-
-Returns the string representation of an `Integrity` object. All hash entries
-will be concatenated in the string by `opts.sep`, which defaults to `' '`.
-
-If you want to serialize an object that didn't come from an `ssri` function,
-use [`ssri.stringify()`](#stringify).
-
-If `opts.strict` is true, the integrity string will be created using strict
-parsing rules. See [`ssri.parse`](#parse).
-
-##### Example
-
-```javascript
-const integrity = 'sha512-9KhgCRIx/AmzC8xqYJTZRrnO8OW2Pxyl2DIMZSBOr0oDvtEFyht3xpp71j/r/pAe1DM+JI/A+line3jUBgzQ7A==?foo'
-
-ssri.parse(integrity).toString() === integrity
-```
-
-#### <a name="integrity-to-json"></a> `> Integrity#toJSON() -> String`
-
-Returns the string representation of an `Integrity` object. All hash entries
-will be concatenated in the string by `' '`.
-
-This is a convenience method so you can pass an `Integrity` object directly to `JSON.stringify`.
-For more info check out [toJSON() behavior on mdn](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#toJSON%28%29_behavior).
-
-##### Example
-
-```javascript
-const integrity = '"sha512-9KhgCRIx/AmzC8xqYJTZRrnO8OW2Pxyl2DIMZSBOr0oDvtEFyht3xpp71j/r/pAe1DM+JI/A+line3jUBgzQ7A==?foo"'
-
-JSON.stringify(ssri.parse(integrity)) === integrity
-```
-
-#### <a name="integrity-match"></a> `> Integrity#match(sri, [opts]) -> Hash | false`
-
-Returns the matching (truthy) hash if `Integrity` matches the argument passed as
-`sri`, which can be anything that [`parse`](#parse) will accept. `opts` will be
-passed through to `parse` and [`pickAlgorithm()`](#integrity-pick-algorithm).
-
-##### Example
-
-```javascript
-const integrity = 'sha512-9KhgCRIx/AmzC8xqYJTZRrnO8OW2Pxyl2DIMZSBOr0oDvtEFyht3xpp71j/r/pAe1DM+JI/A+line3jUBgzQ7A=='
-
-ssri.parse(integrity).match(integrity)
-// Hash {
-// digest: '9KhgCRIx/AmzC8xqYJTZRrnO8OW2Pxyl2DIMZSBOr0oDvtEFyht3xpp71j/r/pAe1DM+JI/A+line3jUBgzQ7A=='
-// algorithm: 'sha512'
-// }
-
-ssri.parse(integrity).match('sha1-deadbeef')
-// false
-```
-
-#### <a name="integrity-pick-algorithm"></a> `> Integrity#pickAlgorithm([opts]) -> String`
-
-Returns the "best" algorithm from those available in the integrity object.
-
-If `opts.pickAlgorithm` is provided, it will be passed two algorithms as
-arguments. ssri will prioritize whichever of the two algorithms is returned by
-this function. Note that the function may be called multiple times, and it
-**must** return one of the two algorithms provided. By default, ssri will make
-a best-effort to pick the strongest/most reliable of the given algorithms. It
-may intentionally deprioritize algorithms with known vulnerabilities.
-
-##### Example
-
-```javascript
-ssri.parse('sha1-WEakDigEST sha512-yzd8ELD1piyANiWnmdnpCL5F52f10UfUdEkHywVZeqTt0ymgrxR63Qz0GB7TKPoeeZQmWCaz7T1').pickAlgorithm() // sha512
-```
-
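-A custom comparator can be supplied via `opts.pickAlgorithm`. A minimal
-sketch (the `preferSha384` name is illustrative, not part of ssri):
-
-```javascript
-// Must return one of the two algorithms it is given.
-const preferSha384 = (a, b) => (a === 'sha384' ? a : b === 'sha384' ? b : a)
-
-ssri.parse('sha384-bar sha512-foo').pickAlgorithm({pickAlgorithm: preferSha384})
-// -> 'sha384'
-```
-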
-#### <a name="integrity-hex-digest"></a> `> Integrity#hexDigest() -> String`
-
-`Integrity` is assumed to be either a single-hash `Integrity` instance, or a
-`Hash` instance. Returns its `digest`, converted to a hex representation of the
-base64 data.
-
-##### Example
-
-```javascript
-ssri.parse('sha1-deadbeef').hexDigest() // '75e69d6de79f'
-```
-
-#### <a name="from-hex"></a> `> ssri.fromHex(hexDigest, algorithm, [opts]) -> Integrity`
-
-Creates an `Integrity` object with a single entry, based on a hex-formatted
-hash. This is a utility function to help convert existing shasums to the
-Integrity format, and is roughly equivalent to something like:
-
-```javascript
-algorithm + '-' + Buffer.from(hexDigest, 'hex').toString('base64')
-```
-
-`opts.options` may optionally be passed in: it must be an array of option
-strings that will be added to all integrity hashes generated by
-`fromHex`. This is a loosely-specified feature of SRIs, and currently has no
-specified semantics besides being `?`-separated. Use at your own risk, and
-probably avoid if your integrity strings are meant to be used with browsers.
-
-If `opts.strict` is true, the integrity object will be created using strict
-parsing rules. See [`ssri.parse`](#parse).
-
-If `opts.single` is true, a single `Hash` object will be returned.
-
-##### Example
-
-```javascript
-ssri.fromHex('75e69d6de79f', 'sha1').toString() // 'sha1-deadbeef'
-```
-
-#### <a name="from-data"></a> `> ssri.fromData(data, [opts]) -> Integrity`
-
-Creates an `Integrity` object from either string or `Buffer` data, calculating
-all the requested hashes and adding any specified options to the object.
-
-`opts.algorithms` determines which algorithms to generate hashes for. All
-results will be included in a single `Integrity` object. The default value for
-`opts.algorithms` is `['sha512']`. All algorithm strings must be hashes listed
-in `crypto.getHashes()` for the host Node.js platform.
-
-`opts.options` may optionally be passed in: it must be an array of option
-strings that will be added to all integrity hashes generated by
-`fromData`. This is a loosely-specified feature of SRIs, and currently has no
-specified semantics besides being `?`-separated. Use at your own risk, and
-probably avoid if your integrity strings are meant to be used with browsers.
-
-If `opts.strict` is true, the integrity object will be created using strict
-parsing rules. See [`ssri.parse`](#parse).
-
-##### Example
-
-```javascript
-const integrity = ssri.fromData('foobarbaz', {
- algorithms: ['sha256', 'sha384', 'sha512']
-})
-integrity.toString({sep: '\n'})
-// ->
-// sha256-l981iLWj8kurw4UbNy8Lpxqdzd7UOxS50Glhv8FwfZ0=
-// sha384-irnCxQ0CfQhYGlVAUdwTPC9bF3+YWLxlaDGM4xbYminxpbXEq+D+2GCEBTxcjES9
-// sha512-yzd8ELD1piyANiWnmdnpCL5F52f10UfUdEkHywVZeqTt0ymgrxR63Qz0GB7TKPoeeZQmWCaz7T1+9vBnypkYWg==
-```
-
-#### <a name="from-stream"></a> `> ssri.fromStream(stream, [opts]) -> Promise<Integrity>`
-
-Returns a Promise of an Integrity object calculated by reading data from
-a given `stream`.
-
-It accepts both `opts.algorithms` and `opts.options`, which are documented as
-part of [`ssri.fromData`](#from-data).
-
-Additionally, `opts.Promise` may be passed in to inject a Promise library of
-choice. By default, ssri will use Node's built-in Promises.
-
-If `opts.strict` is true, the integrity object will be created using strict
-parsing rules. See [`ssri.parse`](#parse).
-
-##### Example
-
-```javascript
-ssri.fromStream(fs.createReadStream('index.js'), {
- algorithms: ['sha1', 'sha512']
-}).then(integrity => {
- return ssri.checkStream(fs.createReadStream('index.js'), integrity)
-}) // succeeds
-```
-
-#### <a name="create"></a> `> ssri.create([opts]) -> <Hash>`
-
-Returns a Hash object with `update(<Buffer or string>[,enc])` and `digest()` methods.
-
-
-The Hash object provides the same methods as [crypto class Hash](https://nodejs.org/dist/latest-v6.x/docs/api/crypto.html#crypto_class_hash).
-`digest()` accepts no arguments and returns an Integrity object calculated by reading data from
-calls to update.
-
-It accepts both `opts.algorithms` and `opts.options`, which are documented as
-part of [`ssri.fromData`](#from-data).
-
-If `opts.strict` is true, the integrity object will be created using strict
-parsing rules. See [`ssri.parse`](#parse).
-
-##### Example
-
-```javascript
-const integrity = ssri.create().update('foobarbaz').digest()
-integrity.toString()
-// ->
-// sha512-yzd8ELD1piyANiWnmdnpCL5F52f10UfUdEkHywVZeqTt0ymgrxR63Qz0GB7TKPoeeZQmWCaz7T1+9vBnypkYWg==
-```
-
-#### <a name="check-data"></a> `> ssri.checkData(data, sri, [opts]) -> Hash|false`
-
-Verifies `data` integrity against an `sri` argument. `data` may be either a
-`String` or a `Buffer`, and `sri` can be any subresource integrity
-representation that [`ssri.parse`](#parse) can handle.
-
-If verification succeeds, `checkData` will return the name of the algorithm that
-was used for verification (a truthy value). Otherwise, it will return `false`.
-
-If `opts.pickAlgorithm` is provided, it will be used by
-[`Integrity#pickAlgorithm`](#integrity-pick-algorithm) when deciding which of
-the available digests to match against.
-
-If `opts.error` is true, and verification fails, `checkData` will throw either
-an `EBADSIZE` or an `EINTEGRITY` error, instead of just returning false.
-
-##### Example
-
-```javascript
-const data = fs.readFileSync('index.js')
-ssri.checkData(data, ssri.fromData(data)) // -> 'sha512'
-ssri.checkData(data, 'sha256-l981iLWj8kurw4UbNy8Lpxqdzd7UOxS50Glhv8FwfZ0')
-ssri.checkData(data, 'sha1-BaDDigEST') // -> false
-ssri.checkData(data, 'sha1-BaDDigEST', {error: true}) // -> Error! EINTEGRITY
-```
-
-#### <a name="check-stream"></a> `> ssri.checkStream(stream, sri, [opts]) -> Promise<Hash>`
-
-Verifies the contents of `stream` against an `sri` argument. `stream` will be
-consumed in its entirety by this process. `sri` can be any subresource integrity
-representation that [`ssri.parse`](#parse) can handle.
-
-`checkStream` will return a Promise that either resolves to the
-`Hash` that succeeded verification, or, if the verification fails
-or an error happens with `stream`, the Promise will be rejected.
-
-If the Promise is rejected because verification failed, the returned error will
-have `err.code` as `EINTEGRITY`.
-
-If `opts.size` is given, it will be matched against the stream size. An error
-with `err.code` `EBADSIZE` will be returned by a rejection if the expected size
-and actual size fail to match.
-
-If `opts.pickAlgorithm` is provided, it will be used by
-[`Integrity#pickAlgorithm`](#integrity-pick-algorithm) when deciding which of
-the available digests to match against.
-
-##### Example
-
-```javascript
-const integrity = ssri.fromData(fs.readFileSync('index.js'))
-
-ssri.checkStream(
- fs.createReadStream('index.js'),
- integrity
-)
-// ->
-// Promise<{
-// algorithm: 'sha512',
-// digest: 'sha512-yzd8ELD1piyANiWnmdnpCL5F52f10UfUdEkHywVZeqTt0ymgrxR63Qz0GB7TKPoeeZQmWCaz7T1'
-// }>
-
-ssri.checkStream(
- fs.createReadStream('index.js'),
- 'sha256-l981iLWj8kurw4UbNy8Lpxqdzd7UOxS50Glhv8FwfZ0'
-) // -> Promise<Hash>
-
-ssri.checkStream(
- fs.createReadStream('index.js'),
- 'sha1-BaDDigEST'
-) // -> Promise<Error<{code: 'EINTEGRITY'}>>
-```
-
-#### <a name="integrity-stream"></a> `> integrityStream([opts]) -> IntegrityStream`
-
-Returns a `Transform` stream that data can be piped through in order to generate
-and optionally check data integrity for piped data. When the stream completes
-successfully, it emits `size` and `integrity` events, containing the total
-number of bytes processed and a calculated `Integrity` instance based on stream
-data, respectively.
-
-If `opts.algorithms` is passed in, the listed algorithms will be calculated when
-generating the final `Integrity` instance. The default is `['sha512']`.
-
-If `opts.single` is passed in, a single `Hash` instance will be returned.
-
-If `opts.integrity` is passed in, it should be an `integrity` value understood
-by [`parse`](#parse) that the stream will check the data against. If
-verification succeeds, the integrity stream will emit a `verified` event whose
-value is a single `Hash` object that is the one that succeeded verification. If
-verification fails, the stream will error with an `EINTEGRITY` error code.
-
-If `opts.size` is given, it will be matched against the stream size. An error
-with `err.code` `EBADSIZE` will be emitted by the stream if the expected size
-and actual size fail to match.
-
-If `opts.pickAlgorithm` is provided, it will be passed two algorithms as
-arguments. ssri will prioritize whichever of the two algorithms is returned by
-this function. Note that the function may be called multiple times, and it
-**must** return one of the two algorithms provided. By default, ssri will make
-a best-effort to pick the strongest/most reliable of the given algorithms. It
-may intentionally deprioritize algorithms with known vulnerabilities.
-
-##### Example
-
-```javascript
-const integrity = ssri.fromData(fs.readFileSync('index.js'))
-fs.createReadStream('index.js')
-.pipe(ssri.integrityStream({integrity}))
-```
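-
-The emitted events can be consumed directly. A minimal sketch, assuming an
-`index.js` file exists, that generates and verifies in a single pass:
-
-```javascript
-const fs = require('fs')
-const ssri = require('ssri')
-
-const integrity = ssri.fromData(fs.readFileSync('index.js'))
-
-fs.createReadStream('index.js')
-  .pipe(ssri.integrityStream({integrity}))
-  .on('size', bytes => console.log(`processed ${bytes} bytes`))
-  .on('integrity', sri => console.log(`calculated ${sri}`))
-  .on('verified', hash => console.log(`verified against ${hash}`))
-  .on('error', err => console.error(err.code)) // EINTEGRITY or EBADSIZE
-  .resume() // the events fire at the end of the stream, so let the data flow
-```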
diff --git a/node_modules/cacache/node_modules/ssri/index.js b/node_modules/cacache/node_modules/ssri/index.js
deleted file mode 100644
index e102892b0..000000000
--- a/node_modules/cacache/node_modules/ssri/index.js
+++ /dev/null
@@ -1,395 +0,0 @@
-'use strict'
-
-const crypto = require('crypto')
-const figgyPudding = require('figgy-pudding')
-const Transform = require('stream').Transform
-
-const SPEC_ALGORITHMS = ['sha256', 'sha384', 'sha512']
-
-const BASE64_REGEX = /^[a-z0-9+/]+(?:=?=?)$/i
-const SRI_REGEX = /^([^-]+)-([^?]+)([?\S*]*)$/
-const STRICT_SRI_REGEX = /^([^-]+)-([A-Za-z0-9+/=]{44,88})(\?[\x21-\x7E]*)*$/
-const VCHAR_REGEX = /^[\x21-\x7E]+$/
-
-const SsriOpts = figgyPudding({
- algorithms: {default: ['sha512']},
- error: {default: false},
- integrity: {},
- options: {default: []},
- pickAlgorithm: {default: () => getPrioritizedHash},
- Promise: {default: () => Promise},
- sep: {default: ' '},
- single: {default: false},
- size: {},
- strict: {default: false}
-})
-
-class Hash {
- get isHash () { return true }
- constructor (hash, opts) {
- opts = SsriOpts(opts)
- const strict = !!opts.strict
- this.source = hash.trim()
- // 3.1. Integrity metadata (called "Hash" by ssri)
- // https://w3c.github.io/webappsec-subresource-integrity/#integrity-metadata-description
- const match = this.source.match(
- strict
- ? STRICT_SRI_REGEX
- : SRI_REGEX
- )
- if (!match) { return }
- if (strict && !SPEC_ALGORITHMS.some(a => a === match[1])) { return }
- this.algorithm = match[1]
- this.digest = match[2]
-
- const rawOpts = match[3]
- this.options = rawOpts ? rawOpts.slice(1).split('?') : []
- }
- hexDigest () {
- return this.digest && Buffer.from(this.digest, 'base64').toString('hex')
- }
- toJSON () {
- return this.toString()
- }
- toString (opts) {
- opts = SsriOpts(opts)
- if (opts.strict) {
- // Strict mode enforces the standard as close to the foot of the
- // letter as it can.
- if (!(
- // The spec has very restricted productions for algorithms.
- // https://www.w3.org/TR/CSP2/#source-list-syntax
- SPEC_ALGORITHMS.some(x => x === this.algorithm) &&
- // Usually, if someone insists on using a "different" base64, we
- // leave it as-is, since there's multiple standards, and the
- // specified is not a URL-safe variant.
- // https://www.w3.org/TR/CSP2/#base64_value
- this.digest.match(BASE64_REGEX) &&
- // Option syntax is strictly visual chars.
- // https://w3c.github.io/webappsec-subresource-integrity/#grammardef-option-expression
- // https://tools.ietf.org/html/rfc5234#appendix-B.1
- (this.options || []).every(opt => opt.match(VCHAR_REGEX))
- )) {
- return ''
- }
- }
- const options = this.options && this.options.length
- ? `?${this.options.join('?')}`
- : ''
- return `${this.algorithm}-${this.digest}${options}`
- }
-}
-
-class Integrity {
- get isIntegrity () { return true }
- toJSON () {
- return this.toString()
- }
- toString (opts) {
- opts = SsriOpts(opts)
- let sep = opts.sep || ' '
- if (opts.strict) {
- // Entries must be separated by whitespace, according to spec.
- sep = sep.replace(/\S+/g, ' ')
- }
- return Object.keys(this).map(k => {
- return this[k].map(hash => {
- return Hash.prototype.toString.call(hash, opts)
- }).filter(x => x.length).join(sep)
- }).filter(x => x.length).join(sep)
- }
- concat (integrity, opts) {
- opts = SsriOpts(opts)
- const other = typeof integrity === 'string'
- ? integrity
- : stringify(integrity, opts)
- return parse(`${this.toString(opts)} ${other}`, opts)
- }
- hexDigest () {
- return parse(this, {single: true}).hexDigest()
- }
- match (integrity, opts) {
- opts = SsriOpts(opts)
- const other = parse(integrity, opts)
- const algo = other.pickAlgorithm(opts)
- return (
- this[algo] &&
- other[algo] &&
- this[algo].find(hash =>
- other[algo].find(otherhash =>
- hash.digest === otherhash.digest
- )
- )
- ) || false
- }
- pickAlgorithm (opts) {
- opts = SsriOpts(opts)
- const pickAlgorithm = opts.pickAlgorithm
- const keys = Object.keys(this)
- if (!keys.length) {
- throw new Error(`No algorithms available for ${
- JSON.stringify(this.toString())
- }`)
- }
- return keys.reduce((acc, algo) => {
- return pickAlgorithm(acc, algo) || acc
- })
- }
-}
-
-module.exports.parse = parse
-function parse (sri, opts) {
- opts = SsriOpts(opts)
- if (typeof sri === 'string') {
- return _parse(sri, opts)
- } else if (sri.algorithm && sri.digest) {
- const fullSri = new Integrity()
- fullSri[sri.algorithm] = [sri]
- return _parse(stringify(fullSri, opts), opts)
- } else {
- return _parse(stringify(sri, opts), opts)
- }
-}
-
-function _parse (integrity, opts) {
- // 3.4.3. Parse metadata
- // https://w3c.github.io/webappsec-subresource-integrity/#parse-metadata
- if (opts.single) {
- return new Hash(integrity, opts)
- }
- return integrity.trim().split(/\s+/).reduce((acc, string) => {
- const hash = new Hash(string, opts)
- if (hash.algorithm && hash.digest) {
- const algo = hash.algorithm
- if (!acc[algo]) { acc[algo] = [] }
- acc[algo].push(hash)
- }
- return acc
- }, new Integrity())
-}
-
-module.exports.stringify = stringify
-function stringify (obj, opts) {
- opts = SsriOpts(opts)
- if (obj.algorithm && obj.digest) {
- return Hash.prototype.toString.call(obj, opts)
- } else if (typeof obj === 'string') {
- return stringify(parse(obj, opts), opts)
- } else {
- return Integrity.prototype.toString.call(obj, opts)
- }
-}
-
-module.exports.fromHex = fromHex
-function fromHex (hexDigest, algorithm, opts) {
- opts = SsriOpts(opts)
- const optString = opts.options && opts.options.length
- ? `?${opts.options.join('?')}`
- : ''
- return parse(
- `${algorithm}-${
- Buffer.from(hexDigest, 'hex').toString('base64')
- }${optString}`, opts
- )
-}
-
-module.exports.fromData = fromData
-function fromData (data, opts) {
- opts = SsriOpts(opts)
- const algorithms = opts.algorithms
- const optString = opts.options && opts.options.length
- ? `?${opts.options.join('?')}`
- : ''
- return algorithms.reduce((acc, algo) => {
- const digest = crypto.createHash(algo).update(data).digest('base64')
- const hash = new Hash(
- `${algo}-${digest}${optString}`,
- opts
- )
- if (hash.algorithm && hash.digest) {
- const algo = hash.algorithm
- if (!acc[algo]) { acc[algo] = [] }
- acc[algo].push(hash)
- }
- return acc
- }, new Integrity())
-}
-
-module.exports.fromStream = fromStream
-function fromStream (stream, opts) {
- opts = SsriOpts(opts)
- const P = opts.Promise || Promise
- const istream = integrityStream(opts)
- return new P((resolve, reject) => {
- stream.pipe(istream)
- stream.on('error', reject)
- istream.on('error', reject)
- let sri
- istream.on('integrity', s => { sri = s })
- istream.on('end', () => resolve(sri))
- istream.on('data', () => {})
- })
-}
-
-module.exports.checkData = checkData
-function checkData (data, sri, opts) {
- opts = SsriOpts(opts)
- sri = parse(sri, opts)
- if (!Object.keys(sri).length) {
- if (opts.error) {
- throw Object.assign(
- new Error('No valid integrity hashes to check against'), {
- code: 'EINTEGRITY'
- }
- )
- } else {
- return false
- }
- }
- const algorithm = sri.pickAlgorithm(opts)
- const digest = crypto.createHash(algorithm).update(data).digest('base64')
- const newSri = parse({algorithm, digest})
- const match = newSri.match(sri, opts)
- if (match || !opts.error) {
- return match
- } else if (typeof opts.size === 'number' && (data.length !== opts.size)) {
- const err = new Error(`data size mismatch when checking ${sri}.\n Wanted: ${opts.size}\n Found: ${data.length}`)
- err.code = 'EBADSIZE'
- err.found = data.length
- err.expected = opts.size
- err.sri = sri
- throw err
- } else {
- const err = new Error(`Integrity checksum failed when using ${algorithm}: Wanted ${sri}, but got ${newSri}. (${data.length} bytes)`)
- err.code = 'EINTEGRITY'
- err.found = newSri
- err.expected = sri
- err.algorithm = algorithm
- err.sri = sri
- throw err
- }
-}
-
-module.exports.checkStream = checkStream
-function checkStream (stream, sri, opts) {
- opts = SsriOpts(opts)
- const P = opts.Promise || Promise
- const checker = integrityStream(opts.concat({
- integrity: sri
- }))
- return new P((resolve, reject) => {
- stream.pipe(checker)
- stream.on('error', reject)
- checker.on('error', reject)
- let sri
- checker.on('verified', s => { sri = s })
- checker.on('end', () => resolve(sri))
- checker.on('data', () => {})
- })
-}
-
-module.exports.integrityStream = integrityStream
-function integrityStream (opts) {
- opts = SsriOpts(opts)
- // For verification
- const sri = opts.integrity && parse(opts.integrity, opts)
- const goodSri = sri && Object.keys(sri).length
- const algorithm = goodSri && sri.pickAlgorithm(opts)
- const digests = goodSri && sri[algorithm]
- // Calculating stream
- const algorithms = Array.from(
- new Set(opts.algorithms.concat(algorithm ? [algorithm] : []))
- )
- const hashes = algorithms.map(crypto.createHash)
- let streamSize = 0
- const stream = new Transform({
- transform (chunk, enc, cb) {
- streamSize += chunk.length
- hashes.forEach(h => h.update(chunk, enc))
- cb(null, chunk, enc)
- }
- }).on('end', () => {
- const optString = (opts.options && opts.options.length)
- ? `?${opts.options.join('?')}`
- : ''
- const newSri = parse(hashes.map((h, i) => {
- return `${algorithms[i]}-${h.digest('base64')}${optString}`
- }).join(' '), opts)
- // Integrity verification mode
- const match = goodSri && newSri.match(sri, opts)
- if (typeof opts.size === 'number' && streamSize !== opts.size) {
- const err = new Error(`stream size mismatch when checking ${sri}.\n Wanted: ${opts.size}\n Found: ${streamSize}`)
- err.code = 'EBADSIZE'
- err.found = streamSize
- err.expected = opts.size
- err.sri = sri
- stream.emit('error', err)
- } else if (opts.integrity && !match) {
- const err = new Error(`${sri} integrity checksum failed when using ${algorithm}: wanted ${digests} but got ${newSri}. (${streamSize} bytes)`)
- err.code = 'EINTEGRITY'
- err.found = newSri
- err.expected = digests
- err.algorithm = algorithm
- err.sri = sri
- stream.emit('error', err)
- } else {
- stream.emit('size', streamSize)
- stream.emit('integrity', newSri)
- match && stream.emit('verified', match)
- }
- })
- return stream
-}
-
-module.exports.create = createIntegrity
-function createIntegrity (opts) {
- opts = SsriOpts(opts)
- const algorithms = opts.algorithms
- const optString = opts.options.length
- ? `?${opts.options.join('?')}`
- : ''
-
- const hashes = algorithms.map(crypto.createHash)
-
- return {
- update: function (chunk, enc) {
- hashes.forEach(h => h.update(chunk, enc))
- return this
- },
- digest: function (enc) {
- const integrity = algorithms.reduce((acc, algo) => {
- const digest = hashes.shift().digest('base64')
- const hash = new Hash(
- `${algo}-${digest}${optString}`,
- opts
- )
- if (hash.algorithm && hash.digest) {
- const algo = hash.algorithm
- if (!acc[algo]) { acc[algo] = [] }
- acc[algo].push(hash)
- }
- return acc
- }, new Integrity())
-
- return integrity
- }
- }
-}
-
-const NODE_HASHES = new Set(crypto.getHashes())
-
-// This is a Best Effort™ at a reasonable priority for hash algos
-const DEFAULT_PRIORITY = [
- 'md5', 'whirlpool', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512',
- // TODO - it's unclear _which_ of these Node will actually use as its name
- // for the algorithm, so we guesswork it based on the OpenSSL names.
- 'sha3',
- 'sha3-256', 'sha3-384', 'sha3-512',
- 'sha3_256', 'sha3_384', 'sha3_512'
-].filter(algo => NODE_HASHES.has(algo))
-
-function getPrioritizedHash (algo1, algo2) {
- return DEFAULT_PRIORITY.indexOf(algo1.toLowerCase()) >= DEFAULT_PRIORITY.indexOf(algo2.toLowerCase())
- ? algo1
- : algo2
-}
diff --git a/node_modules/cacache/node_modules/ssri/package.json b/node_modules/cacache/node_modules/ssri/package.json
deleted file mode 100644
index 002c66229..000000000
--- a/node_modules/cacache/node_modules/ssri/package.json
+++ /dev/null
@@ -1,89 +0,0 @@
-{
- "_from": "ssri@^6.0.1",
- "_id": "ssri@6.0.1",
- "_inBundle": false,
- "_integrity": "sha512-3Wge10hNcT1Kur4PDFwEieXSCMCJs/7WvSACcrMYrNp+b8kDL1/0wJch5Ni2WrtwEa2IO8OsVfeKIciKCDx/QA==",
- "_location": "/cacache/ssri",
- "_phantomChildren": {},
- "_requested": {
- "type": "range",
- "registry": true,
- "raw": "ssri@^6.0.1",
- "name": "ssri",
- "escapedName": "ssri",
- "rawSpec": "^6.0.1",
- "saveSpec": null,
- "fetchSpec": "^6.0.1"
- },
- "_requiredBy": [
- "/cacache"
- ],
- "_resolved": "https://registry.npmjs.org/ssri/-/ssri-6.0.1.tgz",
- "_shasum": "2a3c41b28dd45b62b63676ecb74001265ae9edd8",
- "_spec": "ssri@^6.0.1",
- "_where": "/Users/claudiahdz/npm/cli/node_modules/cacache",
- "author": {
- "name": "Kat Marchán",
- "email": "kzm@sykosomatic.org"
- },
- "bugs": {
- "url": "https://github.com/zkat/ssri/issues"
- },
- "bundleDependencies": false,
- "config": {
- "nyc": {
- "exclude": [
- "node_modules/**",
- "test/**"
- ]
- }
- },
- "dependencies": {
- "figgy-pudding": "^3.5.1"
- },
- "deprecated": false,
- "description": "Standard Subresource Integrity library -- parses, serializes, generates, and verifies integrity metadata according to the SRI spec.",
- "devDependencies": {
- "nyc": "^11.4.1",
- "standard": "^10.0.3",
- "standard-version": "^4.3.0",
- "tap": "^11.1.0",
- "weallbehave": "^1.2.0",
- "weallcontribute": "^1.0.8"
- },
- "files": [
- "*.js"
- ],
- "homepage": "https://github.com/zkat/ssri#readme",
- "keywords": [
- "w3c",
- "web",
- "security",
- "integrity",
- "checksum",
- "hashing",
- "subresource integrity",
- "sri",
- "sri hash",
- "sri string",
- "sri generator",
- "html"
- ],
- "license": "ISC",
- "main": "index.js",
- "name": "ssri",
- "repository": {
- "type": "git",
- "url": "git+https://github.com/zkat/ssri.git"
- },
- "scripts": {
- "postrelease": "npm publish && git push --follow-tags",
- "prerelease": "npm t",
- "pretest": "standard",
- "release": "standard-version -s",
- "test": "tap -J --coverage test/*.js",
- "update-coc": "weallbehave -o . && git add CODE_OF_CONDUCT.md && git commit -m 'docs(coc): updated CODE_OF_CONDUCT.md'",
- "update-contrib": "weallcontribute -o . && git add CONTRIBUTING.md && git commit -m 'docs(contributing): updated CONTRIBUTING.md'"
- },
- "version": "6.0.1"
-}
diff --git a/node_modules/cacache/node_modules/tar/CHANGELOG.md b/node_modules/cacache/node_modules/tar/CHANGELOG.md
new file mode 100644
index 000000000..14b5c4ee8
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/CHANGELOG.md
@@ -0,0 +1,66 @@
+# Charge Long
+
+## 6.0
+
+- Drop support for node 6 and 8
+- fix symlinks and hardlinks on windows being packed with `\`-style path
+ targets
+
+## 5.0
+
+- Address unpack race conditions using path reservations
+- Change large-numbers errors from TypeError to Error
+- Add `TAR_*` error codes
+- do not treat ignored entries as an invalid archive
+- drop support for node v4
+- unpack: conditionally use a file mapping to write files on Windows
+- Set more portable 'mode' value in portable mode
+- Set `portable` gzip option in portable mode
+
+## 4.4
+
+- Add 'mtime' option to tar creation to force mtime
+- unpack: only reuse file fs entries if nlink = 1
+- unpack: rename before unlinking files on Windows
+- Fix encoding/decoding of base-256 numbers
+- Use `stat` instead of `lstat` when checking CWD
+- Always provide a callback to fs.close()
+
+## 4.3
+
+- Add 'transform' unpack option
+
+## 4.2
+
+- Fail when zlib fails
+
+## 4.1
+
+- Add noMtime flag for tar creation
+
+## 4.0
+
+- unpack: raise error if cwd is missing or not a dir
+- pack: don't drop dots from dotfiles when prefixing
+
+## 3.1
+
+- Support `@file.tar` as an entry argument to copy entries from one tar
+ file to another.
+- Add `noPax` option
+- `noResume` option for tar.t
+- win32: convert `>|<?:` chars to windows-friendly form
+- Exclude mtime for dirs in portable mode
+
+## 3.0
+
+- Minipass-based implementation
+- Entirely new API surface, `tar.c()`, `tar.x()` etc., much closer to
+ system tar semantics
+- Massive performance improvement
+- Require node 4.x and higher
+
+## 0.x, 1.x, 2.x - 2011-2014
+
+- fstream-based implementation
+- slow and kinda bad, but better than npm shelling out to the system `tar`
diff --git a/node_modules/cacache/node_modules/tar/LICENSE b/node_modules/cacache/node_modules/tar/LICENSE
new file mode 100644
index 000000000..19129e315
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/cacache/node_modules/tar/README.md b/node_modules/cacache/node_modules/tar/README.md
new file mode 100644
index 000000000..5e635e622
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/README.md
@@ -0,0 +1,1031 @@
+# node-tar
+
+[![Build Status](https://travis-ci.org/npm/node-tar.svg?branch=master)](https://travis-ci.org/npm/node-tar)
+
+[Fast](./benchmarks) and full-featured Tar for Node.js
+
+The API is designed to mimic the behavior of `tar(1)` on unix systems.
+If you are familiar with how tar works, most of this will hopefully be
+straightforward for you. If not, then hopefully this module can teach
+you useful unix skills that may come in handy someday :)
+
+## Background
+
+A "tar file" or "tarball" is an archive of file system entries
+(directories, files, links, etc.) The name comes from "tape archive".
+If you run `man tar` on almost any Unix command line, you'll learn
+quite a bit about what it can do, and its history.
+
+Tar has 5 main top-level commands:
+
+* `c` Create an archive
+* `r` Replace entries within an archive
+* `u` Update entries within an archive (ie, replace if they're newer)
+* `t` List out the contents of an archive
+* `x` Extract an archive to disk
+
+The other flags and options modify how this top level function works.
+
+## High-Level API
+
+These 5 functions are the high-level API. All of them have a
+single-character name (for unix nerds familiar with `tar(1)`) as well
+as a long name (for everyone else).
+
+All the high-level functions take the following arguments, all three
+of which are optional and may be omitted.
+
+1. `options` - An optional object specifying various options
+2. `paths` - An array of paths to add or extract
+3. `callback` - Called when the command is completed, if async. (If
+ sync or no file specified, providing a callback throws a
+ `TypeError`.)
+
+If the command is sync (ie, if `options.sync=true`), then the
+callback is not allowed, since the action will be completed immediately.
+
+If a `file` argument is specified, and the command is async, then a
+`Promise` is returned. In this case, a callback may also be provided; it is
+called when the command is completed.
+
+If a `file` option is not specified, then a stream is returned. For
+`create`, this is a readable stream of the generated archive. For
+`list` and `extract` this is a writable stream that an archive should
+be written into. If a file is not specified, then a callback is not
+allowed, because you're already getting a stream to work with.
+
+`replace` and `update` only work on existing archives, and so require
+a `file` argument.
+
+Sync commands without a file argument return a stream that acts on its
+input immediately in the same tick. For readable streams, this means
+that all of the data is immediately available by calling
+`stream.read()`. For writable streams, it will be acted upon as soon
+as it is provided, but this can be at any time.
+
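+As a quick sketch of those three shapes (the file names here are made up):
+
+```js
+// async + file: returns a Promise (a callback may also be passed)
+tar.c({ file: 'out.tgz', gzip: true }, ['src/']).then(() => console.log('written'))
+
+// sync + file: nothing to wait for; the archive exists when the call returns
+tar.c({ file: 'out.tgz', gzip: true, sync: true }, ['src/'])
+
+// no file: returns a stream to wire up yourself
+tar.c({ gzip: true }, ['src/']).pipe(require('fs').createWriteStream('out.tgz'))
+```
+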
+### Warnings and Errors
+
+Tar emits warnings and errors for recoverable and unrecoverable situations,
+respectively. In many cases, a warning only affects a single entry in an
+archive, or is simply informing you that it's modifying an entry to comply
+with the settings provided.
+
+Unrecoverable warnings will always raise an error (ie, emit `'error'` on
+streaming actions, throw for non-streaming sync actions, reject the
+returned Promise for non-streaming async operations, or call a provided
+callback with an `Error` as the first argument). Recoverable errors will
+raise an error only if `strict: true` is set in the options.
+
+Respond to (recoverable) warnings by listening to the `warn` event.
+Handlers receive 3 arguments:
+
+- `code` String. One of the error codes below. This may not match
+ `data.code`, which preserves the original error code from fs and zlib.
+- `message` String. More details about the error.
+- `data` Metadata about the error. An `Error` object for errors raised by
+ fs and zlib. All fields are attached to errors raised by tar. Typically
+ contains the following fields, as relevant:
+ - `tarCode` The tar error code.
+ - `code` Either the tar error code, or the error code set by the
+ underlying system.
+ - `file` The archive file being read or written.
+ - `cwd` Working directory for creation and extraction operations.
+ - `entry` The entry object (if it could be created) for `TAR_ENTRY_INFO`,
+ `TAR_ENTRY_INVALID`, and `TAR_ENTRY_ERROR` warnings.
+ - `header` The header object (if it could be created, and the entry could
+ not be created) for `TAR_ENTRY_INFO` and `TAR_ENTRY_INVALID` warnings.
+ - `recoverable` Boolean. If `false`, then the warning will emit an
+ `error`, even in non-strict mode.
+
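+A minimal sketch of such a handler, passed as the `onwarn` option (the
+`archive.tgz` and `dest` names are made up):
+
+```js
+tar.x({
+  file: 'archive.tgz',
+  cwd: 'dest',
+  onwarn: (code, message, data) => {
+    // e.g. code === 'TAR_ENTRY_INVALID'; data carries the fields listed above
+    console.warn(`[${code}] ${message}`, data.file || '')
+  }
+})
+```
+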
+#### Error Codes
+
+* `TAR_ENTRY_INFO` An informative error indicating that an entry is being
+ modified, but otherwise processed normally. For example, removing `/` or
+ `C:\` from absolute paths if `preservePaths` is not set.
+
+* `TAR_ENTRY_INVALID` An indication that a given entry is not a valid tar
+ archive entry, and will be skipped. This occurs when:
+ - a checksum fails,
+ - a `linkpath` is missing for a link type, or
+ - a `linkpath` is provided for a non-link type.
+
+ If every entry in a parsed archive raises a `TAR_ENTRY_INVALID` error,
+ then the archive is presumed to be unrecoverably broken, and
+ `TAR_BAD_ARCHIVE` will be raised.
+
+* `TAR_ENTRY_ERROR` The entry appears to be a valid tar archive entry, but
+ encountered an error which prevented it from being unpacked. This occurs
+ when:
+ - an unrecoverable fs error happens during unpacking,
+ - an entry has `..` in the path and `preservePaths` is not set, or
+ - an entry is extracting through a symbolic link, when `preservePaths` is
+ not set.
+
+* `TAR_ENTRY_UNSUPPORTED` An indication that a given entry is
+ a valid archive entry, but of a type that is unsupported, and so will be
+ skipped in archive creation or extracting.
+
+* `TAR_ABORT` When parsing gzip-encoded archives, the parser will
+  abort the parse process and raise a warning for any zlib errors encountered.
+ Aborts are considered unrecoverable for both parsing and unpacking.
+
+* `TAR_BAD_ARCHIVE` The archive file is totally hosed. This can happen for
+ a number of reasons, and always occurs at the end of a parse or extract:
+
+ - An entry body was truncated before seeing the full number of bytes.
+ - The archive contained only invalid entries, indicating that it is
+ likely not an archive, or at least, not an archive this library can
+ parse.
+
+ `TAR_BAD_ARCHIVE` is considered informative for parse operations, but
+ unrecoverable for extraction. Note that, if encountered at the end of an
+ extraction, tar WILL still have extracted as much as it could from the
+ archive, so there may be some garbage files to clean up.
+
+Errors that occur deeper in the system (ie, either the filesystem or zlib)
+will have their error codes left intact, and a `tarCode` matching one of
+the above will be added to the warning metadata or the raised error object.
+
+Errors generated by tar will have one of the above codes set as the
+`error.code` field as well, but since errors originating in zlib or fs will
+have their original codes, it's better to read `error.tarCode` if you wish
+to see how tar is handling the issue.
+
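+The same codes show up on raised errors. A sketch of reading them on a failed
+extraction (the `archive.tgz` name is made up):
+
+```js
+tar.x({ file: 'archive.tgz', cwd: 'dest' }).catch(err => {
+  // err.tarCode is the TAR_* code; err.code may keep the original fs/zlib
+  // code (e.g. 'ENOENT') when the failure originated deeper in the system.
+  console.error(err.tarCode, err.code, err.message)
+})
+```
+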
+### Examples
+
+The API mimics the `tar(1)` command line functionality, with aliases
+for more human-readable option and function names. The goal is that
+if you know how to use `tar(1)` in Unix, then you know how to use
+`require('tar')` in JavaScript.
+
+To replicate `tar czf my-tarball.tgz files and folders`, you'd do:
+
+```js
+tar.c(
+ {
+ gzip: <true|gzip options>,
+ file: 'my-tarball.tgz'
+ },
+ ['some', 'files', 'and', 'folders']
+).then(_ => { .. tarball has been created .. })
+```
+
+To replicate `tar cz files and folders > my-tarball.tgz`, you'd do:
+
+```js
+tar.c( // or tar.create
+ {
+ gzip: <true|gzip options>
+ },
+ ['some', 'files', 'and', 'folders']
+).pipe(fs.createWriteStream('my-tarball.tgz'))
+```
+
+To replicate `tar xf my-tarball.tgz` you'd do:
+
+```js
+tar.x( // or tar.extract(
+ {
+ file: 'my-tarball.tgz'
+ }
+).then(_=> { .. tarball has been dumped in cwd .. })
+```
+
+To replicate `cat my-tarball.tgz | tar x -C some-dir --strip=1`:
+
+```js
+fs.createReadStream('my-tarball.tgz').pipe(
+ tar.x({
+ strip: 1,
+ C: 'some-dir' // alias for cwd:'some-dir', also ok
+ })
+)
+```
+
+To replicate `tar tf my-tarball.tgz`, do this:
+
+```js
+tar.t({
+ file: 'my-tarball.tgz',
+ onentry: entry => { .. do whatever with it .. }
+})
+```
+
+To replicate `cat my-tarball.tgz | tar t` do:
+
+```js
+fs.createReadStream('my-tarball.tgz')
+ .pipe(tar.t())
+ .on('entry', entry => { .. do whatever with it .. })
+```
+
+To do anything synchronous, add `sync: true` to the options. Note
+that sync functions don't take a callback and don't return a promise.
+When the function returns, it's already done. Sync methods without a
+file argument return a sync stream, which flushes immediately. But,
+of course, it still won't be done until you `.end()` it.
+
+To filter entries, add `filter: <function>` to the options.
+Tar-creating methods call the filter with `filter(path, stat)`.
+Tar-reading methods (including extraction) call the filter with
+`filter(path, entry)`. The filter is called in the `this`-context of
+the `Pack` or `Unpack` stream object.
+
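+For instance, a creation-side filter that skips `node_modules` and an
+extraction-side filter that only unpacks JavaScript files might look like
+this (file names are illustrative):
+
+```js
+// Creating: filter(path, stat) -- return false to omit an entry.
+tar.c({ file: 'src.tgz', filter: path => !path.includes('node_modules') }, ['.'])
+
+// Extracting: filter(path, entry) -- only unpack .js entries.
+tar.x({ file: 'src.tgz', filter: path => path.endsWith('.js') })
+```
+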
+The arguments list to `tar t` and `tar x` specify a list of filenames
+to extract or list, so they're equivalent to a filter that tests if
+the file is in the list.
+
+For those who _aren't_ fans of tar's single-character command names:
+
+```
+tar.c === tar.create
+tar.r === tar.replace (appends to archive, file is required)
+tar.u === tar.update (appends if newer, file is required)
+tar.x === tar.extract
+tar.t === tar.list
+```
+
+Keep reading for all the command descriptions and options, as well as
+the low-level API that they are built on.
+
+### tar.c(options, fileList, callback) [alias: tar.create]
+
+Create a tarball archive.
+
+The `fileList` is an array of paths to add to the tarball. Adding a
+directory also adds its children recursively.
+
+An entry in `fileList` that starts with an `@` symbol is a tar archive
+whose entries will be added. To add a file that starts with `@`,
+prepend it with `./`.
+
+The following options are supported:
+
+- `file` Write the tarball archive to the specified filename. If this
+ is specified, then the callback will be fired when the file has been
+ written, and a promise will be returned that resolves when the file
+ is written. If a filename is not specified, then a Readable Stream
+ will be returned which will emit the file data. [Alias: `f`]
+- `sync` Act synchronously. If this is set, then any provided file
+ will be fully written after the call to `tar.c`. If this is set,
+ and a file is not provided, then the resulting stream will already
+ have the data ready to `read` or `emit('data')` as soon as you
+ request it.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for creating the archive.
+ Defaults to `process.cwd()`. [Alias: `C`]
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+ object with settings for `zlib.Gzip()` [Alias: `z`]
+- `filter` A function that gets called with `(path, stat)` for each
+ entry being added. Return `true` to add the entry to the archive,
+ or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+ that `mtime` is still included, because this is necessary for other
+ time-based operations. Additionally, `mode` is set to a "reasonable
+ default" for most unix systems, based on a `umask` value of `0o22`.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths. [Alias: `P`]
+- `mode` The mode to set on the created file archive
+- `noDirRecurse` Do not recursively archive the contents of
+ directories. [Alias: `n`]
+- `follow` Set to true to pack the targets of symbolic links. Without
+ this option, symbolic links are archived as such. [Alias: `L`, `h`]
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+ [Alias: `m`, `no-mtime`]
+- `mtime` Set to a `Date` object to force a specific `mtime` for
+ everything added to the archive. Overridden by `noMtime`.
+
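+A short sketch combining a few of the options above (all file names and the
+date are made up):
+
+```js
+tar.c({
+  file: 'release.tgz',
+  gzip: true,
+  portable: true,                 // strip system-specific metadata
+  prefix: 'package',              // entries are archived as package/<path>
+  mtime: new Date('2020-01-01'),  // force a single mtime for every entry
+  filter: path => !path.endsWith('.log')
+}, ['lib/', 'package.json'])
+```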
+
+The following options are mostly internal, but can be modified in some
+advanced use cases, such as re-using caches between runs.
+
+- `linkCache` A Map object containing the device and inode value for
+ any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `readdirCache` A Map object that caches calls to `readdir`.
+- `jobs` A number specifying how many concurrent jobs to run.
+ Defaults to 4.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+
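+For example, a minimal sketch of creating a gzipped archive from a couple
+of paths (the archive name and paths here are hypothetical):
+
+```
+// create my-tarball.tgz from two hypothetical paths; resolves when written
+tar.c(
+  {
+    gzip: true,
+    file: 'my-tarball.tgz'
+  },
+  ['some/dir', 'some/file.js']
+).then(_ => console.log('tarball created'))
+```
+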
+### tar.x(options, fileList, callback) [alias: tar.extract]
+
+Extract a tarball archive.
+
+The `fileList` is an array of paths to extract from the tarball. If
+no paths are provided, then all the entries are extracted.
+
+If the archive is gzipped, then tar will detect this and unzip it.
+
+Note that all directories that are created will be forced to be
+writable, readable, and listable by their owner, to avoid cases where
+a directory prevents extraction of child entries by virtue of its
+mode.
+
+Most extraction errors will cause a `warn` event to be emitted. If
+the `cwd` is missing, or not a directory, then the extraction will
+fail completely.
+
+The following options are supported:
+
+- `cwd` Extract files relative to the specified directory. Defaults
+ to `process.cwd()`. If provided, this must exist and must be a
+ directory. [Alias: `C`]
+- `file` The archive file to extract. If not specified, then a
+ Writable stream is returned where the archive data should be
+ written. [Alias: `f`]
+- `sync` Create files and directories synchronously.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `filter` A function that gets called with `(path, entry)` for each
+ entry being unpacked. Return `true` to unpack the entry from the
+ archive, or `false` to skip it.
+- `newer` Set to true to keep the existing file on disk if it's newer
+ than the file in the archive. [Alias: `keep-newer`,
+ `keep-newer-files`]
+- `keep` Do not overwrite existing files. In particular, if a file
+ appears more than once in an archive, later copies will not
+ overwrite earlier copies. [Alias: `k`, `keep-existing`]
+- `preservePaths` Allow absolute paths, paths containing `..`, and
+ extracting through symbolic links. By default, `/` is stripped from
+ absolute paths, `..` paths are not extracted, and any file whose
+ location would be modified by a symbolic link is not extracted.
+ [Alias: `P`]
+- `unlink` Unlink files before creating them. Without this option,
+ tar overwrites existing files, which preserves existing hardlinks.
+ With this option, existing hardlinks will be broken, as will any
+ symlink that would affect the location of an extracted file. [Alias:
+ `U`]
+- `strip` Remove the specified number of leading path elements.
+ Pathnames with fewer elements will be silently skipped. Note that
+ the pathname is edited after applying the filter, but before
+ security checks. [Alias: `strip-components`, `stripComponents`]
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
+- `preserveOwner` If true, tar will set the `uid` and `gid` of
+ extracted entries to the `uid` and `gid` fields in the archive.
+ This defaults to true when run as root, and false otherwise. If
+ false, then files and directories will be set with the owner and
+ group of the user running the process. This is similar to `-p` in
+ `tar(1)`, but ACLs and other system-specific data is never unpacked
+ in this implementation, and modes are set by default already.
+ [Alias: `p`]
+- `uid` Set to a number to force ownership of all extracted files and
+ folders, and all implicitly created directories, to be owned by the
+ specified user id, regardless of the `uid` field in the archive.
+ Cannot be used along with `preserveOwner`. Requires also setting a
+ `gid` option.
+- `gid` Set to a number to force ownership of all extracted files and
+ folders, and all implicitly created directories, to be owned by the
+ specified group id, regardless of the `gid` field in the archive.
+ Cannot be used along with `preserveOwner`. Requires also setting a
+ `uid` option.
+- `noMtime` Set to true to omit writing `mtime` value for extracted
+ entries. [Alias: `m`, `no-mtime`]
+- `transform` Provide a function that takes an `entry` object, and
+ returns a stream, or any falsey value. If a stream is provided,
+ then that stream's data will be written instead of the contents of
+ the archive entry. If a falsey value is provided, then the entry is
+ written to disk as normal. (To exclude items from extraction, use
+ the `filter` option described above.)
+- `onentry` A function that gets called with `(entry)` for each entry
+ that passes the filter.
+
+The following options are mostly internal, but can be modified in some
+advanced use cases, such as re-using caches between runs.
+
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `umask` Filter the modes of entries like `process.umask()`.
+- `dmode` Default mode for directories.
+- `fmode` Default mode for files.
+- `dirCache` A Map object of which directories exist.
+- `maxMetaEntrySize` The maximum size of meta entries that is
+ supported. Defaults to 1 MB.
+
+Note that using an asynchronous stream type with the `transform`
+option will cause undefined behavior in sync extractions.
+[MiniPass](http://npm.im/minipass)-based streams are designed for this
+use case.
+
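+For example, a minimal sketch of extracting that archive into a
+hypothetical destination directory (which must already exist):
+
+```
+// extract my-tarball.tgz into ./output; resolves when extraction finishes
+tar.x(
+  {
+    file: 'my-tarball.tgz',
+    cwd: 'output'
+  }
+).then(_ => console.log('extracted'))
+```
+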
+### tar.t(options, fileList, callback) [alias: tar.list]
+
+List the contents of a tarball archive.
+
+The `fileList` is an array of paths to list from the tarball. If
+no paths are provided, then all the entries are listed.
+
+If the archive is gzipped, then tar will detect this and unzip it.
+
+Returns an event emitter that emits `entry` events with
+`tar.ReadEntry` objects. However, they don't emit `'data'` or `'end'`
+events. (If you want to get actual readable entries, use the
+`tar.Parse` class instead.)
+
+The following options are supported:
+
+- `cwd` List entries relative to the specified directory. Defaults
+ to `process.cwd()`. [Alias: `C`]
+- `file` The archive file to list. If not specified, then a
+ Writable stream is returned where the archive data should be
+ written. [Alias: `f`]
+- `sync` Read the specified file synchronously. (This has no effect
+ when a file option isn't specified, because entries are emitted as
+ fast as they are parsed from the stream anyway.)
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `filter` A function that gets called with `(path, entry)` for each
+ entry being listed. Return `true` to emit the entry from the
+ archive, or `false` to skip it.
+- `onentry` A function that gets called with `(entry)` for each entry
+ that passes the filter. This is important for when both `file` and
+ `sync` are set, because it will be called synchronously.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `noResume` By default, `entry` streams are resumed immediately after
+ the call to `onentry`. Set `noResume: true` to suppress this
+ behavior. Note that by opting into this, the stream will never
+ complete until the entry data is consumed.
+
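+For example, a small sketch that prints the path of every entry in a
+hypothetical archive, using the `onentry` option:
+
+```
+// list entry paths without extracting anything
+tar.t(
+  {
+    file: 'my-tarball.tgz',
+    onentry: entry => console.log(entry.path)
+  }
+)
+```
+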
+### tar.u(options, fileList, callback) [alias: tar.update]
+
+Add files to an archive if they are newer than the entry already in
+the tarball archive.
+
+The `fileList` is an array of paths to add to the tarball. Adding a
+directory also adds its children recursively.
+
+An entry in `fileList` that starts with an `@` symbol is a tar archive
+whose entries will be added. To add a file that starts with `@`,
+prepend it with `./`.
+
+The following options are supported:
+
+- `file` Required. Write the tarball archive to the specified
+ filename. [Alias: `f`]
+- `sync` Act synchronously. If this is set, then any provided file
+  will be fully written after the call to `tar.u`.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for adding entries to the
+ archive. Defaults to `process.cwd()`. [Alias: `C`]
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+ object with settings for `zlib.Gzip()` [Alias: `z`]
+- `filter` A function that gets called with `(path, stat)` for each
+ entry being added. Return `true` to add the entry to the archive,
+ or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+ that `mtime` is still included, because this is necessary for other
+ time-based operations. Additionally, `mode` is set to a "reasonable
+ default" for most unix systems, based on a `umask` value of `0o22`.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths. [Alias: `P`]
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `noDirRecurse` Do not recursively archive the contents of
+ directories. [Alias: `n`]
+- `follow` Set to true to pack the targets of symbolic links. Without
+ this option, symbolic links are archived as such. [Alias: `L`, `h`]
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+ [Alias: `m`, `no-mtime`]
+- `mtime` Set to a `Date` object to force a specific `mtime` for
+ everything added to the archive. Overridden by `noMtime`.
+
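+For example, a minimal sketch that re-adds a path to an existing
+uncompressed archive only if the copy on disk is newer (names hypothetical):
+
+```
+// append some/file.js to archive.tar only if it is newer than the archived copy
+tar.u(
+  {
+    file: 'archive.tar'
+  },
+  ['some/file.js']
+).then(_ => console.log('updated'))
+```
+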
+### tar.r(options, fileList, callback) [alias: tar.replace]
+
+Add files to an existing archive. Because later entries override
+earlier entries, this effectively replaces any existing entries.
+
+The `fileList` is an array of paths to add to the tarball. Adding a
+directory also adds its children recursively.
+
+An entry in `fileList` that starts with an `@` symbol is a tar archive
+whose entries will be added. To add a file that starts with `@`,
+prepend it with `./`.
+
+The following options are supported:
+
+- `file` Required. Write the tarball archive to the specified
+ filename. [Alias: `f`]
+- `sync` Act synchronously. If this is set, then any provided file
+  will be fully written after the call to `tar.r`.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for adding entries to the
+ archive. Defaults to `process.cwd()`. [Alias: `C`]
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+ object with settings for `zlib.Gzip()` [Alias: `z`]
+- `filter` A function that gets called with `(path, stat)` for each
+ entry being added. Return `true` to add the entry to the archive,
+ or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+ that `mtime` is still included, because this is necessary for other
+ time-based operations. Additionally, `mode` is set to a "reasonable
+ default" for most unix systems, based on a `umask` value of `0o22`.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths. [Alias: `P`]
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `noDirRecurse` Do not recursively archive the contents of
+ directories. [Alias: `n`]
+- `follow` Set to true to pack the targets of symbolic links. Without
+ this option, symbolic links are archived as such. [Alias: `L`, `h`]
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+ [Alias: `m`, `no-mtime`]
+- `mtime` Set to a `Date` object to force a specific `mtime` for
+ everything added to the archive. Overridden by `noMtime`.
+
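+For example, a minimal sketch that appends a fresh copy of a path to an
+existing uncompressed archive, superseding the earlier entry (names
+hypothetical):
+
+```
+// later entries win, so this effectively replaces some/file.js in archive.tar
+tar.r(
+  {
+    file: 'archive.tar'
+  },
+  ['some/file.js']
+).then(_ => console.log('replaced'))
+```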
+
+## Low-Level API
+
+### class tar.Pack
+
+A readable tar stream.
+
+Has all the standard readable stream interface stuff. `'data'` and
+`'end'` events, `read()` method, `pause()` and `resume()`, etc.
+
+#### constructor(options)
+
+The following options are supported:
+
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for creating the archive.
+ Defaults to `process.cwd()`.
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+ object with settings for `zlib.Gzip()`
+- `filter` A function that gets called with `(path, stat)` for each
+ entry being added. Return `true` to add the entry to the archive,
+ or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+ that `mtime` is still included, because this is necessary for other
+ time-based operations. Additionally, `mode` is set to a "reasonable
+ default" for most unix systems, based on a `umask` value of `0o22`.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths.
+- `linkCache` A Map object containing the device and inode value for
+ any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `readdirCache` A Map object that caches calls to `readdir`.
+- `jobs` A number specifying how many concurrent jobs to run.
+ Defaults to 4.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `noDirRecurse` Do not recursively archive the contents of
+ directories.
+- `follow` Set to true to pack the targets of symbolic links. Without
+ this option, symbolic links are archived as such.
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+- `mtime` Set to a `Date` object to force a specific `mtime` for
+ everything added to the archive. Overridden by `noMtime`.
+
+#### add(path)
+
+Adds an entry to the archive. Returns the Pack stream.
+
+#### write(path)
+
+Adds an entry to the archive. Returns true if flushed.
+
+#### end()
+
+Finishes the archive.
+
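+For example, a minimal sketch of driving a Pack stream by hand and piping
+the archive data to a file (the paths are hypothetical):
+
+```
+const fs = require('fs')
+
+// pack two entries relative to some/dir and stream the bytes to out.tar
+const pack = new tar.Pack({ cwd: 'some/dir' })
+pack.pipe(fs.createWriteStream('out.tar'))
+pack.add('file-one.txt')
+pack.add('file-two.txt')
+pack.end()
+```
+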
+### class tar.Pack.Sync
+
+Synchronous version of `tar.Pack`.
+
+### class tar.Unpack
+
+A writable stream that unpacks a tar archive onto the file system.
+
+All the normal writable stream stuff is supported. `write()` and
+`end()` methods, `'drain'` events, etc.
+
+Note that all directories that are created will be forced to be
+writable, readable, and listable by their owner, to avoid cases where
+a directory prevents extraction of child entries by virtue of its
+mode.
+
+`'close'` is emitted when it's done writing stuff to the file system.
+
+Most unpack errors will cause a `warn` event to be emitted. If the
+`cwd` is missing, or not a directory, then an error will be emitted.
+
+#### constructor(options)
+
+- `cwd` Extract files relative to the specified directory. Defaults
+ to `process.cwd()`. If provided, this must exist and must be a
+ directory.
+- `filter` A function that gets called with `(path, entry)` for each
+ entry being unpacked. Return `true` to unpack the entry from the
+ archive, or `false` to skip it.
+- `newer` Set to true to keep the existing file on disk if it's newer
+ than the file in the archive.
+- `keep` Do not overwrite existing files. In particular, if a file
+ appears more than once in an archive, later copies will not
+ overwrite earlier copies.
+- `preservePaths` Allow absolute paths, paths containing `..`, and
+ extracting through symbolic links. By default, `/` is stripped from
+ absolute paths, `..` paths are not extracted, and any file whose
+ location would be modified by a symbolic link is not extracted.
+- `unlink` Unlink files before creating them. Without this option,
+ tar overwrites existing files, which preserves existing hardlinks.
+ With this option, existing hardlinks will be broken, as will any
+ symlink that would affect the location of an extracted file.
+- `strip` Remove the specified number of leading path elements.
+ Pathnames with fewer elements will be silently skipped. Note that
+ the pathname is edited after applying the filter, but before
+ security checks.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
+- `umask` Filter the modes of entries like `process.umask()`.
+- `dmode` Default mode for directories.
+- `fmode` Default mode for files.
+- `dirCache` A Map object of which directories exist.
+- `maxMetaEntrySize` The maximum size of meta entries that is
+ supported. Defaults to 1 MB.
+- `preserveOwner` If true, tar will set the `uid` and `gid` of
+ extracted entries to the `uid` and `gid` fields in the archive.
+ This defaults to true when run as root, and false otherwise. If
+ false, then files and directories will be set with the owner and
+ group of the user running the process. This is similar to `-p` in
+ `tar(1)`, but ACLs and other system-specific data is never unpacked
+ in this implementation, and modes are set by default already.
+- `win32` True if on a windows platform. Causes behavior where
+ filenames containing `<|>?` chars are converted to
+ windows-compatible values while being unpacked.
+- `uid` Set to a number to force ownership of all extracted files and
+ folders, and all implicitly created directories, to be owned by the
+ specified user id, regardless of the `uid` field in the archive.
+ Cannot be used along with `preserveOwner`. Requires also setting a
+ `gid` option.
+- `gid` Set to a number to force ownership of all extracted files and
+ folders, and all implicitly created directories, to be owned by the
+ specified group id, regardless of the `gid` field in the archive.
+ Cannot be used along with `preserveOwner`. Requires also setting a
+ `uid` option.
+- `noMtime` Set to true to omit writing `mtime` value for extracted
+ entries.
+- `transform` Provide a function that takes an `entry` object, and
+ returns a stream, or any falsey value. If a stream is provided,
+ then that stream's data will be written instead of the contents of
+ the archive entry. If a falsey value is provided, then the entry is
+ written to disk as normal. (To exclude items from extraction, use
+ the `filter` option described above.)
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `onentry` A function that gets called with `(entry)` for each entry
+ that passes the filter.
+
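+For example, a minimal sketch of piping raw archive data from disk into an
+Unpack stream (the file and directory names are hypothetical):
+
+```
+const fs = require('fs')
+
+// unpack archive.tar into ./output, which must already exist
+fs.createReadStream('archive.tar')
+  .pipe(new tar.Unpack({ cwd: 'output' }))
+  .on('close', _ => console.log('done unpacking'))
+```
+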
+### class tar.Unpack.Sync
+
+Synchronous version of `tar.Unpack`.
+
+Note that using an asynchronous stream type with the `transform`
+option will cause undefined behavior in sync unpack streams.
+[MiniPass](http://npm.im/minipass)-based streams are designed for this
+use case.
+
+### class tar.Parse
+
+A writable stream that parses a tar archive stream. All the standard
+writable stream stuff is supported.
+
+If the archive is gzipped, then tar will detect this and unzip it.
+
+Emits `'entry'` events with `tar.ReadEntry` objects, which are
+themselves readable streams that you can pipe wherever.
+
+Each `entry` will not emit until the one before it is flushed through,
+so make sure to either consume the data (with `on('data', ...)` or
+`.pipe(...)`) or throw it away with `.resume()` to keep the stream
+flowing.
+
+#### constructor(options)
+
+Returns an event emitter that emits `entry` events with
+`tar.ReadEntry` objects.
+
+The following options are supported:
+
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `filter` A function that gets called with `(path, entry)` for each
+ entry being listed. Return `true` to emit the entry from the
+ archive, or `false` to skip it.
+- `onentry` A function that gets called with `(entry)` for each entry
+ that passes the filter.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
+
+#### abort(error)
+
+Stop all parsing activities. This is called when there are zlib
+errors. It also emits an unrecoverable warning with the error provided.
+
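+For example, a small sketch that logs each entry and then discards its data
+so the stream keeps flowing (the file name is hypothetical):
+
+```
+const fs = require('fs')
+
+// report every entry's path and size without writing anything to disk
+fs.createReadStream('archive.tar')
+  .pipe(new tar.Parse())
+  .on('entry', entry => {
+    console.log(entry.path, entry.size)
+    entry.resume()
+  })
+```
+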
+### class tar.ReadEntry extends [MiniPass](http://npm.im/minipass)
+
+A representation of an entry that is being read out of a tar archive.
+
+It has the following fields:
+
+- `extended` The extended metadata object provided to the constructor.
+- `globalExtended` The global extended metadata object provided to the
+ constructor.
+- `remain` The number of bytes remaining to be written into the
+ stream.
+- `blockRemain` The number of 512-byte blocks remaining to be written
+ into the stream.
+- `ignore` Whether this entry should be ignored.
+- `meta` True if this represents metadata about the next entry, false
+ if it represents a filesystem object.
+- All the fields from the header, extended header, and global extended
+ header are added to the ReadEntry object. So it has `path`, `type`,
+  `size`, `mode`, and so on.
+
+#### constructor(header, extended, globalExtended)
+
+Create a new ReadEntry object with the specified header, extended
+header, and global extended header values.
+
+### class tar.WriteEntry extends [MiniPass](http://npm.im/minipass)
+
+A representation of an entry that is being written from the file
+system into a tar archive.
+
+Emits data for the Header, and for the Pax Extended Header if one is
+required, as well as any body data.
+
+Creating a WriteEntry for a directory does not also create
+WriteEntry objects for all of the directory contents.
+
+It has the following fields:
+
+- `path` The path field that will be written to the archive. By
+ default, this is also the path from the cwd to the file system
+ object.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+ that `mtime` is still included, because this is necessary for other
+ time-based operations. Additionally, `mode` is set to a "reasonable
+ default" for most unix systems, based on a `umask` value of `0o22`.
+- `myuid` If supported, the uid of the user running the current
+ process.
+- `myuser` The `env.USER` string if set, or `''`. Set as the entry
+ `uname` field if the file's `uid` matches `this.myuid`.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 1 MB.
+- `linkCache` A Map object containing the device and inode value for
+ any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths.
+- `cwd` The current working directory for creating the archive.
+ Defaults to `process.cwd()`.
+- `absolute` The absolute path to the entry on the filesystem. By
+ default, this is `path.resolve(this.cwd, this.path)`, but it can be
+ overridden explicitly.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `win32` True if on a windows platform. Causes behavior where paths
+ replace `\` with `/` and filenames containing the windows-compatible
+ forms of `<|>?:` characters are converted to actual `<|>?:` characters
+ in the archive.
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+
+
+#### constructor(path, options)
+
+`path` is the path of the entry as it is written in the archive.
+
+The following options are supported:
+
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+ that `mtime` is still included, because this is necessary for other
+ time-based operations. Additionally, `mode` is set to a "reasonable
+ default" for most unix systems, based on a `umask` value of `0o22`.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 1 MB.
+- `linkCache` A Map object containing the device and inode value for
+ any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths.
+- `cwd` The current working directory for creating the archive.
+ Defaults to `process.cwd()`.
+- `absolute` The absolute path to the entry on the filesystem. By
+ default, this is `path.resolve(this.cwd, this.path)`, but it can be
+ overridden explicitly.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `win32` True if on a windows platform. Causes behavior where paths
+ replace `\` with `/`.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+- `umask` Set to restrict the modes on the entries in the archive,
+ somewhat like how umask works on file creation. Defaults to
+ `process.umask()` on unix systems, or `0o22` on Windows.
+
+#### warn(message, data)
+
+If strict, emit an error with the provided message.
+
+Otherwise, emit a `'warn'` event with the provided message and data.
+
+### class tar.WriteEntry.Sync
+
+Synchronous version of `tar.WriteEntry`.
+
+### class tar.WriteEntry.Tar
+
+A version of `tar.WriteEntry` that gets its data from a `tar.ReadEntry`
+instead of from the filesystem.
+
+#### constructor(readEntry, options)
+
+`readEntry` is the entry being read out of another archive.
+
+The following options are supported:
+
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+ that `mtime` is still included, because this is necessary for other
+ time-based operations. Additionally, `mode` is set to a "reasonable
+ default" for most unix systems, based on a `umask` value of `0o22`.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+
+### class tar.Header
+
+A class for reading and writing header blocks.
+
+It has the following fields:
+
+- `nullBlock` True if decoding a block which is entirely composed of
+ `0x00` null bytes. (Useful because tar files are terminated by
+ at least 2 null blocks.)
+- `cksumValid` True if the checksum in the header is valid, false
+ otherwise.
+- `needPax` True if the values, as encoded, will require a Pax
+ extended header.
+- `path` The path of the entry.
+- `mode` The 4 lowest-order octal digits of the file mode. That is,
+ read/write/execute permissions for world, group, and owner, and the
+ setuid, setgid, and sticky bits.
+- `uid` Numeric user id of the file owner
+- `gid` Numeric group id of the file owner
+- `size` Size of the file in bytes
+- `mtime` Modified time of the file
+- `cksum` The checksum of the header. This is generated by adding all
+ the bytes of the header block, treating the checksum field itself as
+ all ascii space characters (that is, `0x20`).
+- `type` The human-readable name of the type of entry this represents,
+ or the alphanumeric key if unknown.
+- `typeKey` The alphanumeric key for the type of entry this header
+ represents.
+- `linkpath` The target of Link and SymbolicLink entries.
+- `uname` Human-readable user name of the file owner
+- `gname` Human-readable group name of the file owner
+- `devmaj` The major portion of the device number. Always `0` for
+ files, directories, and links.
+- `devmin` The minor portion of the device number. Always `0` for
+ files, directories, and links.
+- `atime` File access time.
+- `ctime` File change time.
+
+#### constructor(data, [offset=0])
+
+`data` is optional. It is either a Buffer that should be interpreted
+as a tar Header starting at the specified offset and continuing for
+512 bytes, or a data object of keys and values to set on the header
+object, and eventually encode as a tar Header.
+
+#### decode(block, offset)
+
+Decode the provided buffer starting at the specified offset.
+
+The buffer must contain at least 512 bytes beginning at the given offset.
+
+#### set(data)
+
+Set the fields in the data object.
+
+#### encode(buffer, offset)
+
+Encode the header fields into the buffer at the specified offset.
+
+Returns `this.needPax` to indicate whether a Pax Extended Header is
+required to properly encode the specified data.
+
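+For example, a small sketch that encodes a header into a 512-byte block and
+then decodes it back again (the field values are hypothetical):
+
+```
+// round-trip a header through its 512-byte on-disk representation
+const header = new tar.Header({
+  path: 'hello.txt',
+  type: 'File',
+  size: 5,
+  mode: 0o644,
+  mtime: new Date()
+})
+const block = Buffer.alloc(512)
+header.encode(block, 0)
+
+const decoded = new tar.Header(block)
+console.log(decoded.path, decoded.size, decoded.cksumValid)
+```
+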
+### class tar.Pax
+
+An object representing a set of key-value pairs in a Pax extended
+header entry.
+
+It has the following fields. Where the same name is used, they have
+the same semantics as the tar.Header field of the same name.
+
+- `global` True if this represents a global extended header, or false
+ if it is for a single entry.
+- `atime`
+- `charset`
+- `comment`
+- `ctime`
+- `gid`
+- `gname`
+- `linkpath`
+- `mtime`
+- `path`
+- `size`
+- `uid`
+- `uname`
+- `dev`
+- `ino`
+- `nlink`
+
+#### constructor(object, global)
+
+Set the fields set in the object. `global` is a boolean that defaults
+to false.
+
+#### encode()
+
+Return a Buffer containing the header and body for the Pax extended
+header entry, or `null` if there is nothing to encode.
+
+#### encodeBody()
+
+Return a string representing the body of the pax extended header
+entry.
+
+#### encodeField(fieldName)
+
+Return a string representing the key/value encoding for the specified
+fieldName, or `''` if the field is unset.
+
+### tar.Pax.parse(string, extended, global)
+
+Return a new Pax object created by parsing the contents of the string
+provided.
+
+If the `extended` object is set, then also add the fields from that
+object. (This is necessary because multiple metadata entries can
+occur in sequence.)
+
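+For example, a small sketch that builds a Pax object for an overlong path
+and encodes it (the path is hypothetical):
+
+```
+// encode() returns a Buffer with the pax header entry and body, or null
+const pax = new tar.Pax({ path: 'a/rather/long/'.repeat(20) + 'file.txt' })
+const encoded = pax.encode()
+console.log(encoded === null ? 'nothing to encode' : encoded.length)
+```
+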
+### tar.types
+
+A translation table for the `type` field in tar headers.
+
+#### tar.types.name.get(code)
+
+Get the human-readable name for a given alphanumeric code.
+
+#### tar.types.code.get(name)
+
+Get the alphanumeric code for a given human-readable name.
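+
+For example, a small sketch of both lookups:
+
+```
+console.log(tar.types.name.get('0'))         // 'File'
+console.log(tar.types.code.get('Directory')) // '5'
+```
+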
diff --git a/node_modules/cacache/node_modules/tar/index.js b/node_modules/cacache/node_modules/tar/index.js
new file mode 100644
index 000000000..c9ae06e79
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/index.js
@@ -0,0 +1,18 @@
+'use strict'
+
+// high-level commands
+exports.c = exports.create = require('./lib/create.js')
+exports.r = exports.replace = require('./lib/replace.js')
+exports.t = exports.list = require('./lib/list.js')
+exports.u = exports.update = require('./lib/update.js')
+exports.x = exports.extract = require('./lib/extract.js')
+
+// classes
+exports.Pack = require('./lib/pack.js')
+exports.Unpack = require('./lib/unpack.js')
+exports.Parse = require('./lib/parse.js')
+exports.ReadEntry = require('./lib/read-entry.js')
+exports.WriteEntry = require('./lib/write-entry.js')
+exports.Header = require('./lib/header.js')
+exports.Pax = require('./lib/pax.js')
+exports.types = require('./lib/types.js')
diff --git a/node_modules/cacache/node_modules/tar/lib/create.js b/node_modules/cacache/node_modules/tar/lib/create.js
new file mode 100644
index 000000000..a37aa52e6
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/create.js
@@ -0,0 +1,105 @@
+'use strict'
+
+// tar -c
+const hlo = require('./high-level-opt.js')
+
+const Pack = require('./pack.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const t = require('./list.js')
+const path = require('path')
+
+const c = module.exports = (opt_, files, cb) => {
+ if (typeof files === 'function')
+ cb = files
+
+ if (Array.isArray(opt_))
+ files = opt_, opt_ = {}
+
+ if (!files || !Array.isArray(files) || !files.length)
+ throw new TypeError('no files or directories specified')
+
+ files = Array.from(files)
+
+ const opt = hlo(opt_)
+
+ if (opt.sync && typeof cb === 'function')
+ throw new TypeError('callback not supported for sync tar functions')
+
+ if (!opt.file && typeof cb === 'function')
+ throw new TypeError('callback only supported with file option')
+
+ return opt.file && opt.sync ? createFileSync(opt, files)
+ : opt.file ? createFile(opt, files, cb)
+ : opt.sync ? createSync(opt, files)
+ : create(opt, files)
+}
+
+const createFileSync = (opt, files) => {
+ const p = new Pack.Sync(opt)
+ const stream = new fsm.WriteStreamSync(opt.file, {
+ mode: opt.mode || 0o666
+ })
+ p.pipe(stream)
+ addFilesSync(p, files)
+}
+
+const createFile = (opt, files, cb) => {
+ const p = new Pack(opt)
+ const stream = new fsm.WriteStream(opt.file, {
+ mode: opt.mode || 0o666
+ })
+ p.pipe(stream)
+
+ const promise = new Promise((res, rej) => {
+ stream.on('error', rej)
+ stream.on('close', res)
+ p.on('error', rej)
+ })
+
+ addFilesAsync(p, files)
+
+ return cb ? promise.then(cb, cb) : promise
+}
+
+const addFilesSync = (p, files) => {
+ files.forEach(file => {
+ if (file.charAt(0) === '@')
+ t({
+ file: path.resolve(p.cwd, file.substr(1)),
+ sync: true,
+ noResume: true,
+ onentry: entry => p.add(entry)
+ })
+ else
+ p.add(file)
+ })
+ p.end()
+}
+
+const addFilesAsync = (p, files) => {
+ while (files.length) {
+ const file = files.shift()
+ if (file.charAt(0) === '@')
+ return t({
+ file: path.resolve(p.cwd, file.substr(1)),
+ noResume: true,
+ onentry: entry => p.add(entry)
+ }).then(_ => addFilesAsync(p, files))
+ else
+ p.add(file)
+ }
+ p.end()
+}
+
+const createSync = (opt, files) => {
+ const p = new Pack.Sync(opt)
+ addFilesSync(p, files)
+ return p
+}
+
+const create = (opt, files) => {
+ const p = new Pack(opt)
+ addFilesAsync(p, files)
+ return p
+}
diff --git a/node_modules/cacache/node_modules/tar/lib/extract.js b/node_modules/cacache/node_modules/tar/lib/extract.js
new file mode 100644
index 000000000..cbb458a0a
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/extract.js
@@ -0,0 +1,112 @@
+'use strict'
+
+// tar -x
+const hlo = require('./high-level-opt.js')
+const Unpack = require('./unpack.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const path = require('path')
+
+const x = module.exports = (opt_, files, cb) => {
+ if (typeof opt_ === 'function')
+ cb = opt_, files = null, opt_ = {}
+ else if (Array.isArray(opt_))
+ files = opt_, opt_ = {}
+
+ if (typeof files === 'function')
+ cb = files, files = null
+
+ if (!files)
+ files = []
+ else
+ files = Array.from(files)
+
+ const opt = hlo(opt_)
+
+ if (opt.sync && typeof cb === 'function')
+ throw new TypeError('callback not supported for sync tar functions')
+
+ if (!opt.file && typeof cb === 'function')
+ throw new TypeError('callback only supported with file option')
+
+ if (files.length)
+ filesFilter(opt, files)
+
+ return opt.file && opt.sync ? extractFileSync(opt)
+ : opt.file ? extractFile(opt, cb)
+ : opt.sync ? extractSync(opt)
+ : extract(opt)
+}
+
+// construct a filter that limits the file entries listed
+// include child entries if a dir is included
+const filesFilter = (opt, files) => {
+ const map = new Map(files.map(f => [f.replace(/\/+$/, ''), true]))
+ const filter = opt.filter
+
+ const mapHas = (file, r) => {
+ const root = r || path.parse(file).root || '.'
+ const ret = file === root ? false
+ : map.has(file) ? map.get(file)
+ : mapHas(path.dirname(file), root)
+
+ map.set(file, ret)
+ return ret
+ }
+
+ opt.filter = filter
+ ? (file, entry) => filter(file, entry) && mapHas(file.replace(/\/+$/, ''))
+ : file => mapHas(file.replace(/\/+$/, ''))
+}
+
+const extractFileSync = opt => {
+ const u = new Unpack.Sync(opt)
+
+ const file = opt.file
+ let threw = true
+ let fd
+ const stat = fs.statSync(file)
+ // This trades a zero-byte read() syscall for a stat
+ // However, it will usually result in less memory allocation
+ const readSize = opt.maxReadSize || 16*1024*1024
+ const stream = new fsm.ReadStreamSync(file, {
+ readSize: readSize,
+ size: stat.size
+ })
+ stream.pipe(u)
+}
+
+const extractFile = (opt, cb) => {
+ const u = new Unpack(opt)
+ const readSize = opt.maxReadSize || 16*1024*1024
+
+ const file = opt.file
+ const p = new Promise((resolve, reject) => {
+ u.on('error', reject)
+ u.on('close', resolve)
+
+ // This trades a zero-byte read() syscall for a stat
+ // However, it will usually result in less memory allocation
+ fs.stat(file, (er, stat) => {
+ if (er)
+ reject(er)
+ else {
+ const stream = new fsm.ReadStream(file, {
+ readSize: readSize,
+ size: stat.size
+ })
+ stream.on('error', reject)
+ stream.pipe(u)
+ }
+ })
+ })
+ return cb ? p.then(cb, cb) : p
+}
+
+const extractSync = opt => {
+ return new Unpack.Sync(opt)
+}
+
+const extract = opt => {
+ return new Unpack(opt)
+}
diff --git a/node_modules/cacache/node_modules/tar/lib/get-write-flag.js b/node_modules/cacache/node_modules/tar/lib/get-write-flag.js
new file mode 100644
index 000000000..e86959996
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/get-write-flag.js
@@ -0,0 +1,20 @@
+// Get the appropriate flag to use for creating files
+// We use fmap on Windows platforms for files less than
+// 512kb. This is a fairly low limit, but avoids making
+// things slower in some cases. Since most of what this
+// library is used for is extracting tarballs of many
+// relatively small files in npm packages and the like,
+// it can be a big boost on Windows platforms.
+// Only supported in Node v12.9.0 and above.
+const platform = process.env.__FAKE_PLATFORM__ || process.platform
+const isWindows = platform === 'win32'
+const fs = global.__FAKE_TESTING_FS__ || require('fs')
+
+/* istanbul ignore next */
+const { O_CREAT, O_TRUNC, O_WRONLY, UV_FS_O_FILEMAP = 0 } = fs.constants
+
+const fMapEnabled = isWindows && !!UV_FS_O_FILEMAP
+const fMapLimit = 512 * 1024
+const fMapFlag = UV_FS_O_FILEMAP | O_TRUNC | O_CREAT | O_WRONLY
+module.exports = !fMapEnabled ? () => 'w'
+ : size => size < fMapLimit ? fMapFlag : 'w'
diff --git a/node_modules/cacache/node_modules/tar/lib/header.js b/node_modules/cacache/node_modules/tar/lib/header.js
new file mode 100644
index 000000000..5d88f6cf8
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/header.js
@@ -0,0 +1,288 @@
+'use strict'
+// parse a 512-byte header block to a data object, or vice-versa
+// encode returns `true` if a pax extended header is needed, because
+// the data could not be faithfully encoded in a simple header.
+// (Also, check header.needPax to see if it needs a pax header.)
+
+const types = require('./types.js')
+const pathModule = require('path').posix
+const large = require('./large-numbers.js')
+
+const SLURP = Symbol('slurp')
+const TYPE = Symbol('type')
+
+class Header {
+ constructor (data, off, ex, gex) {
+ this.cksumValid = false
+ this.needPax = false
+ this.nullBlock = false
+
+ this.block = null
+ this.path = null
+ this.mode = null
+ this.uid = null
+ this.gid = null
+ this.size = null
+ this.mtime = null
+ this.cksum = null
+ this[TYPE] = '0'
+ this.linkpath = null
+ this.uname = null
+ this.gname = null
+ this.devmaj = 0
+ this.devmin = 0
+ this.atime = null
+ this.ctime = null
+
+ if (Buffer.isBuffer(data))
+ this.decode(data, off || 0, ex, gex)
+ else if (data)
+ this.set(data)
+ }
+
+ decode (buf, off, ex, gex) {
+ if (!off)
+ off = 0
+
+ if (!buf || !(buf.length >= off + 512))
+ throw new Error('need 512 bytes for header')
+
+ this.path = decString(buf, off, 100)
+ this.mode = decNumber(buf, off + 100, 8)
+ this.uid = decNumber(buf, off + 108, 8)
+ this.gid = decNumber(buf, off + 116, 8)
+ this.size = decNumber(buf, off + 124, 12)
+ this.mtime = decDate(buf, off + 136, 12)
+ this.cksum = decNumber(buf, off + 148, 12)
+
+ // if we have extended or global extended headers, apply them now
+ // See https://github.com/npm/node-tar/pull/187
+ this[SLURP](ex)
+ this[SLURP](gex, true)
+
+ // old tar versions marked dirs as a file with a trailing /
+ this[TYPE] = decString(buf, off + 156, 1)
+ if (this[TYPE] === '')
+ this[TYPE] = '0'
+ if (this[TYPE] === '0' && this.path.substr(-1) === '/')
+ this[TYPE] = '5'
+
+ // tar implementations sometimes incorrectly put the stat(dir).size
+ // as the size in the tarball, even though Directory entries are
+ // not able to have any body at all. In the very rare chance that
+ // it actually DOES have a body, we weren't going to do anything with
+ // it anyway, and it'll just be a warning about an invalid header.
+ if (this[TYPE] === '5')
+ this.size = 0
+
+ this.linkpath = decString(buf, off + 157, 100)
+ if (buf.slice(off + 257, off + 265).toString() === 'ustar\u000000') {
+ this.uname = decString(buf, off + 265, 32)
+ this.gname = decString(buf, off + 297, 32)
+ this.devmaj = decNumber(buf, off + 329, 8)
+ this.devmin = decNumber(buf, off + 337, 8)
+ if (buf[off + 475] !== 0) {
+ // definitely a prefix, definitely >130 chars.
+ const prefix = decString(buf, off + 345, 155)
+ this.path = prefix + '/' + this.path
+ } else {
+ const prefix = decString(buf, off + 345, 130)
+ if (prefix)
+ this.path = prefix + '/' + this.path
+ this.atime = decDate(buf, off + 476, 12)
+ this.ctime = decDate(buf, off + 488, 12)
+ }
+ }
+
+ let sum = 8 * 0x20
+ for (let i = off; i < off + 148; i++) {
+ sum += buf[i]
+ }
+ for (let i = off + 156; i < off + 512; i++) {
+ sum += buf[i]
+ }
+ this.cksumValid = sum === this.cksum
+ if (this.cksum === null && sum === 8 * 0x20)
+ this.nullBlock = true
+ }
+
+ [SLURP] (ex, global) {
+ for (let k in ex) {
+ // we slurp in everything except for the path attribute in
+ // a global extended header, because that's weird.
+ if (ex[k] !== null && ex[k] !== undefined &&
+ !(global && k === 'path'))
+ this[k] = ex[k]
+ }
+ }
+
+ encode (buf, off) {
+ if (!buf) {
+ buf = this.block = Buffer.alloc(512)
+ off = 0
+ }
+
+ if (!off)
+ off = 0
+
+ if (!(buf.length >= off + 512))
+ throw new Error('need 512 bytes for header')
+
+ const prefixSize = this.ctime || this.atime ? 130 : 155
+ const split = splitPrefix(this.path || '', prefixSize)
+ const path = split[0]
+ const prefix = split[1]
+ this.needPax = split[2]
+
+ this.needPax = encString(buf, off, 100, path) || this.needPax
+ this.needPax = encNumber(buf, off + 100, 8, this.mode) || this.needPax
+ this.needPax = encNumber(buf, off + 108, 8, this.uid) || this.needPax
+ this.needPax = encNumber(buf, off + 116, 8, this.gid) || this.needPax
+ this.needPax = encNumber(buf, off + 124, 12, this.size) || this.needPax
+ this.needPax = encDate(buf, off + 136, 12, this.mtime) || this.needPax
+ buf[off + 156] = this[TYPE].charCodeAt(0)
+ this.needPax = encString(buf, off + 157, 100, this.linkpath) || this.needPax
+ buf.write('ustar\u000000', off + 257, 8)
+ this.needPax = encString(buf, off + 265, 32, this.uname) || this.needPax
+ this.needPax = encString(buf, off + 297, 32, this.gname) || this.needPax
+ this.needPax = encNumber(buf, off + 329, 8, this.devmaj) || this.needPax
+ this.needPax = encNumber(buf, off + 337, 8, this.devmin) || this.needPax
+ this.needPax = encString(buf, off + 345, prefixSize, prefix) || this.needPax
+ if (buf[off + 475] !== 0)
+ this.needPax = encString(buf, off + 345, 155, prefix) || this.needPax
+ else {
+ this.needPax = encString(buf, off + 345, 130, prefix) || this.needPax
+ this.needPax = encDate(buf, off + 476, 12, this.atime) || this.needPax
+ this.needPax = encDate(buf, off + 488, 12, this.ctime) || this.needPax
+ }
+
+ let sum = 8 * 0x20
+ for (let i = off; i < off + 148; i++) {
+ sum += buf[i]
+ }
+ for (let i = off + 156; i < off + 512; i++) {
+ sum += buf[i]
+ }
+ this.cksum = sum
+ encNumber(buf, off + 148, 8, this.cksum)
+ this.cksumValid = true
+
+ return this.needPax
+ }
+
+ set (data) {
+ for (let i in data) {
+ if (data[i] !== null && data[i] !== undefined)
+ this[i] = data[i]
+ }
+ }
+
+ get type () {
+ return types.name.get(this[TYPE]) || this[TYPE]
+ }
+
+ get typeKey () {
+ return this[TYPE]
+ }
+
+ set type (type) {
+ if (types.code.has(type))
+ this[TYPE] = types.code.get(type)
+ else
+ this[TYPE] = type
+ }
+}
+
+const splitPrefix = (p, prefixSize) => {
+ const pathSize = 100
+ let pp = p
+ let prefix = ''
+ let ret
+ const root = pathModule.parse(p).root || '.'
+
+ if (Buffer.byteLength(pp) < pathSize)
+ ret = [pp, prefix, false]
+ else {
+ // first set prefix to the dir, and path to the base
+ prefix = pathModule.dirname(pp)
+ pp = pathModule.basename(pp)
+
+ do {
+ // both fit!
+ if (Buffer.byteLength(pp) <= pathSize &&
+ Buffer.byteLength(prefix) <= prefixSize)
+ ret = [pp, prefix, false]
+
+ // prefix fits in prefix, but path doesn't fit in path
+ else if (Buffer.byteLength(pp) > pathSize &&
+ Buffer.byteLength(prefix) <= prefixSize)
+ ret = [pp.substr(0, pathSize - 1), prefix, true]
+
+ else {
+ // make path take a bit from prefix
+ pp = pathModule.join(pathModule.basename(prefix), pp)
+ prefix = pathModule.dirname(prefix)
+ }
+ } while (prefix !== root && !ret)
+
+ // at this point, found no resolution, just truncate
+ if (!ret)
+ ret = [p.substr(0, pathSize - 1), '', true]
+ }
+ return ret
+}
+
+const decString = (buf, off, size) =>
+ buf.slice(off, off + size).toString('utf8').replace(/\0.*/, '')
+
+const decDate = (buf, off, size) =>
+ numToDate(decNumber(buf, off, size))
+
+const numToDate = num => num === null ? null : new Date(num * 1000)
+
+const decNumber = (buf, off, size) =>
+ buf[off] & 0x80 ? large.parse(buf.slice(off, off + size))
+ : decSmallNumber(buf, off, size)
+
+const nanNull = value => isNaN(value) ? null : value
+
+const decSmallNumber = (buf, off, size) =>
+ nanNull(parseInt(
+ buf.slice(off, off + size)
+ .toString('utf8').replace(/\0.*$/, '').trim(), 8))
+
+// the maximum encodable as a null-terminated octal, by field size
+const MAXNUM = {
+ 12: 0o77777777777,
+ 8 : 0o7777777
+}
+
+const encNumber = (buf, off, size, number) =>
+ number === null ? false :
+ number > MAXNUM[size] || number < 0
+ ? (large.encode(number, buf.slice(off, off + size)), true)
+ : (encSmallNumber(buf, off, size, number), false)
+
+const encSmallNumber = (buf, off, size, number) =>
+ buf.write(octalString(number, size), off, size, 'ascii')
+
+const octalString = (number, size) =>
+ padOctal(Math.floor(number).toString(8), size)
+
+const padOctal = (string, size) =>
+ (string.length === size - 1 ? string
+ : new Array(size - string.length - 1).join('0') + string + ' ') + '\0'
+
+const encDate = (buf, off, size, date) =>
+ date === null ? false :
+ encNumber(buf, off, size, date.getTime() / 1000)
+
+// enough to fill the longest string we've got
+const NULLS = new Array(156).join('\0')
+// pad with nulls, return true if it's longer or non-ascii
+const encString = (buf, off, size, string) =>
+ string === null ? false :
+ (buf.write(string + NULLS, off, size, 'utf8'),
+ string.length !== Buffer.byteLength(string) || string.length > size)
+
+module.exports = Header
diff --git a/node_modules/cacache/node_modules/tar/lib/high-level-opt.js b/node_modules/cacache/node_modules/tar/lib/high-level-opt.js
new file mode 100644
index 000000000..7333db915
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/high-level-opt.js
@@ -0,0 +1,29 @@
+'use strict'
+
+// turn tar(1) style args like `C` into the more verbose things like `cwd`
+
+const argmap = new Map([
+ ['C', 'cwd'],
+ ['f', 'file'],
+ ['z', 'gzip'],
+ ['P', 'preservePaths'],
+ ['U', 'unlink'],
+ ['strip-components', 'strip'],
+ ['stripComponents', 'strip'],
+ ['keep-newer', 'newer'],
+ ['keepNewer', 'newer'],
+ ['keep-newer-files', 'newer'],
+ ['keepNewerFiles', 'newer'],
+ ['k', 'keep'],
+ ['keep-existing', 'keep'],
+ ['keepExisting', 'keep'],
+ ['m', 'noMtime'],
+ ['no-mtime', 'noMtime'],
+ ['p', 'preserveOwner'],
+ ['L', 'follow'],
+ ['h', 'follow']
+])
+
+const parse = module.exports = opt => opt ? Object.keys(opt).map(k => [
+ argmap.has(k) ? argmap.get(k) : k, opt[k]
+]).reduce((set, kv) => (set[kv[0]] = kv[1], set), Object.create(null)) : {}
diff --git a/node_modules/cacache/node_modules/tar/lib/large-numbers.js b/node_modules/cacache/node_modules/tar/lib/large-numbers.js
new file mode 100644
index 000000000..ad30bc350
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/large-numbers.js
@@ -0,0 +1,97 @@
+'use strict'
+// Tar can encode large and negative numbers using a leading byte of
+// 0xff for negative, and 0x80 for positive.
+
+const encode = exports.encode = (num, buf) => {
+ if (!Number.isSafeInteger(num))
+ // The number is so large that javascript cannot represent it with integer
+ // precision.
+ throw Error('cannot encode number outside of javascript safe integer range')
+ else if (num < 0)
+ encodeNegative(num, buf)
+ else
+ encodePositive(num, buf)
+ return buf
+}
+
+const encodePositive = (num, buf) => {
+ buf[0] = 0x80
+
+ for (var i = buf.length; i > 1; i--) {
+ buf[i-1] = num & 0xff
+ num = Math.floor(num / 0x100)
+ }
+}
+
+const encodeNegative = (num, buf) => {
+ buf[0] = 0xff
+ var flipped = false
+ num = num * -1
+ for (var i = buf.length; i > 1; i--) {
+ var byte = num & 0xff
+ num = Math.floor(num / 0x100)
+ if (flipped)
+ buf[i-1] = onesComp(byte)
+ else if (byte === 0)
+ buf[i-1] = 0
+ else {
+ flipped = true
+ buf[i-1] = twosComp(byte)
+ }
+ }
+}
+
+const parse = exports.parse = (buf) => {
+ var post = buf[buf.length - 1]
+ var pre = buf[0]
+ var value;
+ if (pre === 0x80)
+ value = pos(buf.slice(1, buf.length))
+ else if (pre === 0xff)
+ value = twos(buf)
+ else
+ throw Error('invalid base256 encoding')
+
+ if (!Number.isSafeInteger(value))
+ // The number is so large that javascript cannot represent it with integer
+ // precision.
+ throw Error('parsed number outside of javascript safe integer range')
+
+ return value
+}
+
+const twos = (buf) => {
+ var len = buf.length
+ var sum = 0
+ var flipped = false
+ for (var i = len - 1; i > -1; i--) {
+ var byte = buf[i]
+ var f
+ if (flipped)
+ f = onesComp(byte)
+ else if (byte === 0)
+ f = byte
+ else {
+ flipped = true
+ f = twosComp(byte)
+ }
+ if (f !== 0)
+ sum -= f * Math.pow(256, len - i - 1)
+ }
+ return sum
+}
+
+const pos = (buf) => {
+ var len = buf.length
+ var sum = 0
+ for (var i = len - 1; i > -1; i--) {
+ var byte = buf[i]
+ if (byte !== 0)
+ sum += byte * Math.pow(256, len - i - 1)
+ }
+ return sum
+}
+
+const onesComp = byte => (0xff ^ byte) & 0xff
+
+const twosComp = byte => ((0xff ^ byte) + 1) & 0xff
diff --git a/node_modules/cacache/node_modules/tar/lib/list.js b/node_modules/cacache/node_modules/tar/lib/list.js
new file mode 100644
index 000000000..9da3f812c
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/list.js
@@ -0,0 +1,128 @@
+'use strict'
+
+// XXX: This shares a lot in common with extract.js
+// maybe some DRY opportunity here?
+
+// tar -t
+const hlo = require('./high-level-opt.js')
+const Parser = require('./parse.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const path = require('path')
+
+const t = module.exports = (opt_, files, cb) => {
+ if (typeof opt_ === 'function')
+ cb = opt_, files = null, opt_ = {}
+ else if (Array.isArray(opt_))
+ files = opt_, opt_ = {}
+
+ if (typeof files === 'function')
+ cb = files, files = null
+
+ if (!files)
+ files = []
+ else
+ files = Array.from(files)
+
+ const opt = hlo(opt_)
+
+ if (opt.sync && typeof cb === 'function')
+ throw new TypeError('callback not supported for sync tar functions')
+
+ if (!opt.file && typeof cb === 'function')
+ throw new TypeError('callback only supported with file option')
+
+ if (files.length)
+ filesFilter(opt, files)
+
+ if (!opt.noResume)
+ onentryFunction(opt)
+
+ return opt.file && opt.sync ? listFileSync(opt)
+ : opt.file ? listFile(opt, cb)
+ : list(opt)
+}
+
+const onentryFunction = opt => {
+ const onentry = opt.onentry
+ opt.onentry = onentry ? e => {
+ onentry(e)
+ e.resume()
+ } : e => e.resume()
+}
+
+// construct a filter that limits the file entries listed
+// include child entries if a dir is included
+const filesFilter = (opt, files) => {
+ const map = new Map(files.map(f => [f.replace(/\/+$/, ''), true]))
+ const filter = opt.filter
+
+ const mapHas = (file, r) => {
+ const root = r || path.parse(file).root || '.'
+ const ret = file === root ? false
+ : map.has(file) ? map.get(file)
+ : mapHas(path.dirname(file), root)
+
+ map.set(file, ret)
+ return ret
+ }
+
+ opt.filter = filter
+ ? (file, entry) => filter(file, entry) && mapHas(file.replace(/\/+$/, ''))
+ : file => mapHas(file.replace(/\/+$/, ''))
+}
+
+const listFileSync = opt => {
+ const p = list(opt)
+ const file = opt.file
+ let threw = true
+ let fd
+ try {
+ const stat = fs.statSync(file)
+ const readSize = opt.maxReadSize || 16*1024*1024
+ if (stat.size < readSize) {
+ p.end(fs.readFileSync(file))
+ } else {
+ let pos = 0
+ const buf = Buffer.allocUnsafe(readSize)
+ fd = fs.openSync(file, 'r')
+ while (pos < stat.size) {
+ let bytesRead = fs.readSync(fd, buf, 0, readSize, pos)
+ pos += bytesRead
+ p.write(buf.slice(0, bytesRead))
+ }
+ p.end()
+ }
+ threw = false
+ } finally {
+ if (threw && fd)
+ try { fs.closeSync(fd) } catch (er) {}
+ }
+}
+
+const listFile = (opt, cb) => {
+ const parse = new Parser(opt)
+ const readSize = opt.maxReadSize || 16*1024*1024
+
+ const file = opt.file
+ const p = new Promise((resolve, reject) => {
+ parse.on('error', reject)
+ parse.on('end', resolve)
+
+ fs.stat(file, (er, stat) => {
+ if (er)
+ reject(er)
+ else {
+ const stream = new fsm.ReadStream(file, {
+ readSize: readSize,
+ size: stat.size
+ })
+ stream.on('error', reject)
+ stream.pipe(parse)
+ }
+ })
+ })
+ return cb ? p.then(cb, cb) : p
+}
+
+const list = opt => new Parser(opt)
diff --git a/node_modules/cacache/node_modules/tar/lib/mkdir.js b/node_modules/cacache/node_modules/tar/lib/mkdir.js
new file mode 100644
index 000000000..381d0e1b3
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/mkdir.js
@@ -0,0 +1,206 @@
+'use strict'
+// wrapper around mkdirp for tar's needs.
+
+// TODO: This should probably be a class, not functionally
+// passing around state in a gazillion args.
+
+const mkdirp = require('mkdirp')
+const fs = require('fs')
+const path = require('path')
+const chownr = require('chownr')
+
+class SymlinkError extends Error {
+ constructor (symlink, path) {
+ super('Cannot extract through symbolic link')
+ this.path = path
+ this.symlink = symlink
+ }
+
+ get name () {
+ return 'SylinkError'
+ }
+}
+
+class CwdError extends Error {
+ constructor (path, code) {
+ super(code + ': Cannot cd into \'' + path + '\'')
+ this.path = path
+ this.code = code
+ }
+
+ get name () {
+ return 'CwdError'
+ }
+}
+
+const mkdir = module.exports = (dir, opt, cb) => {
+ // if there's any overlap between mask and mode,
+ // then we'll need an explicit chmod
+ const umask = opt.umask
+ const mode = opt.mode | 0o0700
+ const needChmod = (mode & umask) !== 0
+
+ const uid = opt.uid
+ const gid = opt.gid
+ const doChown = typeof uid === 'number' &&
+ typeof gid === 'number' &&
+ ( uid !== opt.processUid || gid !== opt.processGid )
+
+ const preserve = opt.preserve
+ const unlink = opt.unlink
+ const cache = opt.cache
+ const cwd = opt.cwd
+
+ const done = (er, created) => {
+ if (er)
+ cb(er)
+ else {
+ cache.set(dir, true)
+ if (created && doChown)
+ chownr(created, uid, gid, er => done(er))
+ else if (needChmod)
+ fs.chmod(dir, mode, cb)
+ else
+ cb()
+ }
+ }
+
+ if (cache && cache.get(dir) === true)
+ return done()
+
+ if (dir === cwd)
+ return fs.stat(dir, (er, st) => {
+ if (er || !st.isDirectory())
+ er = new CwdError(dir, er && er.code || 'ENOTDIR')
+ done(er)
+ })
+
+ if (preserve)
+ return mkdirp(dir, {mode}).then(made => done(null, made), done)
+
+ const sub = path.relative(cwd, dir)
+ const parts = sub.split(/\/|\\/)
+ mkdir_(cwd, parts, mode, cache, unlink, cwd, null, done)
+}
+
+const mkdir_ = (base, parts, mode, cache, unlink, cwd, created, cb) => {
+ if (!parts.length)
+ return cb(null, created)
+ const p = parts.shift()
+ const part = base + '/' + p
+ if (cache.get(part))
+ return mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
+ fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
+}
+
+const onmkdir = (part, parts, mode, cache, unlink, cwd, created, cb) => er => {
+ if (er) {
+ if (er.path && path.dirname(er.path) === cwd &&
+ (er.code === 'ENOTDIR' || er.code === 'ENOENT'))
+ return cb(new CwdError(cwd, er.code))
+
+ fs.lstat(part, (statEr, st) => {
+ if (statEr)
+ cb(statEr)
+ else if (st.isDirectory())
+ mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
+ else if (unlink)
+ fs.unlink(part, er => {
+ if (er)
+ return cb(er)
+ fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
+ })
+ else if (st.isSymbolicLink())
+ return cb(new SymlinkError(part, part + '/' + parts.join('/')))
+ else
+ cb(er)
+ })
+ } else {
+ created = created || part
+ mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
+ }
+}
+
+const mkdirSync = module.exports.sync = (dir, opt) => {
+ // if there's any overlap between mask and mode,
+ // then we'll need an explicit chmod
+ const umask = opt.umask
+ const mode = opt.mode | 0o0700
+ const needChmod = (mode & umask) !== 0
+
+ const uid = opt.uid
+ const gid = opt.gid
+ const doChown = typeof uid === 'number' &&
+ typeof gid === 'number' &&
+ ( uid !== opt.processUid || gid !== opt.processGid )
+
+ const preserve = opt.preserve
+ const unlink = opt.unlink
+ const cache = opt.cache
+ const cwd = opt.cwd
+
+ const done = (created) => {
+ cache.set(dir, true)
+ if (created && doChown)
+ chownr.sync(created, uid, gid)
+ if (needChmod)
+ fs.chmodSync(dir, mode)
+ }
+
+ if (cache && cache.get(dir) === true)
+ return done()
+
+ if (dir === cwd) {
+ let ok = false
+ let code = 'ENOTDIR'
+ try {
+ ok = fs.statSync(dir).isDirectory()
+ } catch (er) {
+ code = er.code
+ } finally {
+ if (!ok)
+ throw new CwdError(dir, code)
+ }
+ done()
+ return
+ }
+
+ if (preserve)
+ return done(mkdirp.sync(dir, mode))
+
+ const sub = path.relative(cwd, dir)
+ const parts = sub.split(/\/|\\/)
+ let created = null
+ for (let p = parts.shift(), part = cwd;
+ p && (part += '/' + p);
+ p = parts.shift()) {
+
+ if (cache.get(part))
+ continue
+
+ try {
+ fs.mkdirSync(part, mode)
+ created = created || part
+ cache.set(part, true)
+ } catch (er) {
+ if (er.path && path.dirname(er.path) === cwd &&
+ (er.code === 'ENOTDIR' || er.code === 'ENOENT'))
+ return new CwdError(cwd, er.code)
+
+ const st = fs.lstatSync(part)
+ if (st.isDirectory()) {
+ cache.set(part, true)
+ continue
+ } else if (unlink) {
+ fs.unlinkSync(part)
+ fs.mkdirSync(part, mode)
+ created = created || part
+ cache.set(part, true)
+ continue
+ } else if (st.isSymbolicLink())
+ return new SymlinkError(part, part + '/' + parts.join('/'))
+ }
+ }
+
+ return done(created)
+}
diff --git a/node_modules/cacache/node_modules/tar/lib/mode-fix.js b/node_modules/cacache/node_modules/tar/lib/mode-fix.js
new file mode 100644
index 000000000..c3758741c
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/mode-fix.js
@@ -0,0 +1,24 @@
+'use strict'
+module.exports = (mode, isDir, portable) => {
+ mode &= 0o7777
+
+ // in portable mode, use the minimum reasonable umask
+ // if this system creates files with 0o664 by default
+ // (as some linux distros do), then we'll write the
+ // archive with 0o644 instead. Also, don't ever create
+ // a file that is not readable/writable by the owner.
+ if (portable) {
+ mode = (mode | 0o600) &~0o22
+ }
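+ // Worked example: a mode of 0o664 in portable mode becomes
+ // (0o664 | 0o600) & ~0o22 === 0o644 -- the group/other write bits are
+ // cleared and owner read/write is guaranteed.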
+
+ // if dirs are readable, then they should be listable
+ if (isDir) {
+ if (mode & 0o400)
+ mode |= 0o100
+ if (mode & 0o40)
+ mode |= 0o10
+ if (mode & 0o4)
+ mode |= 0o1
+ }
+ return mode
+}
diff --git a/node_modules/cacache/node_modules/tar/lib/pack.js b/node_modules/cacache/node_modules/tar/lib/pack.js
new file mode 100644
index 000000000..0fca4ae25
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/pack.js
@@ -0,0 +1,403 @@
+'use strict'
+
+// A readable tar stream creator
+// Technically, this is a transform stream that you write paths into,
+// and tar format comes out of.
+// The `add()` method is like `write()` but returns `this`,
+// and `end()` returns `this` as well, so you can
+// do `new Pack(opt).add('files').add('dir').end().pipe(output)`
+// You could also do something like:
+// streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar'))
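+//
+// Putting that together as a concrete sketch (illustrative; the paths and
+// 'out.tgz' are made up for the example):
+//
+//   const fs = require('fs')
+//   new Pack({ cwd: process.cwd(), gzip: true })
+//     .add('lib')
+//     .add('package.json')
+//     .end()
+//     .pipe(fs.createWriteStream('out.tgz'))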
+
+class PackJob {
+ constructor (path, absolute) {
+ this.path = path || './'
+ this.absolute = absolute
+ this.entry = null
+ this.stat = null
+ this.readdir = null
+ this.pending = false
+ this.ignore = false
+ this.piped = false
+ }
+}
+
+const MiniPass = require('minipass')
+const zlib = require('minizlib')
+const ReadEntry = require('./read-entry.js')
+const WriteEntry = require('./write-entry.js')
+const WriteEntrySync = WriteEntry.Sync
+const WriteEntryTar = WriteEntry.Tar
+const Yallist = require('yallist')
+const EOF = Buffer.alloc(1024)
+const ONSTAT = Symbol('onStat')
+const ENDED = Symbol('ended')
+const QUEUE = Symbol('queue')
+const CURRENT = Symbol('current')
+const PROCESS = Symbol('process')
+const PROCESSING = Symbol('processing')
+const PROCESSJOB = Symbol('processJob')
+const JOBS = Symbol('jobs')
+const JOBDONE = Symbol('jobDone')
+const ADDFSENTRY = Symbol('addFSEntry')
+const ADDTARENTRY = Symbol('addTarEntry')
+const STAT = Symbol('stat')
+const READDIR = Symbol('readdir')
+const ONREADDIR = Symbol('onreaddir')
+const PIPE = Symbol('pipe')
+const ENTRY = Symbol('entry')
+const ENTRYOPT = Symbol('entryOpt')
+const WRITEENTRYCLASS = Symbol('writeEntryClass')
+const WRITE = Symbol('write')
+const ONDRAIN = Symbol('ondrain')
+
+const fs = require('fs')
+const path = require('path')
+const warner = require('./warn-mixin.js')
+
+const Pack = warner(class Pack extends MiniPass {
+ constructor (opt) {
+ super(opt)
+ opt = opt || Object.create(null)
+ this.opt = opt
+ this.file = opt.file || ''
+ this.cwd = opt.cwd || process.cwd()
+ this.maxReadSize = opt.maxReadSize
+ this.preservePaths = !!opt.preservePaths
+ this.strict = !!opt.strict
+ this.noPax = !!opt.noPax
+ this.prefix = (opt.prefix || '').replace(/(\\|\/)+$/, '')
+ this.linkCache = opt.linkCache || new Map()
+ this.statCache = opt.statCache || new Map()
+ this.readdirCache = opt.readdirCache || new Map()
+
+ this[WRITEENTRYCLASS] = WriteEntry
+ if (typeof opt.onwarn === 'function')
+ this.on('warn', opt.onwarn)
+
+ this.portable = !!opt.portable
+ this.zip = null
+ if (opt.gzip) {
+ if (typeof opt.gzip !== 'object')
+ opt.gzip = {}
+ if (this.portable)
+ opt.gzip.portable = true
+ this.zip = new zlib.Gzip(opt.gzip)
+ this.zip.on('data', chunk => super.write(chunk))
+ this.zip.on('end', _ => super.end())
+ this.zip.on('drain', _ => this[ONDRAIN]())
+ this.on('resume', _ => this.zip.resume())
+ } else
+ this.on('drain', this[ONDRAIN])
+
+ this.noDirRecurse = !!opt.noDirRecurse
+ this.follow = !!opt.follow
+ this.noMtime = !!opt.noMtime
+ this.mtime = opt.mtime || null
+
+ this.filter = typeof opt.filter === 'function' ? opt.filter : _ => true
+
+ this[QUEUE] = new Yallist
+ this[JOBS] = 0
+ this.jobs = +opt.jobs || 4
+ this[PROCESSING] = false
+ this[ENDED] = false
+ }
+
+ [WRITE] (chunk) {
+ return super.write(chunk)
+ }
+
+ add (path) {
+ this.write(path)
+ return this
+ }
+
+ end (path) {
+ if (path)
+ this.write(path)
+ this[ENDED] = true
+ this[PROCESS]()
+ return this
+ }
+
+ write (path) {
+ if (this[ENDED])
+ throw new Error('write after end')
+
+ if (path instanceof ReadEntry)
+ this[ADDTARENTRY](path)
+ else
+ this[ADDFSENTRY](path)
+ return this.flowing
+ }
+
+ [ADDTARENTRY] (p) {
+ const absolute = path.resolve(this.cwd, p.path)
+ if (this.prefix)
+ p.path = this.prefix + '/' + p.path.replace(/^\.(\/+|$)/, '')
+
+ // in this case, we don't have to wait for the stat
+ if (!this.filter(p.path, p))
+ p.resume()
+ else {
+ const job = new PackJob(p.path, absolute, false)
+ job.entry = new WriteEntryTar(p, this[ENTRYOPT](job))
+ job.entry.on('end', _ => this[JOBDONE](job))
+ this[JOBS] += 1
+ this[QUEUE].push(job)
+ }
+
+ this[PROCESS]()
+ }
+
+ [ADDFSENTRY] (p) {
+ const absolute = path.resolve(this.cwd, p)
+ if (this.prefix)
+ p = this.prefix + '/' + p.replace(/^\.(\/+|$)/, '')
+
+ this[QUEUE].push(new PackJob(p, absolute))
+ this[PROCESS]()
+ }
+
+ [STAT] (job) {
+ job.pending = true
+ this[JOBS] += 1
+ const stat = this.follow ? 'stat' : 'lstat'
+ fs[stat](job.absolute, (er, stat) => {
+ job.pending = false
+ this[JOBS] -= 1
+ if (er)
+ this.emit('error', er)
+ else
+ this[ONSTAT](job, stat)
+ })
+ }
+
+ [ONSTAT] (job, stat) {
+ this.statCache.set(job.absolute, stat)
+ job.stat = stat
+
+ // now we have the stat, we can filter it.
+ if (!this.filter(job.path, stat))
+ job.ignore = true
+
+ this[PROCESS]()
+ }
+
+ [READDIR] (job) {
+ job.pending = true
+ this[JOBS] += 1
+ fs.readdir(job.absolute, (er, entries) => {
+ job.pending = false
+ this[JOBS] -= 1
+ if (er)
+ return this.emit('error', er)
+ this[ONREADDIR](job, entries)
+ })
+ }
+
+ [ONREADDIR] (job, entries) {
+ this.readdirCache.set(job.absolute, entries)
+ job.readdir = entries
+ this[PROCESS]()
+ }
+
+ [PROCESS] () {
+ if (this[PROCESSING])
+ return
+
+ this[PROCESSING] = true
+ for (let w = this[QUEUE].head;
+ w !== null && this[JOBS] < this.jobs;
+ w = w.next) {
+ this[PROCESSJOB](w.value)
+ if (w.value.ignore) {
+ const p = w.next
+ this[QUEUE].removeNode(w)
+ w.next = p
+ }
+ }
+
+ this[PROCESSING] = false
+
+ if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) {
+ if (this.zip)
+ this.zip.end(EOF)
+ else {
+ super.write(EOF)
+ super.end()
+ }
+ }
+ }
+
+ get [CURRENT] () {
+ return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value
+ }
+
+ [JOBDONE] (job) {
+ this[QUEUE].shift()
+ this[JOBS] -= 1
+ this[PROCESS]()
+ }
+
+ [PROCESSJOB] (job) {
+ if (job.pending)
+ return
+
+ if (job.entry) {
+ if (job === this[CURRENT] && !job.piped)
+ this[PIPE](job)
+ return
+ }
+
+ if (!job.stat) {
+ if (this.statCache.has(job.absolute))
+ this[ONSTAT](job, this.statCache.get(job.absolute))
+ else
+ this[STAT](job)
+ }
+ if (!job.stat)
+ return
+
+ // filtered out!
+ if (job.ignore)
+ return
+
+ if (!this.noDirRecurse && job.stat.isDirectory() && !job.readdir) {
+ if (this.readdirCache.has(job.absolute))
+ this[ONREADDIR](job, this.readdirCache.get(job.absolute))
+ else
+ this[READDIR](job)
+ if (!job.readdir)
+ return
+ }
+
+ // we know it doesn't have an entry, because that got checked above
+ job.entry = this[ENTRY](job)
+ if (!job.entry) {
+ job.ignore = true
+ return
+ }
+
+ if (job === this[CURRENT] && !job.piped)
+ this[PIPE](job)
+ }
+
+ [ENTRYOPT] (job) {
+ return {
+ onwarn: (code, msg, data) => this.warn(code, msg, data),
+ noPax: this.noPax,
+ cwd: this.cwd,
+ absolute: job.absolute,
+ preservePaths: this.preservePaths,
+ maxReadSize: this.maxReadSize,
+ strict: this.strict,
+ portable: this.portable,
+ linkCache: this.linkCache,
+ statCache: this.statCache,
+ noMtime: this.noMtime,
+ mtime: this.mtime
+ }
+ }
+
+ [ENTRY] (job) {
+ this[JOBS] += 1
+ try {
+ return new this[WRITEENTRYCLASS](job.path, this[ENTRYOPT](job))
+ .on('end', () => this[JOBDONE](job))
+ .on('error', er => this.emit('error', er))
+ } catch (er) {
+ this.emit('error', er)
+ }
+ }
+
+ [ONDRAIN] () {
+ if (this[CURRENT] && this[CURRENT].entry)
+ this[CURRENT].entry.resume()
+ }
+
+ // like .pipe() but using super, because our write() is special
+ [PIPE] (job) {
+ job.piped = true
+
+ if (job.readdir)
+ job.readdir.forEach(entry => {
+ const p = this.prefix ?
+ job.path.slice(this.prefix.length + 1) || './'
+ : job.path
+
+ const base = p === './' ? '' : p.replace(/\/*$/, '/')
+ this[ADDFSENTRY](base + entry)
+ })
+
+ const source = job.entry
+ const zip = this.zip
+
+ if (zip)
+ source.on('data', chunk => {
+ if (!zip.write(chunk))
+ source.pause()
+ })
+ else
+ source.on('data', chunk => {
+ if (!super.write(chunk))
+ source.pause()
+ })
+ }
+
+ pause () {
+ if (this.zip)
+ this.zip.pause()
+ return super.pause()
+ }
+})
+
+class PackSync extends Pack {
+ constructor (opt) {
+ super(opt)
+ this[WRITEENTRYCLASS] = WriteEntrySync
+ }
+
+ // pause/resume are no-ops in sync streams.
+ pause () {}
+ resume () {}
+
+ [STAT] (job) {
+ const stat = this.follow ? 'statSync' : 'lstatSync'
+ this[ONSTAT](job, fs[stat](job.absolute))
+ }
+
+ [READDIR] (job, stat) {
+ this[ONREADDIR](job, fs.readdirSync(job.absolute))
+ }
+
+ // gotta get it all in this tick
+ [PIPE] (job) {
+ const source = job.entry
+ const zip = this.zip
+
+ if (job.readdir)
+ job.readdir.forEach(entry => {
+ const p = this.prefix ?
+ job.path.slice(this.prefix.length + 1) || './'
+ : job.path
+
+ const base = p === './' ? '' : p.replace(/\/*$/, '/')
+ this[ADDFSENTRY](base + entry)
+ })
+
+ if (zip)
+ source.on('data', chunk => {
+ zip.write(chunk)
+ })
+ else
+ source.on('data', chunk => {
+ super[WRITE](chunk)
+ })
+ }
+}
+
+Pack.Sync = PackSync
+
+module.exports = Pack
diff --git a/node_modules/cacache/node_modules/tar/lib/parse.js b/node_modules/cacache/node_modules/tar/lib/parse.js
new file mode 100644
index 000000000..d9a49ad1f
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/parse.js
@@ -0,0 +1,483 @@
+'use strict'
+
+// this[BUFFER] is the remainder of a chunk if we're waiting for
+// the full 512 bytes of a header to come in. We will Buffer.concat()
+// it to the next write(), which is a mem copy, but a small one.
+//
+// this[QUEUE] is a Yallist of entries that haven't been emitted yet.
+// It can only fill up if the user keeps write()ing after a write()
+// returns false, or does a write() with more than one entry.
+//
+// We don't buffer chunks, we always parse them and either create an
+// entry, or push it into the active entry. The ReadEntry class knows
+// to throw data away if .ignore=true
+//
+// Shift entry off the buffer when it emits 'end', and emit 'entry' for
+// the next one in the list.
+//
+// At any time, we're pushing body chunks into the entry at WRITEENTRY,
+// and waiting for 'end' on the entry at READENTRY
+//
+// ignored entries get .resume() called on them straight away
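+//
+// Rough usage sketch (illustrative; `someTarByteSource` is a stand-in for
+// any source of tar-formatted chunks):
+//
+//   const parser = new Parser({
+//     filter: (path, entry) => !path.endsWith('.map'),
+//     onentry: entry => { console.log(entry.path); entry.resume() },
+//   })
+//   someTarByteSource.on('data', chunk => parser.write(chunk))
+//   someTarByteSource.on('end', () => parser.end())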
+
+const warner = require('./warn-mixin.js')
+const path = require('path')
+const Header = require('./header.js')
+const EE = require('events')
+const Yallist = require('yallist')
+const maxMetaEntrySize = 1024 * 1024
+const Entry = require('./read-entry.js')
+const Pax = require('./pax.js')
+const zlib = require('minizlib')
+
+const gzipHeader = Buffer.from([0x1f, 0x8b])
+const STATE = Symbol('state')
+const WRITEENTRY = Symbol('writeEntry')
+const READENTRY = Symbol('readEntry')
+const NEXTENTRY = Symbol('nextEntry')
+const PROCESSENTRY = Symbol('processEntry')
+const EX = Symbol('extendedHeader')
+const GEX = Symbol('globalExtendedHeader')
+const META = Symbol('meta')
+const EMITMETA = Symbol('emitMeta')
+const BUFFER = Symbol('buffer')
+const QUEUE = Symbol('queue')
+const ENDED = Symbol('ended')
+const EMITTEDEND = Symbol('emittedEnd')
+const EMIT = Symbol('emit')
+const UNZIP = Symbol('unzip')
+const CONSUMECHUNK = Symbol('consumeChunk')
+const CONSUMECHUNKSUB = Symbol('consumeChunkSub')
+const CONSUMEBODY = Symbol('consumeBody')
+const CONSUMEMETA = Symbol('consumeMeta')
+const CONSUMEHEADER = Symbol('consumeHeader')
+const CONSUMING = Symbol('consuming')
+const BUFFERCONCAT = Symbol('bufferConcat')
+const MAYBEEND = Symbol('maybeEnd')
+const WRITING = Symbol('writing')
+const ABORTED = Symbol('aborted')
+const DONE = Symbol('onDone')
+const SAW_VALID_ENTRY = Symbol('sawValidEntry')
+const SAW_NULL_BLOCK = Symbol('sawNullBlock')
+const SAW_EOF = Symbol('sawEOF')
+
+const noop = _ => true
+
+module.exports = warner(class Parser extends EE {
+ constructor (opt) {
+ opt = opt || {}
+ super(opt)
+
+ this.file = opt.file || ''
+
+ // set to boolean false when an entry starts. 1024 bytes of \0
+ // is technically a valid tarball, albeit a boring one.
+ this[SAW_VALID_ENTRY] = null
+
+ // these BADARCHIVE errors can't be detected early. listen on DONE.
+ this.on(DONE, _ => {
+ if (this[STATE] === 'begin' || this[SAW_VALID_ENTRY] === false) {
+ // either less than 1 block of data, or all entries were invalid.
+ // Either way, probably not even a tarball.
+ this.warn('TAR_BAD_ARCHIVE', 'Unrecognized archive format')
+ }
+ })
+
+ if (opt.ondone)
+ this.on(DONE, opt.ondone)
+ else
+ this.on(DONE, _ => {
+ this.emit('prefinish')
+ this.emit('finish')
+ this.emit('end')
+ this.emit('close')
+ })
+
+ this.strict = !!opt.strict
+ this.maxMetaEntrySize = opt.maxMetaEntrySize || maxMetaEntrySize
+ this.filter = typeof opt.filter === 'function' ? opt.filter : noop
+
+ // have to set this so that streams are ok piping into it
+ this.writable = true
+ this.readable = false
+
+ this[QUEUE] = new Yallist()
+ this[BUFFER] = null
+ this[READENTRY] = null
+ this[WRITEENTRY] = null
+ this[STATE] = 'begin'
+ this[META] = ''
+ this[EX] = null
+ this[GEX] = null
+ this[ENDED] = false
+ this[UNZIP] = null
+ this[ABORTED] = false
+ this[SAW_NULL_BLOCK] = false
+ this[SAW_EOF] = false
+ if (typeof opt.onwarn === 'function')
+ this.on('warn', opt.onwarn)
+ if (typeof opt.onentry === 'function')
+ this.on('entry', opt.onentry)
+ }
+
+ [CONSUMEHEADER] (chunk, position) {
+ if (this[SAW_VALID_ENTRY] === null)
+ this[SAW_VALID_ENTRY] = false
+ let header
+ try {
+ header = new Header(chunk, position, this[EX], this[GEX])
+ } catch (er) {
+ return this.warn('TAR_ENTRY_INVALID', er)
+ }
+
+ if (header.nullBlock) {
+ if (this[SAW_NULL_BLOCK]) {
+ this[SAW_EOF] = true
+ // ending an archive with no entries. pointless, but legal.
+ if (this[STATE] === 'begin')
+ this[STATE] = 'header'
+ this[EMIT]('eof')
+ } else {
+ this[SAW_NULL_BLOCK] = true
+ this[EMIT]('nullBlock')
+ }
+ } else {
+ this[SAW_NULL_BLOCK] = false
+ if (!header.cksumValid)
+ this.warn('TAR_ENTRY_INVALID', 'checksum failure', {header})
+ else if (!header.path)
+ this.warn('TAR_ENTRY_INVALID', 'path is required', {header})
+ else {
+ const type = header.type
+ if (/^(Symbolic)?Link$/.test(type) && !header.linkpath)
+ this.warn('TAR_ENTRY_INVALID', 'linkpath required', {header})
+ else if (!/^(Symbolic)?Link$/.test(type) && header.linkpath)
+ this.warn('TAR_ENTRY_INVALID', 'linkpath forbidden', {header})
+ else {
+ const entry = this[WRITEENTRY] = new Entry(header, this[EX], this[GEX])
+
+ // we do this for meta & ignored entries as well, because they
+ // are still valid tar, or else we wouldn't know to ignore them
+ if (!this[SAW_VALID_ENTRY]) {
+ if (entry.remain) {
+ // this might be the one!
+ const onend = () => {
+ if (!entry.invalid)
+ this[SAW_VALID_ENTRY] = true
+ }
+ entry.on('end', onend)
+ } else {
+ this[SAW_VALID_ENTRY] = true
+ }
+ }
+
+ if (entry.meta) {
+ if (entry.size > this.maxMetaEntrySize) {
+ entry.ignore = true
+ this[EMIT]('ignoredEntry', entry)
+ this[STATE] = 'ignore'
+ entry.resume()
+ } else if (entry.size > 0) {
+ this[META] = ''
+ entry.on('data', c => this[META] += c)
+ this[STATE] = 'meta'
+ }
+ } else {
+ this[EX] = null
+ entry.ignore = entry.ignore || !this.filter(entry.path, entry)
+
+ if (entry.ignore) {
+ // probably valid, just not something we care about
+ this[EMIT]('ignoredEntry', entry)
+ this[STATE] = entry.remain ? 'ignore' : 'header'
+ entry.resume()
+ } else {
+ if (entry.remain)
+ this[STATE] = 'body'
+ else {
+ this[STATE] = 'header'
+ entry.end()
+ }
+
+ if (!this[READENTRY]) {
+ this[QUEUE].push(entry)
+ this[NEXTENTRY]()
+ } else
+ this[QUEUE].push(entry)
+ }
+ }
+ }
+ }
+ }
+ }
+
+ [PROCESSENTRY] (entry) {
+ let go = true
+
+ if (!entry) {
+ this[READENTRY] = null
+ go = false
+ } else if (Array.isArray(entry))
+ this.emit.apply(this, entry)
+ else {
+ this[READENTRY] = entry
+ this.emit('entry', entry)
+ if (!entry.emittedEnd) {
+ entry.on('end', _ => this[NEXTENTRY]())
+ go = false
+ }
+ }
+
+ return go
+ }
+
+ [NEXTENTRY] () {
+ do {} while (this[PROCESSENTRY](this[QUEUE].shift()))
+
+ if (!this[QUEUE].length) {
+ // At this point, there's nothing in the queue, but we may have an
+ // entry which is being consumed (readEntry).
+ // If we don't, then we definitely can handle more data.
+ // If we do, and either it's flowing, or it has never had any data
+ // written to it, then it needs more.
+ // The only other possibility is that it has returned false from a
+ // write() call, so we wait for the next drain to continue.
+ const re = this[READENTRY]
+ const drainNow = !re || re.flowing || re.size === re.remain
+ if (drainNow) {
+ if (!this[WRITING])
+ this.emit('drain')
+ } else
+ re.once('drain', _ => this.emit('drain'))
+ }
+ }
+
+ [CONSUMEBODY] (chunk, position) {
+ // write up to but no more than writeEntry.blockRemain
+ const entry = this[WRITEENTRY]
+ const br = entry.blockRemain
+ const c = (br >= chunk.length && position === 0) ? chunk
+ : chunk.slice(position, position + br)
+
+ entry.write(c)
+
+ if (!entry.blockRemain) {
+ this[STATE] = 'header'
+ this[WRITEENTRY] = null
+ entry.end()
+ }
+
+ return c.length
+ }
+
+ [CONSUMEMETA] (chunk, position) {
+ const entry = this[WRITEENTRY]
+ const ret = this[CONSUMEBODY](chunk, position)
+
+ // if we finished, then the entry is reset
+ if (!this[WRITEENTRY])
+ this[EMITMETA](entry)
+
+ return ret
+ }
+
+ [EMIT] (ev, data, extra) {
+ if (!this[QUEUE].length && !this[READENTRY])
+ this.emit(ev, data, extra)
+ else
+ this[QUEUE].push([ev, data, extra])
+ }
+
+ [EMITMETA] (entry) {
+ this[EMIT]('meta', this[META])
+ switch (entry.type) {
+ case 'ExtendedHeader':
+ case 'OldExtendedHeader':
+ this[EX] = Pax.parse(this[META], this[EX], false)
+ break
+
+ case 'GlobalExtendedHeader':
+ this[GEX] = Pax.parse(this[META], this[GEX], true)
+ break
+
+ case 'NextFileHasLongPath':
+ case 'OldGnuLongPath':
+ this[EX] = this[EX] || Object.create(null)
+ this[EX].path = this[META].replace(/\0.*/, '')
+ break
+
+ case 'NextFileHasLongLinkpath':
+ this[EX] = this[EX] || Object.create(null)
+ this[EX].linkpath = this[META].replace(/\0.*/, '')
+ break
+
+ /* istanbul ignore next */
+ default: throw new Error('unknown meta: ' + entry.type)
+ }
+ }
+
+ abort (error) {
+ this[ABORTED] = true
+ this.emit('abort', error)
+ // always throws, even in non-strict mode
+ this.warn('TAR_ABORT', error, { recoverable: false })
+ }
+
+ write (chunk) {
+ if (this[ABORTED])
+ return
+
+ // first write, might be gzipped
+ if (this[UNZIP] === null && chunk) {
+ if (this[BUFFER]) {
+ chunk = Buffer.concat([this[BUFFER], chunk])
+ this[BUFFER] = null
+ }
+ if (chunk.length < gzipHeader.length) {
+ this[BUFFER] = chunk
+ return true
+ }
+ for (let i = 0; this[UNZIP] === null && i < gzipHeader.length; i++) {
+ if (chunk[i] !== gzipHeader[i])
+ this[UNZIP] = false
+ }
+ if (this[UNZIP] === null) {
+ const ended = this[ENDED]
+ this[ENDED] = false
+ this[UNZIP] = new zlib.Unzip()
+ this[UNZIP].on('data', chunk => this[CONSUMECHUNK](chunk))
+ this[UNZIP].on('error', er => this.abort(er))
+ this[UNZIP].on('end', _ => {
+ this[ENDED] = true
+ this[CONSUMECHUNK]()
+ })
+ this[WRITING] = true
+ const ret = this[UNZIP][ended ? 'end' : 'write' ](chunk)
+ this[WRITING] = false
+ return ret
+ }
+ }
+
+ this[WRITING] = true
+ if (this[UNZIP])
+ this[UNZIP].write(chunk)
+ else
+ this[CONSUMECHUNK](chunk)
+ this[WRITING] = false
+
+ // return false if there's a queue, or if the current entry isn't flowing
+ const ret =
+ this[QUEUE].length ? false :
+ this[READENTRY] ? this[READENTRY].flowing :
+ true
+
+ // if we have no queue, then that means a clogged READENTRY
+ if (!ret && !this[QUEUE].length)
+ this[READENTRY].once('drain', _ => this.emit('drain'))
+
+ return ret
+ }
+
+ [BUFFERCONCAT] (c) {
+ if (c && !this[ABORTED])
+ this[BUFFER] = this[BUFFER] ? Buffer.concat([this[BUFFER], c]) : c
+ }
+
+ [MAYBEEND] () {
+ if (this[ENDED] &&
+ !this[EMITTEDEND] &&
+ !this[ABORTED] &&
+ !this[CONSUMING]) {
+ this[EMITTEDEND] = true
+ const entry = this[WRITEENTRY]
+ if (entry && entry.blockRemain) {
+ // truncated, likely a damaged file
+ const have = this[BUFFER] ? this[BUFFER].length : 0
+ this.warn('TAR_BAD_ARCHIVE', `Truncated input (needed ${
+ entry.blockRemain} more bytes, only ${have} available)`, {entry})
+ if (this[BUFFER])
+ entry.write(this[BUFFER])
+ entry.end()
+ }
+ this[EMIT](DONE)
+ }
+ }
+
+ [CONSUMECHUNK] (chunk) {
+ if (this[CONSUMING])
+ this[BUFFERCONCAT](chunk)
+ else if (!chunk && !this[BUFFER])
+ this[MAYBEEND]()
+ else {
+ this[CONSUMING] = true
+ if (this[BUFFER]) {
+ this[BUFFERCONCAT](chunk)
+ const c = this[BUFFER]
+ this[BUFFER] = null
+ this[CONSUMECHUNKSUB](c)
+ } else {
+ this[CONSUMECHUNKSUB](chunk)
+ }
+
+ while (this[BUFFER] &&
+ this[BUFFER].length >= 512 &&
+ !this[ABORTED] &&
+ !this[SAW_EOF]) {
+ const c = this[BUFFER]
+ this[BUFFER] = null
+ this[CONSUMECHUNKSUB](c)
+ }
+ this[CONSUMING] = false
+ }
+
+ if (!this[BUFFER] || this[ENDED])
+ this[MAYBEEND]()
+ }
+
+ [CONSUMECHUNKSUB] (chunk) {
+ // we know that we are in CONSUMING mode, so anything written goes into
+ // the buffer. Advance the position and put any remainder in the buffer.
+ let position = 0
+ let length = chunk.length
+ while (position + 512 <= length && !this[ABORTED] && !this[SAW_EOF]) {
+ switch (this[STATE]) {
+ case 'begin':
+ case 'header':
+ this[CONSUMEHEADER](chunk, position)
+ position += 512
+ break
+
+ case 'ignore':
+ case 'body':
+ position += this[CONSUMEBODY](chunk, position)
+ break
+
+ case 'meta':
+ position += this[CONSUMEMETA](chunk, position)
+ break
+
+ /* istanbul ignore next */
+ default:
+ throw new Error('invalid state: ' + this[STATE])
+ }
+ }
+
+ if (position < length) {
+ if (this[BUFFER])
+ this[BUFFER] = Buffer.concat([chunk.slice(position), this[BUFFER]])
+ else
+ this[BUFFER] = chunk.slice(position)
+ }
+ }
+
+ end (chunk) {
+ if (!this[ABORTED]) {
+ if (this[UNZIP])
+ this[UNZIP].end(chunk)
+ else {
+ this[ENDED] = true
+ this.write(chunk)
+ }
+ }
+ }
+})
diff --git a/node_modules/cacache/node_modules/tar/lib/path-reservations.js b/node_modules/cacache/node_modules/tar/lib/path-reservations.js
new file mode 100644
index 000000000..3cf0c2c12
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/path-reservations.js
@@ -0,0 +1,125 @@
+// A path exclusive reservation system
+// reserve([list, of, paths], fn)
+// When the fn is first in line for all its paths, it
+// is called with a cb that clears the reservation.
+//
+// Used by async unpack to avoid clobbering paths in use,
+// while still allowing maximal safe parallelization.
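+//
+// Rough usage sketch (illustrative; the paths and the work are made up):
+//
+//   const reservations = require('./path-reservations.js')()
+//   reservations.reserve(['a/b', 'a/b/c'], done => {
+//     // exclusive work on those paths happens here
+//     done() // releases the reservation so queued fns can run
+//   })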
+
+const assert = require('assert')
+
+module.exports = () => {
+ // path => [function or Set]
+ // A Set object means a directory reservation
+ // A fn is a direct reservation on that path
+ const queues = new Map()
+
+ // fn => {paths:[path,...], dirs:[path, ...]}
+ const reservations = new Map()
+
+ // return a set of parent dirs for a given path
+ const { join } = require('path')
+ const getDirs = path =>
+ join(path).split(/[\\\/]/).slice(0, -1).reduce((set, path) =>
+ set.length ? set.concat(join(set[set.length-1], path)) : [path], [])
+
+ // functions currently running
+ const running = new Set()
+
+ // return the queues for each path the function cares about
+ // fn => {paths, dirs}
+ const getQueues = fn => {
+ const res = reservations.get(fn)
+ /* istanbul ignore if - unpossible */
+ if (!res)
+ throw new Error('function does not have any path reservations')
+ return {
+ paths: res.paths.map(path => queues.get(path)),
+ dirs: [...res.dirs].map(path => queues.get(path)),
+ }
+ }
+
+ // check if fn is first in line for all its paths, and is
+ // included in the first set for all its dir queues
+ const check = fn => {
+ const {paths, dirs} = getQueues(fn)
+ return paths.every(q => q[0] === fn) &&
+ dirs.every(q => q[0] instanceof Set && q[0].has(fn))
+ }
+
+ // run the function if it's first in line and not already running
+ const run = fn => {
+ if (running.has(fn) || !check(fn))
+ return false
+ running.add(fn)
+ fn(() => clear(fn))
+ return true
+ }
+
+ const clear = fn => {
+ if (!running.has(fn))
+ return false
+
+ const { paths, dirs } = reservations.get(fn)
+ const next = new Set()
+
+ paths.forEach(path => {
+ const q = queues.get(path)
+ assert.equal(q[0], fn)
+ if (q.length === 1)
+ queues.delete(path)
+ else {
+ q.shift()
+ if (typeof q[0] === 'function')
+ next.add(q[0])
+ else
+ q[0].forEach(fn => next.add(fn))
+ }
+ })
+
+ dirs.forEach(dir => {
+ const q = queues.get(dir)
+ assert(q[0] instanceof Set)
+ if (q[0].size === 1 && q.length === 1) {
+ queues.delete(dir)
+ } else if (q[0].size === 1) {
+ q.shift()
+
+ // must be a function or else the Set would've been reused
+ next.add(q[0])
+ } else
+ q[0].delete(fn)
+ })
+ running.delete(fn)
+
+ next.forEach(fn => run(fn))
+ return true
+ }
+
+ const reserve = (paths, fn) => {
+ const dirs = new Set(
+ paths.map(path => getDirs(path)).reduce((a, b) => a.concat(b))
+ )
+ reservations.set(fn, {dirs, paths})
+ paths.forEach(path => {
+ const q = queues.get(path)
+ if (!q)
+ queues.set(path, [fn])
+ else
+ q.push(fn)
+ })
+ dirs.forEach(dir => {
+ const q = queues.get(dir)
+ if (!q)
+ queues.set(dir, [new Set([fn])])
+ else if (q[q.length-1] instanceof Set)
+ q[q.length-1].add(fn)
+ else
+ q.push(new Set([fn]))
+ })
+
+ return run(fn)
+ }
+
+ return { check, reserve }
+}
diff --git a/node_modules/cacache/node_modules/tar/lib/pax.js b/node_modules/cacache/node_modules/tar/lib/pax.js
new file mode 100644
index 000000000..214a459f3
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/pax.js
@@ -0,0 +1,145 @@
+'use strict'
+const Header = require('./header.js')
+const path = require('path')
+
+class Pax {
+ constructor (obj, global) {
+ this.atime = obj.atime || null
+ this.charset = obj.charset || null
+ this.comment = obj.comment || null
+ this.ctime = obj.ctime || null
+ this.gid = obj.gid || null
+ this.gname = obj.gname || null
+ this.linkpath = obj.linkpath || null
+ this.mtime = obj.mtime || null
+ this.path = obj.path || null
+ this.size = obj.size || null
+ this.uid = obj.uid || null
+ this.uname = obj.uname || null
+ this.dev = obj.dev || null
+ this.ino = obj.ino || null
+ this.nlink = obj.nlink || null
+ this.global = global || false
+ }
+
+ encode () {
+ const body = this.encodeBody()
+ if (body === '')
+ return null
+
+ const bodyLen = Buffer.byteLength(body)
+ // round up to 512 bytes
+ // add 512 for header
+ const bufLen = 512 * Math.ceil(1 + bodyLen / 512)
+ const buf = Buffer.allocUnsafe(bufLen)
+
+ // 0-fill the header section, it might not hit every field
+ for (let i = 0; i < 512; i++) {
+ buf[i] = 0
+ }
+
+ new Header({
+ // XXX split the path
+ // then the path should be PaxHeader + basename, but less than 99,
+ // prepend with the dirname
+ path: ('PaxHeader/' + path.basename(this.path)).slice(0, 99),
+ mode: this.mode || 0o644,
+ uid: this.uid || null,
+ gid: this.gid || null,
+ size: bodyLen,
+ mtime: this.mtime || null,
+ type: this.global ? 'GlobalExtendedHeader' : 'ExtendedHeader',
+ linkpath: '',
+ uname: this.uname || '',
+ gname: this.gname || '',
+ devmaj: 0,
+ devmin: 0,
+ atime: this.atime || null,
+ ctime: this.ctime || null
+ }).encode(buf)
+
+ buf.write(body, 512, bodyLen, 'utf8')
+
+ // null pad after the body
+ for (let i = bodyLen + 512; i < buf.length; i++) {
+ buf[i] = 0
+ }
+
+ return buf
+ }
+
+ encodeBody () {
+ return (
+ this.encodeField('path') +
+ this.encodeField('ctime') +
+ this.encodeField('atime') +
+ this.encodeField('dev') +
+ this.encodeField('ino') +
+ this.encodeField('nlink') +
+ this.encodeField('charset') +
+ this.encodeField('comment') +
+ this.encodeField('gid') +
+ this.encodeField('gname') +
+ this.encodeField('linkpath') +
+ this.encodeField('mtime') +
+ this.encodeField('size') +
+ this.encodeField('uid') +
+ this.encodeField('uname')
+ )
+ }
+
+ encodeField (field) {
+ if (this[field] === null || this[field] === undefined)
+ return ''
+ const v = this[field] instanceof Date ? this[field].getTime() / 1000
+ : this[field]
+ const s = ' ' +
+ (field === 'dev' || field === 'ino' || field === 'nlink'
+ ? 'SCHILY.' : '') +
+ field + '=' + v + '\n'
+ const byteLen = Buffer.byteLength(s)
+ // The length prefix counts its own digits (in ascii base-10), so
+ // adding the digit count to the byte length can push the total across
+ // a power of ten and require one more digit.
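+ // Worked example: for s = ' path=foo\n' (10 bytes), digits = 2 and
+ // 10 + 2 = 12 < 100, so the record is '12 path=foo\n' (12 bytes total).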
+ let digits = Math.floor(Math.log(byteLen) / Math.log(10)) + 1
+ if (byteLen + digits >= Math.pow(10, digits))
+ digits += 1
+ const len = digits + byteLen
+ return len + s
+ }
+}
+
+Pax.parse = (string, ex, g) => new Pax(merge(parseKV(string), ex), g)
+
+const merge = (a, b) =>
+ b ? Object.keys(a).reduce((s, k) => (s[k] = a[k], s), b) : a
+
+const parseKV = string =>
+ string
+ .replace(/\n$/, '')
+ .split('\n')
+ .reduce(parseKVLine, Object.create(null))
+
+const parseKVLine = (set, line) => {
+ const n = parseInt(line, 10)
+
+ // XXX Values with \n in them will fail this.
+ // Refactor to not be a naive line-by-line parse.
+ if (n !== Buffer.byteLength(line) + 1)
+ return set
+
+ line = line.substr((n + ' ').length)
+ const kv = line.split('=')
+ const k = kv.shift().replace(/^SCHILY\.(dev|ino|nlink)/, '$1')
+ if (!k)
+ return set
+
+ const v = kv.join('=')
+ set[k] = /^([A-Z]+\.)?([mac]|birth|creation)time$/.test(k)
+ ? new Date(v * 1000)
+ : /^[0-9]+$/.test(v) ? +v
+ : v
+ return set
+}
+
+module.exports = Pax
diff --git a/node_modules/cacache/node_modules/tar/lib/read-entry.js b/node_modules/cacache/node_modules/tar/lib/read-entry.js
new file mode 100644
index 000000000..8acee94ba
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/read-entry.js
@@ -0,0 +1,98 @@
+'use strict'
+const types = require('./types.js')
+const MiniPass = require('minipass')
+
+const SLURP = Symbol('slurp')
+module.exports = class ReadEntry extends MiniPass {
+ constructor (header, ex, gex) {
+ super()
+ // read entries always start life paused. This is to avoid the
+ // situation where Minipass's auto-ending of empty streams results
+ // in an entry ending before we're ready for it.
+ this.pause()
+ this.extended = ex
+ this.globalExtended = gex
+ this.header = header
+ this.startBlockSize = 512 * Math.ceil(header.size / 512)
+ this.blockRemain = this.startBlockSize
+ this.remain = header.size
+ this.type = header.type
+ this.meta = false
+ this.ignore = false
+ switch (this.type) {
+ case 'File':
+ case 'OldFile':
+ case 'Link':
+ case 'SymbolicLink':
+ case 'CharacterDevice':
+ case 'BlockDevice':
+ case 'Directory':
+ case 'FIFO':
+ case 'ContiguousFile':
+ case 'GNUDumpDir':
+ break
+
+ case 'NextFileHasLongLinkpath':
+ case 'NextFileHasLongPath':
+ case 'OldGnuLongPath':
+ case 'GlobalExtendedHeader':
+ case 'ExtendedHeader':
+ case 'OldExtendedHeader':
+ this.meta = true
+ break
+
+ // NOTE: gnutar and bsdtar treat unrecognized types as 'File'
+ // it may be worth doing the same, but with a warning.
+ default:
+ this.ignore = true
+ }
+
+ this.path = header.path
+ this.mode = header.mode
+ if (this.mode)
+ this.mode = this.mode & 0o7777
+ this.uid = header.uid
+ this.gid = header.gid
+ this.uname = header.uname
+ this.gname = header.gname
+ this.size = header.size
+ this.mtime = header.mtime
+ this.atime = header.atime
+ this.ctime = header.ctime
+ this.linkpath = header.linkpath
+ this.uname = header.uname
+ this.gname = header.gname
+
+ if (ex) this[SLURP](ex)
+ if (gex) this[SLURP](gex, true)
+ }
+
+ write (data) {
+ const writeLen = data.length
+ if (writeLen > this.blockRemain)
+ throw new Error('writing more to entry than is appropriate')
+
+ const r = this.remain
+ const br = this.blockRemain
+ this.remain = Math.max(0, r - writeLen)
+ this.blockRemain = Math.max(0, br - writeLen)
+ if (this.ignore)
+ return true
+
+ if (r >= writeLen)
+ return super.write(data)
+
+ // r < writeLen
+ return super.write(data.slice(0, r))
+ }
+
+ [SLURP] (ex, global) {
+ for (let k in ex) {
+ // we slurp in everything except for the path attribute in
+ // a global extended header, because that's weird.
+ if (ex[k] !== null && ex[k] !== undefined &&
+ !(global && k === 'path'))
+ this[k] = ex[k]
+ }
+ }
+}
diff --git a/node_modules/cacache/node_modules/tar/lib/replace.js b/node_modules/cacache/node_modules/tar/lib/replace.js
new file mode 100644
index 000000000..44126d1f8
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/replace.js
@@ -0,0 +1,219 @@
+'use strict'
+
+// tar -r
+const hlo = require('./high-level-opt.js')
+const Pack = require('./pack.js')
+const Parse = require('./parse.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const t = require('./list.js')
+const path = require('path')
+
+// starting at the head of the file, read a Header.
+// If the checksum is invalid, that's our position to start writing.
+// If it is valid, jump forward by the entry's size (rounded up to 512)
+// and try again.
+// Write the new Pack stream starting there.
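+//
+// Worked example: a valid header at offset 0 for a 1000-byte entry gives
+// entryBlockSize = 512 * Math.ceil(1000 / 512) = 1024, so the next header
+// is checked at 0 + 512 + 1024 = 1536.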
+
+const Header = require('./header.js')
+
+const r = module.exports = (opt_, files, cb) => {
+ const opt = hlo(opt_)
+
+ if (!opt.file)
+ throw new TypeError('file is required')
+
+ if (opt.gzip)
+ throw new TypeError('cannot append to compressed archives')
+
+ if (!files || !Array.isArray(files) || !files.length)
+ throw new TypeError('no files or directories specified')
+
+ files = Array.from(files)
+
+ return opt.sync ? replaceSync(opt, files)
+ : replace(opt, files, cb)
+}
+
+const replaceSync = (opt, files) => {
+ const p = new Pack.Sync(opt)
+
+ let threw = true
+ let fd
+ let position
+
+ try {
+ try {
+ fd = fs.openSync(opt.file, 'r+')
+ } catch (er) {
+ if (er.code === 'ENOENT')
+ fd = fs.openSync(opt.file, 'w+')
+ else
+ throw er
+ }
+
+ const st = fs.fstatSync(fd)
+ const headBuf = Buffer.alloc(512)
+
+ POSITION: for (position = 0; position < st.size; position += 512) {
+ for (let bufPos = 0, bytes = 0; bufPos < 512; bufPos += bytes) {
+ bytes = fs.readSync(
+ fd, headBuf, bufPos, headBuf.length - bufPos, position + bufPos
+ )
+
+ if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
+ throw new Error('cannot append to compressed archives')
+
+ if (!bytes)
+ break POSITION
+ }
+
+ let h = new Header(headBuf)
+ if (!h.cksumValid)
+ break
+ let entryBlockSize = 512 * Math.ceil(h.size / 512)
+ if (position + entryBlockSize + 512 > st.size)
+ break
+ // the 512 for the header we just parsed is added by the loop increment;
+ // here we also jump ahead over all the blocks of the entry body
+ position += entryBlockSize
+ if (opt.mtimeCache)
+ opt.mtimeCache.set(h.path, h.mtime)
+ }
+ threw = false
+
+ streamSync(opt, p, position, fd, files)
+ } finally {
+ if (threw)
+ try { fs.closeSync(fd) } catch (er) {}
+ }
+}
+
+const streamSync = (opt, p, position, fd, files) => {
+ const stream = new fsm.WriteStreamSync(opt.file, {
+ fd: fd,
+ start: position
+ })
+ p.pipe(stream)
+ addFilesSync(p, files)
+}
+
+const replace = (opt, files, cb) => {
+ files = Array.from(files)
+ const p = new Pack(opt)
+
+ const getPos = (fd, size, cb_) => {
+ const cb = (er, pos) => {
+ if (er)
+ fs.close(fd, _ => cb_(er))
+ else
+ cb_(null, pos)
+ }
+
+ let position = 0
+ if (size === 0)
+ return cb(null, 0)
+
+ let bufPos = 0
+ const headBuf = Buffer.alloc(512)
+ const onread = (er, bytes) => {
+ if (er)
+ return cb(er)
+ bufPos += bytes
+ if (bufPos < 512 && bytes)
+ return fs.read(
+ fd, headBuf, bufPos, headBuf.length - bufPos,
+ position + bufPos, onread
+ )
+
+ if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
+ return cb(new Error('cannot append to compressed archives'))
+
+ // truncated header
+ if (bufPos < 512)
+ return cb(null, position)
+
+ const h = new Header(headBuf)
+ if (!h.cksumValid)
+ return cb(null, position)
+
+ const entryBlockSize = 512 * Math.ceil(h.size / 512)
+ if (position + entryBlockSize + 512 > size)
+ return cb(null, position)
+
+ position += entryBlockSize + 512
+ if (position >= size)
+ return cb(null, position)
+
+ if (opt.mtimeCache)
+ opt.mtimeCache.set(h.path, h.mtime)
+ bufPos = 0
+ fs.read(fd, headBuf, 0, 512, position, onread)
+ }
+ fs.read(fd, headBuf, 0, 512, position, onread)
+ }
+
+ const promise = new Promise((resolve, reject) => {
+ p.on('error', reject)
+ let flag = 'r+'
+ const onopen = (er, fd) => {
+ if (er && er.code === 'ENOENT' && flag === 'r+') {
+ flag = 'w+'
+ return fs.open(opt.file, flag, onopen)
+ }
+
+ if (er)
+ return reject(er)
+
+ fs.fstat(fd, (er, st) => {
+ if (er)
+ return reject(er)
+ getPos(fd, st.size, (er, position) => {
+ if (er)
+ return reject(er)
+ const stream = new fsm.WriteStream(opt.file, {
+ fd: fd,
+ start: position
+ })
+ p.pipe(stream)
+ stream.on('error', reject)
+ stream.on('close', resolve)
+ addFilesAsync(p, files)
+ })
+ })
+ }
+ fs.open(opt.file, flag, onopen)
+ })
+
+ return cb ? promise.then(cb, cb) : promise
+}
+
+const addFilesSync = (p, files) => {
+ files.forEach(file => {
+ if (file.charAt(0) === '@')
+ t({
+ file: path.resolve(p.cwd, file.substr(1)),
+ sync: true,
+ noResume: true,
+ onentry: entry => p.add(entry)
+ })
+ else
+ p.add(file)
+ })
+ p.end()
+}
+
+const addFilesAsync = (p, files) => {
+ while (files.length) {
+ const file = files.shift()
+ if (file.charAt(0) === '@')
+ return t({
+ file: path.resolve(p.cwd, file.substr(1)),
+ noResume: true,
+ onentry: entry => p.add(entry)
+ }).then(_ => addFilesAsync(p, files))
+ else
+ p.add(file)
+ }
+ p.end()
+}
diff --git a/node_modules/cacache/node_modules/tar/lib/types.js b/node_modules/cacache/node_modules/tar/lib/types.js
new file mode 100644
index 000000000..df425652b
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/types.js
@@ -0,0 +1,44 @@
+'use strict'
+// map types from key to human-friendly name
+exports.name = new Map([
+ ['0', 'File'],
+ // same as File
+ ['', 'OldFile'],
+ ['1', 'Link'],
+ ['2', 'SymbolicLink'],
+ // Devices and FIFOs aren't fully supported
+ // they are parsed, but skipped when unpacking
+ ['3', 'CharacterDevice'],
+ ['4', 'BlockDevice'],
+ ['5', 'Directory'],
+ ['6', 'FIFO'],
+ // same as File
+ ['7', 'ContiguousFile'],
+ // pax headers
+ ['g', 'GlobalExtendedHeader'],
+ ['x', 'ExtendedHeader'],
+ // vendor-specific stuff
+ // skip
+ ['A', 'SolarisACL'],
+ // like 5, but with data, which should be skipped
+ ['D', 'GNUDumpDir'],
+ // metadata only, skip
+ ['I', 'Inode'],
+ // data = link path of next file
+ ['K', 'NextFileHasLongLinkpath'],
+ // data = path of next file
+ ['L', 'NextFileHasLongPath'],
+ // skip
+ ['M', 'ContinuationFile'],
+ // like L
+ ['N', 'OldGnuLongPath'],
+ // skip
+ ['S', 'SparseFile'],
+ // skip
+ ['V', 'TapeVolumeHeader'],
+ // like x
+ ['X', 'OldExtendedHeader']
+])
+
+// map the other direction
+exports.code = new Map(Array.from(exports.name).map(kv => [kv[1], kv[0]]))
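+
+// e.g. exports.name.get('5') === 'Directory' and
+// exports.code.get('Directory') === '5'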
diff --git a/node_modules/cacache/node_modules/tar/lib/unpack.js b/node_modules/cacache/node_modules/tar/lib/unpack.js
new file mode 100644
index 000000000..af0e0ffa0
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/unpack.js
@@ -0,0 +1,680 @@
+'use strict'
+
+// the PEND/UNPEND stuff tracks whether we're ready to emit end/close yet.
+// but the path reservations are required to avoid race conditions where
+// parallelized unpack ops may mess with one another, due to dependencies
+// (like a Link depending on its target) or destructive operations (like
+// clobbering an fs object to create one of a different type.)
+
+const assert = require('assert')
+const EE = require('events').EventEmitter
+const Parser = require('./parse.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const path = require('path')
+const mkdir = require('./mkdir.js')
+const mkdirSync = mkdir.sync
+const wc = require('./winchars.js')
+const pathReservations = require('./path-reservations.js')
+
+const ONENTRY = Symbol('onEntry')
+const CHECKFS = Symbol('checkFs')
+const CHECKFS2 = Symbol('checkFs2')
+const ISREUSABLE = Symbol('isReusable')
+const MAKEFS = Symbol('makeFs')
+const FILE = Symbol('file')
+const DIRECTORY = Symbol('directory')
+const LINK = Symbol('link')
+const SYMLINK = Symbol('symlink')
+const HARDLINK = Symbol('hardlink')
+const UNSUPPORTED = Symbol('unsupported')
+const UNKNOWN = Symbol('unknown')
+const CHECKPATH = Symbol('checkPath')
+const MKDIR = Symbol('mkdir')
+const ONERROR = Symbol('onError')
+const PENDING = Symbol('pending')
+const PEND = Symbol('pend')
+const UNPEND = Symbol('unpend')
+const ENDED = Symbol('ended')
+const MAYBECLOSE = Symbol('maybeClose')
+const SKIP = Symbol('skip')
+const DOCHOWN = Symbol('doChown')
+const UID = Symbol('uid')
+const GID = Symbol('gid')
+const crypto = require('crypto')
+const getFlag = require('./get-write-flag.js')
+
+/* istanbul ignore next */
+const neverCalled = () => {
+ throw new Error('sync function called cb somehow?!?')
+}
+
+// Unlinks on Windows are not atomic.
+//
+// This means that if you have a file entry, followed by another
+// file entry with an identical name, and you cannot re-use the file
+// (because it's a hardlink, or because unlink:true is set, or it's
+// Windows, which does not have useful nlink values), then the unlink
+// will be committed to the disk AFTER the new file has been written
+// over the old one, deleting the new file.
+//
+// To work around this, on Windows systems, we rename the file and then
+// delete the renamed file. It's a sloppy kludge, but frankly, I do not
+// know of a better way to do this, given windows' non-atomic unlink
+// semantics.
+//
+// See: https://github.com/npm/node-tar/issues/183
+/* istanbul ignore next */
+const unlinkFile = (path, cb) => {
+ if (process.platform !== 'win32')
+ return fs.unlink(path, cb)
+
+ const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
+ fs.rename(path, name, er => {
+ if (er)
+ return cb(er)
+ fs.unlink(name, cb)
+ })
+}
+
+/* istanbul ignore next */
+const unlinkFileSync = path => {
+ if (process.platform !== 'win32')
+ return fs.unlinkSync(path)
+
+ const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
+ fs.renameSync(path, name)
+ fs.unlinkSync(name)
+}
+
+// first valid uint32 wins, e.g. (this.uid, entry.uid, this.processUid)
+const uint32 = (a, b, c) =>
+ a === a >>> 0 ? a
+ : b === b >>> 0 ? b
+ : c
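+// e.g. uint32(null, 1000, 0) === 1000 (null fails the uint32 check, so the
+// entry's id wins); uint32(null, null, 0) falls through to 0.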
+
+class Unpack extends Parser {
+ constructor (opt) {
+ if (!opt)
+ opt = {}
+
+ opt.ondone = _ => {
+ this[ENDED] = true
+ this[MAYBECLOSE]()
+ }
+
+ super(opt)
+
+ this.reservations = pathReservations()
+
+ this.transform = typeof opt.transform === 'function' ? opt.transform : null
+
+ this.writable = true
+ this.readable = false
+
+ this[PENDING] = 0
+ this[ENDED] = false
+
+ this.dirCache = opt.dirCache || new Map()
+
+ if (typeof opt.uid === 'number' || typeof opt.gid === 'number') {
+ // need both or neither
+ if (typeof opt.uid !== 'number' || typeof opt.gid !== 'number')
+ throw new TypeError('cannot set owner without number uid and gid')
+ if (opt.preserveOwner)
+ throw new TypeError(
+ 'cannot preserve owner in archive and also set owner explicitly')
+ this.uid = opt.uid
+ this.gid = opt.gid
+ this.setOwner = true
+ } else {
+ this.uid = null
+ this.gid = null
+ this.setOwner = false
+ }
+
+ // default true for root
+ if (opt.preserveOwner === undefined && typeof opt.uid !== 'number')
+ this.preserveOwner = process.getuid && process.getuid() === 0
+ else
+ this.preserveOwner = !!opt.preserveOwner
+
+ this.processUid = (this.preserveOwner || this.setOwner) && process.getuid ?
+ process.getuid() : null
+ this.processGid = (this.preserveOwner || this.setOwner) && process.getgid ?
+ process.getgid() : null
+
+ // mostly just for testing, but useful in some cases.
+ // Forcibly trigger a chown on every entry, no matter what
+ this.forceChown = opt.forceChown === true
+
+ // turn ><?| in filenames into 0xf000-higher encoded forms
+ this.win32 = !!opt.win32 || process.platform === 'win32'
+
+ // do not unpack over files that are newer than what's in the archive
+ this.newer = !!opt.newer
+
+ // do not unpack over ANY files
+ this.keep = !!opt.keep
+
+ // do not set mtime/atime of extracted entries
+ this.noMtime = !!opt.noMtime
+
+ // allow .., absolute path entries, and unpacking through symlinks
+ // without this, warn and skip .., relativize absolutes, and error
+ // on symlinks in extraction path
+ this.preservePaths = !!opt.preservePaths
+
+ // unlink files and links before writing. This breaks existing hard
+ // links, and removes symlink directories rather than erroring
+ this.unlink = !!opt.unlink
+
+ this.cwd = path.resolve(opt.cwd || process.cwd())
+ this.strip = +opt.strip || 0
+ this.processUmask = process.umask()
+ this.umask = typeof opt.umask === 'number' ? opt.umask : this.processUmask
+ // default mode for dirs created as parents
+ this.dmode = opt.dmode || (0o0777 & (~this.umask))
+ this.fmode = opt.fmode || (0o0666 & (~this.umask))
+ this.on('entry', entry => this[ONENTRY](entry))
+ }
+
+ // a bad or damaged archive is a warning for Parser, but an error
+ // when extracting. Mark those errors as unrecoverable, because
+ // the Unpack contract cannot be met.
+ warn (code, msg, data = {}) {
+ if (code === 'TAR_BAD_ARCHIVE' || code === 'TAR_ABORT')
+ data.recoverable = false
+ return super.warn(code, msg, data)
+ }
+
+ [MAYBECLOSE] () {
+ if (this[ENDED] && this[PENDING] === 0) {
+ this.emit('prefinish')
+ this.emit('finish')
+ this.emit('end')
+ this.emit('close')
+ }
+ }
+
+ [CHECKPATH] (entry) {
+ if (this.strip) {
+ const parts = entry.path.split(/\/|\\/)
+ if (parts.length < this.strip)
+ return false
+ entry.path = parts.slice(this.strip).join('/')
+
+ if (entry.type === 'Link') {
+ const linkparts = entry.linkpath.split(/\/|\\/)
+ if (linkparts.length >= this.strip)
+ entry.linkpath = linkparts.slice(this.strip).join('/')
+ }
+ }
+
+ if (!this.preservePaths) {
+ const p = entry.path
+ if (p.match(/(^|\/|\\)\.\.(\\|\/|$)/)) {
+ this.warn('TAR_ENTRY_ERROR', `path contains '..'`, {
+ entry,
+ path: p,
+ })
+ return false
+ }
+
+ // absolutes on posix are also absolutes on win32
+ // so we only need to test this one to get both
+ if (path.win32.isAbsolute(p)) {
+ const parsed = path.win32.parse(p)
+ entry.path = p.substr(parsed.root.length)
+ const r = parsed.root
+ this.warn('TAR_ENTRY_INFO', `stripping ${r} from absolute path`, {
+ entry,
+ path: p,
+ })
+ }
+ }
+
+ // only encode : chars that aren't drive letter indicators
+ if (this.win32) {
+ const parsed = path.win32.parse(entry.path)
+ entry.path = parsed.root === '' ? wc.encode(entry.path)
+ : parsed.root + wc.encode(entry.path.substr(parsed.root.length))
+ }
+
+ if (path.isAbsolute(entry.path))
+ entry.absolute = entry.path
+ else
+ entry.absolute = path.resolve(this.cwd, entry.path)
+
+ return true
+ }
+
+ [ONENTRY] (entry) {
+ if (!this[CHECKPATH](entry))
+ return entry.resume()
+
+ assert.equal(typeof entry.absolute, 'string')
+
+ switch (entry.type) {
+ case 'Directory':
+ case 'GNUDumpDir':
+ if (entry.mode)
+ entry.mode = entry.mode | 0o700
+
+ case 'File':
+ case 'OldFile':
+ case 'ContiguousFile':
+ case 'Link':
+ case 'SymbolicLink':
+ return this[CHECKFS](entry)
+
+ case 'CharacterDevice':
+ case 'BlockDevice':
+ case 'FIFO':
+ return this[UNSUPPORTED](entry)
+ }
+ }
+
+ [ONERROR] (er, entry) {
+ // Cwd has to exist, or else nothing works. That's serious.
+ // Other errors are warnings, which raise the error in strict
+ // mode, but otherwise continue on.
+ if (er.name === 'CwdError')
+ this.emit('error', er)
+ else {
+ this.warn('TAR_ENTRY_ERROR', er, {entry})
+ this[UNPEND]()
+ entry.resume()
+ }
+ }
+
+ [MKDIR] (dir, mode, cb) {
+ mkdir(dir, {
+ uid: this.uid,
+ gid: this.gid,
+ processUid: this.processUid,
+ processGid: this.processGid,
+ umask: this.processUmask,
+ preserve: this.preservePaths,
+ unlink: this.unlink,
+ cache: this.dirCache,
+ cwd: this.cwd,
+ mode: mode
+ }, cb)
+ }
+
+ [DOCHOWN] (entry) {
+ // in preserve owner mode, chown if the entry doesn't match process
+ // in set owner mode, chown if setting doesn't match process
+ return this.forceChown ||
+ this.preserveOwner &&
+ ( typeof entry.uid === 'number' && entry.uid !== this.processUid ||
+ typeof entry.gid === 'number' && entry.gid !== this.processGid )
+ ||
+ ( typeof this.uid === 'number' && this.uid !== this.processUid ||
+ typeof this.gid === 'number' && this.gid !== this.processGid )
+ }
+
+ [UID] (entry) {
+ return uint32(this.uid, entry.uid, this.processUid)
+ }
+
+ [GID] (entry) {
+ return uint32(this.gid, entry.gid, this.processGid)
+ }
+
+ [FILE] (entry, fullyDone) {
+ const mode = entry.mode & 0o7777 || this.fmode
+ const stream = new fsm.WriteStream(entry.absolute, {
+ flags: getFlag(entry.size),
+ mode: mode,
+ autoClose: false
+ })
+ stream.on('error', er => this[ONERROR](er, entry))
+
+ let actions = 1
+ const done = er => {
+ if (er)
+ return this[ONERROR](er, entry)
+
+ if (--actions === 0) {
+ fs.close(stream.fd, er => {
+ fullyDone()
+ er ? this[ONERROR](er, entry) : this[UNPEND]()
+ })
+ }
+ }
+
+ stream.on('finish', _ => {
+ // if futimes fails, try utimes
+ // if utimes fails, fail with the original error
+ // same for fchown/chown
+ const abs = entry.absolute
+ const fd = stream.fd
+
+ if (entry.mtime && !this.noMtime) {
+ actions++
+ const atime = entry.atime || new Date()
+ const mtime = entry.mtime
+ fs.futimes(fd, atime, mtime, er =>
+ er ? fs.utimes(abs, atime, mtime, er2 => done(er2 && er))
+ : done())
+ }
+
+ if (this[DOCHOWN](entry)) {
+ actions++
+ const uid = this[UID](entry)
+ const gid = this[GID](entry)
+ fs.fchown(fd, uid, gid, er =>
+ er ? fs.chown(abs, uid, gid, er2 => done(er2 && er))
+ : done())
+ }
+
+ done()
+ })
+
+ const tx = this.transform ? this.transform(entry) || entry : entry
+ if (tx !== entry) {
+ tx.on('error', er => this[ONERROR](er, entry))
+ entry.pipe(tx)
+ }
+ tx.pipe(stream)
+ }
+
+ [DIRECTORY] (entry, fullyDone) {
+ const mode = entry.mode & 0o7777 || this.dmode
+ this[MKDIR](entry.absolute, mode, er => {
+ if (er) {
+ fullyDone()
+ return this[ONERROR](er, entry)
+ }
+
+ let actions = 1
+ const done = _ => {
+ if (--actions === 0) {
+ fullyDone()
+ this[UNPEND]()
+ entry.resume()
+ }
+ }
+
+ if (entry.mtime && !this.noMtime) {
+ actions++
+ fs.utimes(entry.absolute, entry.atime || new Date(), entry.mtime, done)
+ }
+
+ if (this[DOCHOWN](entry)) {
+ actions++
+ fs.chown(entry.absolute, this[UID](entry), this[GID](entry), done)
+ }
+
+ done()
+ })
+ }
+
+ [UNSUPPORTED] (entry) {
+ entry.unsupported = true
+ this.warn('TAR_ENTRY_UNSUPPORTED',
+ `unsupported entry type: ${entry.type}`, {entry})
+ entry.resume()
+ }
+
+ [SYMLINK] (entry, done) {
+ this[LINK](entry, entry.linkpath, 'symlink', done)
+ }
+
+ [HARDLINK] (entry, done) {
+ this[LINK](entry, path.resolve(this.cwd, entry.linkpath), 'link', done)
+ }
+
+ [PEND] () {
+ this[PENDING]++
+ }
+
+ [UNPEND] () {
+ this[PENDING]--
+ this[MAYBECLOSE]()
+ }
+
+ [SKIP] (entry) {
+ this[UNPEND]()
+ entry.resume()
+ }
+
+ // Check if we can reuse an existing filesystem entry safely and
+ // overwrite it, rather than unlinking and recreating it.
+ // Windows doesn't report a useful nlink, so we just never reuse entries.
+ [ISREUSABLE] (entry, st) {
+ return entry.type === 'File' &&
+ !this.unlink &&
+ st.isFile() &&
+ st.nlink <= 1 &&
+ process.platform !== 'win32'
+ }
+
+ // check if a thing is there, and if so, try to clobber it
+ [CHECKFS] (entry) {
+ this[PEND]()
+ const paths = [entry.path]
+ if (entry.linkpath)
+ paths.push(entry.linkpath)
+ this.reservations.reserve(paths, done => this[CHECKFS2](entry, done))
+ }
+ [CHECKFS2] (entry, done) {
+ this[MKDIR](path.dirname(entry.absolute), this.dmode, er => {
+ if (er) {
+ done()
+ return this[ONERROR](er, entry)
+ }
+ fs.lstat(entry.absolute, (er, st) => {
+ if (st && (this.keep || this.newer && st.mtime > entry.mtime)) {
+ this[SKIP](entry)
+ done()
+ } else if (er || this[ISREUSABLE](entry, st)) {
+ this[MAKEFS](null, entry, done)
+ }
+ else if (st.isDirectory()) {
+ if (entry.type === 'Directory') {
+ if (!entry.mode || (st.mode & 0o7777) === entry.mode)
+ this[MAKEFS](null, entry, done)
+ else
+ fs.chmod(entry.absolute, entry.mode,
+ er => this[MAKEFS](er, entry, done))
+ } else
+ fs.rmdir(entry.absolute, er => this[MAKEFS](er, entry, done))
+ } else
+ unlinkFile(entry.absolute, er => this[MAKEFS](er, entry, done))
+ })
+ })
+ }
+
+ [MAKEFS] (er, entry, done) {
+ if (er)
+ return this[ONERROR](er, entry)
+
+ switch (entry.type) {
+ case 'File':
+ case 'OldFile':
+ case 'ContiguousFile':
+ return this[FILE](entry, done)
+
+ case 'Link':
+ return this[HARDLINK](entry, done)
+
+ case 'SymbolicLink':
+ return this[SYMLINK](entry, done)
+
+ case 'Directory':
+ case 'GNUDumpDir':
+ return this[DIRECTORY](entry, done)
+ }
+ }
+
+ [LINK] (entry, linkpath, link, done) {
+ // XXX: get the type ('file' or 'dir') for windows
+ fs[link](linkpath, entry.absolute, er => {
+ if (er)
+ return this[ONERROR](er, entry)
+ done()
+ this[UNPEND]()
+ entry.resume()
+ })
+ }
+}
+
+class UnpackSync extends Unpack {
+ constructor (opt) {
+ super(opt)
+ }
+
+ [CHECKFS] (entry) {
+ const er = this[MKDIR](path.dirname(entry.absolute), this.dmode, neverCalled)
+ if (er)
+ return this[ONERROR](er, entry)
+ try {
+ const st = fs.lstatSync(entry.absolute)
+ if (this.keep || this.newer && st.mtime > entry.mtime)
+ return this[SKIP](entry)
+ else if (this[ISREUSABLE](entry, st))
+ return this[MAKEFS](null, entry, neverCalled)
+ else {
+ try {
+ if (st.isDirectory()) {
+ if (entry.type === 'Directory') {
+ if (entry.mode && (st.mode & 0o7777) !== entry.mode)
+ fs.chmodSync(entry.absolute, entry.mode)
+ } else
+ fs.rmdirSync(entry.absolute)
+ } else
+ unlinkFileSync(entry.absolute)
+ return this[MAKEFS](null, entry, neverCalled)
+ } catch (er) {
+ return this[ONERROR](er, entry)
+ }
+ }
+ } catch (er) {
+ return this[MAKEFS](null, entry, neverCalled)
+ }
+ }
+
+ [FILE] (entry, _) {
+ const mode = entry.mode & 0o7777 || this.fmode
+
+ const oner = er => {
+ let closeError
+ try {
+ fs.closeSync(fd)
+ } catch (e) {
+ closeError = e
+ }
+ if (er || closeError)
+ this[ONERROR](er || closeError, entry)
+ }
+
+ let stream
+ let fd
+ try {
+ fd = fs.openSync(entry.absolute, getFlag(entry.size), mode)
+ } catch (er) {
+ return oner(er)
+ }
+ const tx = this.transform ? this.transform(entry) || entry : entry
+ if (tx !== entry) {
+ tx.on('error', er => this[ONERROR](er, entry))
+ entry.pipe(tx)
+ }
+
+ tx.on('data', chunk => {
+ try {
+ fs.writeSync(fd, chunk, 0, chunk.length)
+ } catch (er) {
+ oner(er)
+ }
+ })
+
+ tx.on('end', _ => {
+ let er = null
+ // try both, falling futimes back to utimes
+ // if either fails, handle the first error
+ if (entry.mtime && !this.noMtime) {
+ const atime = entry.atime || new Date()
+ const mtime = entry.mtime
+ try {
+ fs.futimesSync(fd, atime, mtime)
+ } catch (futimeser) {
+ try {
+ fs.utimesSync(entry.absolute, atime, mtime)
+ } catch (utimeser) {
+ er = futimeser
+ }
+ }
+ }
+
+ if (this[DOCHOWN](entry)) {
+ const uid = this[UID](entry)
+ const gid = this[GID](entry)
+
+ try {
+ fs.fchownSync(fd, uid, gid)
+ } catch (fchowner) {
+ try {
+ fs.chownSync(entry.absolute, uid, gid)
+ } catch (chowner) {
+ er = er || fchowner
+ }
+ }
+ }
+
+ oner(er)
+ })
+ }
+
+ [DIRECTORY] (entry, _) {
+ const mode = entry.mode & 0o7777 || this.dmode
+ const er = this[MKDIR](entry.absolute, mode)
+ if (er)
+ return this[ONERROR](er, entry)
+ if (entry.mtime && !this.noMtime) {
+ try {
+ fs.utimesSync(entry.absolute, entry.atime || new Date(), entry.mtime)
+ } catch (er) {}
+ }
+ if (this[DOCHOWN](entry)) {
+ try {
+ fs.chownSync(entry.absolute, this[UID](entry), this[GID](entry))
+ } catch (er) {}
+ }
+ entry.resume()
+ }
+
+ [MKDIR] (dir, mode) {
+ try {
+ return mkdir.sync(dir, {
+ uid: this.uid,
+ gid: this.gid,
+ processUid: this.processUid,
+ processGid: this.processGid,
+ umask: this.processUmask,
+ preserve: this.preservePaths,
+ unlink: this.unlink,
+ cache: this.dirCache,
+ cwd: this.cwd,
+ mode: mode
+ })
+ } catch (er) {
+ return er
+ }
+ }
+
+ [LINK] (entry, linkpath, link, _) {
+ try {
+ fs[link + 'Sync'](linkpath, entry.absolute)
+ entry.resume()
+ } catch (er) {
+ return this[ONERROR](er, entry)
+ }
+ }
+}
+
+Unpack.Sync = UnpackSync
+module.exports = Unpack
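
As a rough sketch of how the `Unpack` class above is normally driven (an editorial aside, not part of the vendored source): the public `tar` entry point re-exports it, and it behaves as a writable stream that consumes raw, uncompressed tar bytes and writes entries to disk. Paths below are hypothetical.

```javascript
const fs = require('fs')
const tar = require('tar')

// Unpack is a writable parser; options mirror what tar.x accepts.
const unpack = new tar.Unpack({
  cwd: '/tmp/extract-target',   // hypothetical destination directory
  strict: false,                // recoverable problems surface as 'warn', not 'error'
})

unpack.on('warn', (code, message) => console.warn(code, message))

// Pipe raw (already-decompressed) tar bytes into it; tar.x adds gunzip handling.
fs.createReadStream('/tmp/archive.tar').pipe(unpack)
```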
diff --git a/node_modules/cacache/node_modules/tar/lib/update.js b/node_modules/cacache/node_modules/tar/lib/update.js
new file mode 100644
index 000000000..16c3e93ed
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/update.js
@@ -0,0 +1,36 @@
+'use strict'
+
+// tar -u
+
+const hlo = require('./high-level-opt.js')
+const r = require('./replace.js')
+// just call tar.r with the filter and mtimeCache
+
+const u = module.exports = (opt_, files, cb) => {
+ const opt = hlo(opt_)
+
+ if (!opt.file)
+ throw new TypeError('file is required')
+
+ if (opt.gzip)
+ throw new TypeError('cannot append to compressed archives')
+
+ if (!files || !Array.isArray(files) || !files.length)
+ throw new TypeError('no files or directories specified')
+
+ files = Array.from(files)
+
+ mtimeFilter(opt)
+ return r(opt, files, cb)
+}
+
+const mtimeFilter = opt => {
+ const filter = opt.filter
+
+ if (!opt.mtimeCache)
+ opt.mtimeCache = new Map()
+
+ opt.filter = filter ? (path, stat) =>
+ filter(path, stat) && !(opt.mtimeCache.get(path) > stat.mtime)
+ : (path, stat) => !(opt.mtimeCache.get(path) > stat.mtime)
+}
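
A hedged usage sketch for the wrapper above (assuming the documented high-level `tar.u`/`tar.update` API, which returns a promise when no callback is given; file names are hypothetical):

```javascript
const tar = require('tar')

// tar.u appends entries to an existing archive, and the mtimeCache filter
// installed above skips files that are not newer than what is already recorded.
tar.u(
  { file: 'backup.tar' },            // existing archive; gzipped archives are rejected
  ['notes.txt', 'src/index.js']      // entries to (re-)append if they are newer
).then(() => console.log('archive updated'))
```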
diff --git a/node_modules/cacache/node_modules/tar/lib/warn-mixin.js b/node_modules/cacache/node_modules/tar/lib/warn-mixin.js
new file mode 100644
index 000000000..11eb52cc6
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/warn-mixin.js
@@ -0,0 +1,21 @@
+'use strict'
+module.exports = Base => class extends Base {
+ warn (code, message, data = {}) {
+ if (this.file)
+ data.file = this.file
+ if (this.cwd)
+ data.cwd = this.cwd
+ data.code = message instanceof Error && message.code || code
+ data.tarCode = code
+ if (!this.strict && data.recoverable !== false) {
+ if (message instanceof Error) {
+ data = Object.assign(message, data)
+ message = message.message
+ }
+ this.emit('warn', data.tarCode, message, data)
+ } else if (message instanceof Error) {
+ this.emit('error', Object.assign(message, data))
+ } else
+ this.emit('error', Object.assign(new Error(`${code}: ${message}`), data))
+ }
+}
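
The mixin above routes recoverable problems to a `'warn'` event and fatal or strict-mode problems to `'error'`. A small sketch of how that surfaces through the high-level API (assuming the documented `strict`/`onwarn` options of `tar.x`; archive name is hypothetical):

```javascript
const tar = require('tar')

tar.x({
  file: 'vendor.tgz',                // hypothetical archive
  cwd: 'vendor',
  strict: false,                     // keep recoverable issues as warnings
  onwarn: (code, message, data) => {
    // e.g. code === 'TAR_ENTRY_UNSUPPORTED' for entry types Unpack skips
    console.warn(`[${code}] ${message}`)
  },
})
// With strict: true, the same conditions are emitted as 'error' instead.
```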
diff --git a/node_modules/cacache/node_modules/tar/lib/winchars.js b/node_modules/cacache/node_modules/tar/lib/winchars.js
new file mode 100644
index 000000000..cf6ea0606
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/winchars.js
@@ -0,0 +1,23 @@
+'use strict'
+
+// When writing files on Windows, translate the characters to their
+// 0xf000 higher-encoded versions.
+
+const raw = [
+ '|',
+ '<',
+ '>',
+ '?',
+ ':'
+]
+
+const win = raw.map(char =>
+ String.fromCharCode(0xf000 + char.charCodeAt(0)))
+
+const toWin = new Map(raw.map((char, i) => [char, win[i]]))
+const toRaw = new Map(win.map((char, i) => [char, raw[i]]))
+
+module.exports = {
+ encode: s => raw.reduce((s, c) => s.split(c).join(toWin.get(c)), s),
+ decode: s => win.reduce((s, c) => s.split(c).join(toRaw.get(c)), s)
+}
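
An illustration of the character table this module applies (editorial example, not from the diff): each of the five reserved characters is shifted into the 0xf000 private-use range, and the inverse map restores it.

```javascript
// ':' is 0x3a, so its translated form is the code point 0xf03a.
const encodeChar = (c) => String.fromCharCode(0xf000 + c.charCodeAt(0))

console.log(encodeChar(':').charCodeAt(0).toString(16))   // 'f03a'
console.log('C:\\data'.split(':').join(encodeChar(':')))   // 'C\uf03a\\data'
```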
diff --git a/node_modules/cacache/node_modules/tar/lib/write-entry.js b/node_modules/cacache/node_modules/tar/lib/write-entry.js
new file mode 100644
index 000000000..0e33cb59d
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/lib/write-entry.js
@@ -0,0 +1,436 @@
+'use strict'
+const MiniPass = require('minipass')
+const Pax = require('./pax.js')
+const Header = require('./header.js')
+const ReadEntry = require('./read-entry.js')
+const fs = require('fs')
+const path = require('path')
+
+const types = require('./types.js')
+const maxReadSize = 16 * 1024 * 1024
+const PROCESS = Symbol('process')
+const FILE = Symbol('file')
+const DIRECTORY = Symbol('directory')
+const SYMLINK = Symbol('symlink')
+const HARDLINK = Symbol('hardlink')
+const HEADER = Symbol('header')
+const READ = Symbol('read')
+const LSTAT = Symbol('lstat')
+const ONLSTAT = Symbol('onlstat')
+const ONREAD = Symbol('onread')
+const ONREADLINK = Symbol('onreadlink')
+const OPENFILE = Symbol('openfile')
+const ONOPENFILE = Symbol('onopenfile')
+const CLOSE = Symbol('close')
+const MODE = Symbol('mode')
+const warner = require('./warn-mixin.js')
+const winchars = require('./winchars.js')
+
+const modeFix = require('./mode-fix.js')
+
+const WriteEntry = warner(class WriteEntry extends MiniPass {
+ constructor (p, opt) {
+ opt = opt || {}
+ super(opt)
+ if (typeof p !== 'string')
+ throw new TypeError('path is required')
+ this.path = p
+ // suppress atime, ctime, uid, gid, uname, gname
+ this.portable = !!opt.portable
+ // until node has builtin pwnam functions, this'll have to do
+ this.myuid = process.getuid && process.getuid()
+ this.myuser = process.env.USER || ''
+ this.maxReadSize = opt.maxReadSize || maxReadSize
+ this.linkCache = opt.linkCache || new Map()
+ this.statCache = opt.statCache || new Map()
+ this.preservePaths = !!opt.preservePaths
+ this.cwd = opt.cwd || process.cwd()
+ this.strict = !!opt.strict
+ this.noPax = !!opt.noPax
+ this.noMtime = !!opt.noMtime
+ this.mtime = opt.mtime || null
+
+ if (typeof opt.onwarn === 'function')
+ this.on('warn', opt.onwarn)
+
+ let pathWarn = false
+ if (!this.preservePaths && path.win32.isAbsolute(p)) {
+ // absolutes on posix are also absolutes on win32
+ // so we only need to test this one to get both
+ const parsed = path.win32.parse(p)
+ this.path = p.substr(parsed.root.length)
+ pathWarn = parsed.root
+ }
+
+ this.win32 = !!opt.win32 || process.platform === 'win32'
+ if (this.win32) {
+ this.path = winchars.decode(this.path.replace(/\\/g, '/'))
+ p = p.replace(/\\/g, '/')
+ }
+
+ this.absolute = opt.absolute || path.resolve(this.cwd, p)
+
+ if (this.path === '')
+ this.path = './'
+
+ if (pathWarn) {
+ this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, {
+ entry: this,
+ path: pathWarn + this.path,
+ })
+ }
+
+ if (this.statCache.has(this.absolute))
+ this[ONLSTAT](this.statCache.get(this.absolute))
+ else
+ this[LSTAT]()
+ }
+
+ [LSTAT] () {
+ fs.lstat(this.absolute, (er, stat) => {
+ if (er)
+ return this.emit('error', er)
+ this[ONLSTAT](stat)
+ })
+ }
+
+ [ONLSTAT] (stat) {
+ this.statCache.set(this.absolute, stat)
+ this.stat = stat
+ if (!stat.isFile())
+ stat.size = 0
+ this.type = getType(stat)
+ this.emit('stat', stat)
+ this[PROCESS]()
+ }
+
+ [PROCESS] () {
+ switch (this.type) {
+ case 'File': return this[FILE]()
+ case 'Directory': return this[DIRECTORY]()
+ case 'SymbolicLink': return this[SYMLINK]()
+ // unsupported types are ignored.
+ default: return this.end()
+ }
+ }
+
+ [MODE] (mode) {
+ return modeFix(mode, this.type === 'Directory', this.portable)
+ }
+
+ [HEADER] () {
+ if (this.type === 'Directory' && this.portable)
+ this.noMtime = true
+
+ this.header = new Header({
+ path: this.path,
+ linkpath: this.linkpath,
+ // only the permissions and setuid/setgid/sticky bitflags
+ // not the higher-order bits that specify file type
+ mode: this[MODE](this.stat.mode),
+ uid: this.portable ? null : this.stat.uid,
+ gid: this.portable ? null : this.stat.gid,
+ size: this.stat.size,
+ mtime: this.noMtime ? null : this.mtime || this.stat.mtime,
+ type: this.type,
+ uname: this.portable ? null :
+ this.stat.uid === this.myuid ? this.myuser : '',
+ atime: this.portable ? null : this.stat.atime,
+ ctime: this.portable ? null : this.stat.ctime
+ })
+
+ if (this.header.encode() && !this.noPax)
+ this.write(new Pax({
+ atime: this.portable ? null : this.header.atime,
+ ctime: this.portable ? null : this.header.ctime,
+ gid: this.portable ? null : this.header.gid,
+ mtime: this.noMtime ? null : this.mtime || this.header.mtime,
+ path: this.path,
+ linkpath: this.linkpath,
+ size: this.header.size,
+ uid: this.portable ? null : this.header.uid,
+ uname: this.portable ? null : this.header.uname,
+ dev: this.portable ? null : this.stat.dev,
+ ino: this.portable ? null : this.stat.ino,
+ nlink: this.portable ? null : this.stat.nlink
+ }).encode())
+ this.write(this.header.block)
+ }
+
+ [DIRECTORY] () {
+ if (this.path.substr(-1) !== '/')
+ this.path += '/'
+ this.stat.size = 0
+ this[HEADER]()
+ this.end()
+ }
+
+ [SYMLINK] () {
+ fs.readlink(this.absolute, (er, linkpath) => {
+ if (er)
+ return this.emit('error', er)
+ this[ONREADLINK](linkpath)
+ })
+ }
+
+ [ONREADLINK] (linkpath) {
+ this.linkpath = linkpath.replace(/\\/g, '/')
+ this[HEADER]()
+ this.end()
+ }
+
+ [HARDLINK] (linkpath) {
+ this.type = 'Link'
+ this.linkpath = path.relative(this.cwd, linkpath).replace(/\\/g, '/')
+ this.stat.size = 0
+ this[HEADER]()
+ this.end()
+ }
+
+ [FILE] () {
+ if (this.stat.nlink > 1) {
+ const linkKey = this.stat.dev + ':' + this.stat.ino
+ if (this.linkCache.has(linkKey)) {
+ const linkpath = this.linkCache.get(linkKey)
+ if (linkpath.indexOf(this.cwd) === 0)
+ return this[HARDLINK](linkpath)
+ }
+ this.linkCache.set(linkKey, this.absolute)
+ }
+
+ this[HEADER]()
+ if (this.stat.size === 0)
+ return this.end()
+
+ this[OPENFILE]()
+ }
+
+ [OPENFILE] () {
+ fs.open(this.absolute, 'r', (er, fd) => {
+ if (er)
+ return this.emit('error', er)
+ this[ONOPENFILE](fd)
+ })
+ }
+
+ [ONOPENFILE] (fd) {
+ const blockLen = 512 * Math.ceil(this.stat.size / 512)
+ const bufLen = Math.min(blockLen, this.maxReadSize)
+ const buf = Buffer.allocUnsafe(bufLen)
+ this[READ](fd, buf, 0, buf.length, 0, this.stat.size, blockLen)
+ }
+
+ [READ] (fd, buf, offset, length, pos, remain, blockRemain) {
+ fs.read(fd, buf, offset, length, pos, (er, bytesRead) => {
+ if (er) {
+ // ignoring the error from close(2) is a bad practice, but at
+ // this point we already have an error, don't need another one
+ return this[CLOSE](fd, () => this.emit('error', er))
+ }
+ this[ONREAD](fd, buf, offset, length, pos, remain, blockRemain, bytesRead)
+ })
+ }
+
+ [CLOSE] (fd, cb) {
+ fs.close(fd, cb)
+ }
+
+ [ONREAD] (fd, buf, offset, length, pos, remain, blockRemain, bytesRead) {
+ if (bytesRead <= 0 && remain > 0) {
+ const er = new Error('encountered unexpected EOF')
+ er.path = this.absolute
+ er.syscall = 'read'
+ er.code = 'EOF'
+ return this[CLOSE](fd, () => this.emit('error', er))
+ }
+
+ if (bytesRead > remain) {
+ const er = new Error('did not encounter expected EOF')
+ er.path = this.absolute
+ er.syscall = 'read'
+ er.code = 'EOF'
+ return this[CLOSE](fd, () => this.emit('error', er))
+ }
+
+ // null out the rest of the buffer, if we could fit the block padding
+ if (bytesRead === remain) {
+ for (let i = bytesRead; i < length && bytesRead < blockRemain; i++) {
+ buf[i + offset] = 0
+ bytesRead ++
+ remain ++
+ }
+ }
+
+ const writeBuf = offset === 0 && bytesRead === buf.length ?
+ buf : buf.slice(offset, offset + bytesRead)
+ remain -= bytesRead
+ blockRemain -= bytesRead
+ pos += bytesRead
+ offset += bytesRead
+
+ this.write(writeBuf)
+
+ if (!remain) {
+ if (blockRemain)
+ this.write(Buffer.alloc(blockRemain))
+ return this[CLOSE](fd, er => er ? this.emit('error', er) : this.end())
+ }
+
+ if (offset >= length) {
+ buf = Buffer.allocUnsafe(length)
+ offset = 0
+ }
+ length = buf.length - offset
+ this[READ](fd, buf, offset, length, pos, remain, blockRemain)
+ }
+})
+
+class WriteEntrySync extends WriteEntry {
+ constructor (path, opt) {
+ super(path, opt)
+ }
+
+ [LSTAT] () {
+ this[ONLSTAT](fs.lstatSync(this.absolute))
+ }
+
+ [SYMLINK] () {
+ this[ONREADLINK](fs.readlinkSync(this.absolute))
+ }
+
+ [OPENFILE] () {
+ this[ONOPENFILE](fs.openSync(this.absolute, 'r'))
+ }
+
+ [READ] (fd, buf, offset, length, pos, remain, blockRemain) {
+ let threw = true
+ try {
+ const bytesRead = fs.readSync(fd, buf, offset, length, pos)
+ this[ONREAD](fd, buf, offset, length, pos, remain, blockRemain, bytesRead)
+ threw = false
+ } finally {
+ // ignoring the error from close(2) is a bad practice, but at
+ // this point we already have an error, don't need another one
+ if (threw)
+ try { this[CLOSE](fd, () => {}) } catch (er) {}
+ }
+ }
+
+ [CLOSE] (fd, cb) {
+ fs.closeSync(fd)
+ cb()
+ }
+}
+
+const WriteEntryTar = warner(class WriteEntryTar extends MiniPass {
+ constructor (readEntry, opt) {
+ opt = opt || {}
+ super(opt)
+ this.preservePaths = !!opt.preservePaths
+ this.portable = !!opt.portable
+ this.strict = !!opt.strict
+ this.noPax = !!opt.noPax
+ this.noMtime = !!opt.noMtime
+
+ this.readEntry = readEntry
+ this.type = readEntry.type
+ if (this.type === 'Directory' && this.portable)
+ this.noMtime = true
+
+ this.path = readEntry.path
+ this.mode = this[MODE](readEntry.mode)
+ this.uid = this.portable ? null : readEntry.uid
+ this.gid = this.portable ? null : readEntry.gid
+ this.uname = this.portable ? null : readEntry.uname
+ this.gname = this.portable ? null : readEntry.gname
+ this.size = readEntry.size
+ this.mtime = this.noMtime ? null : opt.mtime || readEntry.mtime
+ this.atime = this.portable ? null : readEntry.atime
+ this.ctime = this.portable ? null : readEntry.ctime
+ this.linkpath = readEntry.linkpath
+
+ if (typeof opt.onwarn === 'function')
+ this.on('warn', opt.onwarn)
+
+ let pathWarn = false
+ if (path.isAbsolute(this.path) && !this.preservePaths) {
+ const parsed = path.parse(this.path)
+ pathWarn = parsed.root
+ this.path = this.path.substr(parsed.root.length)
+ }
+
+ this.remain = readEntry.size
+ this.blockRemain = readEntry.startBlockSize
+
+ this.header = new Header({
+ path: this.path,
+ linkpath: this.linkpath,
+ // only the permissions and setuid/setgid/sticky bitflags
+ // not the higher-order bits that specify file type
+ mode: this.mode,
+ uid: this.portable ? null : this.uid,
+ gid: this.portable ? null : this.gid,
+ size: this.size,
+ mtime: this.noMtime ? null : this.mtime,
+ type: this.type,
+ uname: this.portable ? null : this.uname,
+ atime: this.portable ? null : this.atime,
+ ctime: this.portable ? null : this.ctime
+ })
+
+ if (pathWarn) {
+ this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, {
+ entry: this,
+ path: pathWarn + this.path,
+ })
+ }
+
+ if (this.header.encode() && !this.noPax)
+ super.write(new Pax({
+ atime: this.portable ? null : this.atime,
+ ctime: this.portable ? null : this.ctime,
+ gid: this.portable ? null : this.gid,
+ mtime: this.noMtime ? null : this.mtime,
+ path: this.path,
+ linkpath: this.linkpath,
+ size: this.size,
+ uid: this.portable ? null : this.uid,
+ uname: this.portable ? null : this.uname,
+ dev: this.portable ? null : this.readEntry.dev,
+ ino: this.portable ? null : this.readEntry.ino,
+ nlink: this.portable ? null : this.readEntry.nlink
+ }).encode())
+
+ super.write(this.header.block)
+ readEntry.pipe(this)
+ }
+
+ [MODE] (mode) {
+ return modeFix(mode, this.type === 'Directory', this.portable)
+ }
+
+ write (data) {
+ const writeLen = data.length
+ if (writeLen > this.blockRemain)
+ throw new Error('writing more to entry than is appropriate')
+ this.blockRemain -= writeLen
+ return super.write(data)
+ }
+
+ end () {
+ if (this.blockRemain)
+ this.write(Buffer.alloc(this.blockRemain))
+ return super.end()
+ }
+})
+
+WriteEntry.Sync = WriteEntrySync
+WriteEntry.Tar = WriteEntryTar
+
+const getType = stat =>
+ stat.isFile() ? 'File'
+ : stat.isDirectory() ? 'Directory'
+ : stat.isSymbolicLink() ? 'SymbolicLink'
+ : 'Unsupported'
+
+module.exports = WriteEntry
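
A minimal sketch of what a single `WriteEntry` produces (assuming the public `tar` package re-exports the class, as recent versions do; file names are illustrative): it reads one path from disk and streams out the 512-byte header block followed by the file body, zero-padded to a 512-byte boundary.

```javascript
const fs = require('fs')
const tar = require('tar')

const entry = new tar.WriteEntry('package.json', { cwd: process.cwd() })

// The output is the raw tar blocks for this one entry; a complete archive would
// also need the end-of-archive zero blocks that the Pack stream appends.
entry.pipe(fs.createWriteStream('single-entry.tar'))
```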
diff --git a/node_modules/cacache/node_modules/tar/package.json b/node_modules/cacache/node_modules/tar/package.json
new file mode 100644
index 000000000..a2ce568f6
--- /dev/null
+++ b/node_modules/cacache/node_modules/tar/package.json
@@ -0,0 +1,81 @@
+{
+ "_from": "tar@^6.0.0",
+ "_id": "tar@6.0.1",
+ "_inBundle": false,
+ "_integrity": "sha512-bKhKrrz2FJJj5s7wynxy/fyxpE0CmCjmOQ1KV4KkgXFWOgoIT/NbTMnB1n+LFNrNk0SSBVGGxcK5AGsyC+pW5Q==",
+ "_location": "/cacache/tar",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "tar@^6.0.0",
+ "name": "tar",
+ "escapedName": "tar",
+ "rawSpec": "^6.0.0",
+ "saveSpec": null,
+ "fetchSpec": "^6.0.0"
+ },
+ "_requiredBy": [
+ "/cacache"
+ ],
+ "_resolved": "https://registry.npmjs.org/tar/-/tar-6.0.1.tgz",
+ "_shasum": "7b3bd6c313cb6e0153770108f8d70ac298607efa",
+ "_spec": "tar@^6.0.0",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/cacache",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/npm/node-tar/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {
+ "chownr": "^1.1.3",
+ "fs-minipass": "^2.0.0",
+ "minipass": "^3.0.0",
+ "minizlib": "^2.1.0",
+ "mkdirp": "^1.0.3",
+ "yallist": "^4.0.0"
+ },
+ "deprecated": false,
+ "description": "tar for node",
+ "devDependencies": {
+ "chmodr": "^1.2.0",
+ "end-of-stream": "^1.4.3",
+ "events-to-array": "^1.1.2",
+ "mutate-fs": "^2.1.1",
+ "rimraf": "^2.7.1",
+ "tap": "^14.9.2",
+ "tar-fs": "^1.16.3",
+ "tar-stream": "^1.6.2"
+ },
+ "engines": {
+ "node": ">= 10"
+ },
+ "files": [
+ "index.js",
+ "lib/*.js"
+ ],
+ "homepage": "https://github.com/npm/node-tar#readme",
+ "license": "ISC",
+ "name": "tar",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/npm/node-tar.git"
+ },
+ "scripts": {
+ "bench": "for i in benchmarks/*/*.js; do echo $i; for j in {1..5}; do node $i || break; done; done",
+ "genparse": "node scripts/generate-parse-fixtures.js",
+ "postpublish": "git push origin --follow-tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap"
+ },
+ "tap": {
+ "coverage-map": "map.js",
+ "check-coverage": true
+ },
+ "version": "6.0.1"
+}
diff --git a/node_modules/cacache/node_modules/yallist/LICENSE b/node_modules/cacache/node_modules/yallist/LICENSE
new file mode 100644
index 000000000..19129e315
--- /dev/null
+++ b/node_modules/cacache/node_modules/yallist/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/cacache/node_modules/yallist/README.md b/node_modules/cacache/node_modules/yallist/README.md
new file mode 100644
index 000000000..f58610186
--- /dev/null
+++ b/node_modules/cacache/node_modules/yallist/README.md
@@ -0,0 +1,204 @@
+# yallist
+
+Yet Another Linked List
+
+There are many doubly-linked list implementations like it, but this
+one is mine.
+
+For when an array would be too big, and a Map can't be iterated in
+reverse order.
+
+
+[![Build Status](https://travis-ci.org/isaacs/yallist.svg?branch=master)](https://travis-ci.org/isaacs/yallist) [![Coverage Status](https://coveralls.io/repos/isaacs/yallist/badge.svg?service=github)](https://coveralls.io/github/isaacs/yallist)
+
+## basic usage
+
+```javascript
+var yallist = require('yallist')
+var myList = yallist.create([1, 2, 3])
+myList.push('foo')
+myList.unshift('bar')
+// of course pop() and shift() are there, too
+console.log(myList.toArray()) // ['bar', 1, 2, 3, 'foo']
+myList.forEach(function (k) {
+ // walk the list head to tail
+})
+myList.forEachReverse(function (k, index, list) {
+ // walk the list tail to head
+})
+var myDoubledList = myList.map(function (k) {
+ return k + k
+})
+// now myDoubledList contains ['barbar', 2, 4, 6, 'foofoo']
+// mapReverse is also a thing
+var myDoubledListReverse = myList.mapReverse(function (k) {
+ return k + k
+}) // ['foofoo', 6, 4, 2, 'barbar']
+
+var reduced = myList.reduce(function (set, entry) {
+ set += entry
+ return set
+}, 'start')
+console.log(reduced) // 'startfoo123bar'
+```
+
+## api
+
+The whole API is considered "public".
+
+Functions with the same name as an Array method work more or less the
+same way.
+
+There are reverse versions of most things, because that's the point.
+
+### Yallist
+
+Default export, the class that holds and manages a list.
+
+Call it with either a forEach-able (like an array) or a set of
+arguments, to initialize the list.
+
+The Array-ish methods all act like you'd expect. No magic length,
+though, so if you change that it won't automatically prune or add
+empty spots.
+
+### Yallist.create(..)
+
+Alias for Yallist function. Some people like factories.
+
+#### yallist.head
+
+The first node in the list
+
+#### yallist.tail
+
+The last node in the list
+
+#### yallist.length
+
+The number of nodes in the list. (Change this at your peril. It is
+not magic like Array length.)
+
+#### yallist.toArray()
+
+Convert the list to an array.
+
+#### yallist.forEach(fn, [thisp])
+
+Call a function on each item in the list.
+
+#### yallist.forEachReverse(fn, [thisp])
+
+Call a function on each item in the list, in reverse order.
+
+#### yallist.get(n)
+
+Get the data at position `n` in the list. If you use this a lot, you're
+probably better off just using an Array.
+
+#### yallist.getReverse(n)
+
+Get the data at position `n`, counting from the tail.
+
+#### yallist.map(fn, thisp)
+
+Create a new Yallist with the result of calling the function on each
+item.
+
+#### yallist.mapReverse(fn, thisp)
+
+Same as `map`, but in reverse.
+
+#### yallist.pop()
+
+Get the data from the list tail, and remove the tail from the list.
+
+#### yallist.push(item, ...)
+
+Insert one or more items to the tail of the list.
+
+#### yallist.reduce(fn, initialValue)
+
+Like Array.reduce.
+
+#### yallist.reduceReverse
+
+Like Array.reduce, but in reverse.
+
+#### yallist.reverse
+
+Reverse the list in place.
+
+#### yallist.shift()
+
+Get the data from the list head, and remove the head from the list.
+
+#### yallist.slice([from], [to])
+
+Just like Array.slice, but returns a new Yallist.
+
+#### yallist.sliceReverse([from], [to])
+
+Just like yallist.slice, but the result is returned in reverse.
+
+#### yallist.toArray()
+
+Create an array representation of the list.
+
+#### yallist.toArrayReverse()
+
+Create a reversed array representation of the list.
+
+#### yallist.unshift(item, ...)
+
+Insert one or more items to the head of the list.
+
+#### yallist.unshiftNode(node)
+
+Move a Node object to the front of the list. (That is, pull it out of
+wherever it lives, and make it the new head.)
+
+If the node belongs to a different list, then that list will remove it
+first.
+
+#### yallist.pushNode(node)
+
+Move a Node object to the end of the list. (That is, pull it out of
+wherever it lives, and make it the new tail.)
+
+If the node belongs to a list already, then that list will remove it
+first.
+
+#### yallist.removeNode(node)
+
+Remove a node from the list, preserving referential integrity of head
+and tail and other nodes.
+
+Will throw an error if you try to have a list remove a node that
+doesn't belong to it.
+
+### Yallist.Node
+
+The class that holds the data for a single entry and links it into the list.
+
+Call with `var n = new Node(value, previousNode, nextNode)`
+
+Note that if you do direct operations on Nodes themselves, it's very
+easy to get into weird states where the list is broken. Be careful :)
+
+#### node.next
+
+The next node in the list.
+
+#### node.prev
+
+The previous node in the list.
+
+#### node.value
+
+The data the node contains.
+
+#### node.list
+
+The list to which this node belongs. (Null if it does not belong to
+any list.)
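
A short sketch of the Node-level methods described above (`unshiftNode`, `pushNode`, `removeNode`), which move or detach existing `Node` objects rather than values. This uses the published `yallist` package that this directory vendors; the data is illustrative.

```javascript
const Yallist = require('yallist')

const list = Yallist.create(['a', 'b', 'c'])

// Move the tail node to the front without allocating a new node.
list.unshiftNode(list.tail)
console.log(list.toArray())   // [ 'c', 'a', 'b' ]

// Detach the current head entirely; the list repairs its links around it.
const head = list.head
list.removeNode(head)
console.log(list.toArray())   // [ 'a', 'b' ]
console.log(head.list)        // null -- the node no longer belongs to any list
```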
diff --git a/node_modules/cacache/node_modules/yallist/iterator.js b/node_modules/cacache/node_modules/yallist/iterator.js
new file mode 100644
index 000000000..d41c97a19
--- /dev/null
+++ b/node_modules/cacache/node_modules/yallist/iterator.js
@@ -0,0 +1,8 @@
+'use strict'
+module.exports = function (Yallist) {
+ Yallist.prototype[Symbol.iterator] = function* () {
+ for (let walker = this.head; walker; walker = walker.next) {
+ yield walker.value
+ }
+ }
+}
diff --git a/node_modules/cacache/node_modules/yallist/package.json b/node_modules/cacache/node_modules/yallist/package.json
new file mode 100644
index 000000000..3f0138e5e
--- /dev/null
+++ b/node_modules/cacache/node_modules/yallist/package.json
@@ -0,0 +1,64 @@
+{
+ "_from": "yallist@^4.0.0",
+ "_id": "yallist@4.0.0",
+ "_inBundle": false,
+ "_integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
+ "_location": "/cacache/yallist",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "yallist@^4.0.0",
+ "name": "yallist",
+ "escapedName": "yallist",
+ "rawSpec": "^4.0.0",
+ "saveSpec": null,
+ "fetchSpec": "^4.0.0"
+ },
+ "_requiredBy": [
+ "/cacache/minipass",
+ "/cacache/minizlib",
+ "/cacache/tar"
+ ],
+ "_resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
+ "_shasum": "9bb92790d9c0effec63be73519e11a35019a3a72",
+ "_spec": "yallist@^4.0.0",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/cacache/node_modules/minipass",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/isaacs/yallist/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {},
+ "deprecated": false,
+ "description": "Yet Another Linked List",
+ "devDependencies": {
+ "tap": "^12.1.0"
+ },
+ "directories": {
+ "test": "test"
+ },
+ "files": [
+ "yallist.js",
+ "iterator.js"
+ ],
+ "homepage": "https://github.com/isaacs/yallist#readme",
+ "license": "ISC",
+ "main": "yallist.js",
+ "name": "yallist",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/isaacs/yallist.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --all; git push origin --tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap test/*.js --100"
+ },
+ "version": "4.0.0"
+}
diff --git a/node_modules/cacache/node_modules/yallist/yallist.js b/node_modules/cacache/node_modules/yallist/yallist.js
new file mode 100644
index 000000000..4e83ab1c5
--- /dev/null
+++ b/node_modules/cacache/node_modules/yallist/yallist.js
@@ -0,0 +1,426 @@
+'use strict'
+module.exports = Yallist
+
+Yallist.Node = Node
+Yallist.create = Yallist
+
+function Yallist (list) {
+ var self = this
+ if (!(self instanceof Yallist)) {
+ self = new Yallist()
+ }
+
+ self.tail = null
+ self.head = null
+ self.length = 0
+
+ if (list && typeof list.forEach === 'function') {
+ list.forEach(function (item) {
+ self.push(item)
+ })
+ } else if (arguments.length > 0) {
+ for (var i = 0, l = arguments.length; i < l; i++) {
+ self.push(arguments[i])
+ }
+ }
+
+ return self
+}
+
+Yallist.prototype.removeNode = function (node) {
+ if (node.list !== this) {
+ throw new Error('removing node which does not belong to this list')
+ }
+
+ var next = node.next
+ var prev = node.prev
+
+ if (next) {
+ next.prev = prev
+ }
+
+ if (prev) {
+ prev.next = next
+ }
+
+ if (node === this.head) {
+ this.head = next
+ }
+ if (node === this.tail) {
+ this.tail = prev
+ }
+
+ node.list.length--
+ node.next = null
+ node.prev = null
+ node.list = null
+
+ return next
+}
+
+Yallist.prototype.unshiftNode = function (node) {
+ if (node === this.head) {
+ return
+ }
+
+ if (node.list) {
+ node.list.removeNode(node)
+ }
+
+ var head = this.head
+ node.list = this
+ node.next = head
+ if (head) {
+ head.prev = node
+ }
+
+ this.head = node
+ if (!this.tail) {
+ this.tail = node
+ }
+ this.length++
+}
+
+Yallist.prototype.pushNode = function (node) {
+ if (node === this.tail) {
+ return
+ }
+
+ if (node.list) {
+ node.list.removeNode(node)
+ }
+
+ var tail = this.tail
+ node.list = this
+ node.prev = tail
+ if (tail) {
+ tail.next = node
+ }
+
+ this.tail = node
+ if (!this.head) {
+ this.head = node
+ }
+ this.length++
+}
+
+Yallist.prototype.push = function () {
+ for (var i = 0, l = arguments.length; i < l; i++) {
+ push(this, arguments[i])
+ }
+ return this.length
+}
+
+Yallist.prototype.unshift = function () {
+ for (var i = 0, l = arguments.length; i < l; i++) {
+ unshift(this, arguments[i])
+ }
+ return this.length
+}
+
+Yallist.prototype.pop = function () {
+ if (!this.tail) {
+ return undefined
+ }
+
+ var res = this.tail.value
+ this.tail = this.tail.prev
+ if (this.tail) {
+ this.tail.next = null
+ } else {
+ this.head = null
+ }
+ this.length--
+ return res
+}
+
+Yallist.prototype.shift = function () {
+ if (!this.head) {
+ return undefined
+ }
+
+ var res = this.head.value
+ this.head = this.head.next
+ if (this.head) {
+ this.head.prev = null
+ } else {
+ this.tail = null
+ }
+ this.length--
+ return res
+}
+
+Yallist.prototype.forEach = function (fn, thisp) {
+ thisp = thisp || this
+ for (var walker = this.head, i = 0; walker !== null; i++) {
+ fn.call(thisp, walker.value, i, this)
+ walker = walker.next
+ }
+}
+
+Yallist.prototype.forEachReverse = function (fn, thisp) {
+ thisp = thisp || this
+ for (var walker = this.tail, i = this.length - 1; walker !== null; i--) {
+ fn.call(thisp, walker.value, i, this)
+ walker = walker.prev
+ }
+}
+
+Yallist.prototype.get = function (n) {
+ for (var i = 0, walker = this.head; walker !== null && i < n; i++) {
+ // abort out of the list early if we hit a cycle
+ walker = walker.next
+ }
+ if (i === n && walker !== null) {
+ return walker.value
+ }
+}
+
+Yallist.prototype.getReverse = function (n) {
+ for (var i = 0, walker = this.tail; walker !== null && i < n; i++) {
+ // abort out of the list early if we hit a cycle
+ walker = walker.prev
+ }
+ if (i === n && walker !== null) {
+ return walker.value
+ }
+}
+
+Yallist.prototype.map = function (fn, thisp) {
+ thisp = thisp || this
+ var res = new Yallist()
+ for (var walker = this.head; walker !== null;) {
+ res.push(fn.call(thisp, walker.value, this))
+ walker = walker.next
+ }
+ return res
+}
+
+Yallist.prototype.mapReverse = function (fn, thisp) {
+ thisp = thisp || this
+ var res = new Yallist()
+ for (var walker = this.tail; walker !== null;) {
+ res.push(fn.call(thisp, walker.value, this))
+ walker = walker.prev
+ }
+ return res
+}
+
+Yallist.prototype.reduce = function (fn, initial) {
+ var acc
+ var walker = this.head
+ if (arguments.length > 1) {
+ acc = initial
+ } else if (this.head) {
+ walker = this.head.next
+ acc = this.head.value
+ } else {
+ throw new TypeError('Reduce of empty list with no initial value')
+ }
+
+ for (var i = 0; walker !== null; i++) {
+ acc = fn(acc, walker.value, i)
+ walker = walker.next
+ }
+
+ return acc
+}
+
+Yallist.prototype.reduceReverse = function (fn, initial) {
+ var acc
+ var walker = this.tail
+ if (arguments.length > 1) {
+ acc = initial
+ } else if (this.tail) {
+ walker = this.tail.prev
+ acc = this.tail.value
+ } else {
+ throw new TypeError('Reduce of empty list with no initial value')
+ }
+
+ for (var i = this.length - 1; walker !== null; i--) {
+ acc = fn(acc, walker.value, i)
+ walker = walker.prev
+ }
+
+ return acc
+}
+
+Yallist.prototype.toArray = function () {
+ var arr = new Array(this.length)
+ for (var i = 0, walker = this.head; walker !== null; i++) {
+ arr[i] = walker.value
+ walker = walker.next
+ }
+ return arr
+}
+
+Yallist.prototype.toArrayReverse = function () {
+ var arr = new Array(this.length)
+ for (var i = 0, walker = this.tail; walker !== null; i++) {
+ arr[i] = walker.value
+ walker = walker.prev
+ }
+ return arr
+}
+
+Yallist.prototype.slice = function (from, to) {
+ to = to || this.length
+ if (to < 0) {
+ to += this.length
+ }
+ from = from || 0
+ if (from < 0) {
+ from += this.length
+ }
+ var ret = new Yallist()
+ if (to < from || to < 0) {
+ return ret
+ }
+ if (from < 0) {
+ from = 0
+ }
+ if (to > this.length) {
+ to = this.length
+ }
+ for (var i = 0, walker = this.head; walker !== null && i < from; i++) {
+ walker = walker.next
+ }
+ for (; walker !== null && i < to; i++, walker = walker.next) {
+ ret.push(walker.value)
+ }
+ return ret
+}
+
+Yallist.prototype.sliceReverse = function (from, to) {
+ to = to || this.length
+ if (to < 0) {
+ to += this.length
+ }
+ from = from || 0
+ if (from < 0) {
+ from += this.length
+ }
+ var ret = new Yallist()
+ if (to < from || to < 0) {
+ return ret
+ }
+ if (from < 0) {
+ from = 0
+ }
+ if (to > this.length) {
+ to = this.length
+ }
+ for (var i = this.length, walker = this.tail; walker !== null && i > to; i--) {
+ walker = walker.prev
+ }
+ for (; walker !== null && i > from; i--, walker = walker.prev) {
+ ret.push(walker.value)
+ }
+ return ret
+}
+
+Yallist.prototype.splice = function (start, deleteCount, ...nodes) {
+ if (start > this.length) {
+ start = this.length - 1
+ }
+ if (start < 0) {
+ start = this.length + start;
+ }
+
+ for (var i = 0, walker = this.head; walker !== null && i < start; i++) {
+ walker = walker.next
+ }
+
+ var ret = []
+ for (var i = 0; walker && i < deleteCount; i++) {
+ ret.push(walker.value)
+ walker = this.removeNode(walker)
+ }
+ if (walker === null) {
+ walker = this.tail
+ }
+
+ if (walker !== this.head && walker !== this.tail) {
+ walker = walker.prev
+ }
+
+ for (var i = 0; i < nodes.length; i++) {
+ walker = insert(this, walker, nodes[i])
+ }
+ return ret;
+}
+
+Yallist.prototype.reverse = function () {
+ var head = this.head
+ var tail = this.tail
+ for (var walker = head; walker !== null; walker = walker.prev) {
+ var p = walker.prev
+ walker.prev = walker.next
+ walker.next = p
+ }
+ this.head = tail
+ this.tail = head
+ return this
+}
+
+function insert (self, node, value) {
+ var inserted = node === self.head ?
+ new Node(value, null, node, self) :
+ new Node(value, node, node.next, self)
+
+ if (inserted.next === null) {
+ self.tail = inserted
+ }
+ if (inserted.prev === null) {
+ self.head = inserted
+ }
+
+ self.length++
+
+ return inserted
+}
+
+function push (self, item) {
+ self.tail = new Node(item, self.tail, null, self)
+ if (!self.head) {
+ self.head = self.tail
+ }
+ self.length++
+}
+
+function unshift (self, item) {
+ self.head = new Node(item, null, self.head, self)
+ if (!self.tail) {
+ self.tail = self.head
+ }
+ self.length++
+}
+
+function Node (value, prev, next, list) {
+ if (!(this instanceof Node)) {
+ return new Node(value, prev, next, list)
+ }
+
+ this.list = list
+ this.value = value
+
+ if (prev) {
+ prev.next = this
+ this.prev = prev
+ } else {
+ this.prev = null
+ }
+
+ if (next) {
+ next.prev = this
+ this.next = next
+ } else {
+ this.next = null
+ }
+}
+
+try {
+ // add if support for Symbol.iterator is present
+ require('./iterator.js')(Yallist)
+} catch (er) {}
diff --git a/node_modules/cacache/package.json b/node_modules/cacache/package.json
index aa20092cc..abe1a8114 100644
--- a/node_modules/cacache/package.json
+++ b/node_modules/cacache/package.json
@@ -1,30 +1,31 @@
{
- "_from": "cacache@12.0.3",
- "_id": "cacache@12.0.3",
+ "_from": "cacache@15.0.0",
+ "_id": "cacache@15.0.0",
"_inBundle": false,
- "_integrity": "sha512-kqdmfXEGFepesTuROHMs3MpFLWrPkSSpRqOw80RCflZXy/khxaArvFrQ7uJxSUduzAufc6G0g1VUCOZXxWavPw==",
+ "_integrity": "sha512-L0JpXHhplbJSiDGzyJJnJCTL7er7NzbBgxzVqLswEb4bO91Zbv17OUMuUeu/q0ZwKn3V+1HM4wb9tO4eVE/K8g==",
"_location": "/cacache",
- "_phantomChildren": {},
+ "_phantomChildren": {
+ "chownr": "1.1.3",
+ "glob": "7.1.4"
+ },
"_requested": {
"type": "version",
"registry": true,
- "raw": "cacache@12.0.3",
+ "raw": "cacache@15.0.0",
"name": "cacache",
"escapedName": "cacache",
- "rawSpec": "12.0.3",
+ "rawSpec": "15.0.0",
"saveSpec": null,
- "fetchSpec": "12.0.3"
+ "fetchSpec": "15.0.0"
},
"_requiredBy": [
"#USER",
- "/",
- "/make-fetch-happen",
- "/pacote"
+ "/"
],
- "_resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.3.tgz",
- "_shasum": "be99abba4e1bf5df461cd5a2c1071fc432573390",
- "_spec": "cacache@12.0.3",
- "_where": "/Users/isaacs/dev/npm/cli",
+ "_resolved": "https://registry.npmjs.org/cacache/-/cacache-15.0.0.tgz",
+ "_shasum": "133b59edbd2a37ea8ef2d54964c6f247e47e5059",
+ "_spec": "cacache@15.0.0",
+ "_where": "/Users/claudiahdz/npm/cli",
"author": {
"name": "Kat Marchán",
"email": "kzm@sykosomatic.org"
@@ -37,14 +38,6 @@
"content": "2",
"index": "5"
},
- "config": {
- "nyc": {
- "exclude": [
- "node_modules/**",
- "test/**"
- ]
- }
- },
"contributors": [
{
"name": "Charlotte Spencer",
@@ -56,40 +49,41 @@
}
],
"dependencies": {
- "bluebird": "^3.5.5",
- "chownr": "^1.1.1",
- "figgy-pudding": "^3.5.1",
+ "chownr": "^1.1.2",
+ "fs-minipass": "^2.0.0",
"glob": "^7.1.4",
- "graceful-fs": "^4.1.15",
- "infer-owner": "^1.0.3",
+ "infer-owner": "^1.0.4",
"lru-cache": "^5.1.1",
- "mississippi": "^3.0.0",
- "mkdirp": "^0.5.1",
+ "minipass": "^3.1.1",
+ "minipass-collect": "^1.0.2",
+ "minipass-flush": "^1.0.5",
+ "minipass-pipeline": "^1.2.2",
+ "mkdirp": "^1.0.3",
"move-concurrently": "^1.0.1",
+ "p-map": "^3.0.0",
"promise-inflight": "^1.0.1",
- "rimraf": "^2.6.3",
- "ssri": "^6.0.1",
- "unique-filename": "^1.1.1",
- "y18n": "^4.0.0"
+ "rimraf": "^2.7.1",
+ "ssri": "^8.0.0",
+ "tar": "^6.0.1",
+ "unique-filename": "^1.1.1"
},
"deprecated": false,
"description": "Fast, fault-tolerant, cross-platform, disk-based, data-agnostic, content-addressable cache.",
"devDependencies": {
"benchmark": "^2.1.4",
"chalk": "^2.4.2",
- "cross-env": "^5.1.4",
"require-inject": "^1.4.4",
- "standard": "^12.0.1",
- "standard-version": "^6.0.1",
+ "standard": "^14.3.1",
+ "standard-version": "^7.1.0",
"tacks": "^1.3.0",
- "tap": "^12.7.0",
- "weallbehave": "^1.2.0",
- "weallcontribute": "^1.0.9"
+ "tap": "^14.10.6"
+ },
+ "engines": {
+ "node": ">= 10"
},
"files": [
"*.js",
- "lib",
- "locales"
+ "lib"
],
"homepage": "https://github.com/npm/cacache#readme",
"keywords": [
@@ -116,14 +110,19 @@
},
"scripts": {
"benchmarks": "node test/benchmarks",
- "postrelease": "npm publish && git push --follow-tags",
+ "coverage": "tap",
+ "lint": "standard",
+ "postrelease": "npm publish",
+ "posttest": "npm run lint",
+ "prepublishOnly": "git push --follow-tags",
"prerelease": "npm t",
- "pretest": "standard",
"release": "standard-version -s",
- "test": "cross-env CACACHE_UPDATE_LOCALE_FILES=true tap --coverage --nyc-arg=--all -J test/*.js",
- "test-docker": "docker run -it --rm --name pacotest -v \"$PWD\":/tmp -w /tmp node:latest npm test",
- "update-coc": "weallbehave -o . && git add CODE_OF_CONDUCT.md && git commit -m 'docs(coc): updated CODE_OF_CONDUCT.md'",
- "update-contrib": "weallcontribute -o . && git add CONTRIBUTING.md && git commit -m 'docs(contributing): updated CONTRIBUTING.md'"
+ "test": "tap",
+ "test-docker": "docker run -it --rm --name pacotest -v \"$PWD\":/tmp -w /tmp node:latest npm test"
+ },
+ "tap": {
+ "100": true,
+ "test-regex": "test/[^/]*.js"
},
- "version": "12.0.3"
+ "version": "15.0.0"
}
diff --git a/node_modules/cacache/put.js b/node_modules/cacache/put.js
index a40063930..eb21aa867 100644
--- a/node_modules/cacache/put.js
+++ b/node_modules/cacache/put.js
@@ -1,86 +1,84 @@
'use strict'
-const figgyPudding = require('figgy-pudding')
const index = require('./lib/entry-index')
const memo = require('./lib/memoization')
const write = require('./lib/content/write')
-const to = require('mississippi').to
+const Flush = require('minipass-flush')
+const { PassThrough } = require('minipass-collect')
+const Pipeline = require('minipass-pipeline')
-const PutOpts = figgyPudding({
- algorithms: {
- default: ['sha512']
- },
- integrity: {},
- memoize: {},
- metadata: {},
- pickAlgorithm: {},
- size: {},
- tmpPrefix: {},
- single: {},
- sep: {},
- error: {},
- strict: {}
+const putOpts = (opts) => ({
+ algorithms: ['sha512'],
+ ...opts
})
module.exports = putData
-function putData (cache, key, data, opts) {
- opts = PutOpts(opts)
- return write(cache, data, opts).then(res => {
- return index.insert(
- cache, key, res.integrity, opts.concat({ size: res.size })
- ).then(entry => {
- if (opts.memoize) {
- memo.put(cache, entry, data, opts)
- }
- return res.integrity
- })
+
+function putData (cache, key, data, opts = {}) {
+ const { memoize } = opts
+ opts = putOpts(opts)
+ return write(cache, data, opts).then((res) => {
+ return index
+ .insert(cache, key, res.integrity, { ...opts, size: res.size })
+ .then((entry) => {
+ if (memoize) {
+ memo.put(cache, entry, data, opts)
+ }
+ return res.integrity
+ })
})
}
module.exports.stream = putStream
-function putStream (cache, key, opts) {
- opts = PutOpts(opts)
+
+function putStream (cache, key, opts = {}) {
+ const { memoize } = opts
+ opts = putOpts(opts)
let integrity
let size
- const contentStream = write.stream(
- cache, opts
- ).on('integrity', int => {
- integrity = int
- }).on('size', s => {
- size = s
- })
+
let memoData
- let memoTotal = 0
- const stream = to((chunk, enc, cb) => {
- contentStream.write(chunk, enc, () => {
- if (opts.memoize) {
- if (!memoData) { memoData = [] }
- memoData.push(chunk)
- memoTotal += chunk.length
- }
- cb()
+ const pipeline = new Pipeline()
+ // first item in the pipeline is the memoizer, because we need
+ // that to end first and get the collected data.
+ if (memoize) {
+ const memoizer = new PassThrough().on('collect', data => {
+ memoData = data
})
- }, cb => {
- contentStream.end(() => {
- index.insert(cache, key, integrity, opts.concat({ size })).then(entry => {
- if (opts.memoize) {
- memo.put(cache, entry, Buffer.concat(memoData, memoTotal), opts)
- }
- stream.emit('integrity', integrity)
- cb()
- })
+ pipeline.push(memoizer)
+ }
+
+ // contentStream is a write-only, not a passthrough
+ // no data comes out of it.
+ const contentStream = write.stream(cache, opts)
+ .on('integrity', (int) => {
+ integrity = int
})
- })
- let erred = false
- stream.once('error', err => {
- if (erred) { return }
- erred = true
- contentStream.emit('error', err)
- })
- contentStream.once('error', err => {
- if (erred) { return }
- erred = true
- stream.emit('error', err)
- })
- return stream
+ .on('size', (s) => {
+ size = s
+ })
+
+ pipeline.push(contentStream)
+
+ // last but not least, we write the index and emit hash and size,
+ // and memoize if we're doing that
+ pipeline.push(new Flush({
+ flush () {
+ return index
+ .insert(cache, key, integrity, { ...opts, size })
+ .then((entry) => {
+ if (memoize && memoData) {
+ memo.put(cache, entry, memoData, opts)
+ }
+ if (integrity) {
+ pipeline.emit('integrity', integrity)
+ }
+ if (size) {
+ pipeline.emit('size', size)
+ }
+ })
+ }
+ }))
+
+ return pipeline
}
diff --git a/node_modules/cacache/rm.js b/node_modules/cacache/rm.js
index e71a1d27b..7dd4e8c8b 100644
--- a/node_modules/cacache/rm.js
+++ b/node_modules/cacache/rm.js
@@ -1,27 +1,30 @@
'use strict'
-const BB = require('bluebird')
+const util = require('util')
const index = require('./lib/entry-index')
const memo = require('./lib/memoization')
const path = require('path')
-const rimraf = BB.promisify(require('rimraf'))
+const rimraf = util.promisify(require('rimraf'))
const rmContent = require('./lib/content/rm')
module.exports = entry
module.exports.entry = entry
+
function entry (cache, key) {
memo.clearMemoized()
return index.delete(cache, key)
}
module.exports.content = content
+
function content (cache, integrity) {
memo.clearMemoized()
return rmContent(cache, integrity)
}
module.exports.all = all
+
function all (cache) {
memo.clearMemoized()
return rimraf(path.join(cache, '*(content-*|index-*)'))