// lib/storage.js from github.com/webtorrent/webtorrent (blob d13e1168c3f3e3d1ad8c3f9af47651a14fe5cacf)

module.exports = exports = Storage

var BitField = require('bitfield')
var BlockStream = require('block-stream2')
var debug = require('debug')('webtorrent:storage')
var dezalgo = require('dezalgo')
var eos = require('end-of-stream')
var EventEmitter = require('events').EventEmitter
var FileStream = require('./file-stream')
var inherits = require('inherits')
var mime = require('./mime.json')
var MultiStream = require('multistream')
var once = require('once')
var path = require('path')
var sha1 = require('simple-sha1')

var BLOCK_LENGTH = 16 * 1024

var BLOCK_BLANK = exports.BLOCK_BLANK = 0
var BLOCK_RESERVED = exports.BLOCK_RESERVED = 1
var BLOCK_WRITTEN = exports.BLOCK_WRITTEN = 2
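
// A piece is split into fixed-size 16 KiB blocks. For example, a 256 KiB piece
// has Math.ceil((256 * 1024) / BLOCK_LENGTH) === 16 blocks; only the final
// block of a piece is allowed to be shorter than BLOCK_LENGTH.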

function noop () {}

inherits(Piece, EventEmitter)

/**
 * A torrent piece
 *
 * @param {number} index  piece index
 * @param {string} hash   sha1 hash (hex) for this piece
 * @param {Buffer|number} buffer backing buffer, or piece length if backing buffer is lazy
 * @param {boolean=} noVerify skip piece verification (used when seeding a new file)
 */
function Piece (index, hash, buffer, noVerify) {
  var self = this
  EventEmitter.call(self)
  if (!debug.enabled) self.setMaxListeners(0)

  self.index = index
  self.hash = hash
  self.noVerify = !!noVerify

  if (typeof buffer === 'number') {
    // alloc buffer lazily
    self.buffer = null
    self.length = buffer
  } else {
    // use buffer provided
    self.buffer = buffer
    self.length = buffer.length
  }

  self._reset()
}
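
// Example (illustrative sketch, not part of the module): driving a Piece through
// the reserve/write cycle. `expectedSha1Hex` and `blockFromPeer` are hypothetical
// placeholders for a 40-char hex hash and a 16 KiB Buffer received from a peer.
//
//   var piece = new Piece(0, expectedSha1Hex, 32 * 1024) // two 16 KiB blocks
//   piece.on('done', function () { console.log('piece 0 verified') })
//   var reservation = piece.reserveBlock(false) // { offset: 0, length: 16384 }
//   piece.writeBlock(reservation.offset, blockFromPeer, function (err) {
//     if (err) console.error(err) // once all blocks are written, verify() runs
//   })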

Piece.prototype.readBlock = function (offset, length, cb) {
  var self = this
  cb = dezalgo(cb)
  if (!self.buffer || !self._verifyOffset(offset)) {
    return cb(new Error('invalid block offset ' + offset))
  }
  cb(null, self.buffer.slice(offset, offset + length))
}

Piece.prototype.writeBlock = function (offset, buffer, cb) {
  var self = this
  cb = dezalgo(cb)
  if (!self._verifyOffset(offset) || !self._verifyBlock(offset, buffer)) {
    return cb(new Error('invalid block ' + offset + ':' + buffer.length))
  }
  self._lazyAllocBuffer()

  var i = offset / BLOCK_LENGTH
  if (self.blocks[i] === BLOCK_WRITTEN) {
    return cb(null)
  }

  buffer.copy(self.buffer, offset)
  self.blocks[i] = BLOCK_WRITTEN
  self.blocksWritten += 1

  if (self.blocksWritten === self.blocks.length) {
    self.verify()
  }

  cb(null)
}

Piece.prototype.reserveBlock = function (endGame) {
  var self = this
  var len = self.blocks.length
  for (var i = 0; i < len; i++) {
    // skip blocks that are already reserved (unless end game mode is enabled)
    // and blocks that have already been written
    if ((self.blocks[i] && !endGame) || self.blocks[i] === BLOCK_WRITTEN) {
      continue
    }
    self.blocks[i] = BLOCK_RESERVED
    return {
      offset: i * BLOCK_LENGTH,
      length: (i === len - 1)
        ? self.length - (i * BLOCK_LENGTH)
        : BLOCK_LENGTH
    }
  }
  return null
}

Piece.prototype.cancelBlock = function (offset) {
  var self = this
  if (!self._verifyOffset(offset)) {
    return false
  }

  var i = offset / BLOCK_LENGTH
  if (self.blocks[i] === BLOCK_RESERVED) {
    self.blocks[i] = BLOCK_BLANK
  }

  return true
}

Piece.prototype._reset = function () {
  var self = this
  self.verified = false
  self.blocks = new Buffer(Math.ceil(self.length / BLOCK_LENGTH))
  self.blocks.fill(0)
  self.blocksWritten = 0
}

Piece.prototype.verify = function (buffer) {
  var self = this
  buffer = buffer || self.buffer
  if (self.verified || !buffer) {
    return
  }

  if (self.noVerify) {
    self.verified = true
    onResult()
    return
  }

  sha1(buffer, function (computedHash) {
    // compare the computed sha1 of the piece data with the expected hash from the torrent
    self.verified = (computedHash === self.hash)
    onResult()
  })

  function onResult () {
    if (self.verified) {
      self.emit('done')
    } else {
      self.emit('warning', new Error('piece ' + self.index + ' failed verification'))
      self._reset()
    }
  }
}

Piece.prototype._verifyOffset = function (offset) {
  var self = this
  if (offset % BLOCK_LENGTH === 0) {
    return true
  } else {
    self.emit(
      'warning',
      new Error('invalid block offset ' + offset + ', not multiple of ' + BLOCK_LENGTH)
    )
    return false
  }
}

Piece.prototype._verifyBlock = function (offset, buffer) {
  var self = this
  if (buffer.length === BLOCK_LENGTH) {
    // normal block length
    return true
  } else if (buffer.length === self.length - offset &&
    self.length - offset < BLOCK_LENGTH) {
    // last block in piece is allowed to be less than block length
    return true
  } else {
    self.emit('warning', new Error('invalid block size ' + buffer.length))
    return false
  }
}

Piece.prototype._lazyAllocBuffer = function () {
  var self = this
  if (!self.buffer) {
    self.buffer = new Buffer(self.length)
  }
}

inherits(File, EventEmitter)

/**
 * A torrent file
 *
 * @param {Storage} storage       Storage container object
 * @param {Object}  file          the file object from the parsed torrent
 * @param {Array.<Piece>} pieces  backing pieces for this file
 * @param {number}  pieceLength   the length in bytes of a non-terminal piece
 */
function File (storage, file, pieces, pieceLength) {
  var self = this
  EventEmitter.call(self)
  if (!debug.enabled) self.setMaxListeners(0)

  self.storage = storage
  self.name = file.name
  self.path = file.path
  self.length = file.length
  self.offset = file.offset
  self.pieces = pieces
  self.pieceLength = pieceLength

  self.done = false

  self.pieces.forEach(function (piece) {
    piece.on('done', function () {
      self._checkDone()
    })
  })

  // if the file is zero-length, it will be done upon initialization
  self._checkDone()
}

/**
 * Selects the file to be downloaded, but at a lower priority than files with streams.
 * Useful if you know you need the file at a later stage.
 */
File.prototype.select = function () {
  var self = this
  if (self.pieces.length > 0) {
    var start = self.pieces[0].index
    var end = self.pieces[self.pieces.length - 1].index
    self.storage.emit('select', start, end, false)
  }
}

/**
 * Deselects the file, which means it won't be downloaded unless someone creates a stream
 * for it.
 */
File.prototype.deselect = function () {
  var self = this
  if (self.pieces.length > 0) {
    var start = self.pieces[0].index
    var end = self.pieces[self.pieces.length - 1].index
    self.storage.emit('deselect', start, end, false)
  }
}
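
// For instance (sketch): a caller that wants every file eventually but only needs
// to stream the first one right away could select them all and open one stream.
// `destination` is a hypothetical writable stream.
//
//   storage.files.forEach(function (file) { file.select() })
//   storage.files[0].createReadStream().pipe(destination)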

/**
 * Create a readable stream to the file. Pieces needed by the stream will be prioritized
 * highly and fetched from the swarm first.
 *
 * @param {Object} opts
 * @param {number} opts.start stream slice of file, starting from this byte (inclusive)
 * @param {number} opts.end   stream slice of file, ending with this byte (inclusive)
 * @return {stream.Readable}
 */
File.prototype.createReadStream = function (opts) {
  var self = this
  if (!opts) opts = {}
  if (opts.pieceLength == null) opts.pieceLength = self.pieceLength
  var stream = new FileStream(self, opts)
  self.storage.emit('select', stream.startPiece, stream.endPiece, true, stream.notify.bind(stream))
  eos(stream, function () {
    self.storage.emit('deselect', stream.startPiece, stream.endPiece, true)
  })

  return stream
}
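
// Example (sketch): stream only the first megabyte of a file. Per the JSDoc above,
// start and end are inclusive byte offsets within the file.
//
//   var stream = file.createReadStream({ start: 0, end: (1024 * 1024) - 1 })
//   stream.on('data', function (chunk) { /* consume chunk */ })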

/**
 * Note: This function is async to support different types of (async) storage backends in
 * the future.
 * @param {function} cb
 */
File.prototype.getBlobURL = function (cb) {
  var self = this
  if (typeof window === 'undefined') throw new Error('browser-only method')

  self.getBuffer(function (err, buffer) {
    if (err) return cb(err)
    var type = mime[path.extname(self.name).toLowerCase()]
    var blob = type
      ? new window.Blob([ buffer ], { type: type })
      : new window.Blob([ buffer ])
    var url = window.URL.createObjectURL(blob)
    cb(null, url)
  })
}
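
// Example (browser-only sketch): display a completed file. The <video> element
// selector here is hypothetical.
//
//   file.getBlobURL(function (err, url) {
//     if (err) throw err
//     document.querySelector('video').src = url
//   })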

/**
 * @param {function} cb
 */
File.prototype.getBuffer = function (cb) {
  var self = this
  cb = dezalgo(once(cb))

  var buffer
  if (self.storage.buffer) {
    // Use the in-memory buffer (when possible) for better memory utilization
    var onDone = function () {
      buffer = self.storage.buffer.slice(self.offset, self.offset + self.length)
      cb(null, buffer)
    }
    if (self.done) onDone()
    else self.once('done', onDone)
  } else {
    buffer = new Buffer(self.length)
    var start = 0
    self.createReadStream()
      .on('data', function (chunk) {
        chunk.copy(buffer, start)
        start += chunk.length
      })
      .on('end', function () {
        cb(null, buffer)
      })
      .on('error', cb)
  }
}

File.prototype._checkDone = function () {
  var self = this
  self.done = self.pieces.every(function (piece) {
    return piece.verified
  })

  if (self.done) {
    process.nextTick(function () {
      self.emit('done')
    })
  }
}

inherits(Storage, EventEmitter)

/**
 * Storage for a torrent download. Handles the complexities of reading and writing
 * to pieces and files.
 *
 * @param {Object} parsedTorrent
 * @param {Object} opts
 */
function Storage (parsedTorrent, opts) {
  var self = this
  EventEmitter.call(self)
  if (!debug.enabled) self.setMaxListeners(0)
  if (!opts) opts = {}

  self.bitfield = new BitField(parsedTorrent.pieces.length)

  self.done = false
  self.closed = false
  self.readonly = true // writes are rejected until a caller clears this flag

  if (!opts.nobuffer) {
    self.buffer = new Buffer(parsedTorrent.length)
  }

  var pieceLength = self.pieceLength = parsedTorrent.pieceLength
  var lastPieceLength = parsedTorrent.lastPieceLength
  var numPieces = parsedTorrent.pieces.length

  self.pieces = parsedTorrent.pieces.map(function (hash, index) {
    var start = index * pieceLength
    var end = start + (index === numPieces - 1 ? lastPieceLength : pieceLength)

    // if we're backed by a buffer, the piece's buffer will reference the same memory.
    // otherwise, the piece's buffer will be lazily created on demand
    var buffer = (self.buffer ? self.buffer.slice(start, end) : end - start)

    var piece = new Piece(index, hash, buffer, !!opts.noVerify)
    piece.on('done', self._onPieceDone.bind(self, piece))
    return piece
  })

  self.files = parsedTorrent.files.map(function (fileObj) {
    var start = fileObj.offset
    var end = start + fileObj.length - 1

    var startPiece = start / pieceLength | 0
    var endPiece = end / pieceLength | 0
    var pieces = self.pieces.slice(startPiece, endPiece + 1)

    var file = new File(self, fileObj, pieces, pieceLength)
    file.on('done', self._onFileDone.bind(self, file))
    return file
  })
}
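
// Example (sketch, assuming `parsedTorrent` is a parsed torrent object such as
// one produced by the parse-torrent module):
//
//   var storage = new Storage(parsedTorrent)
//   storage.on('piece', function (piece) { console.log('verified piece', piece.index) })
//   storage.on('done', function () { console.log('all pieces verified') })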

Storage.BLOCK_LENGTH = BLOCK_LENGTH

Storage.prototype.load = function (streams, cb) {
  var self = this
  if (!Array.isArray(streams)) streams = [ streams ]
  cb = once(cb || function () {})

  var pieceIndex = 0
  var multistream = new MultiStream(streams)
  var blockstream = new BlockStream(self.pieceLength, { zeroPadding: false })

  multistream.on('error', onError)

  self.once('done', onDone)

  multistream
    .pipe(blockstream)
    .on('data', onData)
    .on('error', onError)

  function onData (piece) {
    var index = pieceIndex
    pieceIndex += 1

    var blockIndex = 0
    var s = new BlockStream(BLOCK_LENGTH, { zeroPadding: false })

    s.on('data', onBlockData)
    s.on('end', onBlockEnd)

    function onBlockData (block) {
      var offset = blockIndex * BLOCK_LENGTH
      blockIndex += 1
      self.writeBlock(index, offset, block)
    }

    function onBlockEnd () {
      blockCleanup()
    }

    function blockCleanup () {
      s.removeListener('data', onBlockData)
      s.removeListener('end', onBlockEnd)
    }

    s.end(piece)
  }

  function onError (err) {
    cleanup()
    cb(err)
  }

  function onDone () {
    cleanup()
    cb(null)
  }

  function cleanup () {
    multistream.removeListener('error', onError)
    blockstream.removeListener('data', onData)
    blockstream.removeListener('error', onError)
    self.removeListener('done', onDone)
  }
}
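
// Example (sketch): seed existing data by loading it from disk. Writes go through
// writeBlock(), so readonly must be cleared first; the fs require and file path
// are assumptions for illustration.
//
//   storage.readonly = false
//   storage.load(require('fs').createReadStream('/path/to/complete/file'), function (err) {
//     if (err) return console.error(err)
//     console.log('storage loaded and verified')
//   })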

Object.defineProperty(Storage.prototype, 'downloaded', {
  get: function () {
    var self = this
    return self.pieces.reduce(function (total, piece) {
      return total + (piece.verified ? piece.length : piece.blocksWritten * BLOCK_LENGTH)
    }, 0)
  }
})

/**
 * The number of missing pieces. Used to implement 'end game' mode.
 */
Object.defineProperty(Storage.prototype, 'numMissing', {
  get: function () {
    var self = this
    var numMissing = self.pieces.length
    for (var index = 0, len = self.pieces.length; index < len; index++) {
      // bitfield.get() returns a boolean; subtraction coerces it to 0 or 1
      numMissing -= self.bitfield.get(index)
    }
    return numMissing
  }
})

/**
 * Reads a block from a piece.
 *
 * @param {number}    index    piece index
 * @param {number}    offset   byte offset within piece
 * @param {number}    length   length in bytes to read from piece
 * @param {function}  cb
 */
Storage.prototype.readBlock = function (index, offset, length, cb) {
  var self = this
  cb = dezalgo(cb)
  var piece = self.pieces[index]
  if (!piece) return cb(new Error('invalid piece index ' + index))
  piece.readBlock(offset, length, cb)
}

/**
 * Writes a block to a piece.
 *
 * @param {number}  index    piece index
 * @param {number}  offset   byte offset within piece
 * @param {Buffer}  buffer   buffer to write
 * @param {function}  cb
 */
Storage.prototype.writeBlock = function (index, offset, buffer, cb) {
  var self = this
  if (!cb) cb = noop
  cb = dezalgo(cb)

  if (self.readonly) return cb(new Error('cannot write to readonly storage'))
  var piece = self.pieces[index]
  if (!piece) return cb(new Error('invalid piece index ' + index))
  piece.writeBlock(offset, buffer, cb)
}

/**
 * Reads a piece or a range of a piece.
 *
 * @param {number}   index         piece index
 * @param {Object=}  range         optional range within piece
 * @param {number}   range.offset  byte offset within piece
 * @param {number}   range.length  length in bytes to read from piece
 * @param {function} cb
 * @param {boolean=} force         optionally override the default check that prevents
 *                                 reading from an unverified piece
 */
Storage.prototype.read = function (index, range, cb, force) {
  var self = this

  if (typeof range === 'function') {
    force = cb
    cb = range
    range = null
  }
  cb = dezalgo(cb)

  var piece = self.pieces[index]
  if (!piece) {
    return cb(new Error('invalid piece index ' + index))
  }

  if (!piece.verified && !force) {
    return cb(new Error('Storage.read called on incomplete piece ' + index))
  }

  var offset = 0
  var length = piece.length

  if (range) {
    offset = range.offset || 0
    length = range.length || length
  }

  if (piece.buffer) {
    // shortcut for piece with static backing buffer
    return cb(null, piece.buffer.slice(offset, offset + length))
  }

  var blocks = []
  function readNextBlock () {
    if (length <= 0) return cb(null, Buffer.concat(blocks))

    var blockOffset = offset
    var blockLength = Math.min(BLOCK_LENGTH, length)

    offset += blockLength
    length -= blockLength

    self.readBlock(index, blockOffset, blockLength, function (err, block) {
      if (err) return cb(err)

      blocks.push(block)
      readNextBlock()
    })
  }

  readNextBlock()
}
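
// Example (sketch): read the first kilobyte of piece 0. The piece must already be
// verified unless force is passed, per the signature above.
//
//   storage.read(0, { offset: 0, length: 1024 }, function (err, buffer) {
//     if (err) return console.error(err)
//     console.log('read', buffer.length, 'bytes from piece 0')
//   })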

/**
 * Reserves a block from the given piece.
 *
 * @param {number}  index    piece index
 * @param {boolean} endGame  whether or not end game mode is enabled
 *
 * @returns {Object|null} reservation with offset and length, or null if no block is available
 */
Storage.prototype.reserveBlock = function (index, endGame) {
  var self = this
  var piece = self.pieces[index]
  if (!piece) return null

  return piece.reserveBlock(endGame)
}

/**
 * Cancels a previous block reservation from the given piece.
 *
 * @param {number}  index   piece index
 * @param {number}  offset  byte offset of block in piece
 *
 * @returns {boolean}
 */
Storage.prototype.cancelBlock = function (index, offset) {
  var self = this
  var piece = self.pieces[index]
  if (!piece) return false

  return piece.cancelBlock(offset)
}

/**
 * Removes and cleans up any backing store for this storage.
 * @param {function=} cb
 */
Storage.prototype.remove = function (cb) {
  if (cb) dezalgo(cb)(null)
}

/**
 * Closes the backing store for this storage.
 * @param {function=} cb
 */
Storage.prototype.close = function (cb) {
  var self = this
  self.closed = true
  if (cb) dezalgo(cb)(null)
}

//
// HELPER METHODS
//

Storage.prototype._onPieceDone = function (piece) {
  var self = this
  self.bitfield.set(piece.index)
  debug('piece done ' + piece.index + ' (' + self.numMissing + ' still missing)')
  self.emit('piece', piece)
}

Storage.prototype._onFileDone = function (file) {
  var self = this
  debug('file done ' + file.name)
  self.emit('file', file)

  self._checkDone()
}

Storage.prototype._checkDone = function () {
  var self = this

  if (!self.done && self.files.every(function (file) { return file.done })) {
    self.done = true
    self.emit('done')
  }
}