"""
Data loading for the multi-word token (MWT) expansion seq2seq model.
"""

import logging
import os
import random
from collections import Counter

import numpy as np
import torch

import stanza.models.common.seq2seq_constant as constant
from stanza.models.common.data import map_to_ids, get_long_tensor, get_float_tensor, sort_all
from stanza.models.mwt.vocab import Vocab
from stanza.models.common.doc import Document

logger = logging.getLogger('stanza')

class DataLoader:
    def __init__(self, doc, batch_size, args, vocab=None, evaluation=False):
        self.batch_size = batch_size
        self.args = args
        self.eval = evaluation
        self.shuffled = not self.eval
        self.doc = doc

        data = self.load_doc(self.doc, evaluation=self.eval)

        # handle vocab
        if vocab is None:
            self.vocab = self.init_vocab(data)
        else:
            self.vocab = vocab

        # filter and sample data
        if args.get('sample_train', 1.0) < 1.0 and not self.eval:
            keep = int(args['sample_train'] * len(data))
            data = random.sample(data, keep)
            logger.debug("Subsample training set with rate {:g}".format(args['sample_train']))

        data = self.preprocess(data, self.vocab, args)

        # shuffle for training
        if self.shuffled:
            indices = list(range(len(data)))
            random.shuffle(indices)
            data = [data[i] for i in indices]
        self.num_examples = len(data)

        # chunk into batches
        data = [data[i:i+batch_size] for i in range(0, len(data), batch_size)]
        self.data = data
        logger.debug("{} batches created.".format(len(data)))

    def init_vocab(self, data):
        # a vocab can only be built from training data; for evaluation it must be provided
        assert not self.eval, "Vocab must be provided for evaluation."
        vocab = Vocab(data, self.args['shorthand'])
        return vocab

    def preprocess(self, data, vocab, args):
        processed = []
        for d in data:
            # wrap the source character sequence with SOS/EOS markers
            src = list(d[0])
            src = [constant.SOS] + src + [constant.EOS]
            if self.eval:
                tgt = src # as a placeholder
            else:
                tgt = list(d[1])
            # map to ids only after the placeholder assignment above, so the
            # target is never id-mapped twice in evaluation mode
            src = vocab.map(src)
            tgt_in = vocab.map([constant.SOS] + tgt)
            tgt_out = vocab.map(tgt + [constant.EOS])
            processed += [[src, tgt_in, tgt_out]]
        return processed

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        """ Get a batch with index. """
        if not isinstance(key, int):
            raise TypeError
        if key < 0 or key >= len(self.data):
            raise IndexError
        batch = self.data[key]
        batch_size = len(batch)
        batch = list(zip(*batch))
        assert len(batch) == 3

        # sort all fields by lens for easy RNN operations
        lens = [len(x) for x in batch[0]]
        batch, orig_idx = sort_all(batch, lens)

        # convert to tensors
        src = batch[0]
        src = get_long_tensor(src, batch_size)
        src_mask = torch.eq(src, constant.PAD_ID)
        tgt_in = get_long_tensor(batch[1], batch_size)
        tgt_out = get_long_tensor(batch[2], batch_size)
        assert tgt_in.size(1) == tgt_out.size(1), \
            "Target input and output sequence sizes do not match."
        return (src, src_mask, tgt_in, tgt_out, orig_idx)

    def __iter__(self):
        for i in range(self.__len__()):
            yield self.__getitem__(i)

    def load_doc(self, doc, evaluation=False):
        data = doc.get_mwt_expansions(evaluation)
        if evaluation:
            data = [[e] for e in data]
        return data
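
# A minimal usage sketch, assuming an already-annotated `Document` containing
# MWTs (`train_doc` below) and an `args` dict as produced by the MWT training
# script; the 'en_ewt' shorthand and batch size are illustrative, not defaults:
#
#     args = {'shorthand': 'en_ewt', 'sample_train': 1.0}
#     train_batch = DataLoader(train_doc, batch_size=32, args=args)
#     for src, src_mask, tgt_in, tgt_out, orig_idx in train_batch:
#         ...  # feed the batch to the seq2seq expander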