
github.com/moses-smt/nplm.git
author     Rico Sennrich <rico.sennrich@gmx.ch>  2014-11-17 14:01:00 +0300
committer  Rico Sennrich <rico.sennrich@gmx.ch>  2014-11-17 14:01:00 +0300
commit     337ead1bbf4d4f1e6297a6c584a4718f8ba90173 (patch)
tree       cf5849f5b58512203abd1e96a9f612d2ee329e97
parent     f757a6295a393a7edd4f6f300350d264a89adef7 (diff)

re-apply 31412f (osx compile)

-rw-r--r--  src/prepareNeuralLM.cpp     4
-rw-r--r--  src/trainNeuralNetwork.cpp  8
2 files changed, 6 insertions, 6 deletions
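
The change itself is mechanical: each unqualified use of uniform_int_distribution is replaced with the fully qualified boost::random::uniform_int_distribution, so the name no longer collides with std::uniform_int_distribution when compiling on OS X. The following is a minimal sketch of that kind of ambiguity, not the actual nplm code, and it assumes (as the diff suggests) that both the std and boost::random namespaces end up in scope in these translation units:

    #include <random>                                     // std::uniform_int_distribution (C++11)
    #include <boost/random/mersenne_twister.hpp>          // boost::random::mt19937
    #include <boost/random/uniform_int_distribution.hpp>  // boost::random::uniform_int_distribution

    using namespace std;
    using namespace boost::random;

    int main() {
        boost::random::mt19937 rng(0);

        // Unqualified, the template name matches both std:: and boost::random::
        // on toolchains whose standard library ships <random> (e.g. clang/libc++
        // on OS X), so this line would be rejected as ambiguous:
        //   int j = uniform_int_distribution<int>(0, 9)(rng);

        // Fully qualifying the name, as the patch does, compiles everywhere:
        int j = boost::random::uniform_int_distribution<int>(0, 9)(rng);
        return j;  // j is in [0, 9]
    }
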
diff --git a/src/prepareNeuralLM.cpp b/src/prepareNeuralLM.cpp
index 13a534a..adedc72 100644
--- a/src/prepareNeuralLM.cpp
+++ b/src/prepareNeuralLM.cpp
@@ -240,7 +240,7 @@ void writeMmapNgrams(const string &input_filename,
if (i %500000 == 0) {
cerr<<"Shuffled "<<num_tokens-1<<" instances...";
}
- data_size_t j = uniform_int_distribution<data_size_t>(0, i-1)(rng);
+ data_size_t j = boost::random::uniform_int_distribution<data_size_t>(0, i-1)(rng);
for (int k=0;k<ngram_size;k++) {
int temp_val = temp.at(i*ngram_size+k);
temp.at(i*ngram_size+k) =
@@ -263,7 +263,7 @@ void writeMmapNgrams(const string &input_filename,
if (i %500000 == 0) {
cerr<<"Shuffled "<<num_tokens-1<<" instances...";
}
- data_size_t j = uniform_int_distribution<data_size_t>(0, i-1)(rng);
+ data_size_t j = boost::random::uniform_int_distribution<data_size_t>(0, i-1)(rng);
for (int k=0;k<ngram_size;k++) {
int temp_val = mMapVec->at(i*ngram_size+k);
mMapVec->at(i*ngram_size+k) =
diff --git a/src/trainNeuralNetwork.cpp b/src/trainNeuralNetwork.cpp
index e231c20..a4cac12 100644
--- a/src/trainNeuralNetwork.cpp
+++ b/src/trainNeuralNetwork.cpp
@@ -312,7 +312,7 @@ int main(int argc, char** argv)
if (i %500000 == 0) {
cerr<<"Shuffled "<<training_data_size-1<<" instances...";
}
- data_size_t j = uniform_int_distribution<data_size_t>(0, i-1)(rng);
+ data_size_t j = boost::random::uniform_int_distribution<data_size_t>(0, i-1)(rng);
for (int k=0;k<myParam.ngram_size;k++) {
int temp_val = training_data_flat_mmap->at(i*myParam.ngram_size+k);
training_data_flat_mmap->at(i*myParam.ngram_size+k) =
@@ -326,7 +326,7 @@ int main(int argc, char** argv)
if (i %500000 == 0) {
cerr<<"Shuffled "<<training_data_size-1<<" instances...";
}
- data_size_t j = uniform_int_distribution<data_size_t>(0, i-1)(rng);
+ data_size_t j = boost::random::uniform_int_distribution<data_size_t>(0, i-1)(rng);
for (int k=0;k<myParam.ngram_size;k++) {
int temp_val = temp.at(i*myParam.ngram_size+k);
temp.at(i*myParam.ngram_size+k) =
@@ -348,7 +348,7 @@ int main(int argc, char** argv)
if (i %500000 == 0) {
cerr<<"Shuffled "<<training_data_size-1<<" instances...";
}
- data_size_t j = uniform_int_distribution<data_size_t>(0, i-1)(rng);
+ data_size_t j = boost::random::uniform_int_distribution<data_size_t>(0, i-1)(rng);
for (int k=0;k<myParam.ngram_size;k++) {
int temp_val = training_data_flat_mmap->at(i*myParam.ngram_size+k);
training_data_flat_mmap->at(i*myParam.ngram_size+k) =
@@ -396,7 +396,7 @@ int main(int argc, char** argv)
// Randomly shuffle training data to improve learning
for (data_size_t i=training_data_size-1; i>0; i--)
{
- data_size_t j = uniform_int_distribution<data_size_t>(0, i-1)(rng);
+ data_size_t j = boost::random::uniform_int_distribution<data_size_t>(0, i-1)(rng);
training_data.col(i).swap(training_data.col(j));
}
}
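
Five of the six hunks touch the same backwards shuffle over flat n-gram storage (the final hunk swaps Eigen matrix columns instead). For reference, a self-contained sketch of that pattern with the qualified Boost distribution is below; data_size_t, ngram_size and the element-wise swap mirror the diff, while a plain std::vector stands in for the memory-mapped containers of the real code:

    #include <utility>
    #include <vector>
    #include <boost/random/mersenne_twister.hpp>
    #include <boost/random/uniform_int_distribution.hpp>

    typedef unsigned long long data_size_t;  // assumption: some wide unsigned index type

    // Walk the records backwards and swap record i with a randomly chosen
    // earlier record, one value at a time, ngram_size values per record.
    void shuffle_ngrams(std::vector<int> &data, int ngram_size,
                        data_size_t num_records, boost::random::mt19937 &rng)
    {
        for (data_size_t i = num_records - 1; i > 0; i--) {
            data_size_t j =
                boost::random::uniform_int_distribution<data_size_t>(0, i - 1)(rng);
            for (int k = 0; k < ngram_size; k++)
                std::swap(data.at(i * ngram_size + k),
                          data.at(j * ngram_size + k));
        }
    }

As in the original loops, j is drawn from [0, i-1], so record i is never left in place at step i.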