Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/marian-nmt/marian.git - Unnamed repository; edit this file 'description' to name the repository.
summary | refs | log | tree | commit | diff
path: root/src
diff options
context:
space:
mode:
author: Jon Clark <jonathac@microsoft.com> 2018-05-25 03:05:31 +0300
committer: Jon Clark <jonathac@microsoft.com> 2018-05-25 03:05:31 +0300
commit: 3e68deff5285ddd22ef3f97c54978e14d60b65e8 (patch)
tree: 80cc29156f19186a58053c05256c15af0583377d /src
parent: fd5ea9f6b74560eae362ec33fd266bb74fee825f (diff)
Pile of truly horrible hacks to *almost* get things building on windows
Diffstat (limited to 'src')
-rw-r--r--  src/common/config.h                       |  4
-rw-r--r--  src/common/config_parser.cpp              |  4
-rw-r--r--  src/common/config_parser.h                |  4
-rw-r--r--  src/common/file_stream.h                  | 28
-rw-r--r--  src/common/logging.cpp                    |  2
-rw-r--r--  src/common/utils.cpp                      |  4
-rw-r--r--  src/data/corpus.cpp                       |  1
-rw-r--r--  src/functional/shape.h                    |  2
-rw-r--r--  src/graph/node_operators_unary.h          |  2
-rw-r--r--  src/tensors/cpu/device.cpp                |  8
-rw-r--r--  src/training/graph_group_async.h          |  2
-rw-r--r--  src/training/graph_group_async_drop.cpp   |  3
-rw-r--r--  src/training/graph_group_singleton.h      |  2
-rw-r--r--  src/training/graph_group_sync.h           |  2
-rw-r--r--  src/translator/beam_search.h              |  2
15 files changed, 49 insertions, 21 deletions
diff --git a/src/common/config.h b/src/common/config.h
index f603af61..f1dd02c8 100644
--- a/src/common/config.h
+++ b/src/common/config.h
@@ -1,7 +1,7 @@
#pragma once
-#include <sys/ioctl.h>
-#include <unistd.h>
+//#include <sys/ioctl.h>
+//#include <unistd.h>
#include <boost/program_options.hpp>
#include "3rd_party/yaml-cpp/yaml.h"
diff --git a/src/common/config_parser.cpp b/src/common/config_parser.cpp
index 04f913b3..e455cc3e 100644
--- a/src/common/config_parser.cpp
+++ b/src/common/config_parser.cpp
@@ -1134,11 +1134,11 @@ std::vector<DeviceId> ConfigParser::getDevices() {
// does this make sense?
devices.push_back({ds.size(), DeviceType::gpu});
for(auto d : ds)
- devices.push_back({std::stoull(d), DeviceType::gpu});
+ devices.push_back({(size_t)std::stoull(d), DeviceType::gpu});
}
} else {
for(auto d : Split(devicesStr))
- devices.push_back({std::stoull(d), DeviceType::gpu});
+ devices.push_back({(size_t)std::stoull(d), DeviceType::gpu});
}
if(config_["cpu-threads"].as<size_t>() > 0) {
diff --git a/src/common/config_parser.h b/src/common/config_parser.h
index c51fb0ae..a9242e6a 100644
--- a/src/common/config_parser.h
+++ b/src/common/config_parser.h
@@ -2,8 +2,8 @@
#include <boost/program_options.hpp>
-#include <sys/ioctl.h>
-#include <unistd.h>
+//#include <sys/ioctl.h>
+//#include <unistd.h>
#include "3rd_party/yaml-cpp/yaml.h"
#include "common/definitions.h"
#include "common/file_stream.h"
diff --git a/src/common/file_stream.h b/src/common/file_stream.h
index 53d41faf..c5c4492f 100644
--- a/src/common/file_stream.h
+++ b/src/common/file_stream.h
@@ -1,9 +1,10 @@
#pragma once
+
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/iostreams/device/file_descriptor.hpp>
-#include <boost/iostreams/filter/gzip.hpp>
+//#include <boost/iostreams/filter/gzip.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <iostream>
@@ -21,11 +22,15 @@ private:
std::string name_;
int mkstemp_and_unlink(char* tmpl) {
+ ABORT_IF(true, "NYI");
+ return 0;
+ /*
int ret = mkstemp(tmpl);
if(unlink_ && ret != -1) {
ABORT_IF(unlink(tmpl), "Error while deleting '{}'", tmpl);
}
return ret;
+ */
}
int MakeTemp(const std::string& base) {
@@ -41,6 +46,9 @@ private:
}
void NormalizeTempPrefix(std::string& base) {
+ ABORT_IF(true, "NYI");
+ return;
+ /*
if(base.empty())
return;
if(base[base.size() - 1] == '/')
@@ -51,11 +59,13 @@ private:
return;
if(S_ISDIR(sb.st_mode))
base += '/';
+ */
}
public:
TemporaryFile(const std::string base = "/tmp/", bool earlyUnlink = true)
: unlink_(earlyUnlink) {
+ ABORT_IF(true, "NYI (see destructor)");
std::string baseTemp(base);
NormalizeTempPrefix(baseTemp);
fd_ = MakeTemp(baseTemp);
@@ -65,10 +75,12 @@ public:
if(fd_ != -1 && !unlink_) {
ABORT_IF(unlink(name_.c_str()), "Error while deleting '{}'", name_);
}
+ /*
if(fd_ != -1 && close(fd_)) {
std::cerr << "Could not close file " << fd_ << std::endl;
std::abort();
}
+ */
}
int getFileDescriptor() { return fd_; }
@@ -82,14 +94,15 @@ public:
ABORT_IF(
!boost::filesystem::exists(file_), "File '{}' does not exist", file);
- if(file_.extension() == ".gz")
- istream_.push(io::gzip_decompressor());
+ //if(file_.extension() == ".gz")
+ // istream_.push(io::gzip_decompressor());
istream_.push(ifstream_);
}
InputFileStream(TemporaryFile& tempfile)
: fds_(tempfile.getFileDescriptor(), io::never_close_handle) {
- lseek(tempfile.getFileDescriptor(), 0, SEEK_SET);
+ ABORT_IF(true, "NYI");
+ //lseek(tempfile.getFileDescriptor(), 0, SEEK_SET);
istream_.push(fds_, 1024);
}
@@ -122,14 +135,15 @@ public:
ABORT_IF(
!boost::filesystem::exists(file_), "File '{}' does not exist", file);
- if(file_.extension() == ".gz")
- ostream_.push(io::gzip_compressor());
+ //if(file_.extension() == ".gz")
+ // ostream_.push(io::gzip_compressor());
ostream_.push(ofstream_);
}
OutputFileStream(TemporaryFile& tempfile)
: fds_(tempfile.getFileDescriptor(), io::never_close_handle) {
- lseek(tempfile.getFileDescriptor(), 0, SEEK_SET);
+ ABORT_IF(true, "NYI");
+ //lseek(tempfile.getFileDescriptor(), 0, SEEK_SET);
ostream_.push(fds_, 1024);
}
diff --git a/src/common/logging.cpp b/src/common/logging.cpp
index 8ef5fada..e89cb5f7 100644
--- a/src/common/logging.cpp
+++ b/src/common/logging.cpp
@@ -37,7 +37,7 @@ bool setLoggingLevel(spdlog::logger& logger, std::string const level) {
logger.set_level(spdlog::level::info);
else if(level == "warn")
logger.set_level(spdlog::level::warn);
- else if(level == "err" or level == "error")
+ else if(level == "err" || level == "error")
logger.set_level(spdlog::level::err);
else if(level == "critical")
logger.set_level(spdlog::level::critical);
diff --git a/src/common/utils.cpp b/src/common/utils.cpp
index c69085e6..895f9062 100644
--- a/src/common/utils.cpp
+++ b/src/common/utils.cpp
@@ -64,6 +64,9 @@ std::string Join(const std::vector<std::string>& words,
}
std::string Exec(const std::string& cmd) {
+ ABORT_IF(true, "NYI");
+ return "";
+ /*
std::array<char, 128> buffer;
std::string result;
std::shared_ptr<std::FILE> pipe(popen(cmd.c_str(), "r"), pclose);
@@ -75,4 +78,5 @@ std::string Exec(const std::string& cmd) {
result += buffer.data();
}
return result;
+ */
}
diff --git a/src/data/corpus.cpp b/src/data/corpus.cpp
index e8dcfff0..7d4f6488 100644
--- a/src/data/corpus.cpp
+++ b/src/data/corpus.cpp
@@ -1,4 +1,5 @@
#include <random>
+#include <numeric>
#include "data/corpus.h"
diff --git a/src/functional/shape.h b/src/functional/shape.h
index 3212a3ed..62e51f51 100644
--- a/src/functional/shape.h
+++ b/src/functional/shape.h
@@ -36,7 +36,7 @@ struct ConstantShape {
bstride_(shape.bstride_),
elements_(shape.elements_) {}
- ConstantShape(const Shape& shape) {
+ ConstantShape(const marian::Shape& shape) {
size_t filled = shape.size();
ABORT_IF(filled > N,
diff --git a/src/graph/node_operators_unary.h b/src/graph/node_operators_unary.h
index 273adf44..e14f6546 100644
--- a/src/graph/node_operators_unary.h
+++ b/src/graph/node_operators_unary.h
@@ -293,7 +293,7 @@ struct PReLUNodeOp : public UnaryNodeOp {
}
private:
- float alpha_{0.01};
+ float alpha_{0.01f};
};
/**
diff --git a/src/tensors/cpu/device.cpp b/src/tensors/cpu/device.cpp
index 985b00f5..7d0d9feb 100644
--- a/src/tensors/cpu/device.cpp
+++ b/src/tensors/cpu/device.cpp
@@ -1,6 +1,9 @@
#include "tensors/device.h"
#include <iostream>
+//#if DOZE
+#include <malloc.h>
+
#include <stdlib.h>
namespace marian {
@@ -12,6 +15,11 @@ Device::~Device() {
size_ = 0;
}
+// #if DOZE
+void* aligned_alloc(size_t alignment, size_t size) {
+ return _aligned_malloc(size, alignment);
+}
+
void Device::reserve(size_t size) {
size = align(size);
ABORT_IF(size < size_ || size == 0,
diff --git a/src/training/graph_group_async.h b/src/training/graph_group_async.h
index b9568735..d0687b80 100644
--- a/src/training/graph_group_async.h
+++ b/src/training/graph_group_async.h
@@ -42,7 +42,7 @@ protected:
std::vector<Tensor> paramsAvg_;
std::vector<Ptr<TensorAllocator>> paramsAllocAvg_;
bool movingAvg_{false};
- float mvDecay_{1e-4};
+ float mvDecay_{1e-4f};
std::unique_ptr<ThreadPool> pool_;
diff --git a/src/training/graph_group_async_drop.cpp b/src/training/graph_group_async_drop.cpp
index 5ab2b46a..dd56ee21 100644
--- a/src/training/graph_group_async_drop.cpp
+++ b/src/training/graph_group_async_drop.cpp
@@ -70,9 +70,10 @@ void AsyncGraphGroupDrop::fetchParams(Tensor oldParams,
pos += shardSize_;
}
+ /* OMG BAD
for(auto&& t : threads) {
t.join();
- }
+ }*/
fetchStep_[device_id]++;
}
diff --git a/src/training/graph_group_singleton.h b/src/training/graph_group_singleton.h
index 5562d302..73cdaaac 100644
--- a/src/training/graph_group_singleton.h
+++ b/src/training/graph_group_singleton.h
@@ -21,7 +21,7 @@ private:
Ptr<ExpressionGraph> mvAvgGraph_;
bool mvAvg_{false};
- float mvDecay_{1e-4};
+ float mvDecay_{1e-4f};
void updateMovingAverage(Tensor mvAvgParams, Tensor params, size_t batches);
diff --git a/src/training/graph_group_sync.h b/src/training/graph_group_sync.h
index dac7e402..ecc1a8e8 100644
--- a/src/training/graph_group_sync.h
+++ b/src/training/graph_group_sync.h
@@ -29,7 +29,7 @@ private:
std::vector<Tensor> paramsAvg_;
std::vector<Ptr<TensorAllocator>> paramsAllocAvg_;
bool movingAvg_{false};
- float mvDecay_{1e-4};
+ float mvDecay_{1e-4f};
size_t delay_{1};
void updateMovingAverage(Tensor paramsAvg, Tensor params, size_t batches);
diff --git a/src/translator/beam_search.h b/src/translator/beam_search.h
index faf21c73..e476c13a 100644
--- a/src/translator/beam_search.h
+++ b/src/translator/beam_search.h
@@ -27,7 +27,7 @@ public:
? options_->get<size_t>("beam-size")
: 3) {}
- Beams toHyps(const std::vector<uint> keys,
+ Beams toHyps(const std::vector<uint32_t> keys,
const std::vector<float> costs,
size_t vocabSize,
const Beams& beams,