github.com/mapsme/omim.git
author    Alex Zolotarev <deathbaba@gmail.com>  2010-12-05 19:24:16 +0300
committer Alex Zolotarev <alex@maps.me>  2015-09-22 22:33:57 +0300
commit    d6e12b7ce4bcbf0ccd1c07eb25de143422913c34 (patch)
tree      a7e910c330ce4da9b4f2d8be76067adece2561c4 /indexer

One Month In Minsk. Made in Belarus.
Diffstat (limited to 'indexer')
-rw-r--r--  indexer/cell_coverer.hpp | 293
-rw-r--r--  indexer/cell_id.hpp | 114
-rw-r--r--  indexer/classif_routine.cpp | 59
-rw-r--r--  indexer/classif_routine.hpp | 10
-rw-r--r--  indexer/classificator.cpp | 530
-rw-r--r--  indexer/classificator.hpp | 245
-rw-r--r--  indexer/country.cpp | 144
-rw-r--r--  indexer/country.hpp | 84
-rw-r--r--  indexer/covering.cpp | 132
-rw-r--r--  indexer/covering.hpp | 18
-rw-r--r--  indexer/data_header.cpp | 47
-rw-r--r--  indexer/data_header.hpp | 68
-rw-r--r--  indexer/data_header_reader.cpp | 47
-rw-r--r--  indexer/data_header_reader.hpp | 14
-rw-r--r--  indexer/defines.hpp | 31
-rw-r--r--  indexer/drawing_rule_def.cpp | 36
-rw-r--r--  indexer/drawing_rule_def.hpp | 36
-rw-r--r--  indexer/drawing_rules.cpp | 1030
-rw-r--r--  indexer/drawing_rules.hpp | 121
-rw-r--r--  indexer/feature.cpp | 292
-rw-r--r--  indexer/feature.hpp | 231
-rw-r--r--  indexer/feature.pb.cc | 572
-rw-r--r--  indexer/feature.pb.h | 461
-rw-r--r--  indexer/feature.proto | 16
-rw-r--r--  indexer/feature_processor.hpp | 49
-rw-r--r--  indexer/feature_visibility.cpp | 229
-rw-r--r--  indexer/feature_visibility.hpp | 23
-rw-r--r--  indexer/features_vector.hpp | 53
-rw-r--r--  indexer/file_reader_stream.hpp | 26
-rw-r--r--  indexer/file_writer_stream.hpp | 19
-rw-r--r--  indexer/index.hpp | 224
-rw-r--r--  indexer/index_builder.cpp | 39
-rw-r--r--  indexer/index_builder.hpp | 28
-rw-r--r--  indexer/indexer.pro | 73
-rw-r--r--  indexer/indexer_tests/cell_coverer_test.cpp | 162
-rw-r--r--  indexer/indexer_tests/cell_covering_visualize_test.cpp | 115
-rw-r--r--  indexer/indexer_tests/cell_id_test.cpp | 53
-rw-r--r--  indexer/indexer_tests/country_test.cpp | 38
-rw-r--r--  indexer/indexer_tests/data_header_test.cpp | 41
-rw-r--r--  indexer/indexer_tests/feature_bucketer_test.cpp | 56
-rw-r--r--  indexer/indexer_tests/feature_test.cpp | 98
-rw-r--r--  indexer/indexer_tests/index_builder_test.cpp | 29
-rw-r--r--  indexer/indexer_tests/index_test.cpp | 22
-rw-r--r--  indexer/indexer_tests/indexer_tests.pro | 37
-rw-r--r--  indexer/indexer_tests/interval_index_test.cpp | 157
-rw-r--r--  indexer/indexer_tests/mercator_test.cpp | 49
-rw-r--r--  indexer/indexer_tests/point_to_int64_test.cpp | 48
-rw-r--r--  indexer/indexer_tests/sort_and_merge_intervals_test.cpp | 62
-rw-r--r--  indexer/indexer_tests/test_type.cpp | 57
-rw-r--r--  indexer/indexer_tool/data_cache_file.hpp | 244
-rw-r--r--  indexer/indexer_tool/data_generator.cpp | 157
-rw-r--r--  indexer/indexer_tool/data_generator.hpp | 8
-rw-r--r--  indexer/indexer_tool/feature_bucketer.hpp | 126
-rw-r--r--  indexer/indexer_tool/feature_generator.cpp | 304
-rw-r--r--  indexer/indexer_tool/feature_generator.hpp | 45
-rw-r--r--  indexer/indexer_tool/feature_sorter.cpp | 110
-rw-r--r--  indexer/indexer_tool/feature_sorter.hpp | 9
-rw-r--r--  indexer/indexer_tool/first_pass_parser.hpp | 106
-rw-r--r--  indexer/indexer_tool/grid_generator.cpp | 192
-rw-r--r--  indexer/indexer_tool/grid_generator.hpp | 8
-rw-r--r--  indexer/indexer_tool/indexer_tool.cpp | 159
-rw-r--r--  indexer/indexer_tool/indexer_tool.pro | 33
-rw-r--r--  indexer/indexer_tool/osm_element.hpp | 421
-rw-r--r--  indexer/indexer_tool/tesselator.cpp | 72
-rw-r--r--  indexer/indexer_tool/update_generator.cpp | 217
-rw-r--r--  indexer/indexer_tool/update_generator.hpp | 8
-rw-r--r--  indexer/interval_index.hpp | 116
-rw-r--r--  indexer/interval_index_builder.hpp | 137
-rw-r--r--  indexer/mercator.cpp | 6
-rw-r--r--  indexer/mercator.hpp | 33
-rw-r--r--  indexer/osm2type.cpp | 646
-rw-r--r--  indexer/osm2type.hpp | 20
-rw-r--r--  indexer/osm_decl.cpp | 88
-rw-r--r--  indexer/osm_decl.hpp | 114
-rw-r--r--  indexer/point_to_int64.cpp | 58
-rw-r--r--  indexer/scale_index.cpp | 4
-rw-r--r--  indexer/scale_index.hpp | 84
-rw-r--r--  indexer/scale_index_builder.hpp | 167
-rw-r--r--  indexer/scales.cpp | 54
-rw-r--r--  indexer/scales.hpp | 14
-rw-r--r--  indexer/std_serialization.hpp | 199
-rw-r--r--  indexer/tree_structure.hpp | 103
-rw-r--r--  indexer/xml_element.cpp | 82
-rw-r--r--  indexer/xml_element.hpp | 48
84 files changed, 10610 insertions, 0 deletions
diff --git a/indexer/cell_coverer.hpp b/indexer/cell_coverer.hpp
new file mode 100644
index 0000000000..8afc068e53
--- /dev/null
+++ b/indexer/cell_coverer.hpp
@@ -0,0 +1,293 @@
+#pragma once
+
+#include "cell_id.hpp"
+
+#include "../std/queue.hpp"
+#include "../std/vector.hpp"
+
+inline bool IntersectsHoriz(CoordT x1, CoordT y1, CoordT x2, CoordT y2,
+ CoordT y, CoordT l, CoordT r)
+{
+ CoordT d = (y1 - y) * (y2 - y);
+ if (d > 0) return false;
+
+ CoordT x = x1 + (x2 - x1) * (y - y1) / (y2 - y1);
+
+ return ((l - x) * (r - x) <= 0);
+}
+
+inline bool IntersectsVert(CoordT x1, CoordT y1, CoordT x2, CoordT y2,
+ CoordT x, CoordT b, CoordT t)
+{
+ CoordT d = (x1 - x) * (x2 - x);
+ if (d > 0) return false;
+
+ CoordT y = y1 + (y2 - y1) * (x - x1) / (x2 - x1);
+
+ return ((b - y) * (t - y) <= 0);
+}
+
+template <typename BoundsT, typename CellIdT>
+inline bool CellIntersects(vector<CoordPointT> const & polyLine, CellIdT id)
+{
+ CoordT minX, minY, maxX, maxY;
+ CellIdConverter<BoundsT, CellIdT>::GetCellBounds(id, minX, minY, maxX, maxY);
+ CoordPointT minPoint = make_pair(minX, minY);
+ CoordPointT maxPoint = make_pair(maxX, maxY);
+
+ for (size_t i = 0; i + 1 < polyLine.size(); ++i)
+ {
+ if (IntersectsHoriz(polyLine[i].first, polyLine[i].second,
+ polyLine[i + 1].first, polyLine[i + 1].second,
+ minPoint.second, minPoint.first, maxPoint.first)) return true;
+
+ if (IntersectsHoriz(polyLine[i].first, polyLine[i].second,
+ polyLine[i + 1].first, polyLine[i + 1].second,
+ maxPoint.second, minPoint.first, maxPoint.first)) return true;
+
+ if (IntersectsVert(polyLine[i].first, polyLine[i].second,
+ polyLine[i + 1].first, polyLine[i + 1].second,
+ minPoint.first, minPoint.second, maxPoint.second)) return true;
+
+ if (IntersectsVert(polyLine[i].first, polyLine[i].second,
+ polyLine[i + 1].first, polyLine[i + 1].second,
+ maxPoint.first, minPoint.second, maxPoint.second)) return true;
+ }
+
+ return false;
+}
+
+template <typename BoundsT, typename CellIdT>
+inline void SplitCell(vector<CoordPointT> const & polyLine, queue<CellIdT> & cellQueue)
+{
+ CellIdT id = cellQueue.front();
+ cellQueue.pop();
+
+ for (size_t i = 0; i < 4; ++i)
+ {
+ CellIdT child = id.Child(i);
+
+ if (CellIntersects<BoundsT>(polyLine, child))
+ {
+ cellQueue.push(child);
+ }
+ }
+}
+
+template <typename ItT>
+inline bool FindBounds(ItT begin, ItT end,
+ CoordT & minX, CoordT & minY, CoordT & maxX, CoordT & maxY)
+{
+ if (begin == end) return false;
+
+ minX = begin->first;
+ maxX = begin->first;
+ minY = begin->second;
+ maxY = begin->second;
+
+ for (ItT it = begin; it != end; ++it)
+ {
+ if (it->first < minX) minX = it->first;
+ if (it->first > maxX) maxX = it->first;
+ if (it->second < minY) minY = it->second;
+ if (it->second > maxY) maxY = it->second;
+ }
+
+ return true;
+}
+
+template <typename BoundsT, typename CellIdT>
+inline CellIdT CoverPoint(CoordPointT const & point)
+{
+ return CellIdConverter<BoundsT, CellIdT>::ToCellId(point.first, point.second);
+}
+
+template <typename BoundsT, typename CellIdT>
+inline void CoverPolyLine(vector< CoordPointT > const & polyLine, size_t cellDepth,
+ vector<CellIdT> & cells)
+{
+ CoordT minX = 0, minY = 0, maxX = 0, maxY = 0;
+ FindBounds(polyLine.begin(), polyLine.end(), minX, minY, maxX, maxY);
+
+ CellIdT commonCell =
+ CellIdConverter<BoundsT, CellIdT>::Cover2PointsWithCell(minX, minY, maxX, maxY);
+
+ queue<CellIdT> cellQueue;
+ cellQueue.push(commonCell);
+ while (cellQueue.front().Level() < static_cast<int>(cellDepth)) // cellQueue.size() < cells_count
+ {
+ SplitCell<BoundsT>(polyLine, cellQueue);
+ }
+
+ while (!cellQueue.empty())
+ {
+ cells.push_back(cellQueue.front());
+ cellQueue.pop();
+ }
+}
+
+template <typename BoundsT, typename CellIdT>
+inline void SplitRectCell(CellIdT id,
+ CoordT minX, CoordT minY,
+ CoordT maxX, CoordT maxY,
+ vector<CellIdT> & result)
+{
+ for (size_t i = 0; i < 4; ++i)
+ {
+ CellIdT child = id.Child(i);
+ CoordT minCellX, minCellY, maxCellX, maxCellY;
+ CellIdConverter<BoundsT, CellIdT>::GetCellBounds(child, minCellX, minCellY, maxCellX, maxCellY);
+ if (!((maxX < minCellX) || (minX > maxCellX) || (maxY < minCellY) || (minY > maxCellY)))
+ result.push_back(child);
+ }
+}
+
+template <typename BoundsT, typename CellIdT>
+inline void CoverRect(CoordT minX, CoordT minY,
+ CoordT maxX, CoordT maxY,
+ size_t cells_count,
+ vector<CellIdT> & cells)
+{
+ ASSERT_LESS(minX, maxX, ());
+ ASSERT_LESS(minY, maxY, ());
+
+ if (minX < BoundsT::minX) minX = BoundsT::minX;
+ if (minY < BoundsT::minY) minY = BoundsT::minY;
+ if (maxX > BoundsT::maxX) maxX = BoundsT::maxX;
+ if (maxY > BoundsT::maxY) maxY = BoundsT::maxY;
+
+ CellIdT commonCell =
+ CellIdConverter<BoundsT, CellIdT>::Cover2PointsWithCell(minX, minY, maxX, maxY);
+
+ vector<CellIdT> result;
+
+ queue<CellIdT> cellQueue;
+ cellQueue.push(commonCell);
+
+ while (!cellQueue.empty() && cellQueue.size() + result.size() < cells_count)
+ {
+ CellIdT id = cellQueue.front();
+ cellQueue.pop();
+
+ if (id.Level() == CellIdT::DEPTH_LEVELS - 1)
+ {
+ result.push_back(id);
+ break;
+ }
+
+ vector<CellIdT> children;
+ SplitRectCell<BoundsT>(id, minX, minY, maxX, maxY, children);
+
+ // Children shouldn't be empty, but if they are, ignore this cell id in release builds.
+ ASSERT(!children.empty(), (id, minX, minY, maxX, maxY));
+ if (children.empty())
+ {
+ result.push_back(id);
+ continue;
+ }
+
+ if (cellQueue.size() + result.size() + children.size() <= cells_count)
+ {
+ for (size_t i = 0; i < children.size(); ++i)
+ cellQueue.push(children[i]);
+ }
+ else
+ result.push_back(id);
+ }
+
+ for (; !cellQueue.empty(); cellQueue.pop())
+ result.push_back(cellQueue.front());
+
+ for (size_t i = 0; i < result.size(); ++i)
+ {
+ CellIdT id = result[i];
+ while (id.Level() < CellIdT::DEPTH_LEVELS - 1)
+ {
+ vector<CellIdT> children;
+ SplitRectCell<BoundsT>(id, minX, minY, maxX, maxY, children);
+ if (children.size() == 1)
+ id = children[0];
+ else
+ break;
+ }
+ result[i] = id;
+ }
+
+ ASSERT_LESS_OR_EQUAL(result.size(), cells_count, (minX, minY, maxX, maxY));
+ cells.insert(cells.end(), result.begin(), result.end());
+}
+
+/*
+template <typename BoundsT, typename CellIdT>
+inline void CoverPolygon(vector<CoordPointT> const & polyLine, size_t cellDepth,
+ vector<CellIdT> & cells)
+{
+ CoverPolyLine<BoundsT>(polyLine, cellDepth, cells);
+ if (cells.size() < 8)
+ return;
+
+ CellIdT minX = CellX(cells[0]), minY = CellY(cells[0]),
+ maxX = CellX(cells[0]), maxY = CellY(cells[0]);
+ for (size_t i = 1; i < cells.size(); ++i)
+ {
+ CellIdT cellX = CellX(cells[i]);
+ CellIdT cellY = CellY(cells[i]);
+
+ if (cellX.m_V < minX.m_V) minX.m_V = cellX.m_V;
+ if (cellY.m_V < minY.m_V) minY.m_V = cellY.m_V;
+ if (cellX.m_V > maxX.m_V) maxX.m_V = cellX.m_V;
+ if (cellY.m_V > maxY.m_V) maxY.m_V = cellY.m_V;
+ }
+
+ vector< vector<bool> > covered;
+ covered.resize(static_cast<size_t>(maxY.m_V - minY.m_V + 3));
+ for (size_t i = 0; i < covered.size(); ++i)
+ {
+ covered[i].resize(static_cast<size_t>(maxX.m_V - minX.m_V + 3));
+ }
+
+ vector< vector<bool> > outer = covered;
+
+ for (size_t i = 0; i < cells.size(); ++i)
+ {
+ size_t offsetX = static_cast<size_t>(CellX(cells[i]).m_V - minX.m_V + 1);
+ size_t offsetY = static_cast<size_t>(CellY(cells[i]).m_V - minY.m_V + 1);
+
+ covered[offsetY][offsetX] = true;
+ }
+
+ queue< pair<size_t, size_t> > flood;
+ size_t outerY = outer.size();
+ size_t outerX = outer[0].size();
+ flood.push(make_pair(0, 0));
+
+ while (!flood.empty())
+ {
+ size_t i = flood.front().first;
+ size_t j = flood.front().second;
+ flood.pop();
+ outer[i][j] = true;
+ if ((j > 0) && (!outer[i][j - 1]) && (!covered[i][j - 1]))
+ flood.push(make_pair(i, j - 1));
+ if ((i > 0) && (!outer[i - 1][j]) && (!covered[i - 1][j]))
+ flood.push(make_pair(i - 1, j));
+ if ((j < outerX - 1) && (!outer[i][j + 1]) && (!covered[i][j + 1]))
+ flood.push(make_pair(i, j + 1));
+ if ((i < outerY - 1) && (!outer[i + 1][j]) && (!covered[i + 1][j]))
+ flood.push(make_pair(i + 1, j));
+ }
+
+ cells.clear();
+
+ for (size_t i = 0; i < outer.size(); ++i)
+ {
+ for (size_t j = 0; j < outer[i].size(); ++j)
+ {
+ if (!outer[i][j])
+ {
+ cells.push_back(CellFromCellXY(cellDepth, minX.m_V + j - 1, minY.m_V + i - 1));
+ }
+ }
+ }
+}
+*/
diff --git a/indexer/cell_id.hpp b/indexer/cell_id.hpp
new file mode 100644
index 0000000000..b43e744d46
--- /dev/null
+++ b/indexer/cell_id.hpp
@@ -0,0 +1,114 @@
+#pragma once
+#include "mercator.hpp"
+
+#include "../geometry/cellid.hpp"
+#include "../geometry/rect2d.hpp"
+
+#include "../base/base.hpp"
+#include "../base/assert.hpp"
+
+#include "../std/utility.hpp"
+#include "../std/string.hpp"
+
+typedef double CoordT;
+typedef pair<CoordT, CoordT> CoordPointT;
+
+typedef m2::CellId<19> RectId;
+
+int64_t PointToInt64(CoordT x, CoordT y);
+inline int64_t PointToInt64(CoordPointT const & pt) { return PointToInt64(pt.first, pt.second); }
+CoordPointT Int64ToPoint(int64_t v);
+
+pair<int64_t, int64_t> RectToInt64(m2::RectD const & r);
+m2::RectD Int64ToRect(pair<int64_t, int64_t> const & p);
+
+template <int MinX, int MinY, int MaxX, int MaxY>
+struct Bounds
+{
+ enum
+ {
+ minX = MinX,
+ maxX = MaxX,
+ minY = MinY,
+ maxY = MaxY
+ };
+};
+
+//typedef Bounds<-180, -90, 180, 90> OrthoBounds;
+
+template <typename BoundsT, typename CellIdT>
+class CellIdConverter
+{
+public:
+
+ static CoordT XToCellIdX(CoordT x)
+ {
+ return (x - BoundsT::minX) / StepX();
+ }
+ static CoordT YToCellIdY(CoordT y)
+ {
+ return (y - BoundsT::minY) / StepY();
+ }
+
+ static CellIdT ToCellId(CoordT x, CoordT y)
+ {
+ uint32_t const ix = static_cast<uint32_t>(XToCellIdX(x));
+ uint32_t const iy = static_cast<uint32_t>(YToCellIdY(y));
+ CellIdT id = CellIdT::FromXY(ix, iy);
+#if 0 // DEBUG
+ pair<uint32_t, uint32_t> ixy = id.XY();
+ ASSERT(Abs(ixy.first - ix) <= 1, (x, y, id, ixy));
+ ASSERT(Abs(ixy.second - iy) <= 1, (x, y, id, ixy));
+ CoordT minX, minY, maxX, maxY;
+ GetCellBounds(id, minX, minY, maxX, maxY);
+ ASSERT(minX <= x && x <= maxX, (x, y, id, minX, minY, maxX, maxY));
+ ASSERT(minY <= y && y <= maxY, (x, y, id, minX, minY, maxX, maxY));
+#endif
+ return id;
+ }
+
+ static CellIdT Cover2PointsWithCell(CoordT x1, CoordT y1, CoordT x2, CoordT y2)
+ {
+ CellIdT id1 = ToCellId(x1, y1);
+ CellIdT id2 = ToCellId(x2, y2);
+ while (id1 != id2)
+ {
+ id1 = id1.Parent();
+ id2 = id2.Parent();
+ }
+#if 0 // DEBUG
+ CoordT minX, minY, maxX, maxY;
+ GetCellBounds(id1, minX, minY, maxX, maxY);
+ ASSERT(minX <= x1 && x1 <= maxX, (x1, y1, x2, y2, id1, minX, minY, maxX, maxY));
+ ASSERT(minX <= x2 && x2 <= maxX, (x1, y1, x2, y2, id1, minX, minY, maxX, maxY));
+ ASSERT(minY <= y1 && y1 <= maxY, (x1, y1, x2, y2, id1, minX, minY, maxX, maxY));
+ ASSERT(minY <= y2 && y2 <= maxY, (x1, y1, x2, y2, id1, minX, minY, maxX, maxY));
+#endif
+ return id1;
+ }
+
+ static CoordPointT FromCellId(CellIdT id)
+ {
+ pair<uint32_t, uint32_t> const xy = id.XY();
+ return CoordPointT(xy.first * StepX() + BoundsT::minX, xy.second * StepY() + BoundsT::minY);
+ }
+
+ static void GetCellBounds(CellIdT id, CoordT & minX, CoordT & minY, CoordT & maxX, CoordT & maxY)
+ {
+ pair<uint32_t, uint32_t> const xy = id.XY();
+ uint32_t const r = id.Radius();
+ minX = (xy.first - r) * StepX() + BoundsT::minX;
+ maxX = (xy.first + r) * StepX() + BoundsT::minX;
+ minY = (xy.second - r) * StepY() + BoundsT::minY;
+ maxY = (xy.second + r) * StepY() + BoundsT::minY;
+ }
+private:
+ inline static CoordT StepX()
+ {
+ return CoordT(BoundsT::maxX - BoundsT::minX) / CellIdT::MAX_COORD;
+ }
+ inline static CoordT StepY()
+ {
+ return CoordT(BoundsT::maxY - BoundsT::minY) / CellIdT::MAX_COORD;
+ }
+};
diff --git a/indexer/classif_routine.cpp b/indexer/classif_routine.cpp
new file mode 100644
index 0000000000..ff32733f2a
--- /dev/null
+++ b/indexer/classif_routine.cpp
@@ -0,0 +1,59 @@
+#include "classif_routine.hpp"
+#include "classificator.hpp"
+#include "drawing_rules.hpp"
+
+#include "../indexer/osm2type.hpp"
+
+#include "../coding/reader.hpp"
+
+#include "../std/stdio.hpp"
+
+#include "../base/start_mem_debug.hpp"
+
+
+namespace classificator
+{
+ void Read(string const & dir)
+ {
+ drule::ReadRules((dir + "drawing_rules.bin").c_str());
+ if (!classif().ReadClassificator((dir + "classificator.txt").c_str()))
+ MYTHROW(Reader::OpenException, ("drawing rules or classificator file"));
+
+ (void)classif().ReadVisibility((dir + "visibility.txt").c_str());
+ }
+
+ void parse_osm_types(int start, int end, string const & path)
+ {
+ for (int i = start; i <= end; ++i)
+ {
+ char buf[5] = { 0 };
+ sprintf(buf, "%d", i);
+
+ string const inFile = path + buf + ".xml";
+ ftype::ParseOSMTypes(inFile.c_str(), i);
+ }
+ }
+
+ void GenerateAndWrite(string const & path)
+ {
+ // 1. generic types
+ parse_osm_types(0, 11, path + "styles/caption-z");
+ parse_osm_types(6, 17, path + "styles/osm-map-features-z");
+
+ // 2. POI (not used)
+ //parse_osm_types(12, 17, path + "styles/osm-POI-features-z");
+
+ // 3. generate map
+ string const inFile = path + "styles/mapswithme.xml";
+ for (int i = 0; i <= 17; ++i)
+ ftype::ParseOSMTypes(inFile.c_str(), i);
+
+ drule::WriteRules(string(path + "drawing_rules.bin").c_str());
+ classif().PrintClassificator(string(path + "classificator.txt").c_str());
+ }
+
+ void PrepareForFeatureGeneration()
+ {
+ classif().SortClassificator();
+ }
+}
diff --git a/indexer/classif_routine.hpp b/indexer/classif_routine.hpp
new file mode 100644
index 0000000000..26a0604534
--- /dev/null
+++ b/indexer/classif_routine.hpp
@@ -0,0 +1,10 @@
+#pragma once
+
+#include "../std/string.hpp"
+
+namespace classificator
+{
+ void Read(string const & dir);
+ void GenerateAndWrite(string const & dir);
+ void PrepareForFeatureGeneration();
+}
diff --git a/indexer/classificator.cpp b/indexer/classificator.cpp
new file mode 100644
index 0000000000..be4092e35f
--- /dev/null
+++ b/indexer/classificator.cpp
@@ -0,0 +1,530 @@
+#include "../base/SRC_FIRST.hpp"
+#include "classificator.hpp"
+#include "tree_structure.hpp"
+
+#include "../coding/file_reader.hpp"
+
+#include "../base/assert.hpp"
+
+#include "../std/target_os.hpp"
+#include "../std/bind.hpp"
+#include "../std/algorithm.hpp"
+#include "../std/iterator.hpp"
+
+#include "../base/start_mem_debug.hpp"
+
+
+/////////////////////////////////////////////////////////////////////////////////////////
+// ClassifObject implementation
+/////////////////////////////////////////////////////////////////////////////////////////
+
+ClassifObject * ClassifObject::AddImpl(string const & s)
+{
+ if (m_objs.empty()) m_objs.reserve(30);
+
+ m_objs.push_back(ClassifObject(s));
+ return &(m_objs.back());
+}
+
+ClassifObject * ClassifObject::Add(string const & s)
+{
+ ClassifObject * p = Find(s);
+ return (p ? p : AddImpl(s));
+}
+
+void ClassifObject::AddCriterion(string const & s)
+{
+ Add('[' + s + ']');
+}
+
+ClassifObject * ClassifObject::Find(string const & s)
+{
+ for (iter_t i = m_objs.begin(); i != m_objs.end(); ++i)
+ if ((*i).m_name == s)
+ return &(*i);
+
+ return 0;
+}
+
+void ClassifObject::AddDrawRule(drule::Key const & k)
+{
+ for (size_t i = 0; i < m_drawRule.size(); ++i)
+ if (k == m_drawRule[i]) return;
+
+ m_drawRule.push_back(k);
+}
+
+bool ClassifObject::IsCriterion() const
+{
+ return (m_name[0] == '[');
+}
+
+ClassifObjectPtr ClassifObject::BinaryFind(string const & s) const
+{
+ const_iter_t i = lower_bound(m_objs.begin(), m_objs.end(), s, less_name_t());
+ if ((i == m_objs.end()) || ((*i).m_name != s))
+ return ClassifObjectPtr(0, 0);
+ else
+ return ClassifObjectPtr(&(*i), distance(m_objs.begin(), i));
+}
+
+void ClassifObject::SavePolicy::Serialize(ostream & s) const
+{
+ ClassifObject const * p = Current();
+ for (size_t i = 0; i < p->m_drawRule.size(); ++i)
+ s << p->m_drawRule[i].toString() << " ";
+}
+
+void ClassifObject::LoadPolicy::Serialize(string const & s)
+{
+ ClassifObject * p = Current();
+
+ // load drawing rule
+ drule::Key key;
+ key.fromString(s);
+ p->m_drawRule.push_back(key);
+
+ // mark as visible in rule's scale
+ p->m_visibility[key.m_scale] = true;
+}
+
+void ClassifObject::LoadPolicy::Start(size_t i)
+{
+ ClassifObject * p = Current();
+ p->m_objs.push_back(ClassifObject());
+
+ base_type::Start(i);
+}
+
+void ClassifObject::LoadPolicy::EndChilds()
+{
+ ClassifObject * p = Current();
+ ASSERT ( p->m_objs.back().m_name.empty(), () );
+ p->m_objs.pop_back();
+}
+
+void ClassifObject::VisSavePolicy::Serialize(ostream & s) const
+{
+ ClassifObject const * p = Current();
+
+ size_t const count = p->m_visibility.size();
+
+ string str;
+ str.resize(count);
+ for (size_t i = 0; i < count; ++i)
+ str[i] = p->m_visibility[i] ? '1' : '0';
+
+ s << str << " ";
+}
+
+void ClassifObject::VisLoadPolicy::Name(string const & name) const
+{
+ // Assume that the classificator hasn't changed since the visibility was saved.
+ ASSERT ( name == Current()->m_name, () );
+}
+
+void ClassifObject::VisLoadPolicy::Serialize(string const & s)
+{
+ ClassifObject * p = Current();
+
+ for (size_t i = 0; i < s.size(); ++i)
+ p->m_visibility[i] = (s[i] == '1');
+}
+
+void ClassifObject::VisLoadPolicy::Start(size_t i)
+{
+ if (i < Current()->m_objs.size())
+ base_type::Start(i);
+ else
+ m_stack.push_back(0); // dummy
+}
+
+void ClassifObject::Sort()
+{
+ sort(m_objs.begin(), m_objs.end(), less_name_t());
+ for_each(m_objs.begin(), m_objs.end(), boost::bind(&ClassifObject::Sort, _1));
+}
+
+void ClassifObject::Swap(ClassifObject & r)
+{
+ swap(m_name, r.m_name);
+ swap(m_drawRule, r.m_drawRule);
+ swap(m_objs, r.m_objs);
+ swap(m_visibility, r.m_visibility);
+}
+
+ClassifObject const * ClassifObject::GetObject(size_t i) const
+{
+ ASSERT ( i < m_objs.size(), (i) );
+ return &(m_objs[i]);
+}
+
+void ClassifObject::ConcatChildNames(string & s) const
+{
+ s.clear();
+ size_t const count = m_objs.size();
+ for (size_t i = 0; i < count; ++i)
+ {
+ s += m_objs[i].GetName();
+ if (i != count-1) s += '|';
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////
+// Classificator implementation
+/////////////////////////////////////////////////////////////////////////////////////////
+
+Classificator & classif()
+{
+ static Classificator c;
+ return c;
+}
+
+namespace ftype
+{
+ uint8_t const bits_count = 6;
+ uint8_t const levels_count = 5;
+ uint8_t const max_value = (1 << bits_count) - 1;
+
+ void set_value(uint32_t & type, uint8_t level, uint8_t value)
+ {
+ level *= bits_count; // level to bits
+
+ uint32_t const m1 = uint32_t(max_value) << level;
+ type &= (~m1); // zero bits
+
+ uint32_t const m2 = uint32_t(value) << level;
+ type |= m2; // set bits
+ }
+
+ uint8_t get_value(uint32_t type, uint8_t level)
+ {
+ level *= bits_count; // level to bits;
+
+ uint32_t const m = uint32_t(max_value) << level;
+ type &= m; // leave only our bits
+
+ type = type >> level; // move to start
+ ASSERT ( type <= max_value, ("invalid output value", type) );
+
+ return uint8_t(type); // conversion
+ }
+
+ uint8_t get_control_level(uint32_t type)
+ {
+ uint8_t count = 0;
+ while (type > 1)
+ {
+ type = type >> bits_count;
+ ++count;
+ }
+
+ return count;
+ }
+
+ void PushValue(uint32_t & type, uint8_t value)
+ {
+ ASSERT ( value <= max_value, ("invalid input value", value) );
+
+ uint8_t const cl = get_control_level(type);
+ ASSERT ( cl < levels_count, (cl) );
+
+ set_value(type, cl, value);
+
+ // set control level
+ set_value(type, cl+1, 1);
+ }
+
+ bool GetValue(uint32_t type, uint8_t level, uint8_t & value)
+ {
+ ASSERT ( level < levels_count, ("invalid input level", level) );
+
+ if (level < get_control_level(type))
+ {
+ value = get_value(type, level);
+ return true;
+ }
+ return false;
+ }
+
+ void PopValue(uint32_t & type)
+ {
+ uint8_t const cl = get_control_level(type);
+ ASSERT ( cl > 0, (cl) );
+
+ // remove control level
+ set_value(type, cl, 0);
+
+ // set control level
+ set_value(type, cl-1, 1);
+ }
+}
+
+namespace
+{
+ class suitable_getter
+ {
+ struct compare_scales
+ {
+ bool operator() (drule::Key const & l, int r) const { return l.m_scale < r; }
+ bool operator() (int l, drule::Key const & r) const { return l < r.m_scale; }
+ bool operator() (drule::Key const & l, drule::Key const & r) const { return l.m_scale < r.m_scale; }
+ };
+
+ typedef vector<drule::Key> vec_t;
+ typedef vec_t::const_iterator iter_t;
+
+ vec_t const & m_rules;
+ vec_t & m_keys;
+
+ iter_t m_iters[2];
+ int m_scales[2];
+
+ bool m_added;
+
+ void add_rule(int ft, iter_t i)
+ {
+ static const int visible[3][drule::count_of_rules] = {
+ {0, 0, 1, 1, 0, 0, 0}, // fpoint
+ {1, 0, 0, 0, 0, 1, 0}, // fline
+ {1, 1, 1, 1, 0, 0, 0} // farea
+ };
+
+ if (visible[ft][i->m_type] == 1)
+ {
+ m_keys.push_back(*i);
+ m_added = true;
+ }
+ }
+
+ void look_forward(int ft)
+ {
+ if (m_scales[0] < 0) return;
+ iter_t i = m_iters[0];
+ do
+ {
+ add_rule(ft, i);
+ ++i;
+ } while (i != m_rules.end() && i->m_scale == m_scales[0]);
+ }
+
+ void look_backward(int ft)
+ {
+ if (m_scales[1] < 0) return;
+ iter_t i = m_iters[1];
+ do
+ {
+ add_rule(ft, i);
+ if (i == m_rules.begin())
+ break;
+ else
+ --i;
+ } while (i->m_scale == m_scales[1]);
+ }
+
+ public:
+ suitable_getter(vec_t const & rules, vec_t & keys)
+ : m_rules(rules), m_keys(keys)
+ {
+ }
+
+ void find(int ft, int scale)
+ {
+ // find greater or equal scale
+ m_iters[0] = lower_bound(m_rules.begin(), m_rules.end(), scale, compare_scales());
+ if (m_iters[0] != m_rules.end())
+ m_scales[0] = m_iters[0]->m_scale;
+ else
+ m_scales[0] = -1000;
+
+ // if drawing rules exist for 'scale', then process and exit
+ if (scale == m_scales[0])
+ {
+ look_forward(ft);
+ return;
+ }
+
+ // find less or equal scale
+ m_iters[1] = upper_bound(m_rules.begin(), m_rules.end(), scale, compare_scales());
+ if (m_iters[1] != m_rules.begin())
+ {
+ --m_iters[1];
+ m_scales[1] = m_iters[1]->m_scale;
+ }
+ else
+ m_scales[1] = -1000;
+
+ // choose the nearest scale to process first
+ m_added = false;
+ if (abs(m_scales[0] - scale) > abs(m_scales[1] - scale))
+ {
+ look_backward(ft);
+ if (!m_added)
+ look_forward(ft);
+ }
+ else
+ {
+ look_forward(ft);
+ if (!m_added)
+ look_backward(ft);
+ }
+ }
+ };
+}
+
+void ClassifObject::GetSuitable(int scale, feature_t ft, vector<drule::Key> & keys) const
+{
+ ASSERT ( ft <= farea, () );
+
+ // 2. Check visibility criterion for scale first.
+ if (!m_visibility[scale])
+ return;
+
+ // special for AlexZ
+ // find rules for 'scale', or, if there are none, for the scale nearest to 'scale'
+ suitable_getter rulesGetter(m_drawRule, keys);
+ rulesGetter.find(ft, scale);
+}
+
+bool ClassifObject::IsDrawable(int scale) const
+{
+ return (m_visibility[scale] && IsDrawableAny());
+}
+
+bool ClassifObject::IsDrawableAny() const
+{
+ return (m_visibility != visible_mask_t() && !m_drawRule.empty());
+}
+
+bool ClassifObject::IsDrawableLike(feature_t ft) const
+{
+ // check the very common criterion first
+ if (!IsDrawableAny())
+ return false;
+
+ ASSERT ( ft <= farea, () );
+
+ static const int visible[3][drule::count_of_rules] = {
+ {0, 0, 1, 1, 0, 0, 0}, // fpoint
+ {1, 0, 0, 0, 0, 1, 0}, // fline
+ {0, 1, 0, 0, 0, 0, 0} // farea (!!! key difference with GetSuitable !!!)
+ };
+
+ for (size_t i = 0; i < m_drawRule.size(); ++i)
+ {
+ ASSERT ( m_drawRule[i].m_type < drule::count_of_rules, () );
+ if (visible[ft][m_drawRule[i].m_type] == 1)
+ {
+ /// @todo Check if rule's scale is reachable according to m_visibility (see GetSuitable algorithm).
+ return true;
+ }
+ }
+
+ return false;
+}
+
+namespace
+{
+ bool LoadFileToString(char const * fPath, string & buffer)
+ {
+ try
+ {
+ FileReader reader(fPath);
+ size_t const sz = static_cast<size_t>(reader.Size());
+ if (sz > 0)
+ {
+ buffer.resize(sz);
+ reader.Read(0, &buffer[0], sz);
+ return true;
+ }
+ }
+ catch (FileReader::OpenException const &)
+ {
+ // It's OK. Just return false.
+ }
+ return false;
+ }
+}
+
+bool Classificator::ReadClassificator(char const * fPath)
+{
+ string buffer;
+ if (!LoadFileToString(fPath, buffer))
+ return false;
+
+ istringstream iss(buffer);
+
+ m_root.Clear();
+
+ ClassifObject::LoadPolicy policy(&m_root);
+ tree::LoadTreeAsText(iss, policy);
+
+ m_root.Sort();
+ return true;
+}
+
+void Classificator::PrintClassificator(char const * fPath)
+{
+#ifndef OMIM_OS_BADA
+ ofstream file(fPath);
+
+ ClassifObject::SavePolicy policy(&m_root);
+ tree::SaveTreeAsText(file, policy);
+
+#else
+ ASSERT ( false, ("PrintClassificator uses only in indexer_tool") );
+#endif
+}
+
+bool Classificator::ReadVisibility(char const * fPath)
+{
+ string buffer;
+ if (!LoadFileToString(fPath, buffer))
+ return false;
+
+ istringstream iss(buffer);
+
+ ClassifObject::VisLoadPolicy policy(&m_root);
+ tree::LoadTreeAsText(iss, policy);
+
+ return true;
+}
+
+void Classificator::PrintVisibility(char const * fPath)
+{
+#ifndef OMIM_OS_BADA
+ ofstream file(fPath);
+
+ ClassifObject::VisSavePolicy policy(&m_root);
+ tree::SaveTreeAsText(file, policy);
+
+#else
+ ASSERT ( false, ("PrintVisibility uses only in indexer_tool") );
+#endif
+}
+
+void Classificator::SortClassificator()
+{
+ GetMutableRoot()->Sort();
+}
+
+uint32_t Classificator::GetTypeByPath(vector<string> const & path)
+{
+ ClassifObject const * p = GetRoot();
+
+ size_t i = 0;
+ uint32_t type = ftype::GetEmptyValue();
+
+ while (i < path.size())
+ {
+ ClassifObjectPtr ptr = p->BinaryFind(path[i]);
+ ASSERT ( ptr, ("Invalid path in Classificator::GetTypeByPath") );
+
+ ftype::PushValue(type, ptr.GetIndex());
+
+ ++i;
+ p = ptr.get();
+ }
+
+ return type;
+}
diff --git a/indexer/classificator.hpp b/indexer/classificator.hpp
new file mode 100644
index 0000000000..8f1dc59c2b
--- /dev/null
+++ b/indexer/classificator.hpp
@@ -0,0 +1,245 @@
+#pragma once
+#include "drawing_rule_def.hpp"
+
+#include "../base/base.hpp"
+
+#include "../std/vector.hpp"
+#include "../std/string.hpp"
+#include "../std/sstream.hpp"
+#include "../std/fstream.hpp"
+#include "../std/bitset.hpp"
+
+#include "../base/start_mem_debug.hpp"
+
+class ClassifObject;
+
+namespace ftype
+{
+ inline uint32_t GetEmptyValue() { return 1; }
+
+ void PushValue(uint32_t & type, uint8_t value);
+ bool GetValue(uint32_t type, uint8_t level, uint8_t & value);
+ void PopValue(uint32_t & type);
+}
+
+class ClassifObjectPtr
+{
+ ClassifObject const * m_p;
+ size_t m_ind;
+
+public:
+ ClassifObjectPtr() : m_p(0), m_ind(0) {}
+ ClassifObjectPtr(ClassifObject const * p, size_t i): m_p(p), m_ind(i) {}
+
+ ClassifObject const * get() const { return m_p; }
+ ClassifObject const * operator->() const { return m_p; }
+ operator bool() const { return (m_p != 0); }
+
+ size_t GetIndex() const { return m_ind; }
+};
+
+class ClassifObject
+{
+ struct less_name_t
+ {
+ bool operator() (ClassifObject const & r1, ClassifObject const & r2) const
+ {
+ return (r1.m_name < r2.m_name);
+ }
+ };
+
+public:
+ ClassifObject() {} // for serialization only
+ ClassifObject(string const & s) : m_name(s) {}
+
+ /// @name Fill from osm draw rule files.
+ //@{
+private:
+ ClassifObject * AddImpl(string const & s);
+public:
+ ClassifObject * Add(string const & s);
+ void AddCriterion(string const & s);
+ ClassifObject * Find(string const & s);
+
+ void AddDrawRule(drule::Key const & k);
+ //@}
+
+ /// @name Find substitution when reading osm features.
+ //@{
+ ClassifObjectPtr BinaryFind(string const & s) const;
+ //@}
+
+ void Clear() { m_objs.clear(); }
+
+ void Sort();
+ void Swap(ClassifObject & r);
+
+ bool IsCriterion() const;
+ string const & GetName() const { return m_name; }
+ ClassifObject const * GetObject(size_t i) const;
+
+ void ConcatChildNames(string & s) const;
+
+ enum feature_t { fpoint = 0, fline, farea };
+ void GetSuitable(int scale, feature_t ft, vector<drule::Key> & keys) const;
+
+ bool IsDrawable(int scale) const;
+ bool IsDrawableAny() const;
+ bool IsDrawableLike(feature_t ft) const;
+
+ template <class ToDo>
+ void ForEachObject(ToDo toDo)
+ {
+ for (size_t i = 0; i < m_objs.size(); ++i)
+ toDo(&m_objs[i]);
+ }
+
+ typedef bitset<18> visible_mask_t;
+ visible_mask_t GetVisibilityMask() const { return m_visibility; }
+ void SetVisibilityMask(visible_mask_t mask) { m_visibility = mask; }
+
+ //template <class ToDo> void ForEachType(int level, uint32_t type, ToDo & toDo)
+ //{
+ // if (IsCriterion()) return;
+
+ // if ((level > 1) || (level == 1 && m_objs.empty())) // root and first level is skipped
+ // {
+ // toDo(type);
+ // }
+
+ // for (size_t i = 0; i < m_objs.size(); ++i)
+ // {
+ // uint32_t t = type;
+ // ftype::PushValue(t, i);
+ // m_objs[i].ForEachType(level + 1, t, toDo);
+ // }
+ //}
+
+ /// @name Policies for classificator tree serialization.
+ //@{
+ class BasePolicy
+ {
+ protected:
+ vector<ClassifObject *> m_stack;
+ ClassifObject * Current() const { return m_stack.back(); }
+
+ public:
+ BasePolicy(ClassifObject * pRoot) { m_stack.push_back(pRoot); }
+
+ void Start(size_t i) { m_stack.push_back(&(Current()->m_objs[i])); }
+ void End() { m_stack.pop_back(); }
+ };
+
+ class SavePolicy : public BasePolicy
+ {
+ public:
+ SavePolicy(ClassifObject * pRoot) : BasePolicy(pRoot) {}
+
+ string Name() const { return Current()->m_name; }
+ void Serialize(ostream & s) const;
+
+ size_t BeginChilds() const { return Current()->m_objs.size(); }
+ };
+
+ class LoadPolicy : public BasePolicy
+ {
+ typedef BasePolicy base_type;
+ public:
+ LoadPolicy(ClassifObject * pRoot) : base_type(pRoot) {}
+
+ void Name(string const & name) { Current()->m_name = name; }
+ void Serialize(string const & s);
+
+ void Start(size_t i);
+ void EndChilds();
+ };
+
+ class VisSavePolicy : public SavePolicy
+ {
+ public:
+ VisSavePolicy(ClassifObject * pRoot) : SavePolicy(pRoot) {}
+
+ void Serialize(ostream & s) const;
+ };
+
+ class VisLoadPolicy : public BasePolicy
+ {
+ typedef BasePolicy base_type;
+
+ public:
+ VisLoadPolicy(ClassifObject * pRoot) : BasePolicy(pRoot) {}
+
+ void Name(string const & name) const;
+ void Serialize(string const & s);
+
+ void Start(size_t i);
+ void EndChilds() {}
+ };
+ //@}
+
+private:
+ string m_name;
+ vector<drule::Key> m_drawRule;
+ vector<ClassifObject> m_objs;
+ visible_mask_t m_visibility;
+
+ typedef vector<ClassifObject>::iterator iter_t;
+ typedef vector<ClassifObject>::const_iterator const_iter_t;
+};
+
+inline void swap(ClassifObject & r1, ClassifObject & r2)
+{
+ r1.Swap(r2);
+}
+
+class Classificator
+{
+ ClassifObject m_root;
+
+ static ClassifObject * AddV(ClassifObject * parent, string const & key, string const & value);
+
+public:
+ Classificator() : m_root("world") {}
+
+ ClassifObject * Add(ClassifObject * parent, string const & key, string const & value);
+
+ /// @name Serialization-like functions.
+ //@{
+ bool ReadClassificator(char const * fPath);
+ void PrintClassificator(char const * fPath);
+
+ bool ReadVisibility(char const * fPath);
+ void PrintVisibility(char const * fPath);
+
+ void SortClassificator();
+ //@}
+
+ /// Return type by path in classificator tree, example:
+ /// path = ["natural", "coastline"].
+ uint32_t GetTypeByPath(vector<string> const & path);
+
+ // Iterate over possible object types
+ //template <class ToDo> void ForEachType(ToDo toDo)
+ //{
+ // m_root.ForEachType(0, ftype::GetEmptyValue(), toDo);
+ //}
+
+ /// @name used in osm2type.cpp, not for public use.
+ //@{
+ ClassifObject const * GetRoot() const { return &m_root; }
+ ClassifObject * GetMutableRoot() { return &m_root; }
+ //@}
+
+public:
+ /// @name Used only in feature_visibility.cpp, not for public use.
+ //@{
+ template <class ToDo> typename ToDo::result_type
+ ProcessObjects(uint32_t type, ToDo & toDo) const;
+
+ ClassifObject const * GetObject(uint32_t type) const;
+ //@}
+};
+
+Classificator & classif();
+
+#include "../base/stop_mem_debug.hpp"
diff --git a/indexer/country.cpp b/indexer/country.cpp
new file mode 100644
index 0000000000..97da55ae1e
--- /dev/null
+++ b/indexer/country.cpp
@@ -0,0 +1,144 @@
+#include "country.hpp"
+
+#include "../base/logging.hpp"
+
+#include "../coding/file_reader.hpp"
+#include "../coding/file_writer.hpp"
+
+#include "../platform/platform.hpp"
+
+#include "../indexer/data_header.hpp"
+#include "../indexer/data_header_reader.hpp"
+
+namespace mapinfo
+{
+ string FileNameFromUrl(string const & url)
+ {
+ size_t lastSlashPos = url.find_last_of('/');
+ if (lastSlashPos != string::npos && (lastSlashPos + 1) < url.size())
+ return url.substr(lastSlashPos + 1);
+ ASSERT( false, ("Url should be valid") );
+ return string();
+ }
+
+ bool IsFileSizeEqualTo(string const & fileName, uint64_t size)
+ {
+ uint64_t diskSize = 0;
+ if (GetPlatform().GetFileSize(GetPlatform().WorkingDir() + fileName, diskSize)
+ && diskSize == size)
+ return true;
+ else
+ return false;
+ }
+
+ /// Simple check - compare url size with real file size on disk
+ bool IsFileDownloaded(TUrl const & url)
+ {
+ string fileName = FileNameFromUrl(url.first);
+ return IsFileSizeEqualTo(fileName, url.second);
+ }
+
+ struct CountryBoundsCalculator
+ {
+ m2::RectD & m_bounds;
+ CountryBoundsCalculator(m2::RectD & bounds) : m_bounds(bounds) {}
+ void operator()(TUrl const & url)
+ {
+ string fileName = FileNameFromUrl(url.first);
+ if (IsFileSizeEqualTo(fileName, url.second) && IsDatFile(fileName))
+ {
+ feature::DataHeader header;
+ if (feature::ReadDataHeader(GetPlatform().WorkingDir() + fileName, header))
+ m_bounds.Add(header.Bounds());
+ }
+ }
+ };
+
+ m2::RectD Country::Bounds() const
+ {
+ m2::RectD bounds;
+ std::for_each(m_urls.begin(), m_urls.end(), CountryBoundsCalculator(bounds));
+ return bounds;
+ }
+
+ struct LocalSizeCalculator
+ {
+ uint64_t & m_size;
+ LocalSizeCalculator(uint64_t & size) : m_size(size) {}
+ void operator()(TUrl const & url)
+ {
+ if (IsFileDownloaded(url))
+ m_size += url.second;
+ }
+ };
+
+ uint64_t Country::LocalSize() const
+ {
+ uint64_t size = 0;
+ std::for_each(m_urls.begin(), m_urls.end(), LocalSizeCalculator(size));
+ return size;
+ }
+
+ struct RemoteSizeCalculator
+ {
+ uint64_t & m_size;
+ RemoteSizeCalculator(uint64_t & size) : m_size(size) {}
+ void operator()(TUrl const & url)
+ {
+ if (!IsFileDownloaded(url))
+ m_size += url.second;
+ }
+ };
+
+ uint64_t Country::RemoteSize() const
+ {
+ uint64_t size = 0;
+ std::for_each(m_urls.begin(), m_urls.end(), RemoteSizeCalculator(size));
+ return size;
+ }
+
+ void Country::AddUrl(TUrl const & url)
+ {
+ m_urls.push_back(url);
+ }
+
+ ////////////////////////////////////////////////////////////////////////
+
+ template <class TArchive> TArchive & operator << (TArchive & ar, mapinfo::Country const & country)
+ {
+ ar << country.m_group;
+ ar << country.m_country;
+ ar << country.m_region;
+ ar << country.m_urls;
+ return ar;
+ }
+
+ bool LoadCountries(TCountriesContainer & countries, string const & updateFile)
+ {
+ countries.clear();
+ try
+ {
+ FileReader file(updateFile.c_str());
+ ReaderSource<FileReader> source(file);
+ stream::SinkReaderStream<ReaderSource<FileReader> > rStream(source);
+ uint32_t version;
+ rStream >> version;
+ if (version > MAPS_MAJOR_VERSION_BINARY_FORMAT)
+ return false;
+ rStream >> countries;
+ return true;
+ }
+ catch (RootException const & e)
+ {
+ LOG(LERROR, ("LoadCountries exception", e.what()));
+ }
+ return false;
+ }
+
+ void SaveCountries(TCountriesContainer const & countries, Writer & writer)
+ {
+ stream::SinkWriterStream<Writer> wStream(writer);
+ wStream << MAPS_MAJOR_VERSION_BINARY_FORMAT;
+ wStream << countries;
+ }
+}
diff --git a/indexer/country.hpp b/indexer/country.hpp
new file mode 100644
index 0000000000..e1528fb224
--- /dev/null
+++ b/indexer/country.hpp
@@ -0,0 +1,84 @@
+#pragma once
+
+#include "../coding/streams_sink.hpp"
+
+#include "../geometry/rect2d.hpp"
+
+#include "../indexer/std_serialization.hpp"
+#include "../indexer/defines.hpp"
+
+#include "../std/string.hpp"
+#include "../std/vector.hpp"
+
+class Reader;
+class Writer;
+
+namespace mapinfo
+{
+ typedef pair<string, uint64_t> TUrl;
+ typedef vector<TUrl> TUrlContainer;
+
+ /// helper function
+ string FileNameFromUrl(string const & url);
+ bool IsFileDownloaded(TUrl const & url);
+ bool IsDatFile(string const & fileName);
+
+ /// Serves as a proxy between GUI and downloaded files
+ class Country
+ {
+ template <class TArchive> friend TArchive & operator << (TArchive & ar, mapinfo::Country const & country);
+ template <class TArchive> friend TArchive & operator >> (TArchive & ar, mapinfo::Country & country);
+
+ private:
+ /// Europe, Asia etc.
+ string m_group;
+ /// USA, Switzerland etc.
+ string m_country;
+ /// can be empty, Alaska, California etc.
+ string m_region;
+ /// stores squares with world pieces where this country resides
+ TUrlContainer m_urls;
+
+ public:
+ Country() {}
+ Country(string const & group, string const & country, string const & region)
+ : m_group(group), m_country(country), m_region(region) {}
+
+ bool operator<(Country const & other) const { return Name() < other.Name(); }
+ bool operator==(Country const & other) const
+ {
+ return m_group == other.m_group && m_country == other.m_country && m_region == other.m_region
+ && m_urls == other.m_urls;
+ }
+ bool operator!=(Country const & other) const { return !(*this == other); }
+
+ void AddUrl(TUrl const & url);
+ TUrlContainer const & Urls() const { return m_urls; }
+
+ string const & Group() const { return m_group; }
+ string Name() const { return m_region.empty() ? m_country : m_country + " - " + m_region; }
+
+ /// @return bounds for downloaded parts of the country or empty rect
+ m2::RectD Bounds() const;
+ /// Downloaded parts size
+ uint64_t LocalSize() const;
+ /// Not downloaded parts size
+ uint64_t RemoteSize() const;
+ };
+
+ template <class TArchive> TArchive & operator >> (TArchive & ar, mapinfo::Country & country)
+ {
+ ar >> country.m_group;
+ ar >> country.m_country;
+ ar >> country.m_region;
+ ar >> country.m_urls;
+ return ar;
+ }
+
+ /// key is Country::Group()
+ typedef map<string, vector<Country> > TCountriesContainer;
+
+ /// @return false if new application version should be downloaded
+ bool LoadCountries(TCountriesContainer & countries, string const & updateFile);
+ void SaveCountries(TCountriesContainer const & countries, Writer & writer);
+}
diff --git a/indexer/covering.cpp b/indexer/covering.cpp
new file mode 100644
index 0000000000..22d7364943
--- /dev/null
+++ b/indexer/covering.cpp
@@ -0,0 +1,132 @@
+#include "covering.hpp"
+#include "cell_coverer.hpp"
+#include "cell_id.hpp"
+#include "feature.hpp"
+#include "../geometry/covering.hpp"
+#include "../base/base.hpp"
+#include "../base/stl_add.hpp"
+#include "../std/algorithm.hpp"
+#include "../std/bind.hpp"
+
+// TODO: Redo polyline covering properly.
+
+namespace
+{
+ template <class BoundsT, class CoveringT>
+ class TriangleCoverer
+ {
+ public:
+ typedef typename CoveringT::CellId CellId;
+ typedef CellIdConverter<BoundsT, CellId> CellIdConverterType;
+
+ explicit TriangleCoverer(CoveringT const & covering) : m_Covering(1, covering) {}
+
+ void operator () (m2::PointD const & a, m2::PointD const & b, m2::PointD const & c)
+ {
+ AddTriangle(a, b, c);
+ }
+
+ void AddTriangle(m2::PointD const & a, m2::PointD const & b, m2::PointD const & c)
+ {
+ m_Covering.push_back(CoveringT(
+ m2::PointD(CellIdConverterType::XToCellIdX(a.x), CellIdConverterType::YToCellIdY(a.y)),
+ m2::PointD(CellIdConverterType::XToCellIdX(b.x), CellIdConverterType::YToCellIdY(b.y)),
+ m2::PointD(CellIdConverterType::XToCellIdX(c.x), CellIdConverterType::YToCellIdY(c.y))));
+ while (m_Covering.size() > 1 &&
+ m_Covering[m_Covering.size() - 2].Size() < m_Covering.back().Size())
+ {
+ m_Covering[m_Covering.size() - 2].Append(m_Covering.back());
+ m_Covering.pop_back();
+ }
+ }
+
+ CoveringT const & GetCovering()
+ {
+ ASSERT(!m_Covering.empty(), ());
+#ifdef DEBUG
+ // Make appends in another order and assert that result is the same.
+ CoveringT dbgCovering(m_Covering[0]);
+ for (size_t i = 1; i < m_Covering.size(); ++i)
+ dbgCovering.Append(m_Covering[i]);
+#endif
+ while (m_Covering.size() > 1)
+ {
+ m_Covering[m_Covering.size() - 2].Append(m_Covering.back());
+ m_Covering.pop_back();
+ }
+#ifdef DEBUG
+ vector<CellId> dbgIds, ids;
+ dbgCovering.OutputToVector(dbgIds);
+ m_Covering[0].OutputToVector(ids);
+ ASSERT_EQUAL(dbgIds, ids, ());
+#endif
+ return m_Covering[0];
+ }
+
+ private:
+ vector<CoveringT> m_Covering;
+ };
+}
+
+vector<int64_t> covering::CoverFeature(Feature const & feature)
+{
+ vector<CoordPointT> geometry;
+ feature.ForEachPoint(MakeBackInsertFunctor(geometry));
+ ASSERT(!geometry.empty(), ());
+ if (geometry.empty())
+ return vector<int64_t>();
+ vector<RectId> ids;
+ if (geometry.size() > 1)
+ // TODO: Tweak CoverPolyLine() depth level.
+ CoverPolyLine<MercatorBounds, RectId>(geometry, RectId::DEPTH_LEVELS - 1, ids);
+ else
+ ids.push_back(CoverPoint<MercatorBounds, RectId>(geometry[0]));
+
+ typedef covering::Covering<RectId> CoveringType;
+ typedef TriangleCoverer<MercatorBounds, CoveringType> CovererType;
+ CovererType coverer = CovererType(CoveringType(ids));
+ feature.ForEachTriangleRef(coverer);
+ vector<int64_t> res;
+ coverer.GetCovering().OutputToVector(res);
+ return res;
+}
+
+vector<pair<int64_t, int64_t> > covering::SortAndMergeIntervals(vector<pair<int64_t, int64_t> > v)
+{
+#ifdef DEBUG
+ for (size_t i = 0; i < v.size(); ++i)
+ ASSERT_LESS(v[i].first, v[i].second, (i));
+#endif
+ sort(v.begin(), v.end());
+ vector<pair<int64_t, int64_t> > res;
+ res.reserve(v.size());
+ for (size_t i = 0; i < v.size(); ++i)
+ {
+ if (i == 0 || res.back().second < v[i].first)
+ res.push_back(v[i]);
+ else
+ res.back().second = max(res.back().second, v[i].second);
+ }
+ return res;
+}
+
+vector<pair<int64_t, int64_t> > covering::CoverViewportAndAppendLowerLevels(m2::RectD const & rect)
+{
+ vector<RectId> ids;
+ CoverRect<MercatorBounds, RectId>(rect.minX(), rect.minY(), rect.maxX(), rect.maxY(), 8, ids);
+ vector<pair<int64_t, int64_t> > intervals;
+ intervals.reserve(ids.size() * 4);
+ for (vector<RectId>::const_iterator it = ids.begin(); it != ids.end(); ++it)
+ {
+ RectId id = *it;
+ int64_t idInt64 = id.ToInt64();
+ intervals.push_back(pair<int64_t, int64_t>(idInt64, idInt64 + id.SubTreeSize()));
+ while (id.Level() > 0)
+ {
+ id = id.Parent();
+ idInt64 = id.ToInt64();
+ intervals.push_back(pair<int64_t, int64_t>(idInt64, idInt64 + 1));
+ }
+ }
+ return SortAndMergeIntervals(intervals);
+}
diff --git a/indexer/covering.hpp b/indexer/covering.hpp
new file mode 100644
index 0000000000..854b92f623
--- /dev/null
+++ b/indexer/covering.hpp
@@ -0,0 +1,18 @@
+#pragma once
+
+#include "../geometry/rect2d.hpp"
+#include "../base/base.hpp"
+#include "../std/utility.hpp"
+#include "../std/vector.hpp"
+
+class Feature;
+
+namespace covering
+{
+ // Cover feature with RectIds and return their integer representations.
+ vector<int64_t> CoverFeature(Feature const & feature);
+ // Cover viewport with RectIds and append intervals for their parent cells (lower levels) as well.
+ vector<pair<int64_t, int64_t> > CoverViewportAndAppendLowerLevels(m2::RectD const & rect);
+ // Given a vector of intervals [a, b), sort them and merge overlapping intervals.
+ vector<pair<int64_t, int64_t> > SortAndMergeIntervals(vector<pair<int64_t, int64_t> > intervals);
+}
diff --git a/indexer/data_header.cpp b/indexer/data_header.cpp
new file mode 100644
index 0000000000..4b3805b2c7
--- /dev/null
+++ b/indexer/data_header.cpp
@@ -0,0 +1,47 @@
+#include "data_header.hpp"
+
+#include "../base/string_utils.hpp"
+
+#include "../platform/platform.hpp"
+
+#include "../coding/file_writer.hpp"
+
+#include "../indexer/cell_id.hpp"
+
+#include "../base/start_mem_debug.hpp"
+
+namespace feature
+{
+
+ DataHeader::DataHeader()
+ {
+ Reset();
+ }
+
+ namespace
+ {
+ struct do_reset
+ {
+ void operator() (string & t, int) { t.clear(); }
+ void operator() (uint64_t & t, int) { t = 0; }
+ void operator() (pair<int64_t, int64_t> &, int) {}
+ };
+ }
+
+ void DataHeader::Reset()
+ {
+ do_reset doReset;
+ for_each_tuple(m_params, doReset);
+ }
+
+ m2::RectD const DataHeader::Bounds() const
+ {
+ return Int64ToRect(Get<EBoundary>());
+ }
+
+ void DataHeader::SetBounds(m2::RectD const & r)
+ {
+ Set<EBoundary>(RectToInt64(r));
+ }
+
+}
diff --git a/indexer/data_header.hpp b/indexer/data_header.hpp
new file mode 100644
index 0000000000..86eaf54fdf
--- /dev/null
+++ b/indexer/data_header.hpp
@@ -0,0 +1,68 @@
+#pragma once
+
+#include "../indexer/std_serialization.hpp"
+#include "../indexer/defines.hpp"
+
+#include "../coding/streams_sink.hpp"
+
+#include "../geometry/rect2d.hpp"
+
+#include "../std/string.hpp"
+#include "../std/tuple.hpp"
+
+#include "../base/start_mem_debug.hpp"
+
+namespace feature
+{
+ /// All file sizes are in bytes
+ class DataHeader
+ {
+ private:
+ typedef tuple<
+ pair<int64_t, int64_t> // boundary;
+ > params_t;
+ params_t m_params;
+
+ enum param_t { EBoundary };
+
+ template <int N>
+ typename tuple_element<N, params_t>::type const & Get() const { return m_params.get<N>(); }
+ template <int N, class T>
+ void Set(T const & t) { m_params.get<N>() = t; }
+
+ public:
+ DataHeader();
+
+ /// Zeroes all fields
+ void Reset();
+
+ m2::RectD const Bounds() const;
+ void SetBounds(m2::RectD const & r);
+
+ /// @name Serialization
+ //@{
+ template <class TWriter> void Save(TWriter & writer) const
+ {
+ stream::SinkWriterStream<TWriter> w(writer);
+ w << MAPS_MAJOR_VERSION_BINARY_FORMAT;
+ serial::save_tuple(w, m_params);
+ }
+ /// @return false if header can't be read (invalid or newer version format)
+ template <class TReader> bool Load(TReader & reader)
+ {
+ stream::SinkReaderStream<TReader> r(reader);
+
+ uint32_t ver;
+ r >> ver;
+ if (ver > MAPS_MAJOR_VERSION_BINARY_FORMAT)
+ return false;
+ Reset();
+ serial::load_tuple(r, m_params);
+ return true;
+ }
+ //@}
+ };
+
+}
+
+#include "../base/stop_mem_debug.hpp"
diff --git a/indexer/data_header_reader.cpp b/indexer/data_header_reader.cpp
new file mode 100644
index 0000000000..a3bfd95f34
--- /dev/null
+++ b/indexer/data_header_reader.cpp
@@ -0,0 +1,47 @@
+#include "data_header_reader.hpp"
+#include "data_header.hpp"
+
+#include "../coding/file_reader.hpp"
+#include "../coding/file_writer.hpp"
+
+#include "../base/start_mem_debug.hpp"
+
+namespace feature
+{
+ uint64_t ReadDataHeader(string const & datFileName, feature::DataHeader & outHeader)
+ {
+ try
+ {
+ FileReader datReader(datFileName);
+ // read header size
+ uint64_t const headerSize = ReadPrimitiveFromPos<uint64_t>(datReader, 0);
+
+ FileReader subReader = datReader.SubReader(sizeof(uint64_t), headerSize);
+ ReaderSource<FileReader> src(subReader);
+ outHeader.Load(src);
+
+ return (headerSize + sizeof(uint64_t));
+ }
+ catch (Reader::Exception const & e)
+ {
+ ASSERT(false, ("Error reading header from dat file", e.what()));
+ return 0;
+ }
+ }
+
+ void WriteDataHeader(Writer & writer, feature::DataHeader const & header)
+ {
+ typedef vector<unsigned char> TBuffer;
+ TBuffer buffer;
+ MemWriter<TBuffer> w(buffer);
+
+ header.Save(w);
+
+ uint64_t const sz = buffer.size();
+ WriteToSink(writer, sz);
+
+ if (sz > 0)
+ writer.Write(&buffer[0], buffer.size());
+ }
+
+}
diff --git a/indexer/data_header_reader.hpp b/indexer/data_header_reader.hpp
new file mode 100644
index 0000000000..61e3f5937f
--- /dev/null
+++ b/indexer/data_header_reader.hpp
@@ -0,0 +1,14 @@
+#pragma once
+#include "../base/base.hpp"
+
+#include "../std/string.hpp"
+
+class Writer;
+
+namespace feature
+{
+ class DataHeader;
+ /// @return total header size (to be skipped before reading data), or 0 on error
+ uint64_t ReadDataHeader(string const & datFileName, feature::DataHeader & outHeader);
+ void WriteDataHeader(Writer & writer, feature::DataHeader const & header);
+}
diff --git a/indexer/defines.hpp b/indexer/defines.hpp
new file mode 100644
index 0000000000..6e1a480ea7
--- /dev/null
+++ b/indexer/defines.hpp
@@ -0,0 +1,31 @@
+#pragma once
+
+#include "../base/assert.hpp"
+
+#include "../std/string.hpp"
+
+/// Should be incremented when binary format changes
+uint32_t const MAPS_MAJOR_VERSION_BINARY_FORMAT = 0;
+
+#define DATA_FILE_EXTENSION ".dat"
+
+#define UPDATE_CHECK_FILE "maps.update"
+#define UPDATE_BASE_URL "http://melnichek.ath.cx:34568/maps/"
+#define UPDATE_FULL_URL UPDATE_BASE_URL UPDATE_CHECK_FILE
+
+namespace mapinfo
+{
+ inline bool IsDatFile(string const & fileName)
+ {
+ /// file name ends with data file extension
+ string const ext(DATA_FILE_EXTENSION);
+ return fileName.rfind(ext) == fileName.size() - ext.size();
+ }
+
+ inline string IndexFileForDatFile(string const & fileName)
+ {
+ ASSERT(IsDatFile(fileName), ());
+ static char const * INDEX_FILE_EXTENSION = ".idx";
+ return fileName + INDEX_FILE_EXTENSION;
+ }
+}
diff --git a/indexer/drawing_rule_def.cpp b/indexer/drawing_rule_def.cpp
new file mode 100644
index 0000000000..bfb48a7f1c
--- /dev/null
+++ b/indexer/drawing_rule_def.cpp
@@ -0,0 +1,36 @@
+#include "../base/SRC_FIRST.hpp"
+
+#include "drawing_rule_def.hpp"
+
+#include "../base/macros.hpp"
+#include "../base/assert.hpp"
+#include "../base/string_utils.hpp"
+
+#include "../std/stdio.hpp"
+
+#include "../base/start_mem_debug.hpp"
+
+namespace drule
+{
+ string Key::toString() const
+ {
+ char buffer[50];
+ sprintf(buffer, "%d|%d|%d|%d", m_scale, m_type, m_index, m_priority);
+ return buffer;
+ }
+
+ void Key::fromString(string const & s)
+ {
+ int * arrParams[] = { &m_scale, &m_type, &m_index, &m_priority };
+
+ utils::TokenizeIterator it(s, "|");
+ size_t i = 0;
+ while (!it.end())
+ {
+ ASSERT ( i < ARRAY_SIZE(arrParams), (i) );
+
+ *(arrParams[i++]) = atoi((*it).c_str());
+ ++it;
+ }
+ }
+}
diff --git a/indexer/drawing_rule_def.hpp b/indexer/drawing_rule_def.hpp
new file mode 100644
index 0000000000..b697f43406
--- /dev/null
+++ b/indexer/drawing_rule_def.hpp
@@ -0,0 +1,36 @@
+#pragma once
+
+#include "../std/string.hpp"
+
+namespace drule
+{
+ class Key
+ {
+ public:
+ int m_scale;
+ int m_type;
+ int m_index;
+ int m_priority;
+
+ Key() : m_scale(-1), m_type(-1), m_index(-1), m_priority(-1) {}
+ Key(int s, int t, int i) : m_scale(s), m_type(t), m_index(i), m_priority(-1) {}
+
+ bool operator==(Key const & r) const
+ {
+ return (m_scale == r.m_scale && m_type == r.m_type && m_index == r.m_index);
+ }
+
+ string toString() const;
+ void fromString(string const & s);
+
+ void SetPriority(int pr) { m_priority = pr; }
+ };
+
+ /// drawing type of rule - can be one of ...
+ enum rule_type_t { line, area, symbol, caption, circle, pathtext, waymarker, count_of_rules };
+
+ /// geo type of rule - can be a combination of ...
+ enum rule_geo_t { node = 1, way = 2 };
+
+ int const layer_base_priority = 2000;
+}
diff --git a/indexer/drawing_rules.cpp b/indexer/drawing_rules.cpp
new file mode 100644
index 0000000000..270693ce3d
--- /dev/null
+++ b/indexer/drawing_rules.cpp
@@ -0,0 +1,1030 @@
+#include "../base/SRC_FIRST.hpp"
+
+#include "drawing_rules.hpp"
+#include "file_reader_stream.hpp"
+#include "file_writer_stream.hpp"
+#include "std_serialization.hpp"
+
+#include "../coding/file_reader.hpp"
+#include "../coding/file_writer.hpp"
+
+#include "../base/assert.hpp"
+#include "../base/macros.hpp"
+#include "../base/string_utils.hpp"
+
+#include "../std/bind.hpp"
+#include "../std/algorithm.hpp"
+#include "../std/tuple.hpp"
+#include "../std/fstream.hpp"
+#include "../std/exception.hpp"
+#include "../std/limits.hpp"
+
+#include "../base/start_mem_debug.hpp"
+
+
+namespace drule {
+
+ unsigned char alpha_opacity(double d)
+ {
+ ASSERT ( d >= 0.0 && d <= 1.0, (d) );
+ return static_cast<unsigned char>(255 * d);
+ }
+
+ /// @name converters for standard types
+ //@{
+ template <class T> T get_value(string const & s);
+
+ template <> bool get_value<bool>(string const & s)
+ {
+ return (s == "yes" || s == "1");
+ }
+ template <> double get_value<double>(string const & s)
+ {
+ double d;
+ VERIFY ( utils::to_double(s, d), ("Bad double in drawing rule : ", s) );
+ return d;
+ }
+ template <> string get_value<string>(string const & s)
+ {
+ string ss(s);
+ utils::make_lower_case(ss);
+ return ss;
+ }
+ //@}
+
+ /// parameters tuple initialization
+ //@{
+ class assign_element
+ {
+ attrs_map_t const & m_attrs;
+ string * m_keys;
+
+ public:
+ assign_element(attrs_map_t const & attrs, string * keys) : m_attrs(attrs), m_keys(keys) {}
+
+ template <class T> void operator() (T & t, int n)
+ {
+ attrs_map_t::const_iterator i = m_attrs.find(m_keys[n]);
+ if (i != m_attrs.end())
+ t = get_value<T>(i->second);
+ }
+ };
+
+ template <class Tuple, int N>
+ void parse_tuple(Tuple & t, attrs_map_t const & attrs, string (&keys)[N])
+ {
+ STATIC_ASSERT ( N == tuple_length<Tuple>::value );
+ assign_element toDo(attrs, keys);
+ for_each_tuple(t, toDo);
+ }
+ //@}
+
+ /// main function for rule creation
+ template <class T> T * create_rule(attrs_map_t const & attrs)
+ {
+ T * p = new T();
+ parse_tuple(p->m_params, attrs, T::arrKeys);
+ return p;
+ }
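+
+ // Note: the n-th entry of T::arrKeys names the attribute that feeds the n-th element of
+ // T::m_params (see assign_element / parse_tuple above).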
+
+ /// compare rules
+ template <class T> bool is_equal_rules(T const * p1, BaseRule const * p2)
+ {
+ T const * pp2 = dynamic_cast<T const *>(p2);
+ if (pp2)
+ return (p1->IsEqualBase(p2) && (p1->m_params == pp2->m_params));
+ return false;
+ }
+
+ template <class TArchive, class T> void write_rules(TArchive & ar, T const * p)
+ {
+ p->WriteBase(ar);
+ serial::save_tuple(ar, p->m_params);
+ }
+ template <class TArchive, class T> void read_rules(TArchive & ar, T * p)
+ {
+ p->ReadBase(ar);
+ serial::load_tuple(ar, p->m_params);
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // px_metric_t
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct px_metric_t
+ {
+ double m_v;
+ px_metric_t(double v = 0.0) : m_v(v) {}
+ bool operator==(px_metric_t const & r) const { return m_v == r.m_v; }
+ };
+
+ template <class TArchive> TArchive & operator << (TArchive & ar, px_metric_t const & t)
+ {
+ ar << t.m_v;
+ return ar;
+ }
+ template <class TArchive> TArchive & operator >> (TArchive & ar, px_metric_t & t)
+ {
+ ar >> t.m_v;
+ return ar;
+ }
+
+ template <> px_metric_t get_value<px_metric_t>(string const & s)
+ {
+ size_t i = s.find("px");
+ if (i == string::npos)
+ i = s.size();
+
+ return px_metric_t(atof(s.substr(0, i).c_str()));
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // color_t
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct color_t
+ {
+ int32_t m_v;
+ enum XXX { none = -1 };
+
+ color_t(int32_t v = 0) : m_v(v) {}
+ bool operator==(color_t const & r) const { return m_v == r.m_v; }
+ };
+
+ template <class TArchive> TArchive & operator << (TArchive & ar, color_t const & t)
+ {
+ ar << t.m_v;
+ return ar;
+ }
+ template <class TArchive> TArchive & operator >> (TArchive & ar, color_t & t)
+ {
+ ar >> t.m_v;
+ return ar;
+ }
+
+ template <> color_t get_value<color_t>(string const & s)
+ {
+ int v = 0;
+ if (s[0] == '#')
+ {
+ char * dummy;
+ v = strtol(&s[1], &dummy, 16);
+ }
+ else if (s == "none") v = color_t::none;
+ else if (s == "black") { /*already initialized*/ }
+ else if (s == "white") v = 0x00FFFFFF;
+ else if (s == "red") v = 0x00FF0000;
+ else if (s == "green") v = 0x0000FF00;
+ else if (s == "blue") v = 0x000000FF;
+ else if (s == "lightblue") v = 0xADD8E6;
+ else
+ {
+ ASSERT ( !"check color values", (s) );
+ }
+ return v;
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // dash_array_t
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct dash_array_t
+ {
+ vector<double> m_v;
+ void add(string const & s)
+ {
+ double const v = atof(s.c_str());
+ if (v != 0.0) m_v.push_back(v);
+ }
+
+ bool operator==(dash_array_t const & r) const { return m_v == r.m_v; }
+ };
+
+ template <class TArchive> TArchive & operator << (TArchive & ar, dash_array_t const & t)
+ {
+ ar << t.m_v;
+ return ar;
+ }
+ template <class TArchive> TArchive & operator >> (TArchive & ar, dash_array_t & t)
+ {
+ ar >> t.m_v;
+ return ar;
+ }
+
+ template <> dash_array_t get_value<dash_array_t>(string const & s)
+ {
+ dash_array_t ret;
+ utils::TokenizeString(s, " \tpx,", bind(&dash_array_t::add, ref(ret), _1));
+
+ /// @see http://www.w3.org/TR/SVG/painting.html stroke-dasharray
+ size_t const count = ret.m_v.size();
+ if (count % 2 != 0)
+ for (size_t i = 0; i < count; ++i)
+ {
+ double const d = ret.m_v[i];
+ ret.m_v.push_back(d);
+ }
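+ // e.g. an odd dash list such as "5px,3px,2px" expands to {5,3,2,5,3,2} (illustrative values).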
+
+ return ret;
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // line_cap_t
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct line_cap_t
+ {
+ enum { round, butt, square };
+ int32_t m_v;
+
+ line_cap_t(int32_t v = round) : m_v(v) {}
+ bool operator==(line_cap_t const & r) const { return m_v == r.m_v; }
+ };
+
+ template <class TArchive> TArchive & operator << (TArchive & ar, line_cap_t const & t)
+ {
+ ar << t.m_v;
+ return ar;
+ }
+ template <class TArchive> TArchive & operator >> (TArchive & ar, line_cap_t & t)
+ {
+ ar >> t.m_v;
+ return ar;
+ }
+
+ template <> line_cap_t get_value<line_cap_t>(string const & s)
+ {
+ int v = line_cap_t::round;
+ if (s == "round") { /*initialized*/ }
+ else if (s == "butt") v = line_cap_t::butt;
+ else if (s == "square") v = line_cap_t::square;
+ else
+ {
+ ASSERT ( !"check stroke-linecap values", (s) );
+ }
+ return line_cap_t(v);
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // pattern_url_t
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct pattern_url_t : public color_t
+ {
+ string m_pattern;
+
+ pattern_url_t() : color_t(color_t::none) {}
+ pattern_url_t(string const & s) : color_t(color_t::none), m_pattern(s) {}
+ pattern_url_t(color_t const & r) : color_t(r) {}
+
+ bool operator==(pattern_url_t const & r) const
+ {
+ return (m_v == r.m_v) && (m_pattern == r.m_pattern);
+ }
+ };
+
+ template <class TArchive> TArchive & operator << (TArchive & ar, pattern_url_t const & t)
+ {
+ ar << t.m_v << t.m_pattern;
+ return ar;
+ }
+ template <class TArchive> TArchive & operator >> (TArchive & ar, pattern_url_t & t)
+ {
+ ar >> t.m_v >> t.m_pattern;
+ return ar;
+ }
+
+ template <> pattern_url_t get_value<pattern_url_t>(string const & s)
+ {
+ if (s[0] == 'u')
+ {
+ /// @todo make fill pattern by symbol
+ return pattern_url_t(s);
+ }
+ else
+ return get_value<color_t>(s);
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // position_t
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct position_t
+ {
+ enum { center = 0 };
+ int32_t m_v;
+
+ position_t(int32_t v = center) : m_v(v) {}
+ bool operator==(position_t const & r) const { return m_v == r.m_v; }
+ };
+
+ template <class TArchive> TArchive & operator << (TArchive & ar, position_t const & t)
+ {
+ ar << t.m_v;
+ return ar;
+ }
+ template <class TArchive> TArchive & operator >> (TArchive & ar, position_t & t)
+ {
+ ar >> t.m_v;
+ return ar;
+ }
+
+ template <> position_t get_value<position_t>(string const & s)
+ {
+ int v = position_t::center;
+ if (s == "center") { /*initialized*/ }
+ else
+ {
+ ASSERT ( !"check symbol position values", (s) );
+ }
+
+ return position_t(v);
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // txt_anchor_t
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct txt_anchor_t
+ {
+ enum { start, middle, end };
+ int32_t m_v;
+
+ txt_anchor_t(int32_t v = start) : m_v(v) {}
+ bool operator==(txt_anchor_t const & r) const { return m_v == r.m_v; }
+ };
+
+ template <class TArchive> TArchive & operator << (TArchive & ar, txt_anchor_t const & t)
+ {
+ ar << t.m_v;
+ return ar;
+ }
+ template <class TArchive> TArchive & operator >> (TArchive & ar, txt_anchor_t & t)
+ {
+ ar >> t.m_v;
+ return ar;
+ }
+
+ template <> txt_anchor_t get_value<txt_anchor_t>(string const & s)
+ {
+ int v = txt_anchor_t::start;
+ if (s == "start") { /*initialized*/ }
+ else if (s == "middle") v = txt_anchor_t::middle;
+ else if (s == "end") v = txt_anchor_t::end;
+ else
+ {
+ ASSERT ( !"check text-anchor values", (s) );
+ }
+
+ return txt_anchor_t(v);
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // font_family_t
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct font_family_t
+ {
+ string m_v;
+ font_family_t() : m_v("DejaVu Sans") {}
+ bool operator==(font_family_t const & r) const { return m_v == r.m_v; }
+ };
+
+ template <class TArchive> TArchive & operator << (TArchive & ar, font_family_t const & t)
+ {
+ ar << t.m_v;
+ return ar;
+ }
+ template <class TArchive> TArchive & operator >> (TArchive & ar, font_family_t & t)
+ {
+ ar >> t.m_v;
+ return ar;
+ }
+
+ template <> font_family_t get_value<font_family_t>(string const & /*s*/)
+ {
+ /// @todo process font (example: "DejaVu Sans",sans-serif)
+ return font_family_t();
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // percent_t
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct percent_t
+ {
+ int32_t m_v;
+ percent_t(int32_t v = 0) : m_v(v) {}
+ bool operator==(percent_t const & r) const { return m_v == r.m_v; }
+ };
+
+ template <class TArchive> TArchive & operator << (TArchive & ar, percent_t const & t)
+ {
+ ar << t.m_v;
+ return ar;
+ }
+ template <class TArchive> TArchive & operator >> (TArchive & ar, percent_t & t)
+ {
+ ar >> t.m_v;
+ return ar;
+ }
+
+ template <> percent_t get_value<percent_t>(string const & s)
+ {
+ size_t i = s.find_first_of('%');
+ if (i == string::npos)
+ {
+ ASSERT ( !"percent string has no % mark", (s) );
+ i = s.size();
+ }
+
+ return percent_t(atoi(s.substr(0, i).c_str()));
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // LineRule
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct LineRule : public BaseRule
+ {
+ tuple<bool, double, double, double,
+ pattern_url_t, px_metric_t, dash_array_t, line_cap_t, double, px_metric_t> m_params;
+
+ LineRule()
+ : m_params(make_tuple(false, 1.0, 0.0, std::numeric_limits<double>::max(),
+ pattern_url_t(), -1.0, dash_array_t(), line_cap_t(), 1.0,
+ 0.0))
+ {}
+
+ virtual bool IsEqual(BaseRule const * p) const { return is_equal_rules(this, p); }
+ virtual void Read(FileReaderStream & ar) { read_rules(ar, this); }
+ virtual void Write(FileWriterStream & ar) const { write_rules(ar, this); }
+
+ virtual int GetColor() const { return m_params.get<4>().m_v; }
+ virtual unsigned char GetAlpha() const { return alpha_opacity(m_params.get<8>()); }
+ virtual double GetWidth() const
+ {
+ double w = m_params.get<5>().m_v;
+ if (w == -1) return 0.0;
+
+ if (m_params.get<0>())
+ {
+ double minw = m_params.get<2>();
+ double maxw = m_params.get<3>();
+
+ if (w < minw) w = minw;
+ if (w > maxw) w = maxw;
+ w *= m_params.get<1>();
+ }
+ return w;
+ }
+ virtual void GetPattern(vector<double> & p, double & offset) const
+ {
+ offset = m_params.get<9>().m_v;
+ p = m_params.get<6>().m_v;
+ }
+
+ static string arrKeys[10];
+ };
+ string LineRule::arrKeys[] = {
+ // If set to yes, the line is drawn with a width reflecting the width=* value,
+ // provided the way has the width tag.
+ "honor-width",
+ // Scale factor from the width tag value (meters) to the pixel width in the SVG.
+ "width-scale-factor",
+ // Specify the minimum and the maximum width. If the way doesn't have the width tag,
+ // the line is drawn with the width specified by CSS
+ "minimum-width", "maximum-width",
+ // The colour of the line.
+ "stroke",
+ // The width of the line.
+ "stroke-width",
+ // Specifies the line style.
+ "stroke-dasharray",
+ // How to draw the line ends. Choose one of round, butt or square.
+ "stroke-linecap",
+ // The opacity of the line. The value ranges from 0.0 (completely invisible) to
+ // 1.0 (completely opaque).
+ // The default is 1.0.
+ "stroke-opacity",
+
+ // undocumented
+ "stroke-dashoffset"
+ };
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // AreaRule
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct AreaRule : public BaseRule
+ {
+ tuple<pattern_url_t, double, pattern_url_t, px_metric_t, double> m_params;
+
+ AreaRule() : m_params(make_tuple(pattern_url_t(), 1.0, pattern_url_t(), 1.0, 1.0)) {}
+
+ virtual bool IsEqual(BaseRule const * p) const { return is_equal_rules(this, p); }
+ virtual void Read(FileReaderStream & ar) { read_rules(ar, this); }
+ virtual void Write(FileWriterStream & ar) const { write_rules(ar, this); }
+
+ virtual int GetFillColor() const { return m_params.get<0>().m_v; }
+ virtual int GetColor() const { return m_params.get<2>().m_v; }
+ virtual unsigned char GetAlpha() const { return alpha_opacity(m_params.get<1>()); }
+ virtual double GetWidth() const { return m_params.get<3>().m_v; }
+
+ static string arrKeys[5];
+ };
+ string AreaRule::arrKeys[] =
+ {
+ "fill", "fill-opacity", "stroke", "stroke-width", "stroke-opacity"
+ };
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // SymbolRule
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct SymbolRule : public BaseRule
+ {
+ tuple<string, string, position_t> m_params;
+
+ virtual bool IsEqual(BaseRule const * p) const { return is_equal_rules(this, p); }
+ virtual void Read(FileReaderStream & ar) { read_rules(ar, this); }
+ virtual void Write(FileWriterStream & ar) const { write_rules(ar, this); }
+
+ virtual void GetSymbol(string & s) const
+ {
+ s = m_params.get<0>();
+ if (s.empty())
+ {
+ string const & ss = m_params.get<1>();
+ if (!ss.empty()) s = ss.substr(1, ss.size()-1);
+ }
+ }
+
+ static string arrKeys[3];
+ };
+ // "width", "height", "transform"
+ string SymbolRule::arrKeys[] = { "ref", "xlink:href", "position" };
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // CaptionRule
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct CaptionRule : public BaseRule
+ {
+ tuple<string, px_metric_t, px_metric_t, txt_anchor_t,
+ font_family_t, px_metric_t, color_t, double, color_t> m_params;
+
+ CaptionRule()
+ : m_params(make_tuple("", 0, 0, txt_anchor_t(),
+ font_family_t(), 2.0, color_t(), 1.0, color_t::none))
+ {}
+
+ virtual bool IsEqual(BaseRule const * p) const { return is_equal_rules(this, p); }
+ virtual void Read(FileReaderStream & ar) { read_rules(ar, this); }
+ virtual void Write(FileWriterStream & ar) const { write_rules(ar, this); }
+
+ virtual double GetTextHeight() const { return m_params.get<5>().m_v; }
+
+ static string arrKeys[9];
+ };
+ string CaptionRule::arrKeys[] = {
+ // It specifies the key of the text instruction (e.g. k="name").
+ "k",
+ // Translational length on x or y axis from the center point.
+ "dx", "dy",
+ // It specifies the anchor point of the text. Choose one of start, middle or end.
+ // The default value is start.
+ // It has the same meaning as the svg:text-anchor property.
+ "text-anchor",
+
+ "font-family", // The font family of the text. (ex. serif, "DejaVu Sans")
+ "font-size", // The size of the font.
+ "fill", // The colour of the text.
+ "fill-opacity", // The opacity of the text. The value takes from 0.0 (completely transparent)
+ // to 1.0 (completely overdrawing). The default is 1.0 .
+ "stroke" // The colour of the font outline. Usually it should be none.
+ };
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // CircleRule
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct CircleRule : public BaseRule
+ {
+ tuple<px_metric_t, color_t, double, color_t, px_metric_t, double> m_params;
+
+ CircleRule() : m_params(make_tuple(1, color_t(), 1.0, color_t::none, 1.0, 1.0)) {}
+
+ virtual bool IsEqual(BaseRule const * p) const { return is_equal_rules(this, p); }
+ virtual void Read(FileReaderStream & ar) { read_rules(ar, this); }
+ virtual void Write(FileWriterStream & ar) const { write_rules(ar, this); }
+
+ static string arrKeys[6];
+ };
+ string CircleRule::arrKeys[] = {
+ "r", // The radius of the circle.
+ "fill", // The colour of the filling.
+ "fill-opacity", // The opacity of the filling. The value ranges from 0.0 (completely transparent)
+ // to 1.0 (completely opaque). The default is 1.0.
+ "stroke", // The colour of the outline. If you don't want to draw the outline,
+ // set it to none.
+ "stroke-width", // The width of the outline.
+ "stroke-opacity" // The opacity of the outline.
+ };
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // PathTextRule
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct PathTextRule : public BaseRule
+ {
+ tuple<string, px_metric_t, px_metric_t, txt_anchor_t, percent_t, /*bool,*/
+ font_family_t, px_metric_t, color_t, double, color_t> m_params;
+
+ PathTextRule()
+ : m_params(make_tuple("", 0, 0, txt_anchor_t(), 0, /*true,*/
+ font_family_t(), 2.0, color_t(), 1.0, color_t::none))
+ {}
+
+ virtual bool IsEqual(BaseRule const * p) const { return is_equal_rules(this, p); }
+ virtual void Read(FileReaderStream & ar) { read_rules(ar, this); }
+ virtual void Write(FileWriterStream & ar) const { write_rules(ar, this); }
+
+ virtual double GetTextHeight() const { return m_params.get<6>().m_v; }
+
+ static string arrKeys[10];
+ };
+ string PathTextRule::arrKeys[] = {
+ // It specifies the key of the text instruction (e.g. k="name").
+ "k",
+ // Translational length on y axis from the center line of the way.
+ // Usually this is used for drawing ref=*.
+ "dx", "dy",
+ // It specifies the anchor point of the text. Choose one of start, middle or end.
+ // The default value is start. It has the same meaning as the svg:text-anchor property.
+ "text-anchor",
+ // It specifies the anchor point on the path. The value is given as a percentage.
+ // The range is from 0% (start of the path) to 100%
+ // (end of the path). The default value is 0%.
+ // It has the same meaning as <svg:textPath startOffset="">.
+ "startOffset",
+ // This works only on or/p now
+ //"avoid-duplicate",
+
+ "font-family", // The font family of the text. (ex. serif, "DejaVu Sans")
+ "font-size", // The size of the font.
+ "fill", // The colour of the text.
+ "fill-opacity", // The opacity of the text. The value takes from 0.0 (completely transparent)
+ // to 1.0 (completely overdrawing). The default is 1.0 .
+ "stroke" // The colour of the font outline. Usually it should be none.
+ };
+
+ ////////////////////////////////////////////////////////////////////////////////////////
+ // WayMarkerRule
+ ////////////////////////////////////////////////////////////////////////////////////////
+ struct WayMarkerRule : public BaseRule
+ {
+ tuple<string, color_t, px_metric_t, line_cap_t, double> m_params;
+
+ WayMarkerRule() : m_params(make_tuple("", color_t(), 1.0, line_cap_t(), 1.0)) {}
+
+ virtual bool IsEqual(BaseRule const * p) const { return is_equal_rules(this, p); }
+ virtual void Read(FileReaderStream & ar) { read_rules(ar, this); }
+ virtual void Write(FileWriterStream & ar) const { write_rules(ar, this); }
+
+ static string arrKeys[5];
+ };
+ string WayMarkerRule::arrKeys[] = {
+ // It specifies the key of the way that passes through the node (e.g. k="highway").
+ "k",
+ "stroke", // The colour of the line.
+ "stroke-width", // The width of the line.
+ "stroke-linecap", // How to draw the terminal. Choice one from round, butt or square.
+ "stroke-opacity" // The opacity of the line. The value takes from 0.0 (completely invisible)
+ // to 1.0 (completely overdrawing). The default is 1.0 .
+ };
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// BaseRule implementation
+////////////////////////////////////////////////////////////////////////////////////////////
+void BaseRule::ReadBase(FileReaderStream & ar)
+{
+ ar >> m_class >> m_type;
+}
+
+void BaseRule::WriteBase(FileWriterStream & ar) const
+{
+ ar << m_class << m_type;
+}
+
+namespace
+{
+ bool find_sub_str(string const & s, char const * p)
+ {
+ size_t i = s.find(p);
+ return (i != string::npos);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// RulesHolder implementation
+////////////////////////////////////////////////////////////////////////////////////////////
+RulesHolder::~RulesHolder()
+{
+ Clean();
+}
+
+void RulesHolder::Clean()
+{
+ for (size_t i = 0; i < m_container.size(); ++i)
+ {
+ rule_vec_t & v = m_container[i];
+ for (size_t j = 0; j < v.size(); ++j)
+ delete v[j];
+ v.clear();
+ }
+
+ m_rules.clear();
+}
+
+void RulesHolder::SetParseFile(char const * fPath, int scale)
+{
+ m_file.clear();
+ m_currScale = scale;
+
+#ifndef OMIM_OS_BADA
+ ifstream file(fPath);
+ if (!file.is_open()) return;
+
+ vector<char> v(500);
+ bool doPush = false;
+
+ // push the <style>...</style> part of the file into the string buffer
+ while (!file.eof())
+ {
+ file.getline(&v[0], v.size());
+
+ if (!doPush)
+ {
+ if (find_sub_str(&v[0], "<style"))
+ doPush = true;
+ }
+
+ if (doPush)
+ {
+ m_file += ' '; // add delimiter between strings
+ m_file += &v[0];
+
+ if (find_sub_str(&v[0], "</style>"))
+ break;
+ }
+ }
+#else
+ ASSERT ( false, ("SetParseFile uses only in indexer_tool") );
+#endif
+}
+
+/// Parse (<key>:<value>;) pairs for the specified 'class' attribute object in the file buffer.
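+/// e.g. (illustrative) for objClass == "residential" it scans the buffer for
+/// ".residential { stroke:#ffffff; stroke-width:2px; }" and fills attrs with those pairs.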
+void RulesHolder::PushAttributes(string objClass, attrs_map_t & attrs)
+{
+ objClass = '.' + objClass;
+
+ size_t objSz = objClass.size();
+ size_t buffSz = m_file.size();
+
+ // find 'objClass' string in buffer
+ size_t i = 0;
+ while (i < buffSz)
+ {
+ i = m_file.find(objClass, i);
+ if (i == string::npos) return;
+
+ i += objSz;
+ if (i >= buffSz) return;
+
+ if (m_file[i] == ' ' || m_file[i] == '\t' || m_file[i] == '{') break;
+ }
+
+ // parse attributes in {...} scope
+ while (++i < buffSz)
+ {
+ i = m_file.find_first_not_of("{ \t", i);
+ if (i == string::npos || m_file[i] == '}') return;
+
+ size_t e = m_file.find_first_of(':', i);
+ if (e == string::npos) return;
+
+ string const k = m_file.substr(i, e-i);
+
+ i = m_file.find_first_not_of(" \t", e + 1);
+ if (i == string::npos) return;
+
+ e = m_file.find_first_of(';', i);
+ if (e == string::npos) return;
+
+ string const v = m_file.substr(i, e-i);
+
+ // If a parameter is duplicated, assume that the latest rule
+ // is better than the previous one.
+ attrs[k] = v;
+
+ i = e;
+ }
+}
+
+namespace
+{
+ char const * arrClassTags[] = { "class", "mask-class" };
+}
+
+void RulesHolder::CreateRules(string const & name, uint8_t type, attrs_map_t const & attrs, vector<Key> & v)
+{
+ bool added = false;
+
+ for (size_t i = 0; i < ARRAY_SIZE(arrClassTags); ++i)
+ {
+ attrs_map_t::const_iterator it = attrs.find(arrClassTags[i]);
+ if (it != attrs.end())
+ {
+ added = true;
+ v.push_back(CreateRuleImpl1(name, type, it->second, attrs, i == 1));
+ }
+ }
+
+ if (!added)
+ v.push_back(CreateRuleImpl2(name, type, "", attrs));
+}
+
+Key RulesHolder::CreateRuleImpl1(string const & name,
+ uint8_t type,
+ string const & clValue,
+ attrs_map_t const & attrs,
+ bool isMask)
+{
+#ifdef DEBUG
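+ // Debug-only no-op: the ASSERT(true) below appears to serve as a convenient breakpoint spot.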
+ if (clValue.find("highway-pedestrian-area") != string::npos)
+ {
+ ASSERT (true, ());
+ }
+#endif
+
+ attrs_map_t a;
+ utils::TokenizeString(clValue, " \t", bind(&RulesHolder::PushAttributes, this, _1, ref(a)));
+
+ for (attrs_map_t::const_iterator i = attrs.begin(); i != attrs.end(); ++i)
+ if (!utils::IsInArray(arrClassTags, i->first))
+ a[i->first] = i->second;
+
+ // background color (imitation of masks in tunnel patterns)
+ if (isMask) a["stroke"] = "#ffffff";
+
+ // patch the tunnel draw rules -> make line draw rule
+ if (name == "tunnel")
+ {
+ attrs_map_t::iterator i = a.find("width");
+ if (i != a.end())
+ {
+ a["stroke-width"] = i->second;
+ a.erase(i);
+ }
+
+ return CreateRuleImpl2("line", type, clValue, a);
+ }
+ else
+ return CreateRuleImpl2(name, type, clValue, a);
+}
+
+Key RulesHolder::CreateRuleImpl2(string const & name,
+ uint8_t rType,
+ string const & clName,
+ attrs_map_t const & attrs)
+{
+ BaseRule * pRule = 0;
+ int type = -1;
+
+ if (name == "line")
+ {
+ pRule = create_rule<LineRule>(attrs);
+ type = line;
+ }
+ else if (name == "area")
+ {
+ pRule = create_rule<AreaRule>(attrs);
+ type = area;
+ }
+ else if (name == "symbol")
+ {
+ pRule = create_rule<SymbolRule>(attrs);
+ type = symbol;
+ }
+ else if (name == "caption" || name == "text")
+ {
+ pRule = create_rule<CaptionRule>(attrs);
+ type = caption;
+ }
+ else if (name == "circle")
+ {
+ pRule = create_rule<CircleRule>(attrs);
+ type = circle;
+ }
+ else if (name == "pathText")
+ {
+ pRule = create_rule<PathTextRule>(attrs);
+ type = pathtext;
+ }
+ else if (name == "wayMarker")
+ {
+ pRule = new WayMarkerRule();
+ type = waymarker;
+ }
+
+ if (pRule)
+ {
+ pRule->SetType(rType);
+
+ // find existing equal rule for scale and type
+ vector<uint32_t> & rInd = m_rules[m_currScale][type];
+ vector<BaseRule*> & rCont = m_container[type];
+ size_t ind = 0;
+ for (; ind < rInd.size(); ++ind)
+ {
+ ASSERT ( rInd[ind] < rCont.size(), (rInd[ind], rCont.size()) );
+ if (rCont[rInd[ind]]->IsEqual(pRule))
+ break;
+ }
+
+ if (ind == rInd.size())
+ {
+ // add new rule
+ pRule->SetClassName(clName);
+
+ rCont.push_back(pRule);
+ rInd.push_back(rCont.size()-1);
+ ind = rInd.size()-1;
+ }
+ else
+ delete pRule;
+
+ return Key(m_currScale, type, ind);
+ }
+ else
+ {
+ ASSERT ( !"check possible rules", (name) );
+ return Key();
+ }
+}
+
+BaseRule const * RulesHolder::Find(Key const & k) const
+{
+ rules_map_t::const_iterator i = m_rules.find(k.m_scale);
+ if (i == m_rules.end()) return 0;
+
+ vector<uint32_t> const & v = (i->second)[k.m_type];
+ if (k.m_index < v.size())
+ return m_container[k.m_type][v[k.m_index]];
+ else
+ return 0;
+}
+
+FileWriterStream & operator << (FileWriterStream & ar, BaseRule * p)
+{
+ p->Write(ar);
+ return ar;
+}
+
+void do_load(FileReaderStream & ar, size_t ind, BaseRule * & p)
+{
+ switch (ind)
+ {
+ case line: p = new LineRule(); break;
+ case area: p = new AreaRule(); break;
+ case symbol: p = new SymbolRule(); break;
+ case caption: p = new CaptionRule(); break;
+ case circle: p = new CircleRule(); break;
+ case pathtext: p = new PathTextRule(); break;
+ case waymarker: p = new WayMarkerRule(); break;
+ default:
+ ASSERT ( !"Incorrect draw rule type for reading.", (ind) );
+ throw std::out_of_range("Bad draw rule index");
+ }
+
+ p->Read(ar);
+}
+
+void RulesHolder::Read(FileReaderStream & s)
+{
+ Clean();
+
+ serial::do_load(s, m_container);
+ s >> m_rules;
+}
+
+void RulesHolder::Write(FileWriterStream & s)
+{
+ s << m_container << m_rules;
+}
+
+void WriteRules(char const * fPath)
+{
+ FileWriterStream file(fPath);
+ rules().Write(file);
+}
+
+void ReadRules(char const * fPath)
+{
+ FileReaderStream file(fPath);
+ rules().Read(file);
+}
+
+RulesHolder & rules()
+{
+ static RulesHolder holder;
+ return holder;
+}
+
+}
diff --git a/indexer/drawing_rules.hpp b/indexer/drawing_rules.hpp
new file mode 100644
index 0000000000..aa962b245b
--- /dev/null
+++ b/indexer/drawing_rules.hpp
@@ -0,0 +1,121 @@
+#pragma once
+#include "drawing_rule_def.hpp"
+
+#include "../base/base.hpp"
+
+#include "../std/fstream.hpp"
+#include "../std/map.hpp"
+#include "../std/vector.hpp"
+#include "../std/array.hpp"
+#include "../std/string.hpp"
+
+class FileReaderStream;
+class FileWriterStream;
+
+namespace drule
+{
+ typedef map<string, string> attrs_map_t;
+
+ class BaseRule
+ {
+ string m_class;
+ char m_type;
+
+ mutable uint32_t m_id;
+
+ public:
+ static uint32_t const empty_id = 0xFFFFFFFF;
+
+ BaseRule() : m_id(empty_id) {}
+
+ virtual ~BaseRule() {}
+
+ uint32_t GetID() const { return m_id; }
+ void SetID(uint32_t id) const { m_id = id; }
+ void MakeEmptyID() { m_id = empty_id; }
+
+ void SetClassName(string const & cl) { m_class = cl; }
+ void SetType(char type) { m_type = type; }
+
+ char GetType() const { return m_type; }
+
+ bool IsEqualBase(BaseRule const * p) const { return (m_type == p->m_type); }
+ void ReadBase(FileReaderStream & ar);
+ void WriteBase(FileWriterStream & ar) const;
+
+ virtual bool IsEqual(BaseRule const * p) const = 0;
+ virtual void Read(FileReaderStream & ar) = 0;
+ virtual void Write(FileWriterStream & ar) const = 0;
+
+ /// @name These functions tell us about the type of the rule.
+ //@{
+ virtual int GetColor() const { return -1; } ///< path "line" color
+ virtual int GetFillColor()const { return -1; } ///< fill "area" color
+ virtual double GetTextHeight() const { return -1.0; } ///< text height of "caption"
+ //@}
+
+ virtual unsigned char GetAlpha () const { return 255; }
+ virtual double GetWidth() const { return -1; }
+ virtual void GetPattern(vector<double> &, double &) const {}
+ virtual void GetSymbol(string &) const {}
+ };
+
+ class RulesHolder
+ {
+ // container of rules by type
+ typedef vector<BaseRule*> rule_vec_t;
+ array<rule_vec_t, count_of_rules> m_container;
+
+ /// scale -> array of rules by type -> index of rule in m_container
+ typedef map<int32_t, array<vector<uint32_t>, count_of_rules> > rules_map_t;
+ rules_map_t m_rules;
+
+ /// @name Temporary data for looking up rule parameters by the 'class' attribute
+ //@{
+ string m_file;
+ int m_currScale;
+ //@}
+
+ void PushAttributes(string objClass, attrs_map_t & attrs);
+
+ Key CreateRuleImpl1(string const & name, uint8_t type, string const & clValue, attrs_map_t const & attrs, bool isMask);
+ Key CreateRuleImpl2(string const & name, uint8_t type, string const & clName, attrs_map_t const & attrs);
+
+ public:
+ ~RulesHolder();
+
+ void Clean();
+
+ void SetParseFile(char const * fPath, int scale);
+
+ void CreateRules(string const & name, uint8_t type, attrs_map_t const & attrs, vector<Key> & v);
+
+ BaseRule const * Find(Key const & k) const;
+
+ int GetScale() const { return m_currScale; }
+
+ void Read(FileReaderStream & s);
+ void Write(FileWriterStream & s);
+
+ template <class ToDo> void ForEachRule(ToDo toDo)
+ {
+ for (rules_map_t::const_iterator i = m_rules.begin(); i != m_rules.end(); ++i)
+ {
+ for (int j = 0; j < count_of_rules; ++j)
+ {
+ vector<uint32_t> const & v = i->second[j];
+ for (size_t k = 0; k < v.size(); ++k)
+ {
+ // scale, type, rule
+ toDo(i->first, j, m_container[j][v[k]]);
+ }
+ }
+ }
+ }
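+
+ // Illustrative usage: any functor exposing
+ //   void operator()(int scale, int ruleType, BaseRule const * pRule)
+ // can be passed to ForEachRule to visit every stored rule.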
+ };
+
+ void WriteRules(char const * fPath);
+ void ReadRules(char const * fPath);
+
+ RulesHolder & rules();
+}
diff --git a/indexer/feature.cpp b/indexer/feature.cpp
new file mode 100644
index 0000000000..85e515d97a
--- /dev/null
+++ b/indexer/feature.cpp
@@ -0,0 +1,292 @@
+#include "feature.hpp"
+#include "../geometry/rect2d.hpp"
+#include "../coding/byte_stream.hpp"
+#include "../coding/reader.hpp"
+#include "../coding/varint.hpp"
+#include "../coding/write_to_sink.hpp"
+#include "../base/logging.hpp"
+#include "../std/bind.hpp"
+#include "../std/map.hpp"
+
+namespace pts
+{
+ inline m2::PointD ToPoint(int64_t i)
+ {
+ CoordPointT const pt = Int64ToPoint(i);
+ return m2::PointD(pt.first, pt.second);
+ }
+ inline int64_t ToId(CoordPointT const & p)
+ {
+ return PointToInt64(p.first, p.second);
+ }
+}
+
+FeatureBuilder::FeatureBuilder() : m_Layer(0)
+{
+}
+
+bool FeatureBuilder::IsGeometryClosed() const
+{
+ return !m_Geometry.empty() && m_Geometry.front() == m_Geometry.back();
+}
+
+void FeatureBuilder::AddPoint(m2::PointD const & p)
+{
+ m_Geometry.push_back(pts::ToId(CoordPointT(p.x, p.y)));
+}
+
+void FeatureBuilder::AddTriangle(m2::PointD const & a, m2::PointD const & b, m2::PointD const & c)
+{
+ m_Triangles.push_back(pts::ToId(CoordPointT(a.x, a.y)));
+ m_Triangles.push_back(pts::ToId(CoordPointT(b.x, b.y)));
+ m_Triangles.push_back(pts::ToId(CoordPointT(c.x, c.y)));
+}
+
+void FeatureBuilder::AddName(string const & name)
+{
+ CHECK_EQUAL(m_Name, "", (name));
+ m_Name = name;
+}
+
+void FeatureBuilder::AddLayer(int32_t layer)
+{
+ CHECK_EQUAL(m_Layer, 0, (layer));
+
+ int const bound = 10;
+ if (layer < -bound) layer = -bound;
+ else if (layer > bound) layer = bound;
+ m_Layer = layer;
+}
+
+bool FeatureBuilder::operator == (FeatureBuilder const & fb) const
+{
+ return
+ m_Types == fb.m_Types &&
+ m_Layer == fb.m_Layer &&
+ m_Name == fb.m_Name &&
+ m_Geometry == fb.m_Geometry &&
+ m_Triangles == fb.m_Triangles;
+}
+
+void FeatureBuilder::Serialize(vector<char> & data) const
+{
+ CHECK(!m_Geometry.empty(), ());
+ CHECK(m_Geometry.size() > 1 || m_Triangles.empty(), ());
+ CHECK_LESS(m_Types.size(), 16, ());
+
+ data.clear();
+ PushBackByteSink<vector<char> > sink(data);
+
+ // Serializing header.
+ uint8_t header = static_cast<uint8_t>(m_Types.size());
+ if (m_Layer != 0)
+ header |= Feature::HEADER_HAS_LAYER;
+ if (m_Geometry.size() > 1)
+ {
+ if (m_Triangles.empty())
+ header |= Feature::HEADER_IS_LINE;
+ else
+ header |= Feature::HEADER_IS_AREA;
+ }
+ if (!m_Name.empty())
+ header |= Feature::HEADER_HAS_NAME;
+ WriteToSink(sink, header);
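+ // Resulting header byte layout (cf. the Feature::HEADER_* flags): bits 0-3 hold the type
+ // count, bit 4 marks a line, bit 5 an area, bit 6 a name and bit 7 a layer.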
+
+ // Serializing types.
+ {
+ for (size_t i = 0; i < m_Types.size(); ++i)
+ WriteVarUint(sink, m_Types[i]);
+ }
+
+ // Serializing layer.
+ if (m_Layer != 0)
+ WriteVarInt(sink, m_Layer);
+
+ // Serializing geometry.
+ if (m_Geometry.size() == 1)
+ {
+ WriteVarInt(sink, m_Geometry[0]);
+ }
+ else
+ {
+ WriteVarUint(sink, m_Geometry.size() - 1);
+ for (size_t i = 0; i < m_Geometry.size(); ++i)
+ WriteVarInt(sink, i == 0 ? m_Geometry[0] : m_Geometry[i] - m_Geometry[i-1]);
+ }
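+ // Subsequent points are stored as deltas from the previous point id,
+ // presumably to keep the varints small.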
+
+ // Serializing triangles.
+ if (!m_Triangles.empty())
+ {
+ ASSERT_EQUAL(m_Triangles.size() % 3, 0, (m_Triangles.size()));
+ WriteVarUint(sink, m_Triangles.size() / 3 - 1);
+ for (size_t i = 0; i < m_Triangles.size(); ++i)
+ WriteVarInt(sink, i == 0 ? m_Triangles[i] : (m_Triangles[i] - m_Triangles[i-1]));
+ }
+
+ // Serializing name.
+ if (!m_Name.empty())
+ {
+ WriteVarUint(sink, m_Name.size() - 1);
+ sink.Write(&m_Name[0], m_Name.size());
+ }
+
+
+#ifdef DEBUG
+ vector<char> data1 = data;
+ Feature feature;
+ feature.DeserializeAndParse(data1);
+ FeatureBuilder const fb = feature.GetFeatureBuilder();
+ ASSERT_EQUAL(m_Types, fb.m_Types, (feature.DebugString()));
+ ASSERT_EQUAL(m_Layer, fb.m_Layer, (feature.DebugString()));
+ ASSERT_EQUAL(m_Geometry, fb.m_Geometry, (feature.DebugString()));
+ ASSERT_EQUAL(m_Triangles, fb.m_Triangles, (feature.DebugString()));
+ ASSERT_EQUAL(m_Name, fb.m_Name, (feature.DebugString()));
+ ASSERT(*this == fb, (feature.DebugString()));
+#endif
+}
+
+Feature FeatureBuilder::GetFeature() const
+{
+ vector<char> data;
+ Serialize(data);
+ Feature feature;
+ feature.Deserialize(data);
+ return feature;
+}
+
+Feature::Feature(vector<char> & data, uint32_t offset)
+{
+ Deserialize(data, offset);
+}
+
+void Feature::Deserialize(vector<char> & data, uint32_t offset)
+{
+ m_Offset = offset;
+ m_Data.swap(data);
+
+ m_LayerOffset = m_GeometryOffset = m_TrianglesOffset = m_NameOffset = 0;
+ m_bTypesParsed = m_bLayerParsed = m_bGeometryParsed = m_bTrianglesParsed = m_bNameParsed = false;
+ m_Layer = m_TriangleCount = 0;
+ m_Geometry.clear();
+ m_Triangles.clear();
+ m_Name.clear();
+ m_LimitRect = m2::RectD();
+}
+
+void Feature::ParseTypes() const
+{
+ ASSERT(!m_bTypesParsed, ());
+ ArrayByteSource source(DataPtr() + 1);
+ for (size_t i = 0; i < GetTypesCount(); ++i)
+ m_Types[i] = ReadVarUint<uint32_t>(source);
+ m_LayerOffset = static_cast<uint32_t>(static_cast<char const *>(source.Ptr()) - DataPtr());
+ m_bTypesParsed = true;
+}
+
+void Feature::ParseLayer() const
+{
+ ASSERT(!m_bLayerParsed, ());
+ if (!m_bTypesParsed)
+ ParseTypes();
+
+ ArrayByteSource source(DataPtr() + m_LayerOffset);
+ if (Header() & HEADER_HAS_LAYER)
+ m_Layer = ReadVarInt<int32_t>(source);
+
+ m_GeometryOffset = static_cast<uint32_t>(static_cast<char const *>(source.Ptr()) - DataPtr());
+ m_bLayerParsed = true;
+}
+
+void Feature::ParseGeometry() const
+{
+ ASSERT(!m_bGeometryParsed, ());
+ if (!m_bLayerParsed)
+ ParseLayer();
+ ArrayByteSource source(DataPtr() + m_GeometryOffset);
+ uint32_t const geometrySize =
+ (GetFeatureType() == FEATURE_TYPE_POINT ? 1 : ReadVarUint<uint32_t>(source) + 1);
+ m_Geometry.resize(geometrySize);
+ int64_t id = 0;
+ for (size_t i = 0; i < geometrySize; ++i)
+ m_LimitRect.Add(m_Geometry[i] = pts::ToPoint(id += ReadVarInt<int64_t>(source)));
+ m_TrianglesOffset = static_cast<uint32_t>(static_cast<char const *>(source.Ptr()) - DataPtr());
+ m_bGeometryParsed = true;
+}
+
+void Feature::ParseTriangles() const
+{
+ ASSERT(!m_bTrianglesParsed, ());
+ if (!m_bGeometryParsed)
+ ParseGeometry();
+ ArrayByteSource source(DataPtr() + m_TrianglesOffset);
+ if (GetFeatureType() == FEATURE_TYPE_AREA)
+ {
+ m_TriangleCount = ReadVarUint<uint32_t>(source) + 1;
+ uint32_t const trianglePoints = m_TriangleCount * 3;
+ m_Triangles.resize(trianglePoints);
+ int64_t id = 0;
+ for (size_t i = 0; i < trianglePoints; ++i)
+ m_Triangles[i] = pts::ToPoint(id += ReadVarInt<int64_t>(source));
+ }
+ m_NameOffset = static_cast<uint32_t>(static_cast<char const *>(source.Ptr()) - DataPtr());
+ m_bTrianglesParsed = true;
+}
+
+void Feature::ParseName() const
+{
+ ASSERT(!m_bNameParsed, ());
+ if (!m_bTrianglesParsed)
+ ParseTriangles();
+ ArrayByteSource source(DataPtr() + m_NameOffset);
+ if (Header() & HEADER_HAS_NAME)
+ {
+ m_Name.resize(ReadVarUint<uint32_t>(source) + 1);
+ source.Read(&m_Name[0], m_Name.size());
+ }
+ m_bNameParsed = true;
+ ASSERT_EQUAL(static_cast<uint32_t>(static_cast<char const *>(source.Ptr()) - DataPtr()),
+ m_Data.size() - m_Offset, ());
+}
+
+void Feature::ParseAll() const
+{
+ if (!m_bNameParsed)
+ ParseName();
+}
+
+void Feature::DeserializeAndParse(vector<char> & data, uint32_t offset)
+{
+ Deserialize(data, offset);
+ ParseAll();
+}
+
+string Feature::DebugString() const
+{
+ ParseAll();
+ string res("Feature(");
+ res += "'" + m_Name + "' ";
+
+ for (size_t i = 0; i < GetTypesCount(); ++i)
+ res += "Type:" + debug_print(m_Types[i]) + " ";
+
+ res += "Layer:" + debug_print(m_Layer) + " ";
+ res += debug_print(m_Geometry) + " ";
+ res += debug_print(m_Triangles) + ")";
+ return res;
+}
+
+FeatureBuilder Feature::GetFeatureBuilder() const
+{
+ ParseAll();
+ FeatureBuilder fb;
+ fb.AddTypes(m_Types, m_Types + GetTypesCount());
+ fb.AddLayer(m_Layer);
+ for (size_t i = 0; i < m_Geometry.size(); ++i)
+ fb.AddPoint(m_Geometry[i]);
+ ASSERT_EQUAL(m_Triangles.size() % 3, 0, ());
+ uint32_t const triangleCount = m_Triangles.size() / 3;
+ for (size_t i = 0; i < triangleCount; ++i)
+ fb.AddTriangle(m_Triangles[3*i + 0], m_Triangles[3*i + 1], m_Triangles[3*i + 2]);
+ fb.AddName(m_Name);
+ return fb;
+}
diff --git a/indexer/feature.hpp b/indexer/feature.hpp
new file mode 100644
index 0000000000..7447fd5a02
--- /dev/null
+++ b/indexer/feature.hpp
@@ -0,0 +1,231 @@
+#pragma once
+
+#include "cell_id.hpp"
+
+#include "../geometry/point2d.hpp"
+#include "../geometry/rect2d.hpp"
+
+#include "../base/base.hpp"
+
+#include "../std/string.hpp"
+#include "../std/vector.hpp"
+#include "../std/iterator.hpp"
+#include "../std/algorithm.hpp"
+
+class Feature;
+
+class FeatureBuilder
+{
+public:
+ FeatureBuilder();
+ void AddName(string const & name);
+ void AddPoint(m2::PointD const & p);
+ void AddTriangle(m2::PointD const & a, m2::PointD const & b, m2::PointD const & c);
+
+ template <class TIter>
+ void AddTypes(TIter beg, TIter end)
+ {
+ // 15 is the maximum count of types (@see Feature::GetTypesCount()).
+ size_t const count = min(15, static_cast<int>(distance(beg, end)));
+ m_Types.assign(beg, beg + count);
+ }
+ void SetType(uint32_t type)
+ {
+ m_Types.clear();
+ m_Types.push_back(type);
+ }
+
+ void AddLayer(int32_t layer);
+
+ void Serialize(vector<char> & data) const;
+
+ // Returns corresponding feature. Does Serialize() and feature.Deserialize() internally.
+ Feature GetFeature() const;
+
+ bool IsGeometryClosed() const;
+ size_t GetPointsCount() const { return m_Geometry.size(); }
+
+ bool operator == (FeatureBuilder const &) const;
+
+private:
+ int32_t m_Layer;
+ string m_Name;
+ vector<uint32_t> m_Types;
+ vector<int64_t> m_Geometry;
+ vector<int64_t> m_Triangles;
+};
+
+class Feature
+{
+public:
+ enum FeatureType
+ {
+ FEATURE_TYPE_POINT = 0,
+ FEATURE_TYPE_LINE = 1,
+ FEATURE_TYPE_AREA = 2,
+ FEATURE_TYPE_UNKNOWN = 17
+ };
+
+ Feature() {}
+ Feature(vector<char> & data, uint32_t offset = 0);
+
+ void Deserialize(vector<char> & data, uint32_t offset = 0);
+ void DeserializeAndParse(vector<char> & data, uint32_t offset = 0);
+
+ string DebugString() const;
+
+ FeatureBuilder GetFeatureBuilder() const;
+
+ inline FeatureType GetFeatureType() const
+ {
+ ASSERT_NOT_EQUAL((Header() >> 4) & 3, 3, (DebugString()));
+ return static_cast<FeatureType>((Header() >> 4) & 3);
+ }
+
+ inline uint32_t GetTypesCount() const
+ {
+ return Header() & 0xF;
+ }
+
+ inline int32_t GetLayer() const
+ {
+ if (!(Header() & HEADER_HAS_LAYER))
+ return 0;
+ if (!m_bLayerParsed)
+ ParseLayer();
+ return m_Layer;
+ }
+
+ inline m2::RectD GetLimitRect() const
+ {
+ if (!m_bGeometryParsed)
+ ParseGeometry();
+ return m_LimitRect;
+ }
+
+ inline uint32_t GetGeometrySize() const
+ {
+ if (!m_bGeometryParsed)
+ ParseGeometry();
+ return m_Geometry.size();
+ }
+
+ inline uint32_t GetTriangleCount() const
+ {
+ if (!m_bTrianglesParsed)
+ ParseTriangles();
+ return m_TriangleCount;
+ }
+
+ inline string GetName() const
+ {
+ if (!(Header() & HEADER_HAS_NAME))
+ return string();
+ if (!m_bNameParsed)
+ ParseName();
+ return m_Name;
+ }
+
+ class GetTypesFn
+ {
+ public:
+ vector<uint32_t> m_types;
+
+ GetTypesFn() { m_types.reserve(16); }
+ void operator() (uint32_t t)
+ {
+ m_types.push_back(t);
+ }
+ };
+
+ template <typename FunctorT>
+ void ForEachTypeRef(FunctorT & f) const
+ {
+ if (!m_bTypesParsed)
+ ParseTypes();
+ uint32_t const typeCount = GetTypesCount();
+ for (size_t i = 0; i < typeCount; ++i)
+ f(m_Types[i]);
+ }
+
+ template <typename FunctorT>
+ void ForEachPointRef(FunctorT & f) const
+ {
+ if (!m_bGeometryParsed)
+ ParseGeometry();
+ for (size_t i = 0; i < m_Geometry.size(); ++i)
+ f(CoordPointT(m_Geometry[i].x, m_Geometry[i].y));
+ }
+
+ template <typename FunctorT>
+ void ForEachPoint(FunctorT f) const
+ {
+ ForEachPointRef(f);
+ }
+
+ template <typename FunctorT>
+ void ForEachTriangleRef(FunctorT & f) const
+ {
+ if (!m_bTrianglesParsed)
+ ParseTriangles();
+ for (size_t i = 0; i < m_Triangles.size();)
+ {
+ f(m_Triangles[i], m_Triangles[i+1], m_Triangles[i+2]);
+ i += 3;
+ }
+ }
+
+ template <typename FunctorT>
+ void ForEachTriangleExRef(FunctorT & f) const
+ {
+ f.StartPrimitive(m_Triangles.size());
+ ForEachTriangleRef(f);
+ f.EndPrimitive();
+ }
+
+ enum
+ {
+ HEADER_HAS_LAYER = 1U << 7,
+ HEADER_HAS_NAME = 1U << 6,
+ HEADER_IS_AREA = 1U << 5,
+ HEADER_IS_LINE = 1U << 4
+ };
+
+private:
+ vector<char> m_Data;
+ uint32_t m_Offset;
+
+ inline char const * DataPtr() const { return &m_Data[m_Offset]; }
+ inline uint8_t Header() const { return static_cast<uint8_t>(*DataPtr()); }
+
+ mutable uint32_t m_Types[16];
+ mutable int32_t m_Layer;
+ mutable vector<m2::PointD> m_Geometry;
+ mutable m2::RectD m_LimitRect;
+ mutable vector<m2::PointD> m_Triangles;
+ mutable uint32_t m_TriangleCount;
+ mutable string m_Name;
+
+ mutable uint32_t m_LayerOffset;
+ mutable uint32_t m_GeometryOffset;
+ mutable uint32_t m_TrianglesOffset;
+ mutable uint32_t m_NameOffset;
+
+ mutable bool m_bTypesParsed;
+ mutable bool m_bLayerParsed;
+ mutable bool m_bGeometryParsed;
+ mutable bool m_bTrianglesParsed;
+ mutable bool m_bNameParsed;
+
+ void ParseTypes() const;
+ void ParseLayer() const;
+ void ParseGeometry() const;
+ void ParseTriangles() const;
+ void ParseName() const;
+ void ParseAll() const;
+};
+
+inline string debug_print(Feature const & f)
+{
+ return f.DebugString();
+}
diff --git a/indexer/feature.pb.cc b/indexer/feature.pb.cc
new file mode 100644
index 0000000000..08c62725d5
--- /dev/null
+++ b/indexer/feature.pb.cc
@@ -0,0 +1,572 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+
+#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION
+#include "feature.pb.h"
+#include <google/protobuf/stubs/once.h>
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/wire_format_lite_inl.h>
+// @@protoc_insertion_point(includes)
+
+void protobuf_ShutdownFile_feature_2eproto() {
+ delete NameProto::default_instance_;
+ delete FeatureProto::default_instance_;
+}
+
+void protobuf_AddDesc_feature_2eproto() {
+ static bool already_here = false;
+ if (already_here) return;
+ already_here = true;
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+ NameProto::default_instance_ = new NameProto();
+ FeatureProto::default_instance_ = new FeatureProto();
+ NameProto::default_instance_->InitAsDefaultInstance();
+ FeatureProto::default_instance_->InitAsDefaultInstance();
+ ::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_feature_2eproto);
+}
+
+// Force AddDescriptors() to be called at static initialization time.
+struct StaticDescriptorInitializer_feature_2eproto {
+ StaticDescriptorInitializer_feature_2eproto() {
+ protobuf_AddDesc_feature_2eproto();
+ }
+} static_descriptor_initializer_feature_2eproto_;
+
+
+// ===================================================================
+
+const ::std::string NameProto::_default_text_;
+const ::std::string NameProto::_default_lang_;
+#ifndef _MSC_VER
+const int NameProto::kTextFieldNumber;
+const int NameProto::kLangFieldNumber;
+#endif // !_MSC_VER
+
+NameProto::NameProto()
+ : ::google::protobuf::MessageLite() {
+ SharedCtor();
+}
+
+void NameProto::InitAsDefaultInstance() {
+}
+
+NameProto::NameProto(const NameProto& from)
+ : ::google::protobuf::MessageLite() {
+ SharedCtor();
+ MergeFrom(from);
+}
+
+void NameProto::SharedCtor() {
+ _cached_size_ = 0;
+ text_ = const_cast< ::std::string*>(&_default_text_);
+ lang_ = const_cast< ::std::string*>(&_default_lang_);
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+NameProto::~NameProto() {
+ SharedDtor();
+}
+
+void NameProto::SharedDtor() {
+ if (text_ != &_default_text_) {
+ delete text_;
+ }
+ if (lang_ != &_default_lang_) {
+ delete lang_;
+ }
+ if (this != default_instance_) {
+ }
+}
+
+void NameProto::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const NameProto& NameProto::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_feature_2eproto(); return *default_instance_;
+}
+
+NameProto* NameProto::default_instance_ = NULL;
+
+NameProto* NameProto::New() const {
+ return new NameProto;
+}
+
+void NameProto::Clear() {
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (_has_bit(0)) {
+ if (text_ != &_default_text_) {
+ text_->clear();
+ }
+ }
+ if (_has_bit(1)) {
+ if (lang_ != &_default_lang_) {
+ lang_->clear();
+ }
+ }
+ }
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+bool NameProto::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+ ::google::protobuf::uint32 tag;
+ while ((tag = input->ReadTag()) != 0) {
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional string text = 1;
+ case 1: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_text()));
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(18)) goto parse_lang;
+ break;
+ }
+
+ // optional string lang = 2;
+ case 2: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+ parse_lang:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_lang()));
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectAtEnd()) return true;
+ break;
+ }
+
+ default: {
+ handle_uninterpreted:
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ return true;
+ }
+ DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
+ break;
+ }
+ }
+ }
+ return true;
+#undef DO_
+}
+
+void NameProto::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // optional string text = 1;
+ if (_has_bit(0)) {
+ ::google::protobuf::internal::WireFormatLite::WriteString(
+ 1, this->text(), output);
+ }
+
+ // optional string lang = 2;
+ if (_has_bit(1)) {
+ ::google::protobuf::internal::WireFormatLite::WriteString(
+ 2, this->lang(), output);
+ }
+
+}
+
+int NameProto::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ // optional string text = 1;
+ if (has_text()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->text());
+ }
+
+ // optional string lang = 2;
+ if (has_lang()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->lang());
+ }
+
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void NameProto::CheckTypeAndMergeFrom(
+ const ::google::protobuf::MessageLite& from) {
+ MergeFrom(*::google::protobuf::down_cast<const NameProto*>(&from));
+}
+
+void NameProto::MergeFrom(const NameProto& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (from._has_bit(0)) {
+ set_text(from.text());
+ }
+ if (from._has_bit(1)) {
+ set_lang(from.lang());
+ }
+ }
+}
+
+void NameProto::CopyFrom(const NameProto& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool NameProto::IsInitialized() const {
+
+ return true;
+}
+
+void NameProto::Swap(NameProto* other) {
+ if (other != this) {
+ std::swap(text_, other->text_);
+ std::swap(lang_, other->lang_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::std::string NameProto::GetTypeName() const {
+ return "NameProto";
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int FeatureProto::kTypeFieldNumber;
+const int FeatureProto::kNameFieldNumber;
+const int FeatureProto::kGeometryFieldNumber;
+const int FeatureProto::kPolygonsFieldNumber;
+const int FeatureProto::kLayerFieldNumber;
+#endif // !_MSC_VER
+
+FeatureProto::FeatureProto()
+ : ::google::protobuf::MessageLite() {
+ SharedCtor();
+}
+
+void FeatureProto::InitAsDefaultInstance() {
+}
+
+FeatureProto::FeatureProto(const FeatureProto& from)
+ : ::google::protobuf::MessageLite() {
+ SharedCtor();
+ MergeFrom(from);
+}
+
+void FeatureProto::SharedCtor() {
+ _cached_size_ = 0;
+ type_ = 0;
+ layer_ = 0;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+FeatureProto::~FeatureProto() {
+ SharedDtor();
+}
+
+void FeatureProto::SharedDtor() {
+ if (this != default_instance_) {
+ }
+}
+
+void FeatureProto::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const FeatureProto& FeatureProto::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_feature_2eproto(); return *default_instance_;
+}
+
+FeatureProto* FeatureProto::default_instance_ = NULL;
+
+FeatureProto* FeatureProto::New() const {
+ return new FeatureProto;
+}
+
+void FeatureProto::Clear() {
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ type_ = 0;
+ layer_ = 0;
+ }
+ name_.Clear();
+ geometry_.Clear();
+ polygons_.Clear();
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+bool FeatureProto::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+ ::google::protobuf::uint32 tag;
+ while ((tag = input->ReadTag()) != 0) {
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // required int32 type = 1;
+ case 1: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, &type_)));
+ _set_bit(0);
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(18)) goto parse_name;
+ break;
+ }
+
+ // repeated .NameProto name = 2;
+ case 2: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+ parse_name:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, add_name()));
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(18)) goto parse_name;
+ if (input->ExpectTag(26)) goto parse_geometry;
+ break;
+ }
+
+ // repeated sint64 geometry = 3 [packed = true];
+ case 3: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+ parse_geometry:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPackedPrimitive<
+ ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_SINT64>(
+ input, this->mutable_geometry())));
+ } else if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag)
+ == ::google::protobuf::internal::WireFormatLite::
+ WIRETYPE_VARINT) {
+ DO_((::google::protobuf::internal::WireFormatLite::ReadRepeatedPrimitiveNoInline<
+ ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_SINT64>(
+ 1, 26, input, this->mutable_geometry())));
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(34)) goto parse_polygons;
+ break;
+ }
+
+ // repeated sint64 polygons = 4 [packed = true];
+ case 4: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+ parse_polygons:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPackedPrimitive<
+ ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_SINT64>(
+ input, this->mutable_polygons())));
+ } else if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag)
+ == ::google::protobuf::internal::WireFormatLite::
+ WIRETYPE_VARINT) {
+ DO_((::google::protobuf::internal::WireFormatLite::ReadRepeatedPrimitiveNoInline<
+ ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_SINT64>(
+ 1, 34, input, this->mutable_polygons())));
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(40)) goto parse_layer;
+ break;
+ }
+
+ // optional int32 layer = 5;
+ case 5: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+ parse_layer:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, &layer_)));
+ _set_bit(4);
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectAtEnd()) return true;
+ break;
+ }
+
+ default: {
+ handle_uninterpreted:
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ return true;
+ }
+ DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
+ break;
+ }
+ }
+ }
+ return true;
+#undef DO_
+}
+
+void FeatureProto::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // required int32 type = 1;
+ if (_has_bit(0)) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->type(), output);
+ }
+
+ // repeated .NameProto name = 2;
+ for (int i = 0; i < this->name_size(); i++) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessage(
+ 2, this->name(i), output);
+ }
+
+ // repeated sint64 geometry = 3 [packed = true];
+ if (this->geometry_size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::WriteTag(3, ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED, output);
+ output->WriteVarint32(_geometry_cached_byte_size_);
+ }
+ for (int i = 0; i < this->geometry_size(); i++) {
+ ::google::protobuf::internal::WireFormatLite::WriteSInt64NoTag(
+ this->geometry(i), output);
+ }
+
+ // repeated sint64 polygons = 4 [packed = true];
+ if (this->polygons_size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::WriteTag(4, ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED, output);
+ output->WriteVarint32(_polygons_cached_byte_size_);
+ }
+ for (int i = 0; i < this->polygons_size(); i++) {
+ ::google::protobuf::internal::WireFormatLite::WriteSInt64NoTag(
+ this->polygons(i), output);
+ }
+
+ // optional int32 layer = 5;
+ if (_has_bit(4)) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32(5, this->layer(), output);
+ }
+
+}
+
+int FeatureProto::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ // required int32 type = 1;
+ if (has_type()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(
+ this->type());
+ }
+
+ // optional int32 layer = 5;
+ if (has_layer()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(
+ this->layer());
+ }
+
+ }
+ // repeated .NameProto name = 2;
+ total_size += 1 * this->name_size();
+ for (int i = 0; i < this->name_size(); i++) {
+ total_size +=
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->name(i));
+ }
+
+ // repeated sint64 geometry = 3 [packed = true];
+ {
+ int data_size = 0;
+ for (int i = 0; i < this->geometry_size(); i++) {
+ data_size += ::google::protobuf::internal::WireFormatLite::
+ SInt64Size(this->geometry(i));
+ }
+ if (data_size > 0) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(data_size);
+ }
+ _geometry_cached_byte_size_ = data_size;
+ total_size += data_size;
+ }
+
+ // repeated sint64 polygons = 4 [packed = true];
+ {
+ int data_size = 0;
+ for (int i = 0; i < this->polygons_size(); i++) {
+ data_size += ::google::protobuf::internal::WireFormatLite::
+ SInt64Size(this->polygons(i));
+ }
+ if (data_size > 0) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(data_size);
+ }
+ _polygons_cached_byte_size_ = data_size;
+ total_size += data_size;
+ }
+
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void FeatureProto::CheckTypeAndMergeFrom(
+ const ::google::protobuf::MessageLite& from) {
+ MergeFrom(*::google::protobuf::down_cast<const FeatureProto*>(&from));
+}
+
+void FeatureProto::MergeFrom(const FeatureProto& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ name_.MergeFrom(from.name_);
+ geometry_.MergeFrom(from.geometry_);
+ polygons_.MergeFrom(from.polygons_);
+ if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (from._has_bit(0)) {
+ set_type(from.type());
+ }
+ if (from._has_bit(4)) {
+ set_layer(from.layer());
+ }
+ }
+}
+
+void FeatureProto::CopyFrom(const FeatureProto& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool FeatureProto::IsInitialized() const {
+ if ((_has_bits_[0] & 0x00000001) != 0x00000001) return false;
+
+ return true;
+}
+
+void FeatureProto::Swap(FeatureProto* other) {
+ if (other != this) {
+ std::swap(type_, other->type_);
+ name_.Swap(&other->name_);
+ geometry_.Swap(&other->geometry_);
+ polygons_.Swap(&other->polygons_);
+ std::swap(layer_, other->layer_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::std::string FeatureProto::GetTypeName() const {
+ return "FeatureProto";
+}
+
+
+// @@protoc_insertion_point(namespace_scope)
+
+// @@protoc_insertion_point(global_scope)
diff --git a/indexer/feature.pb.h b/indexer/feature.pb.h
new file mode 100644
index 0000000000..e7f8fc783e
--- /dev/null
+++ b/indexer/feature.pb.h
@@ -0,0 +1,461 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: feature.proto
+
+#ifndef PROTOBUF_feature_2eproto__INCLUDED
+#define PROTOBUF_feature_2eproto__INCLUDED
+
+#include <string>
+
+#include <google/protobuf/stubs/common.h>
+
+#if GOOGLE_PROTOBUF_VERSION < 2003000
+#error This file was generated by a newer version of protoc which is
+#error incompatible with your Protocol Buffer headers. Please update
+#error your headers.
+#endif
+#if 2003000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
+#error This file was generated by an older version of protoc which is
+#error incompatible with your Protocol Buffer headers. Please
+#error regenerate this file with a newer version of protoc.
+#endif
+
+#include <google/protobuf/generated_message_util.h>
+#include <google/protobuf/repeated_field.h>
+#include <google/protobuf/extension_set.h>
+// @@protoc_insertion_point(includes)
+
+// Internal implementation detail -- do not call these.
+void protobuf_AddDesc_feature_2eproto();
+void protobuf_AssignDesc_feature_2eproto();
+void protobuf_ShutdownFile_feature_2eproto();
+
+class NameProto;
+class FeatureProto;
+
+// ===================================================================
+
+class NameProto : public ::google::protobuf::MessageLite {
+ public:
+ NameProto();
+ virtual ~NameProto();
+
+ NameProto(const NameProto& from);
+
+ inline NameProto& operator=(const NameProto& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ static const NameProto& default_instance();
+
+ void Swap(NameProto* other);
+
+ // implements Message ----------------------------------------------
+
+ NameProto* New() const;
+ void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from);
+ void CopyFrom(const NameProto& from);
+ void MergeFrom(const NameProto& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+
+ ::std::string GetTypeName() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // optional string text = 1;
+ inline bool has_text() const;
+ inline void clear_text();
+ static const int kTextFieldNumber = 1;
+ inline const ::std::string& text() const;
+ inline void set_text(const ::std::string& value);
+ inline void set_text(const char* value);
+ inline void set_text(const char* value, size_t size);
+ inline ::std::string* mutable_text();
+
+ // optional string lang = 2;
+ inline bool has_lang() const;
+ inline void clear_lang();
+ static const int kLangFieldNumber = 2;
+ inline const ::std::string& lang() const;
+ inline void set_lang(const ::std::string& value);
+ inline void set_lang(const char* value);
+ inline void set_lang(const char* value, size_t size);
+ inline ::std::string* mutable_lang();
+
+ // @@protoc_insertion_point(class_scope:NameProto)
+ private:
+ mutable int _cached_size_;
+
+ ::std::string* text_;
+ static const ::std::string _default_text_;
+ ::std::string* lang_;
+ static const ::std::string _default_lang_;
+ friend void protobuf_AddDesc_feature_2eproto();
+ friend void protobuf_AssignDesc_feature_2eproto();
+ friend void protobuf_ShutdownFile_feature_2eproto();
+
+ ::google::protobuf::uint32 _has_bits_[(2 + 31) / 32];
+
+ // WHY DOES & HAVE LOWER PRECEDENCE THAN != !?
+ inline bool _has_bit(int index) const {
+ return (_has_bits_[index / 32] & (1u << (index % 32))) != 0;
+ }
+ inline void _set_bit(int index) {
+ _has_bits_[index / 32] |= (1u << (index % 32));
+ }
+ inline void _clear_bit(int index) {
+ _has_bits_[index / 32] &= ~(1u << (index % 32));
+ }
+
+ void InitAsDefaultInstance();
+ static NameProto* default_instance_;
+};
+// -------------------------------------------------------------------
+
+class FeatureProto : public ::google::protobuf::MessageLite {
+ public:
+ FeatureProto();
+ virtual ~FeatureProto();
+
+ FeatureProto(const FeatureProto& from);
+
+ inline FeatureProto& operator=(const FeatureProto& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ static const FeatureProto& default_instance();
+
+ void Swap(FeatureProto* other);
+
+ // implements Message ----------------------------------------------
+
+ FeatureProto* New() const;
+ void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from);
+ void CopyFrom(const FeatureProto& from);
+ void MergeFrom(const FeatureProto& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+
+ ::std::string GetTypeName() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // required int32 type = 1;
+ inline bool has_type() const;
+ inline void clear_type();
+ static const int kTypeFieldNumber = 1;
+ inline ::google::protobuf::int32 type() const;
+ inline void set_type(::google::protobuf::int32 value);
+
+ // repeated .NameProto name = 2;
+ inline int name_size() const;
+ inline void clear_name();
+ static const int kNameFieldNumber = 2;
+ inline const ::NameProto& name(int index) const;
+ inline ::NameProto* mutable_name(int index);
+ inline ::NameProto* add_name();
+ inline const ::google::protobuf::RepeatedPtrField< ::NameProto >&
+ name() const;
+ inline ::google::protobuf::RepeatedPtrField< ::NameProto >*
+ mutable_name();
+
+ // repeated sint64 geometry = 3 [packed = true];
+ inline int geometry_size() const;
+ inline void clear_geometry();
+ static const int kGeometryFieldNumber = 3;
+ inline ::google::protobuf::int64 geometry(int index) const;
+ inline void set_geometry(int index, ::google::protobuf::int64 value);
+ inline void add_geometry(::google::protobuf::int64 value);
+ inline const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&
+ geometry() const;
+ inline ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*
+ mutable_geometry();
+
+ // repeated sint64 polygons = 4 [packed = true];
+ inline int polygons_size() const;
+ inline void clear_polygons();
+ static const int kPolygonsFieldNumber = 4;
+ inline ::google::protobuf::int64 polygons(int index) const;
+ inline void set_polygons(int index, ::google::protobuf::int64 value);
+ inline void add_polygons(::google::protobuf::int64 value);
+ inline const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&
+ polygons() const;
+ inline ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*
+ mutable_polygons();
+
+ // optional int32 layer = 5;
+ inline bool has_layer() const;
+ inline void clear_layer();
+ static const int kLayerFieldNumber = 5;
+ inline ::google::protobuf::int32 layer() const;
+ inline void set_layer(::google::protobuf::int32 value);
+
+ // @@protoc_insertion_point(class_scope:FeatureProto)
+ private:
+ mutable int _cached_size_;
+
+ ::google::protobuf::int32 type_;
+ ::google::protobuf::RepeatedPtrField< ::NameProto > name_;
+ ::google::protobuf::RepeatedField< ::google::protobuf::int64 > geometry_;
+ mutable int _geometry_cached_byte_size_;
+ ::google::protobuf::RepeatedField< ::google::protobuf::int64 > polygons_;
+ mutable int _polygons_cached_byte_size_;
+ ::google::protobuf::int32 layer_;
+ friend void protobuf_AddDesc_feature_2eproto();
+ friend void protobuf_AssignDesc_feature_2eproto();
+ friend void protobuf_ShutdownFile_feature_2eproto();
+
+ ::google::protobuf::uint32 _has_bits_[(5 + 31) / 32];
+
+ // WHY DOES & HAVE LOWER PRECEDENCE THAN != !?
+ inline bool _has_bit(int index) const {
+ return (_has_bits_[index / 32] & (1u << (index % 32))) != 0;
+ }
+ inline void _set_bit(int index) {
+ _has_bits_[index / 32] |= (1u << (index % 32));
+ }
+ inline void _clear_bit(int index) {
+ _has_bits_[index / 32] &= ~(1u << (index % 32));
+ }
+
+ void InitAsDefaultInstance();
+ static FeatureProto* default_instance_;
+};
+// ===================================================================
+
+
+// ===================================================================
+
+// NameProto
+
+// optional string text = 1;
+inline bool NameProto::has_text() const {
+ return _has_bit(0);
+}
+inline void NameProto::clear_text() {
+ if (text_ != &_default_text_) {
+ text_->clear();
+ }
+ _clear_bit(0);
+}
+inline const ::std::string& NameProto::text() const {
+ return *text_;
+}
+inline void NameProto::set_text(const ::std::string& value) {
+ _set_bit(0);
+ if (text_ == &_default_text_) {
+ text_ = new ::std::string;
+ }
+ text_->assign(value);
+}
+inline void NameProto::set_text(const char* value) {
+ _set_bit(0);
+ if (text_ == &_default_text_) {
+ text_ = new ::std::string;
+ }
+ text_->assign(value);
+}
+inline void NameProto::set_text(const char* value, size_t size) {
+ _set_bit(0);
+ if (text_ == &_default_text_) {
+ text_ = new ::std::string;
+ }
+ text_->assign(reinterpret_cast<const char*>(value), size);
+}
+inline ::std::string* NameProto::mutable_text() {
+ _set_bit(0);
+ if (text_ == &_default_text_) {
+ text_ = new ::std::string;
+ }
+ return text_;
+}
+
+// optional string lang = 2;
+inline bool NameProto::has_lang() const {
+ return _has_bit(1);
+}
+inline void NameProto::clear_lang() {
+ if (lang_ != &_default_lang_) {
+ lang_->clear();
+ }
+ _clear_bit(1);
+}
+inline const ::std::string& NameProto::lang() const {
+ return *lang_;
+}
+inline void NameProto::set_lang(const ::std::string& value) {
+ _set_bit(1);
+ if (lang_ == &_default_lang_) {
+ lang_ = new ::std::string;
+ }
+ lang_->assign(value);
+}
+inline void NameProto::set_lang(const char* value) {
+ _set_bit(1);
+ if (lang_ == &_default_lang_) {
+ lang_ = new ::std::string;
+ }
+ lang_->assign(value);
+}
+inline void NameProto::set_lang(const char* value, size_t size) {
+ _set_bit(1);
+ if (lang_ == &_default_lang_) {
+ lang_ = new ::std::string;
+ }
+ lang_->assign(reinterpret_cast<const char*>(value), size);
+}
+inline ::std::string* NameProto::mutable_lang() {
+ _set_bit(1);
+ if (lang_ == &_default_lang_) {
+ lang_ = new ::std::string;
+ }
+ return lang_;
+}
+
+// -------------------------------------------------------------------
+
+// FeatureProto
+
+// required int32 type = 1;
+inline bool FeatureProto::has_type() const {
+ return _has_bit(0);
+}
+inline void FeatureProto::clear_type() {
+ type_ = 0;
+ _clear_bit(0);
+}
+inline ::google::protobuf::int32 FeatureProto::type() const {
+ return type_;
+}
+inline void FeatureProto::set_type(::google::protobuf::int32 value) {
+ _set_bit(0);
+ type_ = value;
+}
+
+// repeated .NameProto name = 2;
+inline int FeatureProto::name_size() const {
+ return name_.size();
+}
+inline void FeatureProto::clear_name() {
+ name_.Clear();
+}
+inline const ::NameProto& FeatureProto::name(int index) const {
+ return name_.Get(index);
+}
+inline ::NameProto* FeatureProto::mutable_name(int index) {
+ return name_.Mutable(index);
+}
+inline ::NameProto* FeatureProto::add_name() {
+ return name_.Add();
+}
+inline const ::google::protobuf::RepeatedPtrField< ::NameProto >&
+FeatureProto::name() const {
+ return name_;
+}
+inline ::google::protobuf::RepeatedPtrField< ::NameProto >*
+FeatureProto::mutable_name() {
+ return &name_;
+}
+
+// repeated sint64 geometry = 3 [packed = true];
+inline int FeatureProto::geometry_size() const {
+ return geometry_.size();
+}
+inline void FeatureProto::clear_geometry() {
+ geometry_.Clear();
+}
+inline ::google::protobuf::int64 FeatureProto::geometry(int index) const {
+ return geometry_.Get(index);
+}
+inline void FeatureProto::set_geometry(int index, ::google::protobuf::int64 value) {
+ geometry_.Set(index, value);
+}
+inline void FeatureProto::add_geometry(::google::protobuf::int64 value) {
+ geometry_.Add(value);
+}
+inline const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&
+FeatureProto::geometry() const {
+ return geometry_;
+}
+inline ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*
+FeatureProto::mutable_geometry() {
+ return &geometry_;
+}
+
+// repeated sint64 polygons = 4 [packed = true];
+inline int FeatureProto::polygons_size() const {
+ return polygons_.size();
+}
+inline void FeatureProto::clear_polygons() {
+ polygons_.Clear();
+}
+inline ::google::protobuf::int64 FeatureProto::polygons(int index) const {
+ return polygons_.Get(index);
+}
+inline void FeatureProto::set_polygons(int index, ::google::protobuf::int64 value) {
+ polygons_.Set(index, value);
+}
+inline void FeatureProto::add_polygons(::google::protobuf::int64 value) {
+ polygons_.Add(value);
+}
+inline const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&
+FeatureProto::polygons() const {
+ return polygons_;
+}
+inline ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*
+FeatureProto::mutable_polygons() {
+ return &polygons_;
+}
+
+// optional int32 layer = 5;
+inline bool FeatureProto::has_layer() const {
+ return _has_bit(4);
+}
+inline void FeatureProto::clear_layer() {
+ layer_ = 0;
+ _clear_bit(4);
+}
+inline ::google::protobuf::int32 FeatureProto::layer() const {
+ return layer_;
+}
+inline void FeatureProto::set_layer(::google::protobuf::int32 value) {
+ _set_bit(4);
+ layer_ = value;
+}
+
+
+// @@protoc_insertion_point(namespace_scope)
+
+// @@protoc_insertion_point(global_scope)
+
+#endif // PROTOBUF_feature_2eproto__INCLUDED
diff --git a/indexer/feature.proto b/indexer/feature.proto
new file mode 100644
index 0000000000..895d19b3c4
--- /dev/null
+++ b/indexer/feature.proto
@@ -0,0 +1,16 @@
+option optimize_for = LITE_RUNTIME;
+
+message NameProto
+{
+ optional string text = 1;
+ optional string lang = 2;
+}
+
+message FeatureProto
+{
+ required int32 type = 1;
+ repeated NameProto name = 2;
+ repeated sint64 geometry = 3 [packed=true];
+ repeated sint64 polygons = 4 [packed=true];
+ optional int32 layer = 5;
+}
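
For reference, a minimal sketch of filling and serializing this message through the lite-runtime accessors generated in feature.pb.h; the field values and the helper name are illustrative only:

#include "feature.pb.h"

#include "../std/string.hpp"

// Builds a FeatureProto in memory and serializes it into a string buffer.
string SerializeSampleFeature()
{
  FeatureProto proto;
  proto.set_type(5);               // required field
  proto.set_layer(1);              // optional field

  NameProto * name = proto.add_name();
  name->set_text("Minsk");
  name->set_lang("be");

  proto.add_geometry(123);         // packed sint64 values
  proto.add_geometry(-45);

  string out;
  proto.SerializeToString(&out);   // provided by MessageLite
  return out;
}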
diff --git a/indexer/feature_processor.hpp b/indexer/feature_processor.hpp
new file mode 100644
index 0000000000..92167fcccf
--- /dev/null
+++ b/indexer/feature_processor.hpp
@@ -0,0 +1,49 @@
+#pragma once
+
+#include "feature.hpp"
+
+#include "../coding/varint.hpp"
+#include "../coding/file_reader.hpp"
+
+#include "../std/vector.hpp"
+
+namespace feature
+{
+ template <typename TSource>
+ void ReadFromSource(TSource & src, Feature & ft)
+ {
+ uint32_t const sz = ReadVarUint<uint32_t>(src);
+ vector<char> buffer(sz);
+ src.Read(&buffer[0], sz);
+ ft.Deserialize(buffer);
+ }
+
+  /// @return total header size that should be skipped before reading feature data
+ inline uint64_t ReadDatHeaderSize(Reader const & reader)
+ {
+ uint64_t const headerSize = ReadPrimitiveFromPos<uint64_t>(reader, 0);
+ return headerSize + sizeof(uint64_t);
+ }
+
+ template <class ToDo>
+ void ForEachFromDat(string const & fName, ToDo & toDo)
+ {
+ typedef ReaderSource<FileReader> source_t;
+
+ FileReader reader(fName);
+ source_t src(reader);
+
+ // skip xml header
+ uint64_t currPos = ReadDatHeaderSize(reader);
+ src.Skip(currPos);
+ uint64_t const fSize = reader.Size();
+ // read features one by one
+ while (currPos < fSize)
+ {
+ Feature ft;
+ ReadFromSource(src, ft);
+ toDo(ft, currPos);
+ currPos = src.Pos();
+ }
+ }
+}
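
A minimal sketch of driving feature::ForEachFromDat with a counting functor; the FeatureCounter/CountFeatures names are illustrative, and the LOG macro is assumed to be available as elsewhere in this commit:

#include "feature_processor.hpp"

#include "../base/logging.hpp"
#include "../std/string.hpp"

// Counts every feature stored in a .dat file.
struct FeatureCounter
{
  size_t m_count;
  FeatureCounter() : m_count(0) {}
  void operator() (Feature const &, uint64_t /* offset */) { ++m_count; }
};

void CountFeatures(string const & datPath)
{
  FeatureCounter counter;
  feature::ForEachFromDat(datPath, counter);  // the functor is taken by reference
  LOG(LINFO, ("Features in", datPath, ":", counter.m_count));
}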
diff --git a/indexer/feature_visibility.cpp b/indexer/feature_visibility.cpp
new file mode 100644
index 0000000000..b2f65c8443
--- /dev/null
+++ b/indexer/feature_visibility.cpp
@@ -0,0 +1,229 @@
+#include "feature_visibility.hpp"
+#include "classificator.hpp"
+#include "feature.hpp"
+#include "scales.hpp"
+
+#include "../base/assert.hpp"
+
+#include "../std/array.hpp"
+
+#include "../base/start_mem_debug.hpp"
+
+
+
+namespace
+{
+ bool need_process_parent(ClassifObject const * p)
+ {
+ string const & n = p->GetName();
+ // same as is_mark_key (@see osm2type.cpp)
+ return (n == "bridge" || n == "junction" || n == "oneway" || n == "fee");
+ }
+}
+
+template <class ToDo> typename ToDo::result_type
+Classificator::ProcessObjects(uint32_t type, ToDo & toDo) const
+{
+ typedef typename ToDo::result_type res_t;
+ res_t res = res_t(); // default initialization
+
+ ClassifObject const * p = &m_root;
+ uint8_t i = 0;
+ uint8_t v;
+
+  // 8 entries are enough for now for our 3-level classificator
+ array<ClassifObject const *, 8> path;
+
+  // collect the chain of objects in the hierarchy for this type
+ while (ftype::GetValue(type, i, v))
+ {
+ p = p->GetObject(v);
+ path[i++] = p;
+ toDo(p);
+ }
+
+ if (path.empty())
+ return res;
+ else
+ {
+ // process objects from child to root
+ for (; i > 0; --i)
+ {
+      // process the object and stop the search if the functor requests it
+      if (toDo(path[i-1], res)) break;
+
+      // continue to the parent only for marker objects (see need_process_parent)
+      if (!need_process_parent(path[i-1])) break;
+ }
+ return res;
+ }
+}
+
+ClassifObject const * Classificator::GetObject(uint32_t type) const
+{
+ ClassifObject const * p = &m_root;
+ uint8_t i = 0;
+
+ // get the final ClassifObject
+ uint8_t v;
+ while (ftype::GetValue(type, i, v))
+ {
+ ++i;
+ p = p->GetObject(v);
+ }
+
+ return p;
+}
+
+namespace feature
+{
+
+namespace
+{
+ class get_draw_rule
+ {
+ int m_scale;
+ feature_geo_t m_ft;
+ vector<drule::Key> & m_keys;
+ string & m_name;
+
+ public:
+ get_draw_rule(int scale, feature_geo_t ft,
+ vector<drule::Key> & keys, string & name)
+ : m_scale(scale), m_ft(ft), m_keys(keys), m_name(name)
+ {
+ }
+
+ typedef bool result_type;
+
+ void operator() (ClassifObject const * p)
+ {
+#ifdef DEBUG
+ if (!m_name.empty()) m_name += '-';
+ m_name += p->GetName();
+#endif
+ }
+ bool operator() (ClassifObject const * p, bool & res)
+ {
+ res = true;
+ p->GetSuitable(m_scale, ClassifObject::feature_t(m_ft), m_keys);
+ return false;
+ }
+ };
+}
+
+int GetDrawRule(Feature const & f, int level, vector<drule::Key> & keys, string & names)
+{
+ Feature::FeatureType const geoType = f.GetFeatureType();
+ if (geoType == Feature::FEATURE_TYPE_UNKNOWN)
+ {
+ ASSERT ( false, ("Logic Error! Unknown feature type.") );
+ return Feature::FEATURE_TYPE_UNKNOWN;
+ }
+
+ Feature::GetTypesFn types;
+ f.ForEachTypeRef(types);
+
+ ASSERT ( keys.empty(), () );
+ Classificator const & c = classif();
+
+ get_draw_rule doRules(level, static_cast<feature_geo_t>(geoType), keys, names);
+ for (size_t i = 0; i < types.m_types.size(); ++i)
+ (void)c.ProcessObjects(types.m_types[i], doRules);
+
+ return geoType;
+}
+
+namespace
+{
+ class check_is_drawable
+ {
+ int m_scale;
+ public:
+ check_is_drawable(int scale) : m_scale(scale) {}
+
+ typedef bool result_type;
+
+ void operator() (ClassifObject const *) {}
+ bool operator() (ClassifObject const * p, bool & res)
+ {
+ if (p->IsDrawable(m_scale))
+ {
+ res = true;
+ return true;
+ }
+ return false;
+ }
+ };
+
+ class check_is_drawable_like
+ {
+ ClassifObject::feature_t m_type;
+ public:
+ check_is_drawable_like(feature_geo_t type)
+ : m_type(ClassifObject::feature_t(type))
+ {
+ }
+
+ typedef bool result_type;
+
+ void operator() (ClassifObject const *) {}
+ bool operator() (ClassifObject const * p, bool & res)
+ {
+ if (p->IsDrawableLike(m_type))
+ {
+ res = true;
+ return true;
+ }
+ return false;
+ }
+ };
+}
+
+bool IsDrawableAny(uint32_t type)
+{
+ return classif().GetObject(type)->IsDrawableAny();
+}
+
+bool IsDrawableLike(vector<uint32_t> const & types, feature_geo_t ft)
+{
+ Classificator const & c = classif();
+
+ check_is_drawable_like doCheck(ft);
+ for (size_t i = 0; i < types.size(); ++i)
+ if (c.ProcessObjects(types[i], doCheck))
+ return true;
+ return false;
+}
+
+bool IsDrawableForIndex(Feature const & f, int level)
+{
+ if (f.GetFeatureType() == Feature::FEATURE_TYPE_AREA)
+ if (!scales::IsGoodForLevel(level, f.GetLimitRect()))
+ return false;
+
+ Feature::GetTypesFn types;
+ f.ForEachTypeRef(types);
+
+ Classificator const & c = classif();
+
+ check_is_drawable doCheck(level);
+ for (size_t i = 0; i < types.m_types.size(); ++i)
+ if (c.ProcessObjects(types.m_types[i], doCheck))
+ return true;
+
+ return false;
+}
+
+uint32_t MinDrawableScaleForFeature(Feature const & f)
+{
+ uint32_t const upBound = scales::GetUpperScale();
+
+ for (uint32_t level = 0; level <= upBound; ++level)
+ if (feature::IsDrawableForIndex(f, level))
+ return level;
+
+ return uint32_t(-1);
+}
+
+}
diff --git a/indexer/feature_visibility.hpp b/indexer/feature_visibility.hpp
new file mode 100644
index 0000000000..00c32ec3e2
--- /dev/null
+++ b/indexer/feature_visibility.hpp
@@ -0,0 +1,23 @@
+#pragma once
+
+#include "drawing_rule_def.hpp"
+
+#include "../base/base.hpp"
+
+#include "../std/vector.hpp"
+#include "../std/string.hpp"
+
+class Feature;
+
+namespace feature
+{
+ enum feature_geo_t { fpoint = 0, fline, farea };
+
+ bool IsDrawableAny(uint32_t type);
+ bool IsDrawableLike(vector<uint32_t> const & type, feature_geo_t ft);
+ bool IsDrawableForIndex(Feature const & f, int level);
+ uint32_t MinDrawableScaleForFeature(Feature const & f);
+
+
+ int GetDrawRule(Feature const & f, int level, vector<drule::Key> & keys, string & names);
+}
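
A minimal sketch of the intended call pattern for these functions; it assumes the classificator has already been loaded (classificator::Read), as the tests in this commit do, and the helper name is illustrative:

#include "feature_visibility.hpp"
#include "feature.hpp"

#include "../base/logging.hpp"

// Logs the drawing rules selected for a feature at the given scale level.
void LogDrawRules(Feature const & f, int level)
{
  if (!feature::IsDrawableForIndex(f, level))
    return;

  vector<drule::Key> keys;
  string names;
  int const geoType = feature::GetDrawRule(f, level, keys, names);

  LOG(LINFO, ("geo type:", geoType,
              "rule count:", keys.size(),
              "min drawable scale:", feature::MinDrawableScaleForFeature(f)));
}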
diff --git a/indexer/features_vector.hpp b/indexer/features_vector.hpp
new file mode 100644
index 0000000000..8000ea66fa
--- /dev/null
+++ b/indexer/features_vector.hpp
@@ -0,0 +1,53 @@
+#pragma once
+#include "../indexer/feature.hpp"
+#include "../coding/var_record_reader.hpp"
+#include "../base/base.hpp"
+#include "../std/bind.hpp"
+
+template <typename ReaderT>
+class FeaturesVector
+{
+public:
+ typedef ReaderT ReaderType;
+
+ explicit FeaturesVector(ReaderT const & reader) : m_RecordReader(reader, 256)
+ {
+ }
+
+ void Get(uint64_t pos, Feature & feature) const
+ {
+ vector<char> record;
+ uint32_t offset;
+ m_RecordReader.ReadRecord(pos, record, offset);
+ feature.Deserialize(record, offset);
+ }
+
+ template <class TDo> void ForEachOffset(TDo const & toDo) const
+ {
+ Feature f;
+ m_RecordReader.ForEachRecord(
+ bind<void>(toDo, bind(&FeaturesVector<ReaderT>::DeserializeFeature, this, _2, _3, &f), _1));
+ }
+
+ template <class TDo> void ForEach(TDo const & toDo) const
+ {
+ Feature f;
+ m_RecordReader.ForEachRecord(
+ bind<void>(toDo, bind(&FeaturesVector<ReaderT>::DeserializeFeature, this, _2, _3, &f)));
+ }
+
+ bool IsMyData(string const & fName) const
+ {
+ return m_RecordReader.IsEqual(fName);
+ }
+
+private:
+ Feature const & DeserializeFeature(char const * data, uint32_t size, Feature * pFeature) const
+ {
+ vector<char> data1(data, data + size);
+ pFeature->Deserialize(data1);
+ return *pFeature;
+ }
+
+ VarRecordReader<ReaderT, &VarRecordSizeReaderVarint> m_RecordReader;
+};
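
A minimal sketch of iterating a .dat file through FeaturesVector; the header-skipping step mirrors index_builder.cpp, and the functor and file path are illustrative:

#include "features_vector.hpp"
#include "feature_processor.hpp"

#include "../coding/file_reader.hpp"
#include "../base/logging.hpp"

struct LogFeatureName
{
  void operator() (Feature const & ft) const { LOG(LINFO, (ft.GetName())); }
};

void DumpFeatureNames(string const & datPath)
{
  FileReader reader(datPath);
  // Skip the .dat header, exactly as index_builder.cpp does.
  uint64_t const start = feature::ReadDatHeaderSize(reader);
  FileReader subReader = reader.SubReader(start, reader.Size() - start);

  FeaturesVector<FileReader> features(subReader);
  features.ForEach(LogFeatureName());
}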
diff --git a/indexer/file_reader_stream.hpp b/indexer/file_reader_stream.hpp
new file mode 100644
index 0000000000..a49ac81d23
--- /dev/null
+++ b/indexer/file_reader_stream.hpp
@@ -0,0 +1,26 @@
+#pragma once
+
+#include "../coding/streams.hpp"
+#include "../coding/file_reader.hpp"
+
+/// @todo Remove and use ReadPrimitive() and other free functions.
+class FileReaderStream : public stream::ReaderStream<ReaderSource<FileReader> >
+{
+ typedef stream::ReaderStream<ReaderSource<FileReader> > base_type;
+
+ FileReader m_file;
+ ReaderSource<FileReader> m_reader;
+
+public:
+ FileReaderStream(char const * fName)
+ : base_type(m_reader), m_file(fName), m_reader(m_file)
+ {
+ }
+
+ using base_type::operator >>;
+
+ void Seek(uint64_t pos)
+ {
+ m_reader = m_file.SubReader(pos, m_file.Size() - pos);
+ }
+};
diff --git a/indexer/file_writer_stream.hpp b/indexer/file_writer_stream.hpp
new file mode 100644
index 0000000000..0ea3aa9c3c
--- /dev/null
+++ b/indexer/file_writer_stream.hpp
@@ -0,0 +1,19 @@
+#pragma once
+
+#include "../coding/streams.hpp"
+#include "../coding/file_writer.hpp"
+
+class FileWriterStream : public stream::WriterStream<FileWriter>
+{
+ typedef stream::WriterStream<FileWriter> base_type;
+
+ FileWriter m_file;
+
+public:
+ FileWriterStream(char const * fName)
+ : base_type(m_file), m_file(fName) {}
+
+ using base_type::operator <<;
+
+ int64_t Pos() const { return m_file.Pos(); }
+};
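
A minimal round-trip sketch for the two stream wrappers above; it assumes the underlying stream::WriterStream / stream::ReaderStream (defined in coding/streams.hpp, not part of this diff) provide operator << / >> for integral types and strings, and the file name is arbitrary:

#include "file_writer_stream.hpp"
#include "file_reader_stream.hpp"

void StreamRoundTrip()
{
  {
    FileWriterStream ws("stream_test.bin");
    ws << uint32_t(17) << string("hello");
  }  // the file is closed when ws goes out of scope

  FileReaderStream rs("stream_test.bin");
  uint32_t value;
  string text;
  rs >> value >> text;

  rs.Seek(0);  // rewind to the beginning if needed
}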
diff --git a/indexer/index.hpp b/indexer/index.hpp
new file mode 100644
index 0000000000..969576874b
--- /dev/null
+++ b/indexer/index.hpp
@@ -0,0 +1,224 @@
+#pragma once
+#include "cell_id.hpp"
+#include "covering.hpp"
+#include "features_vector.hpp"
+#include "scale_index.hpp"
+#include "scales.hpp"
+
+#include "../geometry/rect2d.hpp"
+#include "../coding/varint.hpp"
+#include "../base/base.hpp"
+
+#include "../std/string.hpp"
+#include "../std/vector.hpp"
+#include "../std/unordered_set.hpp"
+#include "../std/utility.hpp"
+#include "../std/bind.hpp"
+
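+// Adapts BaseT's interval-based queries to rect/viewport/scale queries by covering the rect with cell intervals.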
+template <class BaseT> class IndexForEachAdapter : public BaseT
+{
+public:
+ typedef typename BaseT::Query Query;
+
+ template <typename F>
+ void ForEachInRect(F const & f, m2::RectD const & rect, uint32_t scale, Query & query) const
+ {
+ vector<pair<int64_t, int64_t> > intervals = covering::CoverViewportAndAppendLowerLevels(rect);
+ for (size_t i = 0; i < intervals.size(); ++i)
+ BaseT::ForEachInIntervalAndScale(f, intervals[i].first, intervals[i].second, scale, query);
+ }
+
+ template <typename F>
+ void ForEachInRect(F const & f, m2::RectD const & rect, uint32_t scale) const
+ {
+ Query query;
+ ForEachInRect(f, rect, scale, query);
+ }
+
+ template <typename F>
+ void ForEachInViewport(F const & f, m2::RectD const & viewport, Query & query) const
+ {
+ ForEachInRect(f, viewport, scales::GetScaleLevel(viewport), query);
+ }
+
+ template <typename F>
+ void ForEachInViewport(F const & f, m2::RectD const & viewport) const
+ {
+ Query query;
+ ForEachInViewport(f, viewport, query);
+ }
+
+ template <typename F>
+ void ForEachInScale(F const & f, uint32_t scale, Query & query) const
+ {
+ int64_t const rootId = RectId("").ToInt64();
+ BaseT::ForEachInIntervalAndScale(f, rootId, rootId + RectId("").SubTreeSize(), scale, query);
+ }
+
+ template <typename F>
+ void ForEachInScale(F const & f, uint32_t scale) const
+ {
+ Query query;
+ ForEachInScale(f, scale, query);
+ }
+};
+
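+// Holds one IndexT per registered data file and forwards every query to all of them.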
+template <class IndexT> class MultiIndexAdapter
+{
+ struct deletor_t
+ {
+ template <class T> void operator() (T * p) { delete p; }
+ };
+
+public:
+ typedef typename IndexT::Query Query;
+
+ ~MultiIndexAdapter()
+ {
+ Clean();
+ }
+
+ template <typename F>
+ void ForEachInIntervalAndScale(F const & f, int64_t beg, int64_t end, uint32_t scale,
+ Query & query) const
+ {
+ for (size_t i = 0; i < m_Indexes.size(); ++i)
+ m_Indexes[i]->ForEachInIntervalAndScale(f, beg, end, scale, query);
+ }
+
+ template <class DataReaderT, class IndexReaderT>
+ void Add(DataReaderT const & dataReader, IndexReaderT const & indexReader)
+ {
+ m_Indexes.push_back(new IndexT(dataReader, indexReader));
+ }
+
+ bool IsExist(string const & dataPath) const
+ {
+ return (find_if(m_Indexes.begin(), m_Indexes.end(),
+ bind(&IndexT::IsMyData, _1, cref(dataPath))) !=
+ m_Indexes.end());
+ }
+
+ void Remove(string const & dataPath)
+ {
+ for (typename vector<IndexT *>::iterator it = m_Indexes.begin(); it != m_Indexes.end(); ++it)
+ {
+ if ((*it)->IsMyData(dataPath))
+ {
+ delete *it;
+ m_Indexes.erase(it);
+ break;
+ }
+ }
+ }
+
+ void Clean()
+ {
+ for_each(m_Indexes.begin(), m_Indexes.end(), deletor_t());
+ m_Indexes.clear();
+ }
+
+private:
+ vector<IndexT *> m_Indexes;
+};
+
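+// Translates feature offsets reported by BaseT into Feature objects deserialized from FeatureVectorT.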
+template <class FeatureVectorT, class BaseT> class OffsetToFeatureAdapter : public BaseT
+{
+public:
+ typedef typename BaseT::Query Query;
+
+ OffsetToFeatureAdapter(typename FeatureVectorT::ReaderType const & dataReader,
+ typename BaseT::ReaderType const & indexReader)
+ : BaseT(indexReader), m_FeatureVector(dataReader)
+ {
+ }
+
+ template <typename F>
+ void ForEachInIntervalAndScale(F const & f, int64_t beg, int64_t end, uint32_t scale,
+ Query & query) const
+ {
+ OffsetToFeatureReplacer<F> offsetToFeatureReplacer(m_FeatureVector, f);
+ BaseT::ForEachInIntervalAndScale(offsetToFeatureReplacer, beg, end, scale, query);
+ }
+
+ bool IsMyData(string const & fName) const
+ {
+ return m_FeatureVector.IsMyData(fName);
+ }
+
+private:
+ FeatureVectorT m_FeatureVector;
+
+ template <typename F>
+ struct OffsetToFeatureReplacer
+ {
+ OffsetToFeatureReplacer(FeatureVectorT const & v, F const & f) : m_V(v), m_F(f) {}
+ void operator() (uint32_t offset) const
+ {
+ Feature feature;
+ m_V.Get(offset, feature);
+ m_F(feature);
+ }
+ FeatureVectorT const & m_V;
+ F const & m_F;
+ };
+};
+
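+// Reports each feature offset at most once per Query, deduplicating hits from overlapping intervals.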
+template <class BaseT> class UniqueOffsetAdapter : public BaseT
+{
+public:
+  // Defines the base Query type.
+  // If BaseT ever needs to define its own Query, derive from it with the following
+  // line and pass the query through in ForEachXXX():
+  // class Query : public typename BaseT::Query
+ class Query
+ {
+ // TODO: Remember max offsets.size() and initialize offsets with it?
+ unordered_set<uint32_t> m_Offsets;
+ friend class UniqueOffsetAdapter;
+ };
+
+ template <typename T1>
+ explicit UniqueOffsetAdapter(T1 const & t1) : BaseT(t1) {}
+
+ template <typename T1, typename T2>
+ UniqueOffsetAdapter(T1 const & t1, T2 const & t2) : BaseT(t1, t2) {}
+
+ template <typename F>
+ void ForEachInIntervalAndScale(F const & f, int64_t beg, int64_t end, uint32_t scale,
+ Query & query) const
+ {
+ UniqueOffsetFunctorAdapter<F> uniqueOffsetFunctorAdapter(query.m_Offsets, f);
+ BaseT::ForEachInIntervalAndScale(uniqueOffsetFunctorAdapter, beg, end, scale);
+ }
+
+private:
+ template <typename F>
+ struct UniqueOffsetFunctorAdapter
+ {
+ UniqueOffsetFunctorAdapter(unordered_set<uint32_t> & offsets, F const & f)
+ : m_Offsets(offsets), m_F(f) {}
+
+ void operator() (uint32_t offset) const
+ {
+ if (m_Offsets.insert(offset).second)
+ m_F(offset);
+ }
+
+ unordered_set<uint32_t> & m_Offsets;
+ F const & m_F;
+ };
+};
+
+template <typename DataReaderT, typename IndexReaderT>
+struct Index
+{
+ typedef IndexForEachAdapter<
+ MultiIndexAdapter<
+ OffsetToFeatureAdapter<FeaturesVector<DataReaderT>,
+ UniqueOffsetAdapter<
+ ScaleIndex<IndexReaderT>
+ >
+ >
+ >
+ > Type;
+};
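
A minimal sketch of querying the composed index type; the file names follow index_test.cpp, the rectangle and scale values are arbitrary, and the classificator is assumed to be loaded beforehand as in the tests:

#include "index.hpp"

#include "../coding/file_reader.hpp"

struct CollectFeature
{
  void operator() (Feature const & /* ft */) const
  {
    // draw or accumulate the feature here
  }
};

void QueryRect()
{
  FileReader dataReader("minsk-pass.dat");
  FileReader indexReader("minsk-pass.dat.idx");

  Index<FileReader, FileReader>::Type index;
  index.Add(dataReader, indexReader);

  m2::RectD const rect(27.52, 53.88, 27.58, 53.94);
  index.ForEachInRect(CollectFeature(), rect, 17 /* scale level */);
}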
diff --git a/indexer/index_builder.cpp b/indexer/index_builder.cpp
new file mode 100644
index 0000000000..6b86cef541
--- /dev/null
+++ b/indexer/index_builder.cpp
@@ -0,0 +1,39 @@
+#include "index_builder.hpp"
+#include "feature_processor.hpp"
+#include "features_vector.hpp"
+#include "../coding/file_reader.hpp"
+
+namespace indexer
+{
+ bool BuildIndexFromDatFile(string const & fullIndexFilePath, string const & fullDatFilePath,
+ string const & tmpFilePath)
+ {
+ try
+ {
+ FileReader dataReader(fullDatFilePath);
+ // skip xml header with metadata
+ uint64_t startOffset = feature::ReadDatHeaderSize(dataReader);
+ FileReader subReader = dataReader.SubReader(startOffset, dataReader.Size() - startOffset);
+ FeaturesVector<FileReader> featuresVector(subReader);
+
+ FileWriter indexWriter(fullIndexFilePath.c_str());
+ BuildIndex(featuresVector, indexWriter, tmpFilePath);
+ }
+ catch (Reader::OpenException const & e)
+ {
+      LOG(LERROR, ("File not found:", e.what()));
+ return false;
+ }
+ catch (Reader::Exception const & e)
+ {
+ LOG(LERROR, ("Unknown error while reading file ", e.what()));
+ return false;
+ }
+ catch (Writer::Exception const & e)
+ {
+ LOG(LERROR, ("Error writing index file", e.what()));
+ }
+
+ return true;
+ }
+} // namespace indexer
diff --git a/indexer/index_builder.hpp b/indexer/index_builder.hpp
new file mode 100644
index 0000000000..b31353136a
--- /dev/null
+++ b/indexer/index_builder.hpp
@@ -0,0 +1,28 @@
+#pragma once
+
+#include "scale_index_builder.hpp"
+
+namespace indexer
+{
+ template <class FeaturesVectorT, typename WriterT>
+ void BuildIndex(FeaturesVectorT const & featuresVector,
+ WriterT & writer,
+ string const & tmpFilePrefix)
+ {
+ {
+ LOG(LINFO, ("Building scale index."));
+ uint64_t indexSize;
+ {
+ SubWriter<WriterT> subWriter(writer);
+ IndexScales(featuresVector, subWriter, tmpFilePrefix);
+ indexSize = subWriter.Size();
+ }
+ LOG(LINFO, ("Built scale index. Size =", indexSize));
+ }
+ }
+
+ // doesn't throw exceptions
+ bool BuildIndexFromDatFile(string const & fullIndexFilePath,
+ string const & fullDatFilePath,
+ string const & tmpFilePath);
+}
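
A minimal sketch of the expected call; the ".idx" suffix mirrors index_test.cpp and the helper name is illustrative:

#include "index_builder.hpp"

#include "../base/logging.hpp"
#include "../std/string.hpp"

void BuildIndexForDat(string const & datPath)
{
  // "<name>.dat" -> "<name>.dat.idx"
  if (!indexer::BuildIndexFromDatFile(datPath + ".idx", datPath, datPath + ".tmp"))
    LOG(LERROR, ("Index was not built for", datPath));
}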
diff --git a/indexer/indexer.pro b/indexer/indexer.pro
new file mode 100644
index 0000000000..61b274dfa7
--- /dev/null
+++ b/indexer/indexer.pro
@@ -0,0 +1,73 @@
+# Indexer library.
+
+TARGET = indexer
+TEMPLATE = lib
+CONFIG += staticlib
+#!macx:DEFINES += COMPILED_FROM_DSP # needed for Expat
+#macx:DEFINES += HAVE_MEMMOVE # needed for Expat
+
+ROOT_DIR = ..
+DEPENDENCIES = geometry coding base expat
+
+include($$ROOT_DIR/common.pri)
+
+!iphonesimulator-g++42 {
+ !iphonedevice-g++42 {
+ !bada-simulator {
+ PRE_TARGETDEPS += $$BINARIES_PATH/$${LIB_PREFIX}sgitess$$LIB_EXT
+ LIBS += -lsgitess
+ }
+ }
+}
+
+SOURCES += \
+ osm2type.cpp \
+ classificator.cpp \
+ drawing_rules.cpp \
+ drawing_rule_def.cpp \
+ scales.cpp \
+ osm_decl.cpp \
+ feature.cpp \
+ classif_routine.cpp \
+ xml_element.cpp \
+ scale_index.cpp \
+ covering.cpp \
+ point_to_int64.cpp \
+ mercator.cpp \
+ index_builder.cpp \
+ feature_visibility.cpp \
+ data_header.cpp \
+ data_header_reader.cpp \
+ country.cpp \
+
+HEADERS += \
+ feature.hpp \
+ cell_coverer.hpp \
+ cell_id.hpp \
+ osm2type.hpp \
+ classificator.hpp \
+ drawing_rules.hpp \
+ drawing_rule_def.hpp \
+ features_vector.hpp \
+ std_serialization.hpp \
+ scale_index.hpp \
+ scale_index_builder.hpp \
+ index.hpp \
+ index_builder.hpp \
+ scales.hpp \
+ osm_decl.hpp \
+ classif_routine.hpp \
+ xml_element.hpp \
+ interval_index.hpp \
+ interval_index_builder.hpp \
+ covering.hpp \
+ mercator.hpp \
+ feature_processor.hpp \
+ file_reader_stream.hpp \
+ file_writer_stream.hpp \
+ feature_visibility.hpp \
+ data_header.hpp \
+ data_header_reader.hpp \
+ country.hpp \
+ defines.hpp \
+ tree_structure.hpp \
diff --git a/indexer/indexer_tests/cell_coverer_test.cpp b/indexer/indexer_tests/cell_coverer_test.cpp
new file mode 100644
index 0000000000..fc01826b07
--- /dev/null
+++ b/indexer/indexer_tests/cell_coverer_test.cpp
@@ -0,0 +1,162 @@
+#include "../cell_coverer.hpp"
+#include "../../testing/testing.hpp"
+
+#include "../../geometry/covering.hpp"
+#include "../../coding/hex.hpp"
+#include "../../base/logging.hpp"
+
+
+// Unit test uses m2::CellId<30> for historical reasons, the actual production code uses RectId.
+typedef m2::CellId<30> CellIdT;
+typedef Bounds<-180, -90, 180, 90> OrthoBounds;
+
+namespace
+{
+ class CoordsPusher
+ {
+ public:
+ typedef vector< CoordPointT > VT;
+
+ CoordsPusher(VT & v) : m_v(v) {}
+ CoordsPusher & operator()(CoordT x, CoordT y)
+ {
+ m_v.push_back(make_pair(x, y));
+ return *this;
+ }
+ private:
+ VT & m_v;
+ };
+
+ string EnumCells(vector<CellIdT> const & v)
+ {
+ string result;
+ for (size_t i = 0; i < v.size(); ++i)
+ {
+ result += v[i].ToString();
+ if (i != v.size() - 1) result += ' ';
+ }
+ return result;
+ }
+}
+
+UNIT_TEST(CellIdToStringRecode)
+{
+ char const kTest[] = "21032012203";
+ TEST_EQUAL(CellIdT::FromString(kTest).ToString(), kTest, ());
+}
+
+UNIT_TEST(GoldenTestCover)
+{
+ vector<CoordPointT> coords;
+ CoordsPusher c(coords);
+ c(0.7, 0.5)
+ (1.5, 1.5)
+ (2.5, 3.5)
+ (5.5, 5.0);
+
+ vector<CellIdT> cells;
+ CoverPolyLine<Bounds<0, 0, 8, 8> >(coords, 3, cells);
+
+ TEST_EQUAL(EnumCells(cells), "000 001 003 021 030 032 033 211 300 301 303", ());
+}
+
+UNIT_TEST(GoldenTestCellIntersect)
+{
+ vector< CoordPointT > coords;
+ CoordsPusher c(coords);
+ c (0.7, 0.5)
+ (1.5, 1.5)
+ (2.5, 3.5)
+ (5.5, 5.0);
+
+ vector<CellIdT> cells;
+
+ typedef Bounds<0, 0, 8, 8> BoundsT;
+
+ CoverPolyLine<BoundsT>(coords, 7, cells);
+ TEST(!CellIntersects<BoundsT>(coords, CellIdT::FromString("210")), ());
+}
+
+UNIT_TEST(GoldenOrthoCover)
+{
+ vector< CoordPointT > coords;
+ CoordsPusher c(coords);
+ c
+ (27.545927047729492, 53.888893127441406)
+ (27.546476364135742, 53.888614654541016)
+ (27.546852111816406, 53.889347076416016)
+ (27.546596527099609, 53.889404296875000)
+ (27.545927047729492, 53.888893127441406);
+
+ vector<CellIdT> cells;
+ CoverPolyLine<OrthoBounds>(coords, 19, cells);
+
+ TEST_EQUAL(EnumCells(cells),
+ "3201221130210310103 3201221130210310120 3201221130210310121 3201221130210310122 "
+ "3201221130210310123 3201221130210310301 3201221130210310310", ());
+}
+
+UNIT_TEST(GoldenCoverRect)
+{
+ vector<CellIdT> cells;
+ CoverRect<OrthoBounds>(27.43, 53.83, 27.70, 53.96, 4, cells);
+
+ TEST_EQUAL(cells.size(), 4, ());
+
+ TEST_EQUAL(cells[0].ToString(), "32012211300", ());
+ TEST_EQUAL(cells[1].ToString(), "32012211301", ());
+ TEST_EQUAL(cells[2].ToString(), "32012211302", ());
+ TEST_EQUAL(cells[3].ToString(), "32012211303", ());
+}
+
+UNIT_TEST(ArtificialCoverRect)
+{
+ typedef Bounds<0, 0, 16, 16> TestBounds;
+
+ vector<CellIdT> cells;
+ CoverRect<TestBounds>(5, 5, 11, 11, 4, cells);
+
+ TEST_EQUAL(cells.size(), 4, ());
+
+ TEST_EQUAL(cells[0].ToString(), "03", ());
+ TEST_EQUAL(cells[1].ToString(), "12", ());
+ TEST_EQUAL(cells[2].ToString(), "21", ());
+ TEST_EQUAL(cells[3].ToString(), "30", ());
+}
+
+UNIT_TEST(CoverEmptyTriangleTest)
+{
+ vector<int64_t> ids, expectedIds;
+ m2::PointD pt(8.89748, 51.974);
+ expectedIds.push_back(CoverPoint<MercatorBounds, RectId>(CoordPointT(pt.x, pt.y)).ToInt64());
+ TEST_NOT_EQUAL(expectedIds[0], 1, ());
+ typedef CellIdConverter<MercatorBounds, RectId> CellIdConverterType;
+ m2::PointD pt1(CellIdConverterType::XToCellIdX(pt.x), CellIdConverterType::YToCellIdY(pt.y));
+ covering::Covering<RectId> covering(pt1, pt1, pt1);
+ covering.OutputToVector(ids);
+ TEST_EQUAL(ids, expectedIds, ());
+}
+
+// TODO: UNIT_TEST(CoverPolygon)
+/*
+UNIT_TEST(CoverPolygon)
+{
+ typedef Bounds<0, 0, 16, 16> TestBounds;
+
+ vector< CoordPointT > coords;
+ CoordsPusher c(coords);
+ c
+ (4.1, 4.1)
+ (6.1, 8.1)
+ (10.1, 10.1)
+ (8.1, 6.1)
+ (4.1, 4.1);
+
+ vector<CellIdT> cells;
+ CoverPolygon<TestBounds>(coords, 4, cells);
+
+ TEST_EQUAL(EnumCells(cells),
+ "0300 0301 0302 0303 0312 0313 0321 0330 0331 1220 0323 0332 0333 "
+ "1222 1223 2110 2111 3000 3001 2113 3002 3003 3012 3021 3030", ());
+}
+*/
diff --git a/indexer/indexer_tests/cell_covering_visualize_test.cpp b/indexer/indexer_tests/cell_covering_visualize_test.cpp
new file mode 100644
index 0000000000..e604da7adf
--- /dev/null
+++ b/indexer/indexer_tests/cell_covering_visualize_test.cpp
@@ -0,0 +1,115 @@
+#include "../../base/SRC_FIRST.hpp"
+
+#include "../cell_coverer.hpp"
+
+#include "../../qt_tstfrm/main_tester.hpp"
+#include "../../qt_tstfrm/tstwidgets.hpp"
+#include "../../yg/screen.hpp"
+#include "../../yg/skin.hpp"
+#include "../../geometry/screenbase.hpp"
+
+#include "../../testing/testing.hpp"
+
+#include "../../base/math.hpp"
+
+#include "../../std/bind.hpp"
+
+namespace
+{
+ class CoordsPusher
+ {
+ public:
+ typedef vector< CoordPointT > VT;
+
+ CoordsPusher(VT & v) : m_v(v) {}
+ CoordsPusher & operator()(CoordT x, CoordT y)
+ {
+ m_v.push_back(make_pair(x, y));
+ return *this;
+ }
+ private:
+ VT & m_v;
+ };
+
+ class CellTesterWidget : public tst::GLDrawWidget
+ {
+ vector< vector < CoordPointT > > m_lines;
+ vector<m2::RectD> m_rects;
+ ScreenBase m_screenBase;
+
+ public:
+ CellTesterWidget(vector< vector< CoordPointT > > & points,
+ vector<m2::RectD> & rects, m2::RectD const & rect)
+ {
+ m_lines.swap(points);
+ m_rects.swap(rects);
+ m_screenBase.SetFromRect(rect);
+ }
+
+ virtual void DoDraw(shared_ptr<yg::gl::Screen> pScreen)
+ {
+ for (size_t i = 0; i < m_rects.size(); ++i)
+ {
+ m2::RectF r(m_rects[i].minX(), m_rects[i].minY(), m_rects[i].maxX(), m_rects[i].maxY());
+ pScreen->immDrawSolidRect(r, yg::Color(255, 0, 0, 128));
+ }
+ for (size_t i = 0; i < m_lines.size(); ++i)
+ {
+ std::vector<m2::PointD> pts;
+ for (size_t j = 0; j < m_lines[i].size(); ++j)
+ pts.push_back(m_screenBase.GtoP(m2::PointD(m_lines[i][j].first, m_lines[i][j].second)));
+ pScreen->drawPath(&pts[0], pts.size(), m_skin->mapPenInfo(yg::PenInfo(yg::Color(0, 255, 0, 255), 2, 0, 0, 0)), 0);
+ }
+ }
+
+ virtual void DoResize(int w, int h)
+ {
+ m_p->onSize(w, h);
+ m_screenBase.OnSize(0, 0, w, h);
+ }
+ };
+
+/* QWidget * create_widget(vector< vector< CoordPointT > > & points,
+ vector<m2::RectD> & rects, m2::RectD const & rect)
+ {
+ return new CellTesterWidget(points, rects, rect);
+ }
+*/
+
+ // TODO: Unit test Visualize_Covering.
+ /*
+ UNIT_TEST(Visualize_Covering)
+ {
+ typedef Bounds<-180, -270, 180, 90> BoundsT;
+
+ vector< vector< CoordPointT > > points;
+ points.resize(1);
+ CoordsPusher c(points[0]);
+ c(53.888893127441406, 27.545927047729492)
+ (53.888614654541016, 27.546476364135742)
+ (53.889347076416016, 27.546852111816406)
+ (53.889404296875000, 27.546596527099609)
+ (53.888893127441406, 27.545927047729492);
+
+ vector<CellIdT> cells;
+ CoverPolygon<BoundsT>(points[0], 21, cells);
+
+ vector<m2::RectD> cellsRects;
+
+ m2::RectD viewport;
+ for (size_t i = 0; i < cells.size(); ++i)
+ {
+ CoordT minX, minY, maxX, maxY;
+ GetCellBounds<BoundsT>(cells[i], minX, minY, maxX, maxY);
+
+ m2::RectD r(minX, minY, maxX, maxY);
+ cellsRects.push_back(r);
+ viewport.Add(r);
+ }
+
+ tst::BaseTester tester;
+ tester.Run("cell covering testing",
+ bind(&create_widget, ref(points), ref(cellsRects), cref(viewport)));
+ }
+ */
+}
diff --git a/indexer/indexer_tests/cell_id_test.cpp b/indexer/indexer_tests/cell_id_test.cpp
new file mode 100644
index 0000000000..1be63e604c
--- /dev/null
+++ b/indexer/indexer_tests/cell_id_test.cpp
@@ -0,0 +1,53 @@
+#include "../cell_id.hpp"
+#include "../../testing/testing.hpp"
+#include "../../coding/hex.hpp"
+#include "../../base/pseudo_random.hpp"
+#include "../../std/cmath.hpp"
+#include "../../std/string.hpp"
+#include "../../std/cmath.hpp"
+
+typedef m2::CellId<30> CellIdT;
+
+UNIT_TEST(ToCellId)
+{
+ string s("2130000");
+ s.append(CellIdT::DEPTH_LEVELS - 1 - s.size(), '0');
+ TEST_EQUAL((CellIdConverter<Bounds<0, 0, 4, 4>, CellIdT>::ToCellId(1.5, 2.5).ToString()),
+ s, ());
+ TEST_EQUAL(CellIdT::FromString(s),
+ (CellIdConverter<Bounds<0, 0, 4, 4>, CellIdT>::ToCellId(1.5, 2.5)), ());
+}
+
+UNIT_TEST(CommonCell)
+{
+ TEST_EQUAL((CellIdConverter<Bounds<0, 0, 4, 4>, CellIdT>::Cover2PointsWithCell(
+ 3.5, 2.5, 2.5, 3.5)),
+ CellIdT::FromString("3"), ());
+ TEST_EQUAL((CellIdConverter<Bounds<0, 0, 4, 4>, CellIdT>::Cover2PointsWithCell(
+ 2.25, 1.75, 2.75, 1.25)),
+ CellIdT::FromString("12"), ());
+}
+
+namespace
+{
+ template <typename T1, typename T2>
+ bool PairsAlmostEqual(pair<T1, T1> const & p1, pair<T2, T2> const & p2)
+ {
+ return fabs(p1.first - p2.first) + fabs(p1.second - p2.second) < 0.00001;
+ }
+}
+
+UNIT_TEST(RandomRecode)
+{
+ PseudoRNG32 rng;
+ for (size_t i = 0; i < 1000; ++i)
+ {
+ uint32_t x = rng.Generate() % 2000;
+ uint32_t y = rng.Generate() % 1000;
+ pair<double, double> xy =
+ CellIdConverter<Bounds<0, 0, 2000, 1000>, CellIdT>::FromCellId(
+ CellIdConverter<Bounds<0, 0, 2000, 1000>, CellIdT>::ToCellId(x, y));
+ TEST(fabs(xy.first - x) < 0.0002, (x, y, xy));
+ TEST(fabs(xy.second - y) < 0.0001, (x, y, xy));
+ }
+}
diff --git a/indexer/indexer_tests/country_test.cpp b/indexer/indexer_tests/country_test.cpp
new file mode 100644
index 0000000000..88c6249cb3
--- /dev/null
+++ b/indexer/indexer_tests/country_test.cpp
@@ -0,0 +1,38 @@
+#include "../../testing/testing.hpp"
+
+#include "../country.hpp"
+
+#include "../../coding/file_writer.hpp"
+#include "../../coding/file_reader.hpp"
+
+#include "../../base/start_mem_debug.hpp"
+
+UNIT_TEST(CountrySerialization)
+{
+ string const TEST_URL = "http://someurl.com/somemap.dat";
+ uint64_t const TEST_SIZE = 123456790;
+ char const * TEST_FILE_NAME = "some_temporary_update_file.tmp";
+ mapinfo::Country c("North America", "USA", "Alaska");
+ c.AddUrl(mapinfo::TUrl(TEST_URL, TEST_SIZE));
+
+ {
+ mapinfo::TCountriesContainer countries;
+ countries[c.Group()].push_back(c);
+
+ FileWriter writer(TEST_FILE_NAME);
+ mapinfo::SaveCountries(countries, writer);
+ }
+
+ mapinfo::TCountriesContainer loadedCountries;
+ {
+ TEST( mapinfo::LoadCountries(loadedCountries, TEST_FILE_NAME), ());
+ }
+
+ TEST_GREATER(loadedCountries.size(), 0, ());
+ mapinfo::Country const & c2 = loadedCountries.begin()->second.front();
+ TEST_EQUAL(c.Group(), loadedCountries.begin()->first, ());
+ TEST_EQUAL(c.Group(), c2.Group(), ());
+ TEST_EQUAL(c.Name(), c2.Name(), ());
+ TEST_GREATER(c2.Urls().size(), 0, ());
+ TEST_EQUAL(*c.Urls().begin(), *c2.Urls().begin(), ());
+}
diff --git a/indexer/indexer_tests/data_header_test.cpp b/indexer/indexer_tests/data_header_test.cpp
new file mode 100644
index 0000000000..9e7c98aaf9
--- /dev/null
+++ b/indexer/indexer_tests/data_header_test.cpp
@@ -0,0 +1,41 @@
+#include "../../testing/testing.hpp"
+
+#include "../../coding/file_writer.hpp"
+#include "../feature_processor.hpp"
+#include "../data_header_reader.hpp"
+#include "../data_header.hpp"
+#include "../cell_id.hpp"
+
+UNIT_TEST(DataHeaderSerialization)
+{
+ char const * fileName = "mfj4340smn54123.tmp";
+ feature::DataHeader header1;
+  // normalize the rect to compensate for conversion rounding errors
+ m2::RectD rect(11.5, 12.6, 13.7, 14.8);
+ std::pair<int64_t, int64_t> cellIds = RectToInt64(rect);
+ rect = Int64ToRect(cellIds);
+
+ header1.SetBounds(rect);
+ uint64_t const controlNumber = 0x54321643;
+ {
+ FileWriter writer(fileName);
+ feature::WriteDataHeader(writer, header1);
+ writer.Write(&controlNumber, sizeof(controlNumber));
+ }
+
+ feature::DataHeader header2;
+ TEST_GREATER(feature::ReadDataHeader(fileName, header2), 0, ());
+
+ TEST_EQUAL(header1.Bounds(), header2.Bounds(), ());
+
+ {
+ FileReader reader(fileName);
+ uint64_t const headerSize = feature::ReadDatHeaderSize(reader);
+ TEST_GREATER(headerSize, 0, ());
+ uint64_t number = 0;
+ reader.Read(headerSize, &number, sizeof(number));
+ TEST_EQUAL(controlNumber, number, ());
+ }
+
+ FileWriter::DeleteFile(fileName);
+}
diff --git a/indexer/indexer_tests/feature_bucketer_test.cpp b/indexer/indexer_tests/feature_bucketer_test.cpp
new file mode 100644
index 0000000000..504cc6bd42
--- /dev/null
+++ b/indexer/indexer_tests/feature_bucketer_test.cpp
@@ -0,0 +1,56 @@
+#include "../../testing/testing.hpp"
+#include "../indexer_tool/feature_bucketer.hpp"
+#include "../feature.hpp"
+#include "../../base/stl_add.hpp"
+
+namespace
+{
+ class PushBackFeatureDebugStringOutput
+ {
+ public:
+ typedef map<string, vector<string> > * InitDataType;
+
+ PushBackFeatureDebugStringOutput(string const & name, InitDataType const & initData)
+ : m_pContainer(&((*initData)[name])) {}
+
+ void operator() (Feature const & feature)
+ {
+ m_pContainer->push_back(feature.DebugString());
+ }
+
+ private:
+ vector<string> * m_pContainer;
+ };
+
+ typedef feature::CellFeatureBucketer<
+ PushBackFeatureDebugStringOutput,
+ feature::SimpleFeatureClipper,
+ MercatorBounds,
+ RectId
+ > FeatureBucketer;
+
+ Feature MakeFeature(FeatureBuilder const & fb)
+ {
+ vector<char> data;
+ fb.Serialize(data);
+ return Feature(data, 0);
+ }
+}
+
+UNIT_TEST(FeatureBucketerSmokeTest)
+{
+ map<string, vector<string> > out, expectedOut;
+ FeatureBucketer bucketer(1, &out);
+
+ FeatureBuilder fb;
+ fb.AddPoint(m2::PointD(10, 10));
+ fb.AddPoint(m2::PointD(20, 20));
+ bucketer(MakeFeature(fb));
+
+ expectedOut["3"].push_back(MakeFeature(fb).DebugString());
+ TEST_EQUAL(out, expectedOut, ());
+
+ vector<string> bucketNames;
+ bucketer.GetBucketNames(MakeBackInsertFunctor(bucketNames));
+ TEST_EQUAL(bucketNames, vector<string>(1, "3"), ());
+}
diff --git a/indexer/indexer_tests/feature_test.cpp b/indexer/indexer_tests/feature_test.cpp
new file mode 100644
index 0000000000..33fff0bbc2
--- /dev/null
+++ b/indexer/indexer_tests/feature_test.cpp
@@ -0,0 +1,98 @@
+#include "../../testing/testing.hpp"
+#include "../feature.hpp"
+#include "../../geometry/point2d.hpp"
+#include "../../base/stl_add.hpp"
+
+namespace
+{
+ double Round(double x)
+ {
+ return static_cast<int>(x * 1000 + 0.5) / 1000.0;
+ }
+
+ struct PointAccumulator
+ {
+ vector<m2::PointD> m_V;
+
+ void operator() (CoordPointT p)
+ {
+ m_V.push_back(m2::PointD(Round(p.first), Round(p.second)));
+ }
+
+ void operator() (m2::PointD a, m2::PointD b, m2::PointD c)
+ {
+ m_V.push_back(m2::PointD(Round(a.x), Round(a.y)));
+ m_V.push_back(m2::PointD(Round(b.x), Round(b.y)));
+ m_V.push_back(m2::PointD(Round(c.x), Round(c.y)));
+ }
+ };
+}
+
+UNIT_TEST(Feature_Deserialize)
+{
+ vector<int> a;
+ a.push_back(1);
+ a.push_back(2);
+ FeatureBuilder builder;
+
+ builder.AddName("name");
+
+ vector<m2::PointD> points;
+ {
+ points.push_back(m2::PointD(1.0, 1.0));
+ points.push_back(m2::PointD(0.25, 0.5));
+ points.push_back(m2::PointD(0.25, 0.2));
+ points.push_back(m2::PointD(1.0, 1.0));
+ for (size_t i = 0; i < points.size(); ++i)
+ builder.AddPoint(points[i]);
+ }
+
+ vector<m2::PointD> triangles;
+ {
+ triangles.push_back(m2::PointD(0.5, 0.5));
+ triangles.push_back(m2::PointD(0.25, 0.5));
+ triangles.push_back(m2::PointD(1.0, 1.0));
+ for (size_t i = 0; i < triangles.size(); i += 3)
+ builder.AddTriangle(triangles[i], triangles[i+1], triangles[i+2]);
+ }
+
+ builder.AddLayer(3);
+
+ size_t const typesCount = 2;
+ uint32_t arrTypes[typesCount+1] = { 5, 7, 0 };
+ builder.AddTypes(arrTypes, arrTypes + typesCount);
+
+ vector<char> serial;
+ builder.Serialize(serial);
+ vector<char> serial1 = serial;
+ Feature f(serial1);
+
+ TEST_EQUAL(f.GetFeatureType(), Feature::FEATURE_TYPE_AREA, ());
+
+ Feature::GetTypesFn types;
+ f.ForEachTypeRef(types);
+ TEST_EQUAL(types.m_types, vector<uint32_t>(arrTypes, arrTypes + typesCount), ());
+
+ TEST_EQUAL(f.GetLayer(), 3, ());
+ TEST_EQUAL(f.GetName(), "name", ());
+ TEST_EQUAL(f.GetGeometrySize(), 4, ());
+ TEST_EQUAL(f.GetTriangleCount(), 1, ());
+
+ PointAccumulator featurePoints;
+ f.ForEachPointRef(featurePoints);
+ TEST_EQUAL(points, featurePoints.m_V, ());
+
+ PointAccumulator featureTriangles;
+ f.ForEachTriangleRef(featureTriangles);
+ TEST_EQUAL(triangles, featureTriangles.m_V, ());
+
+ TEST_LESS(fabs(f.GetLimitRect().minX() - 0.25), 0.0001, ());
+ TEST_LESS(fabs(f.GetLimitRect().minY() - 0.20), 0.0001, ());
+ TEST_LESS(fabs(f.GetLimitRect().maxX() - 1.00), 0.0001, ());
+ TEST_LESS(fabs(f.GetLimitRect().maxY() - 1.00), 0.0001, ());
+
+ vector<char> serial2;
+ f.GetFeatureBuilder().Serialize(serial2);
+ TEST_EQUAL(serial, serial2,
+ (f.DebugString(), Feature(serial2).DebugString()));
+}
diff --git a/indexer/indexer_tests/index_builder_test.cpp b/indexer/indexer_tests/index_builder_test.cpp
new file mode 100644
index 0000000000..a209e1e939
--- /dev/null
+++ b/indexer/indexer_tests/index_builder_test.cpp
@@ -0,0 +1,29 @@
+#include "../../testing/testing.hpp"
+#include "../index.hpp"
+#include "../index_builder.hpp"
+#include "../classif_routine.hpp"
+#include "../features_vector.hpp"
+#include "../feature_processor.hpp"
+#include "../../platform/platform.hpp"
+
+UNIT_TEST(BuildIndexTest)
+{
+ classificator::Read(GetPlatform().ResourcesDir());
+ string const dir = GetPlatform().WorkingDir();
+
+ FileReader reader(dir + "minsk-pass.dat");
+ // skip xml metadata header
+ uint64_t startOffset = feature::ReadDatHeaderSize(reader);
+ FileReader subReader = reader.SubReader(startOffset, reader.Size() - startOffset);
+ FeaturesVector<FileReader> featuresVector(subReader);
+
+ string serial;
+ {
+ MemWriter<string> serialWriter(serial);
+ indexer::BuildIndex(featuresVector, serialWriter, "build_index_test");
+ }
+
+ MemReader indexReader(&serial[0], serial.size());
+ Index<FileReader, MemReader>::Type index;
+ index.Add(reader, indexReader);
+}
diff --git a/indexer/indexer_tests/index_test.cpp b/indexer/indexer_tests/index_test.cpp
new file mode 100644
index 0000000000..bfb92f3e24
--- /dev/null
+++ b/indexer/indexer_tests/index_test.cpp
@@ -0,0 +1,22 @@
+#include "../../base/SRC_FIRST.hpp"
+
+#include "../index.hpp"
+#include "../index_builder.hpp"
+
+#include "../../testing/testing.hpp"
+#include "../../coding/file_reader.hpp"
+#include "../../coding/writer.hpp"
+#include "../../platform/platform.hpp"
+
+#include "../../std/string.hpp"
+
+UNIT_TEST(IndexParseTest)
+{
+ string const dir = GetPlatform().WorkingDir();
+
+ FileReader dataReader(dir + "minsk-pass.dat");
+ FileReader indexReader(dir + "minsk-pass.dat.idx");
+
+ Index<FileReader, FileReader>::Type index;
+ index.Add(dataReader, indexReader);
+}
diff --git a/indexer/indexer_tests/indexer_tests.pro b/indexer/indexer_tests/indexer_tests.pro
new file mode 100644
index 0000000000..fcab6aa6d7
--- /dev/null
+++ b/indexer/indexer_tests/indexer_tests.pro
@@ -0,0 +1,37 @@
+TARGET = indexer_tests
+CONFIG += console
+CONFIG -= app_bundle
+TEMPLATE = app
+
+ROOT_DIR = ../..
+DEPENDENCIES = qt_tstfrm yg map indexer platform geometry coding base expat sgitess
+
+include($$ROOT_DIR/common.pri)
+
+QT *= core gui opengl
+
+win32 {
+ LIBS += -lopengl32
+}
+
+win32-g++ {
+ LIBS += -lpthread
+}
+
+SOURCES += \
+ ../../testing/testingmain.cpp \
+ cell_covering_visualize_test.cpp \
+ cell_id_test.cpp \
+ cell_coverer_test.cpp \
+ test_type.cpp \
+ index_builder_test.cpp \
+ index_test.cpp \
+ interval_index_test.cpp \
+ point_to_int64_test.cpp \
+ mercator_test.cpp \
+ sort_and_merge_intervals_test.cpp \
+ feature_test.cpp \
+ data_header_test.cpp \
+ country_test.cpp \
+ feature_bucketer_test.cpp \
+
diff --git a/indexer/indexer_tests/interval_index_test.cpp b/indexer/indexer_tests/interval_index_test.cpp
new file mode 100644
index 0000000000..ad841bf157
--- /dev/null
+++ b/indexer/indexer_tests/interval_index_test.cpp
@@ -0,0 +1,157 @@
+#include "../../testing/testing.hpp"
+#include "../interval_index.hpp"
+#include "../interval_index_builder.hpp"
+#include "../../coding/reader.hpp"
+#include "../../coding/writer.hpp"
+#include "../../base/macros.hpp"
+#include "../../base/stl_add.hpp"
+#include "../../std/utility.hpp"
+#include "../../std/vector.hpp"
+
+UNIT_TEST(IntervalIndex_Simple)
+{
+ vector<pair<int64_t, uint32_t> > data;
+ data.push_back(make_pair(0xA0B1C2D100ULL, 0));
+ data.push_back(make_pair(0xA0B1C2D200ULL, 1));
+ data.push_back(make_pair(0xA0B2C2D100ULL, 2));
+ vector<char> serializedIndex;
+ MemWriter<vector<char> > writer(serializedIndex);
+ BuildIntervalIndex<5>(data.begin(), data.end(), writer, 2);
+ MemReader reader(&serializedIndex[0], serializedIndex.size());
+ IntervalIndex<uint32_t, MemReader> index(reader, 5);
+ {
+ uint32_t expected [] = {0, 1, 2};
+ vector<uint32_t> values;
+ index.ForEach(MakeBackInsertFunctor(values), 0ULL, 0xFFFFFFFFFFULL);
+ TEST_EQUAL(values, vector<uint32_t>(expected, expected + ARRAY_SIZE(expected)), ());
+ }
+ {
+ uint32_t expected [] = {0, 1};
+ vector<uint32_t> values;
+ index.ForEach(MakeBackInsertFunctor(values), 0xA0B1C2D100ULL, 0xA0B1C2D201ULL);
+ TEST_EQUAL(values, vector<uint32_t>(expected, expected + ARRAY_SIZE(expected)), ());
+ }
+ {
+ uint32_t expected [] = {0, 1};
+ vector<uint32_t> values;
+ index.ForEach(MakeBackInsertFunctor(values), 0x0ULL, 0xA0B1C30000ULL);
+ TEST_EQUAL(values, vector<uint32_t>(expected, expected + ARRAY_SIZE(expected)), ());
+ }
+ {
+ uint32_t expected [] = {0};
+ vector<uint32_t> values;
+ index.ForEach(MakeBackInsertFunctor(values), 0xA0B1C2D100ULL, 0xA0B1C2D101ULL);
+ TEST_EQUAL(values, vector<uint32_t>(expected, expected + ARRAY_SIZE(expected)), ());
+ }
+ {
+ uint32_t expected [] = {0};
+ vector<uint32_t> values;
+ index.ForEach(MakeBackInsertFunctor(values), 0xA0B1C2D100ULL, 0xA0B1C2D200ULL);
+ TEST_EQUAL(values, vector<uint32_t>(expected, expected + ARRAY_SIZE(expected)), ());
+ }
+ {
+ vector<uint32_t> values;
+ index.ForEach(MakeBackInsertFunctor(values), 0xA0B1C2D100ULL, 0xA0B1C2D100ULL);
+ TEST_EQUAL(values, vector<uint32_t>(), ());
+ }
+ {
+ vector<uint32_t> values;
+ index.ForEach(MakeBackInsertFunctor(values), 0xA0B1000000ULL, 0xA0B1B20000ULL);
+ TEST_EQUAL(values, vector<uint32_t>(), ());
+ }
+}
+
+UNIT_TEST(IntervalIndex_Empty)
+{
+ vector<pair<int64_t, uint32_t> > data;
+ vector<char> serializedIndex;
+ MemWriter<vector<char> > writer(serializedIndex);
+ BuildIntervalIndex<5>(data.begin(), data.end(), writer, 2);
+ MemReader reader(&serializedIndex[0], serializedIndex.size());
+ IntervalIndex<uint32_t, MemReader> index(reader, 5);
+ {
+ vector<uint32_t> values;
+ index.ForEach(MakeBackInsertFunctor(values), 0ULL, 0xFFFFFFFFFFULL);
+ TEST_EQUAL(values, vector<uint32_t>(), ());
+ }
+}
+
+UNIT_TEST(IntervalIndex_Simple2)
+{
+ vector<pair<int64_t, uint32_t> > data;
+ data.push_back(make_pair(0xA0B1C2D200ULL, 0));
+ data.push_back(make_pair(0xA0B1C2D200ULL, 1));
+ data.push_back(make_pair(0xA0B1C2D200ULL, 3));
+ data.push_back(make_pair(0xA0B2C2D200ULL, 2));
+ vector<char> serializedIndex;
+ MemWriter<vector<char> > writer(serializedIndex);
+ BuildIntervalIndex<5>(data.begin(), data.end(), writer, 2);
+ MemReader reader(&serializedIndex[0], serializedIndex.size());
+ IntervalIndex<uint32_t, MemReader> index(reader, 5);
+ {
+ uint32_t expected [] = {0, 1, 2, 3};
+ vector<uint32_t> values;
+ index.ForEach(MakeBackInsertFunctor(values), 0, 0xFFFFFFFFFFULL);
+ sort(values.begin(), values.end());
+ TEST_EQUAL(values, vector<uint32_t>(expected, expected + ARRAY_SIZE(expected)), ());
+ }
+}
+
+UNIT_TEST(IntervalIndex_Simple3)
+{
+ vector<pair<uint64_t, uint32_t> > data;
+ data.push_back(make_pair(0x0100ULL, 0));
+ data.push_back(make_pair(0x0200ULL, 1));
+ vector<char> serializedIndex;
+ MemWriter<vector<char> > writer(serializedIndex);
+ BuildIntervalIndex<2>(data.begin(), data.end(), writer, 1);
+ MemReader reader(&serializedIndex[0], serializedIndex.size());
+ IntervalIndex<uint32_t, MemReader> index(reader, 2);
+ {
+ uint32_t expected [] = {0, 1};
+ vector<uint32_t> values;
+ index.ForEach(MakeBackInsertFunctor(values), 0, 0xFFFFULL);
+ sort(values.begin(), values.end());
+ TEST_EQUAL(values, vector<uint32_t>(expected, expected + ARRAY_SIZE(expected)), ());
+ }
+}
+
+UNIT_TEST(IntervalIndex_Simple4)
+{
+ vector<pair<uint64_t, uint32_t> > data;
+ data.push_back(make_pair(0x01030400ULL, 0));
+ data.push_back(make_pair(0x02030400ULL, 1));
+ vector<char> serializedIndex;
+ MemWriter<vector<char> > writer(serializedIndex);
+ BuildIntervalIndex<4>(data.begin(), data.end(), writer, 1);
+ MemReader reader(&serializedIndex[0], serializedIndex.size());
+ IntervalIndex<uint32_t, MemReader> index(reader, 4);
+ {
+ uint32_t expected [] = {0, 1};
+ vector<uint32_t> values;
+ index.ForEach(MakeBackInsertFunctor(values), 0, 0xFFFFFFFFULL);
+ sort(values.begin(), values.end());
+ TEST_EQUAL(values, vector<uint32_t>(expected, expected + ARRAY_SIZE(expected)), ());
+ }
+}
+
+UNIT_TEST(IntervalIndex_Simple5)
+{
+ vector<pair<uint64_t, uint32_t> > data;
+ data.push_back(make_pair(0xA0B1C2D200ULL, 0));
+ data.push_back(make_pair(0xA0B1C2D200ULL, 1));
+ data.push_back(make_pair(0xA0B1C2D200ULL, 3));
+ data.push_back(make_pair(0xA0B2C2D200ULL, 2));
+ vector<char> serializedIndex;
+ MemWriter<vector<char> > writer(serializedIndex);
+ BuildIntervalIndex<5>(data.begin(), data.end(), writer, 1);
+ MemReader reader(&serializedIndex[0], serializedIndex.size());
+ IntervalIndex<uint32_t, MemReader> index(reader, 5);
+ {
+ uint32_t expected [] = {0, 1, 2, 3};
+ vector<uint32_t> values;
+ index.ForEach(MakeBackInsertFunctor(values), 0, 0xFFFFFFFFFFULL);
+ sort(values.begin(), values.end());
+ TEST_EQUAL(values, vector<uint32_t>(expected, expected + ARRAY_SIZE(expected)), ());
+ }
+}
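
The tests above always collect matches through MakeBackInsertFunctor; any small functor with a compatible operator() can be passed instead. A minimal sketch under that assumption (the ForEach calls above accept a temporary functor, so one holding a reference to external state works; uint32_t is the value type used throughout these tests), counting matches instead of storing them:

struct CountInserter
{
  uint32_t & m_count;
  explicit CountInserter(uint32_t & count) : m_count(count) {}
  void operator() (uint32_t /*value*/) const { ++m_count; }
};

// Usage, with reader and index built exactly as in IntervalIndex_Simple:
//   uint32_t count = 0;
//   index.ForEach(CountInserter(count), 0ULL, 0xFFFFFFFFFFULL);
//   TEST_EQUAL(count, 3, ());
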
diff --git a/indexer/indexer_tests/mercator_test.cpp b/indexer/indexer_tests/mercator_test.cpp
new file mode 100644
index 0000000000..f3ab41fcbd
--- /dev/null
+++ b/indexer/indexer_tests/mercator_test.cpp
@@ -0,0 +1,49 @@
+#include "../../base/SRC_FIRST.hpp"
+
+#include "../../testing/testing.hpp"
+#include "../mercator.hpp"
+#include "../../base/math.hpp"
+
+UNIT_TEST(MercatorTestGrid)
+{
+ double const eps = 0.0000001;
+ for (int lat = -85; lat <= 85; ++lat)
+ {
+ for (int lon = -180; lon <= 180; ++lon)
+ {
+ double const x = MercatorBounds::LonToX(lon);
+ double const y = MercatorBounds::LatToY(lat);
+ double const lat1 = MercatorBounds::YToLat(y);
+ double const lon1 = MercatorBounds::XToLon(x);
+
+ // Normal assumption for any projection.
+ TEST_ALMOST_EQUAL(static_cast<double>(lat), lat1, ());
+ TEST_ALMOST_EQUAL(static_cast<double>(lon), lon1, ());
+
+ // x is actually lon unmodified.
+ TEST_ALMOST_EQUAL(x, static_cast<double>(lon), ());
+
+ if (lat == 0)
+ {
+        // TODO: Investigate how to make the Mercator transform more precise.
+        // The error is too large for TEST_ALMOST_EQUAL(y, 0.0, ());
+ TEST_LESS(fabs(y), eps, (lat, y, lat1));
+ }
+ }
+ }
+}
+
+UNIT_TEST(MercatorTest)
+{
+ double const eps = 0.0000001;
+ double lon = 63.45421;
+ double x = MercatorBounds::LonToX(lon);
+ double lon1 = MercatorBounds::XToLon(x);
+  TEST_LESS(fabs(lon - lon1), eps, ("Rounding error is too big"));
+ double lat = 34.28754;
+ double y = MercatorBounds::LatToY(lat);
+ double lat1 = MercatorBounds::YToLat(y);
+  TEST_LESS(fabs(lat - lat1), eps, ("Rounding error is too big"));
+ TEST_LESS(fabs(MercatorBounds::maxX - MercatorBounds::maxY), eps, ("Non-square maxX and maxY"));
+ TEST_LESS(fabs(MercatorBounds::minX - MercatorBounds::minY), eps, ("Non-square minX and minY"));
+}
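
The round-trip and "x equals lon" checks above match the textbook spherical Mercator projection scaled to degrees. A reference sketch of those formulas, offered only as an illustration and not as the actual MercatorBounds implementation:

#include <cmath>

double const pi = 3.14159265358979323846;

double RefLonToX(double lon) { return lon; }  // x is longitude unmodified
double RefLatToY(double lat)
{
  return 180.0 / pi * std::log(std::tan(pi / 4.0 + lat * pi / 360.0));
}
double RefYToLat(double y)
{
  return 180.0 / pi * (2.0 * std::atan(std::exp(y * pi / 180.0)) - pi / 2.0);
}
// RefYToLat(RefLatToY(lat)) returns lat up to floating-point error, which is
// exactly the property MercatorTestGrid asserts.
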
diff --git a/indexer/indexer_tests/point_to_int64_test.cpp b/indexer/indexer_tests/point_to_int64_test.cpp
new file mode 100644
index 0000000000..f06f4806fb
--- /dev/null
+++ b/indexer/indexer_tests/point_to_int64_test.cpp
@@ -0,0 +1,48 @@
+#include "../../testing/testing.hpp"
+#include "../cell_id.hpp"
+#include "../../std/cmath.hpp"
+
+UNIT_TEST(PointToInt64_Simple)
+{
+ CoordPointT orig(1.25, 1.3);
+ CoordPointT conv = Int64ToPoint(PointToInt64(orig.first, orig.second));
+ TEST(fabs(orig.first - conv.first ) < 0.000001 &&
+ fabs(orig.second - conv.second) < 0.000001,
+ (orig, conv));
+}
+
+UNIT_TEST(PointToInt64_Border)
+{
+ CoordPointT orig(180, 90);
+ CoordPointT conv = Int64ToPoint(PointToInt64(orig.first, orig.second));
+ TEST(fabs(orig.first - conv.first ) < 0.000001 &&
+ fabs(orig.second - conv.second) < 0.000001,
+ (orig, conv));
+}
+
+UNIT_TEST(PointToInt64_908175295886057813)
+{
+ int64_t const id1 = 908175295886057813LL;
+ CoordPointT const pt1 = Int64ToPoint(id1);
+ int64_t const id2 = PointToInt64(pt1);
+ TEST_EQUAL(id1, id2, (pt1));
+}
+
+UNIT_TEST(PointToInt64_MinMax)
+{
+ for (int ix = -180; ix <= 180; ix += 180)
+ {
+ for (int iy = -180; iy <= 180; iy += 180)
+ {
+ CoordPointT const pt(ix, iy);
+ int64_t const id = PointToInt64(pt);
+ CoordPointT const pt1 = Int64ToPoint(id);
+ TEST_LESS(fabs(pt1.first - pt.first ), 0.000001, (pt, pt1, id));
+ TEST_LESS(fabs(pt1.second - pt.second), 0.000001, (pt, pt1, id));
+ TEST_LESS(pt1.first, 180.000001, ());
+ TEST_LESS(pt1.second, 180.000001, ());
+ int64_t const id1 = PointToInt64(pt1);
+ TEST_EQUAL(id, id1, (pt, pt1));
+ }
+ }
+}
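
The 0.000001 tolerance above is generous compared to what a 64-bit packed point can resolve: if, say, about 31 bits were used per coordinate over the 360-degree range (an assumption about PointToInt64, not something stated in this diff), one quantization step would be roughly 360 / 2^31, i.e. about 1.7e-7 degrees. A sketch of that bound:

#include <cmath>

// Quantization step in degrees for a hypothetical bitsPerCoordinate-bit encoding.
double QuantizationStepDeg(unsigned bitsPerCoordinate)
{
  return 360.0 / std::pow(2.0, double(bitsPerCoordinate));
}
// QuantizationStepDeg(31) is about 1.68e-7, comfortably below the 1e-6 tolerance.
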
diff --git a/indexer/indexer_tests/sort_and_merge_intervals_test.cpp b/indexer/indexer_tests/sort_and_merge_intervals_test.cpp
new file mode 100644
index 0000000000..61aac031fe
--- /dev/null
+++ b/indexer/indexer_tests/sort_and_merge_intervals_test.cpp
@@ -0,0 +1,62 @@
+#include "../../testing/testing.hpp"
+#include "../covering.hpp"
+
+UNIT_TEST(SortAndMergeIntervals_1Interval)
+{
+ vector<pair<int64_t, int64_t> > v;
+ v.push_back(make_pair(1ULL, 2ULL));
+ TEST_EQUAL(covering::SortAndMergeIntervals(v), v, ());
+}
+UNIT_TEST(SortAndMergeIntervals_2NotSortedNotOverlapping)
+{
+ vector<pair<int64_t, int64_t> > v;
+ v.push_back(make_pair(3ULL, 4ULL));
+ v.push_back(make_pair(1ULL, 2ULL));
+ vector<pair<int64_t, int64_t> > e;
+ e.push_back(make_pair(1ULL, 2ULL));
+ e.push_back(make_pair(3ULL, 4ULL));
+ TEST_EQUAL(covering::SortAndMergeIntervals(v), e, ());
+}
+
+UNIT_TEST(SortAndMergeIntervals_BorderMerge)
+{
+ vector<pair<int64_t, int64_t> > v;
+ v.push_back(make_pair(1ULL, 2ULL));
+ v.push_back(make_pair(2ULL, 3ULL));
+ vector<pair<int64_t, int64_t> > e;
+ e.push_back(make_pair(1ULL, 3ULL));
+ TEST_EQUAL(covering::SortAndMergeIntervals(v), e, ());
+}
+
+UNIT_TEST(SortAndMergeIntervals_Overlap)
+{
+ vector<pair<int64_t, int64_t> > v;
+ v.push_back(make_pair(1ULL, 3ULL));
+ v.push_back(make_pair(2ULL, 4ULL));
+ vector<pair<int64_t, int64_t> > e;
+ e.push_back(make_pair(1ULL, 4ULL));
+ TEST_EQUAL(covering::SortAndMergeIntervals(v), e, ());
+}
+
+UNIT_TEST(SortAndMergeIntervals_Contain)
+{
+ vector<pair<int64_t, int64_t> > v;
+ v.push_back(make_pair(2ULL, 3ULL));
+ v.push_back(make_pair(1ULL, 4ULL));
+ vector<pair<int64_t, int64_t> > e;
+ e.push_back(make_pair(1ULL, 4ULL));
+ TEST_EQUAL(covering::SortAndMergeIntervals(v), e, ());
+}
+
+UNIT_TEST(SortAndMergeIntervals_ContainAndTouchBorder)
+{
+ vector<pair<int64_t, int64_t> > v;
+ v.push_back(make_pair(1ULL, 3ULL));
+ v.push_back(make_pair(1ULL, 4ULL));
+ vector<pair<int64_t, int64_t> > e;
+ e.push_back(make_pair(1ULL, 4ULL));
+ TEST_EQUAL(covering::SortAndMergeIntervals(v), e, ());
+}
+
+
+
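
The behaviour pinned down by these tests (sort by interval start, then merge intervals that overlap or touch at a border) can be restated in a few lines. This is a reference formulation consistent with the tests, not the actual covering::SortAndMergeIntervals code; it assumes the usual ../../std wrappers for vector, utility and algorithm:

typedef vector<pair<int64_t, int64_t> > IntervalsT;

IntervalsT RefSortAndMergeIntervals(IntervalsT v)
{
  sort(v.begin(), v.end());  // lexicographic sort: by begin, then by end
  IntervalsT res;
  for (size_t i = 0; i < v.size(); ++i)
  {
    if (!res.empty() && v[i].first <= res.back().second)
      res.back().second = max(res.back().second, v[i].second);  // overlap or shared border
    else
      res.push_back(v[i]);
  }
  return res;
}
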
diff --git a/indexer/indexer_tests/test_type.cpp b/indexer/indexer_tests/test_type.cpp
new file mode 100644
index 0000000000..33df7d34a0
--- /dev/null
+++ b/indexer/indexer_tests/test_type.cpp
@@ -0,0 +1,57 @@
+#include "../../base/SRC_FIRST.hpp"
+
+#include "../../testing/testing.hpp"
+
+#include "../classificator.hpp"
+
+namespace
+{
+ void check_values_array(uint8_t values[], uint8_t count)
+ {
+ uint32_t type = ftype::GetEmptyValue();
+ uint8_t value;
+ bool res = ftype::GetValue(type, 0, value);
+ TEST_EQUAL(res, false, ());
+ res = ftype::GetValue(type, 4, value);
+ TEST_EQUAL(res, false, ());
+
+ for (uint8_t i = 0; i < count; ++i)
+ ftype::PushValue(type, values[i]);
+
+ for (uint8_t i = 0; i < count; ++i)
+ {
+ res = ftype::GetValue(type, i, value);
+ TEST_EQUAL(res, true, ());
+ TEST_EQUAL(value, values[i], (value, values[i]));
+ }
+
+ for (char i = count-1; i >= 0; --i)
+ {
+ ftype::PopValue(type);
+
+ res = ftype::GetValue(type, i, value);
+ TEST_EQUAL(res, false, ());
+ }
+
+ TEST_EQUAL(type, ftype::GetEmptyValue(), (type));
+ }
+}
+
+UNIT_TEST(SetGetTypes)
+{
+ uint8_t v1[] = { 6, 30, 50, 0, 1 };
+ check_values_array(v1, 5);
+ check_values_array(v1, 4);
+
+ uint8_t v2[] = { 0, 0, 0, 0, 0 };
+ check_values_array(v2, 5);
+ check_values_array(v2, 4);
+
+ uint8_t v3[] = { 1, 1, 1, 1, 1 };
+ check_values_array(v3, 5);
+ check_values_array(v3, 4);
+
+ uint8_t v4[] = { 63, 63, 63, 63, 63 };
+ check_values_array(v4, 5);
+ check_values_array(v4, 4);
+}
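
For reference, the ftype calls exercised above compose a compact type value level by level; the boundary value 63 in the last case hints at six bits per level, although that layout is not spelled out in this diff. A minimal usage sketch of just the API the test relies on:

#include "../classificator.hpp"

void SketchTwoLevelType()
{
  uint32_t type = ftype::GetEmptyValue();
  ftype::PushValue(type, 6);    // first (coarsest) level
  ftype::PushValue(type, 30);   // second level

  uint8_t value = 0;
  if (ftype::GetValue(type, 1, value))
  {
    // value == 30 here; two PopValue calls bring type back to GetEmptyValue()
    ftype::PopValue(type);
    ftype::PopValue(type);
  }
}
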
diff --git a/indexer/indexer_tool/data_cache_file.hpp b/indexer/indexer_tool/data_cache_file.hpp
new file mode 100644
index 0000000000..791ae4e5c1
--- /dev/null
+++ b/indexer/indexer_tool/data_cache_file.hpp
@@ -0,0 +1,244 @@
+#pragma once
+
+#include "../../indexer/file_reader_stream.hpp"
+#include "../../indexer/file_writer_stream.hpp"
+#include "../../indexer/osm_decl.hpp"
+
+#include "../../coding/file_reader.hpp"
+#include "../../coding/file_writer.hpp"
+
+#include "../../base/logging.hpp"
+
+#include "../../std/utility.hpp"
+#include "../../std/vector.hpp"
+#include "../../std/algorithm.hpp"
+#include "../../std/limits.hpp"
+#include "../../std/exception.hpp"
+
+
+/// Classes for reading and writing arbitrary data in a file, with an in-memory
+/// map of offsets for fast lookup by a user id.
+namespace cache
+{
+ namespace detail
+ {
+ template <class TFile, class TValue> class file_map_t
+ {
+ typedef pair<uint64_t, TValue> element_t;
+ typedef vector<element_t> id_cont_t;
+ id_cont_t m_memory;
+ TFile m_file;
+
+ static const size_t s_max_count = 1024;
+
+ struct element_less_t
+ {
+ bool operator() (element_t const & r1, element_t const & r2) const
+ {
+ return ((r1.first == r2.first) ? r1.second < r2.second : r1.first < r2.first);
+ }
+ bool operator() (element_t const & r1, uint64_t r2) const
+ {
+ return (r1.first < r2);
+ }
+ bool operator() (uint64_t r1, element_t const & r2) const
+ {
+ return (r1 < r2.first);
+ }
+ };
+
+ size_t uint64_to_size(uint64_t v)
+ {
+      ASSERT ( v < numeric_limits<size_t>::max(), ("Value is too large for a memory address: ", v) );
+ return static_cast<size_t>(v);
+ }
+
+ public:
+ file_map_t(string const & name) : m_file(name.c_str()) {}
+
+ string get_name() const { return m_file.GetName(); }
+
+ void flush_to_file()
+ {
+ if (!m_memory.empty())
+ {
+ m_file.Write(&m_memory[0], m_memory.size() * sizeof(element_t));
+ m_memory.clear();
+ }
+ }
+
+ void read_to_memory()
+ {
+ m_memory.clear();
+ uint64_t const fileSize = m_file.Size();
+ if (fileSize == 0) return;
+
+ LOG_SHORT(LINFO, ("Reading offsets started in file ", get_name()));
+
+ try
+ {
+ m_memory.resize(uint64_to_size(fileSize / sizeof(element_t)));
+ }
+ catch (exception const &) // bad_alloc
+ {
+ LOG(LCRITICAL, ("Insufficient memory for required offset map"));
+ }
+
+ m_file.Read(0, &m_memory[0], uint64_to_size(fileSize));
+
+ sort(m_memory.begin(), m_memory.end(), element_less_t());
+
+ LOG_SHORT(LINFO, ("Reading offsets finished"));
+ }
+
+ void write(uint64_t k, TValue const & v)
+ {
+ if (m_memory.size() > s_max_count)
+ flush_to_file();
+
+ m_memory.push_back(make_pair(k, v));
+ }
+
+ bool read_one(uint64_t k, TValue & v) const
+ {
+ typename id_cont_t::const_iterator i =
+ lower_bound(m_memory.begin(), m_memory.end(), k, element_less_t());
+ if ((i != m_memory.end()) && ((*i).first == k))
+ {
+ v = (*i).second;
+ return true;
+ }
+ return false;
+ }
+
+ typedef typename id_cont_t::const_iterator iter_t;
+ pair<iter_t, iter_t> GetRange(uint64_t k) const
+ {
+ return equal_range(m_memory.begin(), m_memory.end(), k, element_less_t());
+ }
+
+ template <class ToDo> void for_each_ret(uint64_t k, ToDo & toDo) const
+ {
+ pair<iter_t, iter_t> range = GetRange(k);
+ for (; range.first != range.second; ++range.first)
+ if (toDo((*range.first).second))
+ return;
+ }
+ };
+ }
+
+ template <class TStream, class TOffsetFile> class DataFileBase
+ {
+ public:
+ typedef uint64_t user_id_t;
+
+ protected:
+ TStream m_stream;
+ detail::file_map_t<TOffsetFile, uint64_t> m_offsets;
+
+ public:
+ DataFileBase(string const & name)
+ : m_stream(name.c_str()), m_offsets(name + OFFSET_EXT)
+ {
+ }
+ };
+
+ class DataFileWriter : public DataFileBase<FileWriterStream, FileWriter>
+ {
+ typedef DataFileBase<FileWriterStream, FileWriter> base_type;
+
+ static const size_t s_max_count = 1024;
+
+ public:
+ DataFileWriter(string const & name) : base_type(name) {}
+
+ template <class T> void Write(user_id_t id, T const & t)
+ {
+ m_offsets.write(id, m_stream.Pos());
+ m_stream << t;
+ }
+
+ void SaveOffsets()
+ {
+ m_offsets.flush_to_file();
+ }
+ };
+
+ class DataFileReader : public DataFileBase<FileReaderStream, FileReader>
+ {
+ typedef DataFileBase<FileReaderStream, FileReader> base_type;
+
+ public:
+ DataFileReader(string const & name) : base_type(name) {}
+
+ template <class T> bool Read(user_id_t id, T & t)
+ {
+ uint64_t pos;
+ if (m_offsets.read_one(id, pos))
+ {
+ m_stream.Seek(pos);
+ m_stream >> t;
+ return true;
+ }
+ else
+ {
+ LOG_SHORT(LWARNING, ("Can't find offset in file ", m_offsets.get_name(), " by id ", id) );
+ return false;
+ }
+ }
+
+ void LoadOffsets()
+ {
+ m_offsets.read_to_memory();
+ }
+ };
+
+#pragma pack(push, 1)
+ struct MappedWay
+ {
+ uint64_t m_id;
+ int m_type;
+
+ MappedWay() {}
+ MappedWay(uint64_t id, int type) : m_id(id), m_type(type) {}
+
+ bool operator<(MappedWay const & r) const
+ {
+ return ((m_id == r.m_id) ? m_type < r.m_type : m_id < r.m_id);
+ }
+
+ enum { coast_direct = 0,
+ empty_direct = 1,
+ coast_opposite = 2,
+ empty_opposite = 3 };
+ };
+#pragma pack(pop)
+
+ template <class TNodesHolder, class TData, class TFile>
+ class BaseFileHolder
+ {
+ protected:
+ typedef typename TData::user_id_t user_id_t;
+
+ TNodesHolder & m_nodes;
+
+ TData m_ways, m_relations;
+
+ typedef detail::file_map_t<TFile, uint64_t> offset_map_t;
+ offset_map_t m_nodes2rel, m_ways2rel;
+
+ typedef detail::file_map_t<TFile, MappedWay> ways_map_t;
+ ways_map_t m_mappedWays;
+
+ public:
+ BaseFileHolder(TNodesHolder & nodes, string const & dir)
+ : m_nodes(nodes),
+ m_ways(dir + WAYS_FILE),
+ m_relations(dir + RELATIONS_FILE),
+ m_nodes2rel(dir + NODES_FILE + ID2REL_EXT),
+ m_ways2rel(dir + WAYS_FILE + ID2REL_EXT),
+ m_mappedWays(dir + MAPPED_WAYS)
+ {
+ }
+ };
+}
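
A condensed write/read round trip with the classes above, mirroring how data_generator.cpp and feature_generator.cpp later in this commit use them (WayElement and WAYS_FILE come from osm_decl.hpp):

void SketchCacheRoundTrip(string const & dir)
{
  WayElement way;
  way.nodes.push_back(1);
  way.nodes.push_back(2);

  {
    cache::DataFileWriter ways(dir + WAYS_FILE);
    ways.Write(100 /* way id */, way);  // remembers the current stream position in the offset map
    ways.SaveOffsets();                 // flushes the id -> offset map to the OFFSET_EXT companion file
  }

  cache::DataFileReader ways2(dir + WAYS_FILE);
  ways2.LoadOffsets();                  // reads the offset map back into memory and sorts it
  WayElement restored;
  ways2.Read(100, restored);            // seeks to the stored offset and deserializes
}
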
diff --git a/indexer/indexer_tool/data_generator.cpp b/indexer/indexer_tool/data_generator.cpp
new file mode 100644
index 0000000000..8aacd17287
--- /dev/null
+++ b/indexer/indexer_tool/data_generator.cpp
@@ -0,0 +1,157 @@
+#include "data_generator.hpp"
+#include "data_cache_file.hpp"
+#include "first_pass_parser.hpp"
+
+#include "../../indexer/std_serialization.hpp"
+#include "../../indexer/osm_decl.hpp"
+
+#include "../../base/logging.hpp"
+
+#include "../../std/bind.hpp"
+
+#include "../../base/start_mem_debug.hpp"
+
+
+namespace data
+{
+
+template <class TNodesHolder>
+class FileHolder : public cache::BaseFileHolder<TNodesHolder, cache::DataFileWriter, FileWriter>
+{
+ typedef cache::BaseFileHolder<TNodesHolder, cache::DataFileWriter, FileWriter> base_type;
+
+ typedef typename base_type::user_id_t user_id_t;
+
+ template <class TMap, class TVec>
+ void add_id2rel_vector(TMap & rMap, user_id_t relid, TVec const & v)
+ {
+ for (size_t i = 0; i < v.size(); ++i)
+ rMap.write(v[i].first, relid);
+ }
+
+public:
+ FileHolder(TNodesHolder & nodes, string const & dir) : base_type(nodes, dir) {}
+
+ void AddNode(uint64_t id, double lat, double lng)
+ {
+ this->m_nodes.AddPoint(id, lat, lng);
+ }
+
+ void AddWay(user_id_t id, WayElement const & e)
+ {
+ this->m_ways.Write(id, e);
+ }
+
+ void AddRelation(user_id_t id, RelationElement const & e)
+ {
+ this->m_relations.Write(id, e);
+
+ add_id2rel_vector(this->m_nodes2rel, id, e.nodes);
+ add_id2rel_vector(this->m_ways2rel, id, e.ways);
+ }
+
+ void AddMappedWay(user_id_t id, WayElement const & e, bool emptyTags)
+ {
+ typedef cache::MappedWay way_t;
+
+ int const type = (emptyTags ? way_t::empty_direct : way_t::coast_direct);
+
+ this->m_mappedWays.write(e.nodes.front(), way_t(id, type)); // direct
+ this->m_mappedWays.write(e.nodes.back(), way_t(id, type + 2)); // opposite
+ }
+
+ void SaveIndex()
+ {
+ this->m_ways.SaveOffsets();
+ this->m_relations.SaveOffsets();
+
+ this->m_nodes2rel.flush_to_file();
+ this->m_ways2rel.flush_to_file();
+ this->m_mappedWays.flush_to_file();
+ }
+};
+
+
+class points_in_file_base
+{
+protected:
+ FileWriter m_file;
+ progress_policy m_progress;
+
+public:
+ points_in_file_base(string const & name, size_t factor) : m_file(name.c_str())
+ {
+ m_progress.Begin(name, factor);
+ }
+
+ uint64_t GetCount() const { return m_progress.GetCount(); }
+};
+
+class points_in_file : public points_in_file_base
+{
+public:
+ points_in_file(string const & name) : points_in_file_base(name, 1000) {}
+
+ void AddPoint(uint64_t id, double lat, double lng)
+ {
+ LatLon ll;
+ ll.lat = lat;
+ ll.lon = lng;
+ m_file.Seek(id * sizeof(ll));
+ m_file.Write(&ll, sizeof(ll));
+
+ m_progress.Inc();
+ }
+};
+
+class points_in_file_light : public points_in_file_base
+{
+public:
+ points_in_file_light(string const & name) : points_in_file_base(name, 10000) {}
+
+ void AddPoint(uint64_t id, double lat, double lng)
+ {
+ LatLonPos ll;
+ ll.pos = id;
+ ll.lat = lat;
+ ll.lon = lng;
+ m_file.Write(&ll, sizeof(ll));
+
+ m_progress.Inc();
+ }
+};
+
+template <class TNodesHolder>
+bool GenerateImpl(string const & dir)
+{
+ try
+ {
+ TNodesHolder nodes(dir + NODES_FILE);
+ typedef FileHolder<TNodesHolder> holder_t;
+ holder_t holder(nodes, dir);
+
+ FirstPassParser<holder_t> parser(holder);
+ ParseXMLFromStdIn(parser);
+
+ LOG(LINFO, ("Added points count = ", nodes.GetCount()));
+
+ holder.SaveIndex();
+ }
+ catch (Writer::Exception const & e)
+ {
+ LOG(LERROR, ("Error with file ", e.what()));
+ return false;
+ }
+
+ return true;
+}
+
+bool GenerateToFile(string const & dir, bool lightNodes)
+{
+ if (lightNodes)
+ return GenerateImpl<points_in_file_light>(dir);
+ else
+ return GenerateImpl<points_in_file>(dir);
+}
+
+}
diff --git a/indexer/indexer_tool/data_generator.hpp b/indexer/indexer_tool/data_generator.hpp
new file mode 100644
index 0000000000..f31dd08189
--- /dev/null
+++ b/indexer/indexer_tool/data_generator.hpp
@@ -0,0 +1,8 @@
+#pragma once
+
+#include "../../std/string.hpp"
+
+namespace data
+{
+ bool GenerateToFile(string const & dir, bool lightNodes);
+}
diff --git a/indexer/indexer_tool/feature_bucketer.hpp b/indexer/indexer_tool/feature_bucketer.hpp
new file mode 100644
index 0000000000..39756392d3
--- /dev/null
+++ b/indexer/indexer_tool/feature_bucketer.hpp
@@ -0,0 +1,126 @@
+#pragma once
+
+#include "../../base/base.hpp"
+
+#include "../../coding/file_writer.hpp"
+
+#include "../../geometry/rect2d.hpp"
+
+#include "../../indexer/feature.hpp"
+#include "../../indexer/feature_visibility.hpp"
+
+#include "../../std/map.hpp"
+#include "../../std/string.hpp"
+
+#include <boost/scoped_ptr.hpp>
+
+#define WORLD_FILE_NAME "world"
+
+namespace feature
+{
+
+// Groups features in buckets according to their coordinates.
+template <class FeatureOutT, class FeatureClipperT, class BoundsT, typename CellIdT>
+class CellFeatureBucketer
+{
+public:
+ CellFeatureBucketer(int level, typename FeatureOutT::InitDataType const & featureOutInitData,
+ int maxWorldZoom = -1)
+ : m_Level(level), m_FeatureOutInitData(featureOutInitData), m_maxWorldZoom(maxWorldZoom)
+ {
+ uint32_t const size = 1 << 2 * m_Level;
+ m_Buckets.resize(size);
+ for (uint32_t i = 0; i < m_Buckets.size(); ++i)
+ {
+ CellIdT cell = CellIdT::FromBitsAndLevel(i, m_Level);
+ double minX, minY, maxX, maxY;
+ CellIdConverter<BoundsT, CellIdT>::GetCellBounds(cell, minX, minY, maxX, maxY);
+ m_Buckets[i].m_Rect = m2::RectD(minX, minY, maxX, maxY);
+ }
+ // create separate world bucket if necessary
+ if (maxWorldZoom >= 0)
+ {
+ m_worldBucket.reset(new FeatureOutT(WORLD_FILE_NAME, m_FeatureOutInitData));
+ }
+ }
+
+ void operator () (Feature const & feature)
+ {
+ // separately store features needed for world map
+ if (m_worldBucket
+ && m_maxWorldZoom >= feature::MinDrawableScaleForFeature(feature))
+ {
+ (*m_worldBucket)(feature);
+ }
+
+ FeatureClipperT clipper(feature);
+ // TODO: Is feature fully inside GetLimitRect()?
+ m2::RectD const limitRect = feature.GetLimitRect();
+ for (uint32_t i = 0; i < m_Buckets.size(); ++i)
+ {
+ // First quick and dirty limit rect intersection.
+ // Clipper may (or may not) do a better intersection.
+ if (m_Buckets[i].m_Rect.IsIntersect(limitRect))
+ {
+ Feature clippedFeature;
+ if (clipper(m_Buckets[i].m_Rect, clippedFeature))
+ {
+ if (!m_Buckets[i].m_pOut)
+ m_Buckets[i].m_pOut = new FeatureOutT(BucketName(i), m_FeatureOutInitData);
+
+ (*(m_Buckets[i].m_pOut))(clippedFeature);
+ }
+ }
+ }
+ }
+
+ void operator () (FeatureBuilder const & fb) { (*this)(fb.GetFeature()); }
+
+ template <typename F> void GetBucketNames(F f) const
+ {
+ for (uint32_t i = 0; i < m_Buckets.size(); ++i)
+ if (m_Buckets[i].m_pOut)
+ f(BucketName(i));
+ }
+
+private:
+ inline string BucketName(uint32_t i) const
+ {
+ return CellIdT::FromBitsAndLevel(i, m_Level).ToString();
+ }
+
+ struct Bucket
+ {
+ Bucket() : m_pOut(NULL) {}
+ ~Bucket() { delete m_pOut; }
+
+ FeatureOutT * m_pOut;
+ m2::RectD m_Rect;
+ };
+
+ int m_Level;
+ typename FeatureOutT::InitDataType m_FeatureOutInitData;
+ vector<Bucket> m_Buckets;
+  /// If NULL, a separate world data file is not generated.
+ boost::scoped_ptr<FeatureOutT> m_worldBucket;
+ int m_maxWorldZoom;
+};
+
+class SimpleFeatureClipper
+{
+public:
+ explicit SimpleFeatureClipper(Feature const & feature) : m_Feature(feature)
+ {
+ }
+
+ bool operator () (m2::RectD const & /*rect*/, Feature & clippedFeature) const
+ {
+ clippedFeature = m_Feature;
+ return true;
+ }
+
+private:
+ Feature const & m_Feature;
+};
+
+}
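
feature_generator.cpp below wires this bucketer up with FeaturesCollector, SimpleFeatureClipper, MercatorBounds and RectId; a condensed sketch of that instantiation (it assumes the same includes feature_generator.cpp uses; level 7 is the tool's default bucketing_level and gives a 128 x 128 cell grid):

typedef feature::CellFeatureBucketer<feature::FeaturesCollector,
    feature::SimpleFeatureClipper, MercatorBounds, RectId> BucketerT;

void SketchBucketer(string const & outPrefix)
{
  feature::FeaturesCollector::InitDataType initData(outPrefix, DATA_FILE_EXTENSION);
  BucketerT bucketer(7, initData, -1 /* no separate world file */);

  // Features are normally pushed by the second-pass OSM parser:
  //   bucketer(feature);          // Feature
  //   bucketer(featureBuilder);   // FeatureBuilder

  vector<string> buckets;
  bucketer.GetBucketNames(MakeBackInsertFunctor(buckets));  // names of the non-empty cells
}
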
diff --git a/indexer/indexer_tool/feature_generator.cpp b/indexer/indexer_tool/feature_generator.cpp
new file mode 100644
index 0000000000..537cd6fc1c
--- /dev/null
+++ b/indexer/indexer_tool/feature_generator.cpp
@@ -0,0 +1,304 @@
+#include "feature_generator.hpp"
+#include "feature_bucketer.hpp"
+#include "data_cache_file.hpp"
+#include "osm_element.hpp"
+#include "../../indexer/data_header.hpp"
+#include "../../indexer/osm_decl.hpp"
+#include "../../indexer/data_header_reader.hpp"
+#include "../../coding/varint.hpp"
+#include "../../base/assert.hpp"
+#include "../../base/logging.hpp"
+#include "../../base/stl_add.hpp"
+#include "../../std/bind.hpp"
+#include "../../std/unordered_map.hpp"
+
+namespace feature
+{
+
+template <class TNodesHolder>
+class FileHolder : public cache::BaseFileHolder<TNodesHolder, cache::DataFileReader, FileReader>
+{
+ typedef cache::DataFileReader reader_t;
+ typedef cache::BaseFileHolder<TNodesHolder, reader_t, FileReader> base_type;
+
+ typedef typename base_type::offset_map_t offset_map_t;
+ typedef typename base_type::ways_map_t ways_map_t;
+
+ typedef typename base_type::user_id_t user_id_t;
+
+ template <class TElement, class ToDo> struct process_base
+ {
+ reader_t & m_reader;
+ protected:
+ ToDo & m_toDo;
+ public:
+ process_base(reader_t & reader, ToDo & toDo) : m_reader(reader), m_toDo(toDo) {}
+
+ bool operator() (uint64_t id)
+ {
+ TElement e;
+ if (m_reader.Read(id, e))
+ return m_toDo(id, e);
+ return false;
+ }
+ };
+
+ template <class ToDo> struct process_relation : public process_base<RelationElement, ToDo>
+ {
+ typedef process_base<RelationElement, ToDo> base_type;
+ public:
+ process_relation(reader_t & reader, ToDo & toDo) : base_type(reader, toDo) {}
+ };
+
+ template <class ToDo> struct process_relation_cached : public process_relation<ToDo>
+ {
+ typedef process_relation<ToDo> base_type;
+
+ public:
+ process_relation_cached(reader_t & rels, ToDo & toDo)
+ : base_type(rels, toDo) {}
+
+ bool operator() (uint64_t id)
+ {
+ switch (this->m_toDo(id))
+ {
+ case 1: return true;
+ case -1: return false;
+ default: return base_type::operator()(id);
+ }
+ }
+ };
+
+public:
+ FileHolder(TNodesHolder & holder, string const & dir) : base_type(holder, dir) {}
+
+ bool GetNode(uint64_t id, double & lat, double & lng)
+ {
+ return this->m_nodes.GetPoint(id, lat, lng);
+ }
+
+ bool GetWay(user_id_t id, WayElement & e)
+ {
+ return this->m_ways.Read(id, e);
+ }
+
+ bool GetNextWay(user_id_t & prevWay, user_id_t node, WayElement & e)
+ {
+ typedef typename ways_map_t::iter_t iter_t;
+ pair<iter_t, iter_t> range = this->m_mappedWays.GetRange(node);
+ for (; range.first != range.second; ++range.first)
+ {
+ cache::MappedWay const & w = range.first->second;
+ if (w.m_type != cache::MappedWay::coast_opposite && w.m_id != prevWay)
+ {
+ this->m_ways.Read(w.m_id, e);
+ prevWay = w.m_id;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ template <class ToDo> void ForEachRelationByWay(user_id_t id, ToDo & toDo)
+ {
+ process_relation<ToDo> processor(this->m_relations, toDo);
+ this->m_ways2rel.for_each_ret(id, processor);
+ }
+
+ template <class ToDo> void ForEachRelationByWayCached(user_id_t id, ToDo & toDo)
+ {
+ process_relation_cached<ToDo> processor(this->m_relations, toDo);
+ this->m_ways2rel.for_each_ret(id, processor);
+ }
+
+ void LoadIndex()
+ {
+ this->m_ways.LoadOffsets();
+ this->m_relations.LoadOffsets();
+
+ this->m_nodes2rel.read_to_memory();
+ this->m_ways2rel.read_to_memory();
+ this->m_mappedWays.read_to_memory();
+ }
+};
+
+void FeaturesCollector::Init()
+{
+ // write empty stub, will be updated in Finish()
+ WriteDataHeader(m_datFile, feature::DataHeader());
+}
+
+FeaturesCollector::FeaturesCollector(string const & datFile) : m_datFile(datFile)
+{
+ Init();
+}
+
+FeaturesCollector::FeaturesCollector(string const & bucketName,
+ FeaturesCollector::InitDataType const & datFilePrefixSuffix)
+ : m_datFile(datFilePrefixSuffix.first + bucketName + datFilePrefixSuffix.second)
+{
+ Init();
+}
+
+void FeaturesCollector::operator() (FeatureBuilder const & f)
+{
+#ifdef DEBUG
+ // .dat file should be less than 4Gb
+ uint64_t const pos = m_datFile.Pos();
+ ASSERT_EQUAL ( static_cast<uint64_t>(static_cast<uint32_t>(pos)), pos,
+ ("Feature offset is out of 32bit boundary :(") );
+#endif
+
+ vector<char> bytes;
+ f.Serialize(bytes);
+ size_t const sz = bytes.size();
+ CHECK(sz, ("Empty feature! WTF?"));
+
+ if (sz > 0)
+ {
+ WriteVarUint(m_datFile, sz);
+ m_datFile.Write(&bytes[0], sz);
+
+ Feature feature(bytes);
+ m_bounds.Add(feature.GetLimitRect());
+ }
+}
+
+FeaturesCollector::~FeaturesCollector()
+{
+ // rewrite map information with actual data
+ m_datFile.Seek(0);
+ feature::DataHeader header;
+ header.SetBounds(m_bounds);
+ WriteDataHeader(m_datFile, header);
+}
+
+class points_in_file
+{
+ FileReader m_file;
+
+public:
+ points_in_file(string const & name) : m_file(name) {}
+
+ bool GetPoint(uint64_t id, double & lat, double & lng) const
+ {
+ LatLon ll;
+ m_file.Read(id * sizeof(ll), &ll, sizeof(ll));
+
+    // assume that a valid coordinate is never exactly (0, 0)
+ if (ll.lat != 0.0 || ll.lon != 0.0)
+ {
+ lat = ll.lat;
+ lng = ll.lon;
+ return true;
+ }
+ else
+ {
+ LOG(LERROR, ("Node with id = ", id, " not found!"));
+ return false;
+ }
+ }
+};
+
+class points_in_map
+{
+ typedef unordered_map<uint64_t, pair<double, double> > cont_t;
+ typedef cont_t::const_iterator iter_t;
+ cont_t m_map;
+
+ static bool equal_coord(double d1, double d2)
+ {
+ return ::fabs(d1 - d2) < 1.0E-8;
+ }
+
+public:
+ points_in_map(string const & name)
+ {
+ FileReader reader(name);
+ uint64_t const count = reader.Size();
+
+ uint64_t pos = 0;
+ while (pos < count)
+ {
+ LatLonPos ll;
+ reader.Read(pos, &ll, sizeof(ll));
+
+ pair<iter_t, bool> ret = m_map.insert(make_pair(ll.pos, make_pair(ll.lat, ll.lon)));
+      if (!ret.second) // a duplicate node id must carry the same coordinates
+ {
+#ifdef DEBUG
+ pair<double, double> const & c = ret.first->second;
+ ASSERT ( equal_coord(c.first, ll.lat), () );
+ ASSERT ( equal_coord(c.second, ll.lon), () );
+#endif
+ }
+
+ pos += sizeof(ll);
+ }
+ }
+
+ bool GetPoint(uint64_t id, double & lat, double & lng) const
+ {
+ iter_t i = m_map.find(id);
+ if (i != m_map.end())
+ {
+ lat = i->second.first;
+ lng = i->second.second;
+ return true;
+ }
+ return false;
+ }
+};
+
+template <class TNodesHolder, template <class, class> class TParser>
+bool GenerateImpl(GenerateInfo & info)
+{
+ CHECK_GREATER_OR_EQUAL(info.cellBucketingLevel, 0, ());
+ CHECK_LESS(info.cellBucketingLevel, 10, ());
+
+ try
+ {
+ TNodesHolder nodes(info.dir + NODES_FILE);
+
+ typedef FileHolder<TNodesHolder> holder_t;
+ holder_t holder(nodes, info.dir);
+
+ holder.LoadIndex();
+
+ FeaturesCollector::InitDataType collectorInitData(info.datFilePrefix, info.datFileSuffix);
+
+ typedef CellFeatureBucketer<FeaturesCollector, SimpleFeatureClipper, MercatorBounds, RectId>
+ FeatureBucketerType;
+ FeatureBucketerType bucketer(info.cellBucketingLevel, collectorInitData, info.m_maxScaleForWorldFeatures);
+ {
+ TParser<FeatureBucketerType, holder_t> parser(bucketer, holder);
+ ParseXMLFromStdIn(parser);
+ }
+ bucketer.GetBucketNames(MakeBackInsertFunctor(info.bucketNames));
+ }
+ catch (Reader::Exception const & e)
+ {
+ LOG(LERROR, ("Error with file ", e.what()));
+ return false;
+ }
+
+ return true;
+}
+
+bool GenerateFeatures(GenerateInfo & info, bool lightNodes)
+{
+ if (lightNodes)
+ return GenerateImpl<points_in_map, SecondPassParserUsual>(info);
+ else
+ return GenerateImpl<points_in_file, SecondPassParserUsual>(info);
+}
+
+bool GenerateCoastlines(GenerateInfo & info, bool lightNodes)
+{
+ if (lightNodes)
+ return GenerateImpl<points_in_map, SecondPassParserJoin>(info);
+ else
+ return GenerateImpl<points_in_file, SecondPassParserJoin>(info);
+}
+
+}
diff --git a/indexer/indexer_tool/feature_generator.hpp b/indexer/indexer_tool/feature_generator.hpp
new file mode 100644
index 0000000000..8224e51aad
--- /dev/null
+++ b/indexer/indexer_tool/feature_generator.hpp
@@ -0,0 +1,45 @@
+#pragma once
+#include "../../indexer/feature.hpp"
+#include "../../indexer/osm_decl.hpp"
+#include "../../geometry/rect2d.hpp"
+#include "../../coding/file_writer.hpp"
+#include "../../std/string.hpp"
+
+namespace feature
+{
+ struct GenerateInfo
+ {
+ GenerateInfo() : m_maxScaleForWorldFeatures(-1) {}
+ string dir, datFilePrefix, datFileSuffix;
+ int cellBucketingLevel;
+ vector<string> bucketNames;
+    /// Features with scale level in [0..m_maxScaleForWorldFeatures] will be
+    /// included into a separate world data file.
+    /// @note If -1, the world file is not created.
+ int m_maxScaleForWorldFeatures;
+ };
+
+ bool GenerateFeatures(GenerateInfo & info, bool lightNodes);
+ bool GenerateCoastlines(GenerateInfo & info, bool lightNodes);
+
+ // Writes features to dat file.
+ class FeaturesCollector
+ {
+ FileWriter m_datFile;
+ m2::RectD m_bounds;
+
+ void Init();
+
+ public:
+ ~FeaturesCollector();
+
+ // Stores prefix and suffix of a dat file name.
+ typedef pair<string, string> InitDataType;
+
+ explicit FeaturesCollector(string const & datFile);
+ FeaturesCollector(string const & bucketName, InitDataType const & datFilePrefixSuffix);
+
+ void operator() (FeatureBuilder const & f);
+ void operator() (Feature const & f) { (*this)(f.GetFeatureBuilder()); }
+ };
+}
diff --git a/indexer/indexer_tool/feature_sorter.cpp b/indexer/indexer_tool/feature_sorter.cpp
new file mode 100644
index 0000000000..d2b849bcef
--- /dev/null
+++ b/indexer/indexer_tool/feature_sorter.cpp
@@ -0,0 +1,110 @@
+#include "feature_sorter.hpp"
+#include "feature_generator.hpp"
+
+#include "../../indexer/data_header.hpp"
+#include "../../indexer/data_header_reader.hpp"
+#include "../../indexer/feature_processor.hpp"
+#include "../../indexer/feature_visibility.hpp"
+#include "../../indexer/scales.hpp"
+
+#include "../../platform/platform.hpp"
+
+#include "../../coding/file_writer.hpp"
+
+#include "../../base/logging.hpp"
+#include "../../base/start_mem_debug.hpp"
+
+
+namespace
+{
+ typedef pair<uint64_t, uint64_t> TCellAndOffset;
+
+ class CalculateMidPoints
+ {
+ double m_midX;
+ double m_midY;
+ size_t m_counter;
+
+ public:
+ std::vector<TCellAndOffset> m_vec;
+
+ void operator() (Feature const & ft, uint64_t pos)
+ {
+ // reset state
+ m_midX = 0.0;
+ m_midY = 0.0;
+ m_counter = 0;
+ ft.ForEachPointRef(*this);
+ m_midX /= m_counter;
+ m_midY /= m_counter;
+
+ uint64_t const pointAsInt64 = PointToInt64(m_midX, m_midY);
+ uint64_t const featureScale = feature::MinDrawableScaleForFeature(ft);
+      CHECK(featureScale <= scales::GetUpperScale(), ("Dat file contains an invisible feature"));
+
+ uint64_t const order = (featureScale << 59) | (pointAsInt64 >> 5);
+ m_vec.push_back(make_pair(order, pos));
+ }
+
+ void operator() (CoordPointT const & point)
+ {
+ m_midX += point.first;
+ m_midY += point.second;
+ ++m_counter;
+ }
+ };
+
+ bool SortMidPointsFunc(TCellAndOffset const & c1, TCellAndOffset const & c2)
+ {
+ return c1.first < c2.first;
+ }
+
+ template <typename TReader>
+ void ReadFeature(TReader const & reader, Feature & ft, uint64_t offset)
+ {
+ ReaderSource<TReader> src(reader);
+ src.Skip(offset);
+
+ feature::ReadFromSource(src, ft);
+ }
+}
+
+namespace feature
+{
+ void SortDatFile(string const & datFilePath, bool removeOriginalFile)
+ {
+ // rename input file
+ Platform & platform = GetPlatform();
+ string tempDatFilePath(datFilePath);
+ tempDatFilePath += ".notsorted";
+
+    // renaming fails if the file doesn't exist or is locked by another process
+    if (!platform.RenameFileX(datFilePath, tempDatFilePath))
+    {
+      LOG(LINFO, ("File ", datFilePath, " doesn't exist or a sharing violation occurred!"));
+ return;
+ }
+
+ // stores cellIds for middle points
+ CalculateMidPoints midPoints;
+ ForEachFromDat(tempDatFilePath, midPoints);
+
+ std::sort(midPoints.m_vec.begin(), midPoints.m_vec.end(), &SortMidPointsFunc);
+
+ // store sorted features
+ {
+ FeaturesCollector collector(datFilePath);
+ FileReader notSortedFileReader(tempDatFilePath);
+ Feature ft;
+ for (size_t i = 0; i < midPoints.m_vec.size(); ++i)
+ {
+ ReadFeature(notSortedFileReader, ft, midPoints.m_vec[i].second);
+ collector(ft.GetFeatureBuilder());
+ }
+ }
+
+ // remove old not-sorted dat file
+ if (removeOriginalFile)
+ FileWriter::DeleteFile(tempDatFilePath);
+ }
+} // namespace feature
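
The sort key built in CalculateMidPoints packs the feature's minimal drawable scale into the top five bits and the high 59 bits of the mid point's PointToInt64 value into the rest, so features are ordered by scale first and spatially within each scale. Restated with the same shifts as the code above:

uint64_t MakeSortKey(uint64_t featureScale, uint64_t pointAsInt64)
{
  // featureScale <= scales::GetUpperScale() is CHECKed above, so it fits into the five spare bits.
  return (featureScale << 59) | (pointAsInt64 >> 5);
}
// For example, scale 10 and mid point id 0x0123456789ABCDEF give
//   (10 << 59) | (0x0123456789ABCDEF >> 5) == 0x50091A2B3C4D5E6F
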
diff --git a/indexer/indexer_tool/feature_sorter.hpp b/indexer/indexer_tool/feature_sorter.hpp
new file mode 100644
index 0000000000..0e8ffe1f6b
--- /dev/null
+++ b/indexer/indexer_tool/feature_sorter.hpp
@@ -0,0 +1,9 @@
+#pragma once
+
+#include "../../std/string.hpp"
+
+namespace feature
+{
+ // sorts features in the given file by their mid points
+ void SortDatFile(string const & datFile, bool removeOriginalFile = true);
+}
diff --git a/indexer/indexer_tool/first_pass_parser.hpp b/indexer/indexer_tool/first_pass_parser.hpp
new file mode 100644
index 0000000000..1b24ff994a
--- /dev/null
+++ b/indexer/indexer_tool/first_pass_parser.hpp
@@ -0,0 +1,106 @@
+#pragma once
+
+#include "../../indexer/xml_element.hpp"
+#include "../../indexer/osm_decl.hpp"
+#include "../../indexer/mercator.hpp"
+
+#include "../../base/string_utils.hpp"
+
+
+template <class THolder>
+class FirstPassParser : public BaseOSMParser
+{
+ THolder & m_holder;
+
+public:
+ FirstPassParser(THolder & holder) : m_holder(holder)
+ {
+ static char const * tags[] = { "osm", "node", "way", "relation" };
+ SetTags(tags);
+ }
+
+protected:
+ virtual void EmitElement(XMLElement * p)
+ {
+ uint64_t id;
+ VERIFY ( utils::to_uint64(p->attrs["id"], id), ("Unknown element with invalid id : ", p->attrs["id"]) );
+
+ if (p->name == "node")
+ {
+ // store point
+
+ double lat, lng;
+ VERIFY ( utils::to_double(p->attrs["lat"], lat), ("Bad node lat : ", p->attrs["lat"]) );
+ VERIFY ( utils::to_double(p->attrs["lon"], lng), ("Bad node lon : ", p->attrs["lon"]) );
+
+ // convert to mercator
+ lat = MercatorBounds::LatToY(lat);
+ lng = MercatorBounds::LonToX(lng);
+
+ m_holder.AddNode(id, lat, lng);
+ }
+ else if (p->name == "way")
+ {
+ // store way
+
+ WayElement e;
+ bool bUnite = false;
+ bool bEmptyTags = true;
+
+ for (size_t i = 0; i < p->childs.size(); ++i)
+ {
+ if (p->childs[i].name == "nd")
+ {
+ uint64_t ref;
+ VERIFY ( utils::to_uint64(p->childs[i].attrs["ref"], ref), ("Bad node ref in way : ", p->childs[i].attrs["ref"]) );
+ e.nodes.push_back(ref);
+ }
+ else if (!bUnite && (p->childs[i].name == "tag"))
+ {
+ bEmptyTags = false;
+
+          // process the way's tags to decide whether we need to join the ways
+ string const & k = p->childs[i].attrs["k"];
+ string const & v = p->childs[i].attrs["v"];
+ bUnite = feature::NeedUnite(k, v);
+ }
+ }
+
+ if (e.IsValid())
+ {
+ m_holder.AddWay(id, e);
+ if (bUnite || bEmptyTags)
+ m_holder.AddMappedWay(id, e, bEmptyTags);
+ }
+ }
+ else if (p->name == "relation")
+ {
+ // store relation
+
+ RelationElement e;
+ for (size_t i = 0; i < p->childs.size(); ++i)
+ {
+ if (p->childs[i].name == "member")
+ {
+ uint64_t ref;
+ VERIFY ( utils::to_uint64(p->childs[i].attrs["ref"], ref), ("Bad ref in relation : ", p->childs[i].attrs["ref"]) );
+
+ string const & type = p->childs[i].attrs["type"];
+ string const & role = p->childs[i].attrs["role"];
+ if (type == "node")
+ e.nodes.push_back(make_pair(ref, role));
+ else
+ e.ways.push_back(make_pair(ref, role));
+ }
+ else if (p->childs[i].name == "tag")
+ {
+        // relation tags are written as is
+ e.tags.insert(make_pair(p->childs[i].attrs["k"], p->childs[i].attrs["v"]));
+ }
+ }
+
+ if (e.IsValid())
+ m_holder.AddRelation(id, e);
+ }
+ }
+};
diff --git a/indexer/indexer_tool/grid_generator.cpp b/indexer/indexer_tool/grid_generator.cpp
new file mode 100644
index 0000000000..0f4f6c5df0
--- /dev/null
+++ b/indexer/indexer_tool/grid_generator.cpp
@@ -0,0 +1,192 @@
+#include "grid_generator.hpp"
+
+#include "../../base/logging.hpp"
+
+#include "../../indexer/cell_id.hpp"
+#include "../../indexer/mercator.hpp"
+
+// tags used for grid drawing
+#define GRIDKEY "mapswithme"
+#define GRIDVALUE "grid"
+#define CAPTIONKEY "place"
+#define CAPTIONVALUE "country"
+
+namespace grid
+{
+ static size_t const MIN_GRID_LEVEL = 1;
+ static size_t const MAX_GRID_LEVEL = 10;
+
+ template <class TCellId>
+ string MercatorPointToCellIdString(double x, double y, size_t bucketingLevel)
+ {
+ TCellId id = CellIdConverter<MercatorBounds, TCellId>::ToCellId(x, y);
+ return id.ToString().substr(0, bucketingLevel);
+ }
+
+ void GenerateGridToStdout(size_t bucketingLevel)
+ {
+ if (bucketingLevel < MIN_GRID_LEVEL || bucketingLevel > MAX_GRID_LEVEL)
+ {
+ LOG(LERROR, ("Bucketing level", bucketingLevel, "for grid is not within valid range [", MIN_GRID_LEVEL, "..", MAX_GRID_LEVEL, "]"));
+ return;
+ }
+
+ size_t const COUNT = 2 << (bucketingLevel - 1);
+ double const STEPX = (MercatorBounds::maxX - MercatorBounds::minX) / COUNT;
+ double const STEPY = (MercatorBounds::maxY - MercatorBounds::minY) / COUNT;
+
+ cout <<
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+ "<osm version=\"0.6\" generator=\"MapsWithMe Indexer Tool\">\n"
+ " <bounds minlat=\"" << MercatorBounds::YToLat(MercatorBounds::minY) <<
+ "\" minlon=\"" << MercatorBounds::XToLon(MercatorBounds::minX) <<
+ "\" maxlat=\"" << MercatorBounds::YToLat(MercatorBounds::maxY) <<
+ "\" maxlon=\"" << MercatorBounds::XToLon(MercatorBounds::maxX) << "\"/>\n";
+
+ // generate nodes and ways
+ size_t nodeID = 1;
+ size_t wayID = 1;
+ for (double y = MercatorBounds::minY; y <= MercatorBounds::maxY; y += STEPY)
+ {
+ size_t const firstID = nodeID;
+ cout <<
+ " <node id=\"" << nodeID++ <<
+ "\" lat=\"" << MercatorBounds::YToLat(y) <<
+ "\" lon=\"" << MercatorBounds::XToLon(MercatorBounds::minX) <<
+ "\"/>\n";
+ size_t const secondID = nodeID;
+ cout <<
+ " <node id=\"" << nodeID++ <<
+ "\" lat=\"" << MercatorBounds::YToLat(y) <<
+ "\" lon=\"" << MercatorBounds::XToLon(MercatorBounds::maxX) <<
+ "\"/>\n";
+ cout <<
+ " <way id=\"" << wayID++ << "\">\n"
+ " <nd ref=\"" << firstID << "\"/>\n"
+ " <nd ref=\"" << secondID << "\"/>\n"
+ " <tag k=\"" << GRIDKEY << "\" v=\"" << GRIDVALUE << "\"/>\n"
+ " <tag k=\"layer\" v=\"-5\"/>\n"
+ " </way>\n";
+ }
+ for (double x = MercatorBounds::minX; x <= MercatorBounds::maxX; x += STEPX)
+ {
+ size_t const firstID = nodeID;
+ cout <<
+ " <node id=\"" << nodeID++ <<
+ "\" lat=\"" << MercatorBounds::YToLat(MercatorBounds::minY) <<
+ "\" lon=\"" << MercatorBounds::XToLon(x) <<
+ "\"/>\n";
+ size_t const secondID = nodeID;
+ cout <<
+ " <node id=\"" << nodeID++ <<
+ "\" lat=\"" << MercatorBounds::YToLat(MercatorBounds::maxY) <<
+ "\" lon=\"" << MercatorBounds::XToLon(x) <<
+ "\"/>\n";
+ cout <<
+ " <way id=\"" << wayID++ << "\">\n"
+ " <nd ref=\"" << firstID << "\"/>\n"
+ " <nd ref=\"" << secondID << "\"/>\n"
+ " <tag k=\"" << GRIDKEY << "\" v=\"" << GRIDVALUE << "\"/>\n"
+ " <tag k=\"layer\" v=\"-5\"/>\n"
+ " </way>\n";
+ }
+
+ // generate nodes with captions
+ for (size_t y = 0; y <= COUNT - 1; ++y)
+ {
+ for (size_t x = 0; x <= COUNT - 1; ++x)
+ {
+ double const mercY = MercatorBounds::minY + y * STEPY + STEPY / 2;
+ double const mercX = MercatorBounds::minX + x * STEPX + STEPX / 2;
+ string const title = MercatorPointToCellIdString<m2::CellId<MAX_GRID_LEVEL> >(mercX, mercY, bucketingLevel);
+ cout <<
+ " <node id=\"" << nodeID++ <<
+ "\" lat=\"" << MercatorBounds::YToLat(mercY) <<
+ "\" lon=\"" << MercatorBounds::XToLon(mercX) <<
+ "\">\n";
+ cout <<
+ " <tag k=\"" << CAPTIONKEY << "\" v=\"" << CAPTIONVALUE << "\"/>\n";
+ cout <<
+ " <tag k=\"name\" v=\"" << title << "\"/>\n";
+ cout <<
+ " </node>\n";
+ }
+ }
+ cout <<
+ "</osm>\n";
+ }
+
+/* void GenerateGridToStdout(size_t bucketingLevel)
+ {
+ if (bucketingLevel < MIN_GRID_LEVEL || bucketingLevel > MAX_GRID_LEVEL)
+ {
+ LOG(LERROR, ("Bucketing level", bucketingLevel, "for grid is not within valid range [", MIN_GRID_LEVEL, "..", MAX_GRID_LEVEL, "]"));
+ return;
+ }
+
+ size_t const COUNT = 2 << (bucketingLevel - 1);
+ double const STEPX = (MercatorBounds::maxX - MercatorBounds::minX) / COUNT;
+ double const STEPY = (MercatorBounds::maxY - MercatorBounds::minY) / COUNT;
+
+ cout <<
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+ "<osm version=\"0.6\" generator=\"MapsWithMe Indexer Tool\">\n"
+ " <bounds minlat=\"" << MercatorBounds::YToLat(MercatorBounds::minY) <<
+ "\" minlon=\"" << MercatorBounds::XToLon(MercatorBounds::minX) <<
+ "\" maxlat=\"" << MercatorBounds::YToLat(MercatorBounds::maxY) <<
+ "\" maxlon=\"" << MercatorBounds::XToLon(MercatorBounds::maxX) << "\"/>\n";
+
+ // generate nodes
+ size_t nodeID = 1;
+ for (double y = MercatorBounds::minY; y <= MercatorBounds::maxY; y += STEPY)
+ {
+ for (double x = MercatorBounds::minX; x <= MercatorBounds::maxX; x += STEPX)
+ {
+ cout <<
+ " <node id=\"" << nodeID++ <<
+ "\" lat=\"" << MercatorBounds::YToLat(y) <<
+ "\" lon=\"" << MercatorBounds::XToLon(x) <<
+ "\"/>\n";
+ }
+ }
+ // generate squares and captions
+ size_t wayID = 1;
+ for (size_t y = 0; y <= COUNT - 1; ++y)
+ {
+ for (size_t x = 0; x <= COUNT - 1; ++x)
+ {
+ size_t const first = x + y * (COUNT + 1) + 1;
+ size_t const second = first + 1;
+ size_t const third = second + COUNT + 1;
+ size_t const fourth = third - 1;
+ string title = CellStringFromXYLevel(x, y, bucketingLevel);
+ ++nodeID;
+ cout <<
+ " <node id=\"" << nodeID <<
+ "\" lat=\"" << MercatorBounds::YToLat(MercatorBounds::minY + y * STEPY + STEPY / 2) <<
+ "\" lon=\"" << MercatorBounds::XToLon(MercatorBounds::minX + x * STEPX + STEPX / 2) <<
+ "\">\n";
+ cout <<
+ " <tag k=\"" << TAGKEY << "\" v=\"" << CAPTIONVALUE << "\"/>\n";
+ cout <<
+ " <tag k=\"name\" v=\"" << title << "\"/>\n";
+ cout <<
+ " </node>\n";
+
+ cout <<
+ " <way id=\"" << wayID++ << "\">\n"
+ " <nd ref=\"" << first << "\"/>\n"
+ " <nd ref=\"" << second << "\"/>\n"
+ " <nd ref=\"" << third << "\"/>\n"
+ " <nd ref=\"" << fourth << "\"/>\n"
+ " <nd ref=\"" << first << "\"/>\n"
+ " <tag k=\"name\" v=\"" << title << "\"/>\n"
+ " <tag k=\"" << TAGKEY << "\" v=\"" << GRIDVALUE << "\"/>\n"
+ " <tag k=\"layer\" v=\"-5\"/>\n"
+ " </way>\n";
+ }
+ }
+ cout <<
+ "</osm>\n";
+ }*/
+}
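
For orientation, the grid sizes implied by the constants above: 2 << (bucketingLevel - 1) equals 2^bucketingLevel cells per side, so the tool's default bucketing_level of 7 yields a 128 x 128 grid, about 2 * 129 boundary ways and exactly 128 * 128 caption nodes. A one-line restatement of the cell count:

size_t CellsPerSide(size_t bucketingLevel)
{
  return size_t(2) << (bucketingLevel - 1);  // same as 1 << bucketingLevel for level >= 1
}
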
diff --git a/indexer/indexer_tool/grid_generator.hpp b/indexer/indexer_tool/grid_generator.hpp
new file mode 100644
index 0000000000..3255f8667c
--- /dev/null
+++ b/indexer/indexer_tool/grid_generator.hpp
@@ -0,0 +1,8 @@
+#pragma once
+
+#include "../../std/iostream.hpp"
+
+namespace grid
+{
+ void GenerateGridToStdout(size_t bucketingLevel);
+}
diff --git a/indexer/indexer_tool/indexer_tool.cpp b/indexer/indexer_tool/indexer_tool.cpp
new file mode 100644
index 0000000000..dab8f7eb46
--- /dev/null
+++ b/indexer/indexer_tool/indexer_tool.cpp
@@ -0,0 +1,159 @@
+#include "data_generator.hpp"
+#include "feature_generator.hpp"
+#include "feature_sorter.hpp"
+#include "update_generator.hpp"
+#include "feature_bucketer.hpp"
+#include "grid_generator.hpp"
+
+#include "../classif_routine.hpp"
+#include "../features_vector.hpp"
+#include "../index_builder.hpp"
+#include "../osm_decl.hpp"
+#include "../feature_processor.hpp"
+#include "../data_header.hpp"
+#include "../defines.hpp"
+
+#include "../../platform/platform.hpp"
+
+#include "../../3party/gflags/src/gflags/gflags.h"
+
+#include "../../std/ctime.hpp"
+#include "../../std/iostream.hpp"
+#include "../../std/iomanip.hpp"
+#include "../../std/numeric.hpp"
+
+//#include "../../version/version.h"
+
+#include "../../base/start_mem_debug.hpp"
+
+DEFINE_bool(version, false, "Display version");
+DEFINE_string(countries_path, "",
+ "If specified, update.maps file will be generated from cells in the path");
+
+DEFINE_bool(sort_features, false, "Sort features inside .dat for better cache-friendliness.");
+DEFINE_bool(generate_classif, false, "Generate classificator.");
+DEFINE_bool(generate_intermediate_data, false, "Generate intermediate data.");
+DEFINE_bool(generate_final_data, false, "Generate final data.");
+DEFINE_bool(generate_index, false, "Generate index.");
+DEFINE_bool(generate_grid, false, "Generate grid for given bucketing_level");
+DEFINE_bool(use_light_nodes, false,
+ "If true, use temporary vector of nodes, instead of huge temp file");
+DEFINE_string(data_path, "", "Working directory, 'path_to_exe/../../data' if empty.");
+DEFINE_string(output, "", "Prefix of filenames of outputted .dat and .idx files.");
+DEFINE_string(intermediate_data_path, "", "Path to store nodes, ways, relations.");
+DEFINE_int32(bucketing_level, 7, "Level of cell ids for bucketing.");
+DEFINE_int32(worldmap_max_zoom, -1, "If specified, features for zoom levels [0..this_value] "
+             "which are enabled in the classificator will be added to the separate world.map");
+
+string AddSlashIfNeeded(string const & str)
+{
+ string result(str);
+ size_t const size = result.size();
+ if (size)
+ {
+ if (result.find_last_of('\\') == size - 1)
+ result[size - 1] = '/';
+ else
+ if (result.find_last_of('/') != size - 1)
+ result.push_back('/');
+ }
+ return result;
+}
+
+int main(int argc, char ** argv)
+{
+ google::SetUsageMessage(
+ "Takes OSM XML data from stdin and creates data and index files in several passes.");
+
+ google::ParseCommandLineFlags(&argc, &argv, true);
+
+ string const path =
+ FLAGS_data_path.empty() ? GetPlatform().WorkingDir() : AddSlashIfNeeded(FLAGS_data_path);
+
+ //if (FLAGS_version)
+ //{
+ // cout << "Tool version: " << VERSION_STRING << endl;
+ // cout << "Built on: " << VERSION_DATE_STRING << endl;
+ //}
+
+ // Make a classificator
+ if (FLAGS_generate_classif)
+ {
+ classificator::GenerateAndWrite(path);
+ }
+
+ if (FLAGS_generate_grid)
+ {
+ grid::GenerateGridToStdout(FLAGS_bucketing_level);
+ }
+
+ // Generating intermediate files
+ if (FLAGS_generate_intermediate_data)
+ {
+ LOG(LINFO, ("Generating intermediate data ...."));
+ if (!data::GenerateToFile(FLAGS_intermediate_data_path, FLAGS_use_light_nodes))
+ return -1;
+ }
+
+ feature::GenerateInfo genInfo;
+ genInfo.dir = FLAGS_intermediate_data_path;
+
+ // Generate dat file
+ if (FLAGS_generate_final_data)
+ {
+ LOG(LINFO, ("Generating final data ..."));
+
+ classificator::Read(path);
+ classificator::PrepareForFeatureGeneration();
+
+ genInfo.datFilePrefix = path + FLAGS_output + (FLAGS_bucketing_level > 0 ? "-" : "");
+ genInfo.datFileSuffix = DATA_FILE_EXTENSION;
+ genInfo.cellBucketingLevel = FLAGS_bucketing_level;
+ genInfo.m_maxScaleForWorldFeatures = FLAGS_worldmap_max_zoom;
+
+ if (!feature::GenerateFeatures(genInfo, FLAGS_use_light_nodes))
+ {
+ return -1;
+ }
+
+ for (size_t i = 0; i < genInfo.bucketNames.size(); ++i)
+ genInfo.bucketNames[i] = genInfo.datFilePrefix + genInfo.bucketNames[i] + genInfo.datFileSuffix;
+ if (FLAGS_worldmap_max_zoom >= 0)
+ genInfo.bucketNames.push_back(genInfo.datFilePrefix + WORLD_FILE_NAME + genInfo.datFileSuffix);
+ }
+ else
+ {
+ genInfo.bucketNames.push_back(path + FLAGS_output + DATA_FILE_EXTENSION);
+ }
+
+ // Enumerate over all dat files that were created.
+ for (size_t i = 0; i < genInfo.bucketNames.size(); ++i)
+ {
+ string const datFile = genInfo.bucketNames[i];
+ if (FLAGS_sort_features)
+ {
+ LOG(LINFO, ("Sorting features inside", datFile));
+ feature::SortDatFile(datFile);
+ }
+
+ if (FLAGS_generate_index)
+ {
+ LOG(LINFO, ("Generating index for", datFile));
+ string const indexFile = mapinfo::IndexFileForDatFile(datFile);
+ if (!indexer::BuildIndexFromDatFile(indexFile, datFile,
+ FLAGS_intermediate_data_path + FLAGS_output))
+ {
+ LOG(LCRITICAL, ("Error generating index."));
+ }
+ }
+ }
+
+ // Create http update list for countries and corresponding files
+ if (FLAGS_countries_path.size())
+ {
+ LOG(LINFO, ("Creating maps.update file..."));
+ update::GenerateMapsList(path, FLAGS_countries_path, UPDATE_BASE_URL);
+ }
+
+ return 0;
+}
diff --git a/indexer/indexer_tool/indexer_tool.pro b/indexer/indexer_tool/indexer_tool.pro
new file mode 100644
index 0000000000..ceeb274daa
--- /dev/null
+++ b/indexer/indexer_tool/indexer_tool.pro
@@ -0,0 +1,33 @@
+# Indexer binary
+
+ROOT_DIR = ../..
+DEPENDENCIES = map indexer platform geometry coding base gflags expat sgitess version
+
+include($$ROOT_DIR/common.pri)
+
+CONFIG += console
+CONFIG -= app_bundle
+TEMPLATE = app
+
+# needed for Platform::WorkingDir()
+QT += core
+
+SOURCES += \
+ indexer_tool.cpp \
+ data_generator.cpp \
+ feature_generator.cpp \
+ feature_sorter.cpp \
+ tesselator.cpp \
+ update_generator.cpp \
+ grid_generator.cpp \
+
+HEADERS += \
+ osm_element.hpp \
+ data_generator.hpp \
+ feature_generator.hpp \
+ first_pass_parser.hpp \
+ data_cache_file.hpp \
+ feature_sorter.hpp \
+ update_generator.hpp \
+ feature_bucketer.hpp \
+ grid_generator.hpp \
diff --git a/indexer/indexer_tool/osm_element.hpp b/indexer/indexer_tool/osm_element.hpp
new file mode 100644
index 0000000000..2712db7907
--- /dev/null
+++ b/indexer/indexer_tool/osm_element.hpp
@@ -0,0 +1,421 @@
+#pragma once
+
+#include "../../indexer/feature.hpp"
+#include "../../indexer/osm2type.hpp"
+#include "../../indexer/xml_element.hpp"
+#include "../../indexer/osm_decl.hpp"
+#include "../../indexer/feature_visibility.hpp"
+
+#include "../../base/string_utils.hpp"
+#include "../../base/logging.hpp"
+
+#include "../../std/unordered_map.hpp"
+#include "../../std/list.hpp"
+#include "../../std/set.hpp"
+#include "../../std/vector.hpp"
+
+#include "../../base/start_mem_debug.hpp"
+
+namespace feature
+{
+ typedef list<vector<m2::PointD> > holes_cont_t;
+ void TesselateInterior(FeatureBuilder & featureBuilder, feature::holes_cont_t const & holes);
+}
+
+/// @param TEmitter Feature accumulating policy
+/// @param THolder Nodes, ways, relations holder
+template <class TEmitter, class THolder>
+class SecondPassParserBase : public BaseOSMParser
+{
+protected:
+ TEmitter & m_emitter;
+ THolder & m_holder;
+
+ /// max possible number of types per feature
+ static const size_t max_number_of_types = 16;
+
+ SecondPassParserBase(TEmitter & emitter, THolder & holder)
+ : m_emitter(emitter), m_holder(holder)
+ {
+ static char const * tags[] = { "osm", "node", "way" };
+ SetTags(tags);
+ }
+
+ /// Finding of "holes" in area objects (multipolygon).
+ class multipolygon_processor
+ {
+    uint64_t m_id;      ///< id of the way whose holes we are looking for
+ THolder & m_holder;
+
+ public:
+ /// @param[out] list of holes
+ feature::holes_cont_t m_holes;
+
+ multipolygon_processor(uint64_t id, THolder & holder) : m_id(id), m_holder(holder) {}
+
+ /// 1. relations process function
+ bool operator() (uint64_t /*id*/, RelationElement const & e)
+ {
+ if (e.GetType() == "multipolygon")
+ {
+ string role;
+ if (e.FindWay(m_id, role) && (role == "outer"))
+ {
+ e.ForEachWay(*this);
+          // stop processing (assumption: an "outer" way belongs to only one relation)
+ return true;
+ }
+ }
+ return false;
+ }
+ /// 2. "ways in relation" process function
+ void operator() (uint64_t id, string const & role)
+ {
+ if (id != m_id && role == "inner")
+ {
+ WayElement e;
+ if (m_holder.GetWay(id, e))
+ {
+ m_holes.push_back(vector<m2::PointD>());
+ e.ForEachPoint(*this);
+ }
+ }
+ }
+ /// 3. "points in way" process function
+ void operator () (uint64_t id)
+ {
+ double lat, lng;
+ if (m_holder.GetNode(id, lat, lng))
+ {
+ // lng is "x", lat is "y" - see Feature::AddPoint
+ m_holes.back().push_back(m2::PointD(lng, lat));
+ }
+ }
+ };
+
+ /// Feature description struct.
+ struct value_t
+ {
+ typedef vector<uint32_t> types_t;
+ types_t types; ///< 1-n types, not empty
+ string name; ///< 1-1 name, @todo 1-n names
+ int32_t layer; ///< layer
+
+ value_t()
+ {
+ types.reserve(max_number_of_types);
+ }
+ bool IsValid() const { return !types.empty(); }
+ void Add(value_t const & v)
+ {
+ types.insert(types.end(), v.types.begin(), v.types.end());
+ }
+ };
+
+ /// Feature types processor.
+ class type_processor
+ {
+ void make_xml_element(RelationElement const & rel, XMLElement & out)
+ {
+ for (map<string, string>::const_iterator i = rel.tags.begin(); i != rel.tags.end(); ++i)
+ {
+ if (i->first == "type") continue;
+
+ out.childs.push_back(XMLElement());
+ XMLElement & e = out.childs.back();
+ e.name = "tag";
+ e.attrs["k"] = i->first;
+ e.attrs["v"] = i->second;
+ }
+ }
+
+ uint64_t m_featureID;
+ /// @param[out] feature value as result
+ value_t * m_val;
+
+ /// cache: relation id -> feature value (for fast feature parsing)
+ unordered_map<uint64_t, value_t> m_typeCache;
+
+ public:
+ void Reset(uint64_t fID, value_t * val)
+ {
+ m_featureID = fID;
+ m_val = val;
+ }
+
+ /// 1. "initial relation" process
+ int operator() (uint64_t id)
+ {
+ typename unordered_map<uint64_t, value_t>::const_iterator i = m_typeCache.find(id);
+ if (i != m_typeCache.end())
+ {
+ m_val->Add(i->second);
+        return -1; // continue processing relations
+ }
+ return 0; // read relation from file (see next operator)
+ }
+
+ /// 2. "relation from file" process
+ bool operator() (uint64_t id, RelationElement const & rel)
+ {
+ // "multipolygon relations" process only for "outer" way-features
+ if (rel.GetType() == "multipolygon")
+ {
+ string role;
+ if (rel.FindWay(m_featureID, role)) // feature is way ...
+ if (role != "outer")
+ return false;
+ }
+
+ // make XMLElement struct from relation's tags for GetNameAndType function.
+ XMLElement e;
+ make_xml_element(rel, e);
+
+ value_t val;
+ if (ftype::GetNameAndType(&e, val.types, val.name, val.layer))
+ {
+ m_typeCache[id] = val;
+ m_val->Add(val);
+ }
+ else
+ m_typeCache[id] = value_t();
+
+      // continue processing relations
+ return false;
+ }
+ } m_typeProcessor;
+
+ bool GetPoint(uint64_t id, m2::PointD & pt)
+ {
+ return m_holder.GetNode(id, pt.y, pt.x);
+ }
+
+ void FinishAreaFeature(uint64_t id, FeatureBuilder & ft)
+ {
+ if (ft.IsGeometryClosed())
+ {
+ multipolygon_processor processor(id, m_holder);
+ m_holder.ForEachRelationByWay(id, processor);
+ feature::TesselateInterior(ft, processor.m_holes);
+ }
+ }
+
+ bool ParseType(XMLElement * p, uint64_t & id, value_t & fValue)
+ {
+ VERIFY ( utils::to_uint64(p->attrs["id"], id),
+ ("Unknown element with invalid id : ", p->attrs["id"]) );
+
+ // try to get type from element tags
+ ftype::GetNameAndType(p, fValue.types, fValue.name, fValue.layer);
+
+    // try to get type from relation tags
+ m_typeProcessor.Reset(id, &fValue);
+ m_holder.ForEachRelationByWayCached(id, m_typeProcessor);
+
+    // remove duplicate types
+ sort(fValue.types.begin(), fValue.types.end());
+ fValue.types.erase(unique(fValue.types.begin(), fValue.types.end()), fValue.types.end());
+
+    // false if the classificator did not recognize the feature
+ return fValue.IsValid();
+ }
+};
+
+template <class TEmitter, class THolder>
+class SecondPassParserJoin : public SecondPassParserBase<TEmitter, THolder>
+{
+ typedef SecondPassParserBase<TEmitter, THolder> base_type;
+
+ set<uint64_t> m_usedDirect;
+
+ bool TryEmitUnited(uint64_t featureID, FeatureBuilder & ft)
+ {
+    // check if the feature was already processed earlier
+ if (m_usedDirect.count(featureID) > 0)
+ return true;
+
+ set<uint64_t> path;
+ path.insert(featureID);
+
+ WayElement e;
+
+ // process geometry of initial way itself
+ base_type::m_holder.GetWay(featureID, e);
+ if (e.nodes.empty())
+ return false;
+
+ for (size_t i = 0; i < e.nodes.size(); ++i)
+ {
+ m2::PointD pt;
+ if (base_type::GetPoint(e.nodes[i], pt))
+ ft.AddPoint(pt);
+ else
+ return false;
+ }
+
+    // process connected ways in a loop until the geometry is closed
+ uint64_t fID = featureID;
+ while (!ft.IsGeometryClosed())
+ {
+ uint64_t const nodeID = e.nodes.back();
+ if (!base_type::m_holder.GetNextWay(fID, nodeID, e))
+ break;
+
+ if (!path.insert(fID).second)
+ {
+ LOG_SHORT(LWARNING, ("JOIN_DBG! Cycle found during way joining, duplicate id = ", fID));
+ break;
+ }
+
+      // skip the first point because it equals the previous one
+ size_t i;
+ int inc;
+ if (e.nodes.front() == nodeID)
+ {
+ i = 1;
+ inc = 1;
+ }
+ else
+ {
+ ASSERT ( e.nodes.back() == nodeID, () );
+
+ i = e.nodes.size() - 2;
+ inc = -1;
+ }
+
+ size_t count = 1;
+ while (count++ < e.nodes.size())
+ {
+ m2::PointD pt;
+ if (base_type::GetPoint(e.nodes[i], pt))
+ ft.AddPoint(pt);
+ else
+ return false;
+
+ i += inc;
+ }
+ }
+
+ if (ft.IsGeometryClosed())
+ {
+ m_usedDirect.insert(path.begin(), path.end());
+
+ base_type::FinishAreaFeature(featureID, ft);
+
+ base_type::m_emitter(ft);
+ return true;
+ }
+ else
+ {
+ LOG_SHORT(LWARNING, ("JOIN_DBG! Ways not connected for root way = ", featureID));
+ return false;
+ }
+ }
+
+protected:
+ virtual void EmitElement(XMLElement * p)
+ {
+ uint64_t id;
+ typename base_type::value_t fValue;
+    if (!base_type::ParseType(p, id, fValue))
+ return;
+
+    // check if we can make a united feature
+ for (typename base_type::value_t::types_t::iterator i = fValue.types.begin(); i != fValue.types.end(); ++i)
+ if (feature::NeedUnite(*i))
+ {
+ FeatureBuilder ft;
+ ft.AddName(fValue.name);
+ ft.AddTypes(fValue.types.begin(), fValue.types.end());
+ ft.AddLayer(fValue.layer);
+
+ TryEmitUnited(id, ft);
+ break;
+ }
+ }
+
+public:
+ SecondPassParserJoin(TEmitter & emitter, THolder & holder)
+ : base_type(emitter, holder)
+ {
+ }
+};
+
+template <class TEmitter, class THolder>
+class SecondPassParserUsual : public SecondPassParserBase<TEmitter, THolder>
+{
+ typedef SecondPassParserBase<TEmitter, THolder> base_type;
+
+protected:
+ virtual void EmitElement(XMLElement * p)
+ {
+ uint64_t id;
+ typename base_type::value_t fValue;
+    if (!base_type::ParseType(p, id, fValue))
+ return;
+
+ FeatureBuilder ft;
+ ft.AddName(fValue.name);
+ ft.AddTypes(fValue.types.begin(), fValue.types.end());
+ ft.AddLayer(fValue.layer);
+
+ if (p->name == "node")
+ {
+ if (!feature::IsDrawableLike(fValue.types, feature::fpoint))
+ return;
+
+ m2::PointD pt;
+ if (p->childs.empty() || !base_type::GetPoint(id, pt))
+ return;
+
+ ft.AddPoint(pt);
+ }
+ else if (p->name == "way")
+ {
+ bool const isArea = feature::IsDrawableLike(fValue.types, feature::farea);
+
+ if (!feature::IsDrawableLike(fValue.types, feature::fline) && !isArea)
+ return;
+
+ // geometry of feature
+ for (size_t i = 0; i < p->childs.size(); ++i)
+ {
+ if (p->childs[i].name == "nd")
+ {
+ uint64_t id;
+ VERIFY ( utils::to_uint64(p->childs[i].attrs["ref"], id),
+ ("Bad node ref in way : ", p->childs[i].attrs["ref"]) );
+
+ m2::PointD pt;
+ if (!base_type::GetPoint(id, pt))
+ return;
+
+ ft.AddPoint(pt);
+ }
+ }
+
+ if (ft.GetPointsCount() <= 1)
+ return;
+
+ // Get the tesselation for an area object (only if it has area drawing rules,
+ // otherwise it will stay a linear object).
+ if (isArea)
+ base_type::FinishAreaFeature(id, ft);
+ }
+ else
+ {
+ ASSERT ( false, ("Unknown osm element : ", p->name) );
+ return;
+ }
+
+ base_type::m_emitter(ft);
+ }
+
+public:
+ SecondPassParserUsual(TEmitter & emitter, THolder & holder)
+ : base_type(emitter, holder)
+ {
+ }
+};
+
+#include "../../base/stop_mem_debug.hpp"
diff --git a/indexer/indexer_tool/tesselator.cpp b/indexer/indexer_tool/tesselator.cpp
new file mode 100644
index 0000000000..ea69e89b50
--- /dev/null
+++ b/indexer/indexer_tool/tesselator.cpp
@@ -0,0 +1,72 @@
+#include "../../3party/sgitess/interface.h"
+#include "osm_element.hpp"
+
+namespace feature
+{
+ struct AddTessPointF
+ {
+ tess::Tesselator & m_tess;
+ AddTessPointF(tess::Tesselator & tess) : m_tess(tess)
+ {}
+ void operator()(CoordPointT const & p)
+ {
+ m_tess.add(tess::Vertex(p.first, p.second));
+ }
+ };
+
+ void TesselateInterior(FeatureBuilder & featureBuilder, feature::holes_cont_t const & holes)
+ {
+ vector<char> serial;
+ featureBuilder.Serialize(serial);
+ Feature feature(serial);
+
+ ASSERT(featureBuilder.IsGeometryClosed(), ());
+ {
+ tess::VectorDispatcher disp;
+ tess::Tesselator tess;
+ tess.setDispatcher(&disp);
+ tess.setWindingRule(tess::WindingOdd);
+
+ tess.beginPolygon();
+
+ tess.beginContour();
+ feature.ForEachPoint(AddTessPointF(tess));
+ tess.endContour();
+
+ for (feature::holes_cont_t::const_iterator it = holes.begin(); it != holes.end(); ++it)
+ {
+ tess.beginContour();
+ for (size_t i = 0; i < (*it).size(); ++i)
+ tess.add(tess::Vertex((*it)[i].x, (*it)[i].y));
+ tess.endContour();
+ }
+
+ tess.endPolygon();
+
+ for (size_t i = 0; i < disp.indices().size(); ++i)
+ {
+ vector<m2::PointD> vertices;
+ switch (disp.indices()[i].first)
+ {
+ case tess::TrianglesFan:
+ case tess::TrianglesStrip:
+ case tess::LineLoop:
+ ASSERT(0, ("We've got invalid type during teselation:", disp.indices()[i].first));
+ case tess::TrianglesList: break;
+ }
+
+ for (size_t j = 0; j < disp.indices()[i].second.size(); ++j)
+ {
+ int const idx = disp.indices()[i].second[j];
+ tess::Vertex const & v = disp.vertices()[idx];
+ vertices.push_back(m2::PointD(v.x, v.y));
+ }
+
+ ASSERT_EQUAL(vertices.size() % 3, 0, ());
+ size_t const triangleCount = vertices.size() / 3;
+      for (size_t t = 0; t < triangleCount; ++t)
+        featureBuilder.AddTriangle(vertices[3*t + 0], vertices[3*t + 1], vertices[3*t + 2]);
+ }
+ }
+ }
+}
diff --git a/indexer/indexer_tool/update_generator.cpp b/indexer/indexer_tool/update_generator.cpp
new file mode 100644
index 0000000000..a289f74e51
--- /dev/null
+++ b/indexer/indexer_tool/update_generator.cpp
@@ -0,0 +1,217 @@
+#include "update_generator.hpp"
+
+#include "../../coding/file_writer.hpp"
+
+#include "../../platform/platform.hpp"
+
+#include "../../indexer/country.hpp"
+#include "../../indexer/defines.hpp"
+
+#include "../../base/string_utils.hpp"
+#include "../../base/logging.hpp"
+
+#include "../../std/target_os.hpp"
+#include "../../std/fstream.hpp"
+
+#define GROUP_FILE_EXTENSION ".group"
+#define REGION_FILE_EXTENSION ".regions"
+#define CELLID_FILE_EXTENSION ".cells"
+
+#define PREFIX_CHAR '-'
+
+#ifdef OMIM_OS_WINDOWS_NATIVE
+ #define DIR_SEP '\\'
+#else
+ #define DIR_SEP '/'
+#endif
+
+namespace update
+{
+ typedef vector<string> TCellIds;
+ typedef pair<mapinfo::Country, TCellIds> TCountryCells;
+ typedef vector<TCountryCells> TCountryCellsContainer;
+
+ string ChopExtension(string const & nameWithExtension)
+ {
+ size_t dotPos = nameWithExtension.rfind('.');
+ return nameWithExtension.substr(0, dotPos);
+ }
+
+ string ChopPrefix(string const & nameWithPrefix)
+ {
+ size_t prefixPos = nameWithPrefix.rfind(PREFIX_CHAR);
+ if (prefixPos != string::npos && (prefixPos + 1) < nameWithPrefix.size())
+ return nameWithPrefix.substr(prefixPos + 1, string::npos);
+ else
+ return nameWithPrefix;
+ }
+
+ bool LoadCountryCells(string const & fullPath, TCellIds & outCells)
+ {
+ outCells.clear();
+ ifstream file(fullPath.c_str());
+ string cell;
+ while (file.good())
+ {
+ getline(file, cell);
+ if (cell.size())
+ outCells.push_back(cell);
+ }
+ return !outCells.empty();
+ }
+
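+  /// Expected layout (a sketch):
+  ///   <path>/<Group>.group/<Country>.regions/<Region>.cells
+  ///   <path>/<Group>.group/<Country>.cells  (for countries without regions)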
+ /// @param path where folders with groups reside (Africa, Asia etc.)
+ /// @return true if loaded correctly
+ bool ScanAndLoadCountryCells(string path, TCountryCellsContainer & outCells)
+ {
+ if (path.empty())
+ return false;
+ // fix missing slash
+ if (path[path.size() - 1] != DIR_SEP)
+ path.push_back(DIR_SEP);
+
+ outCells.clear();
+
+ Platform & platform = GetPlatform();
+ // get all groups
+ Platform::FilesList groups;
+ platform.GetFilesInDir(path, "*" GROUP_FILE_EXTENSION, groups);
+ for (Platform::FilesList::iterator itGroup = groups.begin(); itGroup != groups.end(); ++itGroup)
+ {
+ // get countries with regions
+ Platform::FilesList countries;
+ platform.GetFilesInDir(path + DIR_SEP + *itGroup, "*" REGION_FILE_EXTENSION, countries);
+ for (Platform::FilesList::iterator itCountry = countries.begin(); itCountry != countries.end(); ++itCountry)
+ { // get all regions
+ Platform::FilesList regions;
+ platform.GetFilesInDir(path + DIR_SEP + *itGroup + DIR_SEP + *itCountry,
+ "*" CELLID_FILE_EXTENSION, regions);
+ for (Platform::FilesList::iterator itRegion = regions.begin(); itRegion != regions.end(); ++itRegion)
+ {
+ TCellIds cells;
+ string fullPath = path + *itGroup + DIR_SEP + *itCountry + DIR_SEP + *itRegion;
+ if (LoadCountryCells(fullPath, cells))
+ {
+ outCells.push_back(TCountryCells(mapinfo::Country(ChopExtension(*itGroup),
+ ChopExtension(*itCountry),
+ ChopExtension(*itRegion)),
+ cells));
+ }
+ else
+ {
+ LOG(LERROR, ("Can't load cells from file", fullPath));
+ }
+ }
+ }
+
+ // get countries without regions
+ countries.clear();
+ platform.GetFilesInDir(path + DIR_SEP + *itGroup, "*" CELLID_FILE_EXTENSION, countries);
+ for (Platform::FilesList::iterator itCountry = countries.begin(); itCountry != countries.end(); ++itCountry)
+ {
+ TCellIds cells;
+ string fullPath = path + *itGroup + DIR_SEP + *itCountry;
+ if (LoadCountryCells(fullPath, cells))
+ {
+ outCells.push_back(TCountryCells(mapinfo::Country(ChopExtension(*itGroup),
+ ChopExtension(*itCountry),
+ ""),
+ cells));
+ }
+ else
+ {
+ LOG(LERROR, ("Can't load cells from file", fullPath));
+ }
+ }
+ }
+ return !outCells.empty();
+ }
+
+ class CellChecker
+ {
+ string const m_path, m_file, m_url;
+
+ public:
+ CellChecker(string const & dataPath, string const & dataFileName, string const & baseUrl)
+ : m_path(dataPath), m_file(dataFileName), m_url(baseUrl) {}
+ void operator()(TCountryCells & cells) const
+ {
+ string const fileCell = ChopPrefix(ChopExtension(m_file));
+ for (TCellIds::iterator it = cells.second.begin(); it != cells.second.end(); ++it)
+ {
+ // check if country contains tile with this cell
+ if (fileCell.find(*it) == 0 || it->find(fileCell) == 0)
+ {
+ // data file
+ uint64_t fileSize = 0;
+ CHECK(GetPlatform().GetFileSize(m_path + m_file, fileSize), ("Non-existing file?"));
+ cells.first.AddUrl(mapinfo::TUrl(m_url + m_file, fileSize));
+ // index file
+ string const indexFileName = mapinfo::IndexFileForDatFile(m_file);
+ CHECK(GetPlatform().GetFileSize(m_path + indexFileName, fileSize), ("Non-existing file?"));
+ cells.first.AddUrl(mapinfo::TUrl(m_url + indexFileName, fileSize));
+ break;
+ }
+ }
+ }
+ };
+
+ class CountryAdder
+ {
+ mapinfo::TCountriesContainer & m_countries;
+
+ public:
+ CountryAdder(mapinfo::TCountriesContainer & outCountries)
+ : m_countries(outCountries) {}
+ void operator()(TCountryCells const & cells)
+ {
+ if (cells.first.Urls().size())
+ m_countries[cells.first.Group()].push_back(cells.first);
+ }
+ };
+
+ class GroupSorter
+ {
+ public:
+ void operator()(mapinfo::TCountriesContainer::value_type & toSort)
+ {
+ sort(toSort.second.begin(), toSort.second.end());
+ }
+ };
+
+ bool GenerateMapsList(string const & pathToMaps, string const & pathToCountries, string const & baseUrl)
+ {
+ Platform & platform = GetPlatform();
+
+ TCountryCellsContainer countriesCells;
+ if (!ScanAndLoadCountryCells(pathToCountries, countriesCells))
+ {
+ LOG(LERROR, ("Can't load countries' cells from path", pathToCountries));
+ return false;
+ }
+
+ Platform::FilesList datFiles;
+ if (!platform.GetFilesInDir(pathToMaps, "*" DATA_FILE_EXTENSION, datFiles))
+ {
+ LOG(LERROR, ("Can't find any data files at path", pathToMaps));
+ return false;
+ }
+
+ // update each country's urls corresponding to existing data files
+ for (Platform::FilesList::iterator it = datFiles.begin(); it != datFiles.end(); ++it)
+ {
+ for_each(countriesCells.begin(), countriesCells.end(), CellChecker(pathToMaps, *it, baseUrl));
+ }
+
+ // save update list
+ mapinfo::TCountriesContainer countries;
+ for_each(countriesCells.begin(), countriesCells.end(), CountryAdder(countries));
+
+ // sort groups
+ for_each(countries.begin(), countries.end(), GroupSorter());
+
+ FileWriter writer(pathToMaps + UPDATE_CHECK_FILE);
+ SaveCountries(countries, writer);
+ return true;
+ }
+} // namespace update
diff --git a/indexer/indexer_tool/update_generator.hpp b/indexer/indexer_tool/update_generator.hpp
new file mode 100644
index 0000000000..5ae5a590e6
--- /dev/null
+++ b/indexer/indexer_tool/update_generator.hpp
@@ -0,0 +1,8 @@
+#pragma once
+
+#include "../../std/string.hpp"
+
+namespace update
+{
+ bool GenerateMapsList(string const & pathToMaps, string const & pathToCountries, string const & baseUrl);
+} // namespace update
diff --git a/indexer/interval_index.hpp b/indexer/interval_index.hpp
new file mode 100644
index 0000000000..e23e6e5b48
--- /dev/null
+++ b/indexer/interval_index.hpp
@@ -0,0 +1,116 @@
+#pragma once
+#include "../coding/endianness.hpp"
+#include "../base/base.hpp"
+#include "../base/assert.hpp"
+
+class IntervalIndexBase
+{
+public:
+#pragma pack(push, 1)
+ struct Header
+ {
+ uint8_t m_CellIdLeafBytes;
+ };
+
+ struct Index
+ {
+ uint32_t m_BaseOffset;
+ uint16_t m_Count[256];
+ };
+#pragma pack(pop)
+};
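+
+// On-disk layout (a sketch, mirroring BuildIntervalIndex in interval_index_builder.hpp):
+// a Header, then the Index nodes written level by level (m_Count[b] is the number of
+// children under byte b, m_BaseOffset points to the first child), and finally the leaf
+// records: a ValueT followed by the m_CellIdLeafBytes low bytes of the cell id.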
+
+template <typename ValueT, class ReaderT>
+class IntervalIndex : public IntervalIndexBase
+{
+public:
+ IntervalIndex(ReaderT const & reader, int cellIdBytes = 5)
+ : m_Reader(reader), m_CellIdBytes(cellIdBytes)
+ {
+ m_Reader.Read(0, &m_Header, sizeof(m_Header));
+ ReadIndex(sizeof(m_Header), m_Level0Index);
+ }
+
+ template <typename F>
+ void ForEach(F const & f, uint64_t beg, uint64_t end) const
+ {
+ ASSERT_LESS(beg, 1ULL << 8 * m_CellIdBytes, (beg, end));
+ ASSERT_LESS_OR_EQUAL(end, 1ULL << 8 * m_CellIdBytes, (beg, end));
+ // end is inclusive in ForEachImpl().
+ --end;
+ ForEachImpl(f, beg, end, m_Level0Index, m_CellIdBytes - 1);
+ }
+
+private:
+ template <typename F>
+ void ForEachImpl(F const & f, uint64_t beg, uint64_t end, Index const & index, int level) const
+ {
+ uint32_t const beg0 = static_cast<uint32_t>(beg >> (8 * level));
+ uint32_t const end0 = static_cast<uint32_t>(end >> (8 * level));
+ uint32_t cumCount = 0;
+ for (uint32_t i = 0; i < beg0; ++i)
+ cumCount += index.m_Count[i];
+ for (uint32_t i = beg0; i <= end0; ++i)
+ {
+ ASSERT_LESS(i, 256, ());
+ if (index.m_Count[i] != 0)
+ {
+ uint64_t const levelBytesFF = (1ULL << 8 * level) - 1;
+ uint64_t const b1 = (i == beg0) ? (beg & levelBytesFF) : 0;
+ uint64_t const e1 = (i == end0) ? (end & levelBytesFF) : levelBytesFF;
+ if (level > m_Header.m_CellIdLeafBytes)
+ {
+ Index index1;
+ ReadIndex(index.m_BaseOffset + (cumCount * sizeof(Index)), index1);
+ ForEachImpl(f, b1, e1, index1, level - 1);
+ }
+ else
+ {
+ // TODO: Use binary search here if count is very large.
+ uint32_t const step = sizeof(ValueT) + m_Header.m_CellIdLeafBytes;
+ uint32_t const count = index.m_Count[i];
+ uint32_t pos = index.m_BaseOffset + (cumCount * step);
+ vector<char> data(step * count);
+ char const * pData = &data[0];
+ m_Reader.Read(pos, &data[0], data.size());
+ for (uint32_t j = 0; j < count; ++j, pData += step)
+ // for (uint32_t j = 0; j < count; ++j, pos += step)
+ {
+ Value value;
+ value.m_CellId = 0;
+ memcpy(&value, pData, step);
+ // m_Reader.Read(pos, &value, step);
+ uint32_t const cellId = SwapIfBigEndian(value.m_CellId);
+ if (b1 <= cellId && cellId <= e1)
+ f(SwapIfBigEndian(value.m_Value));
+ }
+ }
+ cumCount += index.m_Count[i];
+ }
+ }
+ }
+
+ void ReadIndex(uint64_t pos, Index & index) const
+ {
+ m_Reader.Read(pos, &index, sizeof(Index));
+ if (IsBigEndian())
+ {
+ index.m_BaseOffset = SwapIfBigEndian(index.m_BaseOffset);
+ for (uint32_t i = 0; i < 256; ++i)
+ index.m_Count[i] = SwapIfBigEndian(index.m_Count[i]);
+ }
+ }
+
+#pragma pack(push, 1)
+ struct Value
+ {
+ ValueT m_Value;
+ uint32_t m_CellId;
+ };
+#pragma pack(pop)
+
+ ReaderT m_Reader;
+ Header m_Header;
+ Index m_Level0Index;
+ int m_CellIdBytes;
+};
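+
+// Usage sketch (names here are illustrative only; ReaderT must provide
+// Read(pos, dst, size) like FileReader, and F is called with the stored ValueT of
+// every record whose cell id lies in [beg, end)):
+//   IntervalIndex<uint32_t, FileReader> index(FileReader("features.idx"));
+//   index.ForEach(CollectOffsetsFunctor(result), beg, end);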
diff --git a/indexer/interval_index_builder.hpp b/indexer/interval_index_builder.hpp
new file mode 100644
index 0000000000..41df0d93fa
--- /dev/null
+++ b/indexer/interval_index_builder.hpp
@@ -0,0 +1,137 @@
+#pragma once
+#include "interval_index.hpp"
+#include "../coding/endianness.hpp"
+#include "../coding/write_to_sink.hpp"
+#include "../base/assert.hpp"
+#include "../base/base.hpp"
+#include "../base/logging.hpp"
+#include "../std/vector.hpp"
+#include "../std/memcpy.hpp"
+
+template <int kCellIdBytes, typename CellIdValueIterT, class SinkT>
+void BuildIntervalIndex(CellIdValueIterT const & beg, CellIdValueIterT const & end,
+ SinkT & writer, int const leafBytes = 1)
+{
+#ifdef DEBUG
+ // Check that [beg, end) is sorted and log most populous cell.
+ if (beg != end)
+ {
+ uint32_t count = 0;
+ uint32_t maxCount = 0;
+ typename CellIdValueIterT::value_type mostPopulousCell;
+ CellIdValueIterT it = beg;
+ int64_t prev = it->first;
+ for (++it; it != end; ++it)
+ {
+ ASSERT_GREATER(it->first, 0, ());
+ ASSERT_LESS_OR_EQUAL(prev, it->first, ());
+ count = (prev == it->first ? count + 1 : 0);
+ if (count > maxCount)
+ {
+ maxCount = count;
+ mostPopulousCell = *it;
+ }
+ prev = it->first;
+ }
+ if (maxCount > 0)
+ {
+ LOG(LINFO, ("Most populous cell:", maxCount,
+ mostPopulousCell.first, mostPopulousCell.second));
+ }
+ }
+ for (CellIdValueIterT it = beg; it != end; ++it)
+ ASSERT_LESS(it->first, 1ULL << 8 * kCellIdBytes, ());
+#endif
+ // Write header.
+ {
+ IntervalIndexBase::Header header;
+ header.m_CellIdLeafBytes = leafBytes;
+ writer.Write(&header, sizeof(header));
+ }
+
+#ifdef DEBUG
+ vector<uint32_t> childOffsets;
+ childOffsets.push_back(static_cast<uint32_t>(writer.Pos()));
+#endif
+ uint32_t childOffset = static_cast<uint32_t>(writer.Pos()) + sizeof(IntervalIndexBase::Index);
+ uint32_t thisLevelCount = 1;
+ for (int level = kCellIdBytes - 1; level >= leafBytes; --level)
+ {
+ LOG(LINFO, ("Building interval index, level", level));
+#ifdef DEBUG
+ ASSERT_EQUAL(childOffsets.back(), writer.Pos(), ());
+ childOffsets.push_back(childOffset);
+#endif
+ uint64_t const initialWriterPos = writer.Pos();
+ uint32_t childCount = 0, totalChildCount = 0;
+ IntervalIndexBase::Index index;
+ memset(&index, 0, sizeof(index));
+ uint64_t prevParentBytes = 0;
+ uint8_t prevByte = 0;
+ for (CellIdValueIterT it = beg; it != end; ++it)
+ {
+ uint64_t id = it->first;
+ uint64_t const thisParentBytes = id >> 8 * (level + 1);
+ uint8_t const thisByte = static_cast<uint8_t>(0xFF & (id >> 8 * level));
+ if (it != beg && prevParentBytes != thisParentBytes)
+ {
+ // Writing index for previous parent.
+ index.m_BaseOffset = SwapIfBigEndian(childOffset);
+ for (uint32_t i = 0; i < 256; ++i)
+ index.m_Count[i] = SwapIfBigEndian(index.m_Count[i]);
+ writer.Write(&index, sizeof(index));
+ memset(&index, 0, sizeof(index));
+ if (level != leafBytes)
+ childOffset += childCount * sizeof(index);
+ else
+ childOffset += (sizeof(beg->second) + leafBytes) * childCount;
+ childCount = 0;
+ --thisLevelCount;
+ }
+
+ if (level == leafBytes || it == beg ||
+ prevByte != thisByte || prevParentBytes != thisParentBytes)
+ {
+ ++childCount;
+ ++totalChildCount;
+ ++index.m_Count[thisByte];
+ CHECK_LESS(
+ index.m_Count[thisByte], 65535,
+ (level, leafBytes, prevByte, thisByte, prevParentBytes, thisParentBytes, it->first));
+ }
+
+ prevParentBytes = thisParentBytes;
+ prevByte = thisByte;
+ }
+ index.m_BaseOffset = SwapIfBigEndian(childOffset);
+ for (uint32_t i = 0; i < 256; ++i)
+ index.m_Count[i] = SwapIfBigEndian(index.m_Count[i]);
+ writer.Write(&index, sizeof(index));
+ memset(&index, 0, sizeof(index));
+ // if level == leafBytes, this is wrong, but childOffset is not needed any more.
+ childOffset += childCount * sizeof(IntervalIndexBase::Index);
+ --thisLevelCount;
+ CHECK_EQUAL(thisLevelCount, 0, (kCellIdBytes, leafBytes));
+ thisLevelCount = totalChildCount;
+
+ LOG(LINFO, ("Level size:", writer.Pos() - initialWriterPos));
+
+ if (beg == end)
+ break;
+ }
+
+ // Writing values.
+ ASSERT_EQUAL(childOffsets.back(), writer.Pos(), ());
+ LOG(LINFO, ("Building interval, leaves."));
+ uint64_t const initialWriterPos = writer.Pos();
+ uint32_t const mask = (1ULL << 8 * leafBytes) - 1;
+ for (CellIdValueIterT it = beg; it != end; ++it)
+ {
+ WriteToSink(writer, it->second);
+ uint32_t cellId = static_cast<uint32_t>(it->first & mask);
+ cellId = SwapIfBigEndian(cellId);
+ writer.Write(&cellId, leafBytes);
+ }
+ LOG(LINFO, ("Level size:", writer.Pos() - initialWriterPos));
+ LOG(LINFO, ("Interval index building done."));
+}
diff --git a/indexer/mercator.cpp b/indexer/mercator.cpp
new file mode 100644
index 0000000000..3bc3f9f486
--- /dev/null
+++ b/indexer/mercator.cpp
@@ -0,0 +1,6 @@
+#include "mercator.hpp"
+
+double MercatorBounds::minX = -180;
+double MercatorBounds::maxX = 180;
+double MercatorBounds::minY = -180;
+double MercatorBounds::maxY = 180;
diff --git a/indexer/mercator.hpp b/indexer/mercator.hpp
new file mode 100644
index 0000000000..21cabf7d99
--- /dev/null
+++ b/indexer/mercator.hpp
@@ -0,0 +1,33 @@
+#pragma once
+#include "../geometry/point2d.hpp"
+#include "../base/math.hpp"
+
+struct MercatorBounds
+{
+ static double minX;
+ static double maxX;
+ static double minY;
+ static double maxY;
+
+ inline static double YToLat(double y)
+ {
+ return my::RadToDeg(2.0 * atan(exp(my::DegToRad(y))) - math::pi / 2.0);
+ }
+
+ inline static double LatToY(double lat)
+ {
+ lat = my::clamp(lat, -86.0, 86.0);
+ double const res = my::RadToDeg(log(tan(my::DegToRad(45.0 + lat * 0.5))));
+ return my::clamp(res, -180.0, 180.0);
+ }
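+  // For instance, LatToY(0) == 0 and LatToY(85.0511) is close to 180; latitudes are
+  // clamped to [-86, 86], so the result always stays within [minY, maxY].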
+
+ inline static double XToLon(double x)
+ {
+ return x;
+ }
+
+ inline static double LonToX(double lon)
+ {
+ return lon;
+ }
+};
diff --git a/indexer/osm2type.cpp b/indexer/osm2type.cpp
new file mode 100644
index 0000000000..5b1092610c
--- /dev/null
+++ b/indexer/osm2type.cpp
@@ -0,0 +1,646 @@
+#include "osm2type.hpp"
+#include "classificator.hpp"
+#include "drawing_rules.hpp"
+#include "feature_visibility.hpp"
+#include "xml_element.hpp"
+
+#include "../coding/parse_xml.hpp"
+#include "../coding/file_reader.hpp"
+
+#include "../base/assert.hpp"
+#include "../base/string_utils.hpp"
+
+#include "../std/fstream.hpp"
+#include "../std/bind.hpp"
+#include "../std/vector.hpp"
+#include "../std/set.hpp"
+#include "../std/algorithm.hpp"
+
+#include "../base/start_mem_debug.hpp"
+
+namespace ftype {
+
+ namespace
+ {
+    /// get value of mark (1 == "yes", -1 == "no", 0 == not a "yes/no" value)
+ static int get_mark_value(string const & k, string const & v)
+ {
+ static char const * aTrue[] = { "yes", "true", "1", "*" };
+ static char const * aFalse[] = { "no", "false", "-1" };
+
+ utils::TokenizeIterator it(v, "|");
+ while (!it.end())
+ {
+ if (utils::IsInArray(aTrue, *it)) return 1;
+ if (utils::IsInArray(aFalse, *it)) return -1;
+ ++it;
+ }
+
+ // "~" means no this tag, so sometimes it means true,
+ // and all other cases - false. Choose according to key.
+ if (v == "~")
+ return (k == "access" ? 1 : -1);
+
+ return 0;
+ }
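+    // e.g. get_mark_value("oneway", "yes") == 1, get_mark_value("oneway", "no") == -1,
+    // get_mark_value("access", "~") == 1, and anything unrecognized gives 0.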
+
+ class OSMTypesStream
+ {
+ /// @name processing elements definitions
+ //@{
+ struct element_t
+ {
+ element_t() : pObj(0) {}
+
+ string name;
+ map<string, string> attr;
+
+ ClassifObject * pObj;
+ };
+
+ vector<element_t> m_elements;
+ element_t & current() { return m_elements.back(); }
+
+ int m_priority;
+ //@}
+
+    /// check if the element is a draw rule (usually a leaf in the xml)
+ static bool is_draw_rule(string const & e)
+ {
+ static char const * rules[] = { "line", "tunnel", "area", "symbol", "caption", "text",
+ "circle", "pathText", "wayMarker" };
+ return utils::IsInArray(rules, e);
+ }
+
+ uint8_t get_rule_type()
+ {
+ int count = static_cast<int>(m_elements.size()) - 2;
+ ASSERT ( count >= 0, (count) );
+
+ string e;
+ while (e.empty() && count >= 0)
+ {
+ e = m_elements[count].attr["e"];
+ --count;
+ }
+ ASSERT ( !e.empty(), () );
+
+ utils::TokenizeIterator it(e, "|");
+ uint8_t ret = 0;
+ while (!it.end())
+ {
+ string const & s = *it;
+ if (s == "node")
+ ret |= drule::node;
+ else if (s == "way")
+ ret |= drule::way;
+ ++it;
+ }
+
+ ASSERT ( ret != 0, () );
+ return static_cast<drule::rule_geo_t>(ret);
+ }
+
+ /// check if it's our element to parse
+ static bool is_our_element(string const & e)
+ {
+ static char const * elems[] = { "rules", "rule", "else", "layer",
+        // addclass appears in small scales (6-11);
+        // don't skip it during parsing, but it is not processed like a rule
+ "addclass" };
+ return (utils::IsInArray(elems, e) || is_draw_rule(e));
+ }
+
+    /// check if the key should be processed
+ static bool is_valid_key(string const & k)
+ {
+ static char const * bad[] = { "osmarender:render", "osmarender:rendername",
+ "osmarender:renderref", "addr:housenumber" };
+ return (!k.empty() && !utils::IsInArray(bad, k));
+ }
+
+ static bool is_valid_value(string const & v)
+ {
+ return !v.empty();
+ }
+
+ /// check if key is a 'mark'
+ static bool is_mark_key(string const & k)
+ {
+ static char const * mark[] = { "bridge", "tunnel", "area", "lock", "oneway", "junction",
+ "embankment", "cutting", "motorroad", "cycleway",
+ "bicycle", "horse", "capital", "fee" };
+ return utils::IsInArray(mark, k);
+ }
+
+ static bool process_feature_like_mark_from_root(string const & /*k*/, string const & v)
+ {
+ static char const * mark[] = { "turning_circle", "dyke", "dike", "levee", "embankment" };
+ return utils::IsInArray(mark, v);
+ }
+
+ static bool process_feature_like_mark(string const & k, string const & v)
+ {
+ return (k == "highway" && (v == "construction" || v == "disused"));
+ }
+
+    /// check if the whole element should be skipped by its key
+ static bool is_skip_element_by_key(string const & k)
+ {
+ static char const * skip[] = { "addr:housenumber", "fixme" };
+ return utils::IsInArray(skip, k);
+ }
+
+    /// skip the element and all its sub-elements
+ bool m_forceSkip;
+
+ public:
+ OSMTypesStream() : m_priority(0), m_forceSkip(false) {}
+
+ bool Push(string const & name)
+ {
+ if (!m_forceSkip && is_our_element(name))
+ {
+ m_elements.push_back(element_t());
+ current().name = name;
+ return true;
+ }
+
+ return false;
+ }
+
+ public:
+ void AddAttr(string name, string value)
+ {
+ // make lower case for equivalent string comparison
+ utils::make_lower_case(name);
+ utils::make_lower_case(value);
+
+ if ((name == "k") && is_skip_element_by_key(value))
+ m_forceSkip = true;
+ else
+ current().attr[name] = value;
+ }
+
+ ClassifObject * get_root() { return classif().GetMutableRoot(); }
+
+ void Pop(string const & /*element*/)
+ {
+ if (!m_forceSkip)
+ add_type_recursive(get_root(), 0, vector<string>());
+ else
+ m_forceSkip = false;
+
+ m_elements.pop_back();
+ }
+
+ private:
+ vector<string> make_concat(vector<string> const & v, int intV, string const & s)
+ {
+ if (intV == 1)
+ {
+ vector<string> vv;
+ vv.reserve(v.size() + 1);
+ bool inserted = false;
+ for (size_t i = 0; i < v.size(); ++i)
+ {
+ if (!(v[i] < s) && !inserted)
+ {
+ inserted = true;
+ vv.push_back(s);
+ }
+ vv.push_back(v[i]);
+ }
+ if (!inserted) vv.push_back(s);
+
+ return vv;
+ }
+ else return v;
+ }
+
+    /// get the parent of object (p) in the created chain of elements
+ ClassifObject * get_parent_of(size_t i, ClassifObject * p)
+ {
+ ASSERT ( i > 0, () );
+ while (--i > 0)
+ if (m_elements[i].pObj == p) break;
+
+ ASSERT ( i > 0, () );
+ while (--i > 0)
+ if (m_elements[i].pObj)
+ return m_elements[i].pObj;
+
+ return get_root();
+ }
+
+ void clear_states(size_t start)
+ {
+ for (size_t i = start; i < m_elements.size(); ++i)
+ m_elements[i].pObj = 0;
+ }
+
+ void add_type_recursive(ClassifObject * pParent,
+ size_t start,
+ std::vector<string> const & marks)
+ {
+ for (size_t i = start; i < m_elements.size(); ++i)
+ {
+ element_t & e = m_elements[i];
+
+ if (e.pObj) continue;
+
+ if (e.name == "rule")
+ {
+ // process rule
+ string k = e.attr["k"];
+ if (!is_valid_key(k)) continue;
+
+ string v = e.attr["v"];
+ if (!is_valid_value(v)) continue;
+
+ utils::TokenizeIterator iK(k, "|");
+ if (iK.is_last())
+ {
+ // process one key
+ ASSERT ( *iK == k, () );
+
+ int intV = get_mark_value(k, v);
+ if (is_mark_key(k) && (intV != 0))
+ {
+            // key is a mark, so save it and go further
+ add_type_recursive(pParent, i + 1, make_concat(marks, intV, k));
+ clear_states(i);
+ }
+ else
+ {
+              // buildings are assumed to be a feature type
+ bool lets_try = (k == "building" && intV == 1);
+
+              // default access is yes; if "no", make an additional feature type
+ if (!lets_try && (k == "access" && intV == -1))
+ {
+ lets_try = true;
+ intV = 0;
+ v = "no-access";
+ }
+
+ if (!lets_try && intV != 0)
+ {
+                // skip these keys because they are dummy
+ continue;
+ }
+ else
+ {
+ // add root or criterion
+ if (pParent == get_root())
+ {
+ pParent = pParent->Add(k);
+ e.pObj = pParent;
+
+                  // use m_elements[1] to hold the first parent of objects created further on;
+                  // needed for get_parent_of to work correctly
+ m_elements[1].pObj = pParent;
+ }
+ else
+ {
+ // avoid recursion like this:
+ // <k = "x", v = "a|b|c">
+ // <k = "x", v = "a">
+ // <k = "x", v = "b">
+ // <k = "x", v = "c">
+ ClassifObject * ppParent = get_parent_of(i, pParent);
+ if (k != ppParent->GetName())
+ {
+                    // do not add a criterion equal to the base object
+ if (k != pParent->GetName() &&
+ !process_feature_like_mark(pParent->GetName(), k))
+ pParent->AddCriterion(k);
+ }
+ else
+ pParent = ppParent;
+ }
+
+ // process values
+ utils::TokenizeIterator iV(v, "|");
+ while (!iV.end())
+ {
+ bool const b1 = process_feature_like_mark_from_root(k, *iV);
+ if (b1 || process_feature_like_mark(k, *iV))
+ {
+                    // the value acts like a mark, so save it and go further
+ add_type_recursive(
+ b1 ? get_root() : pParent, i + 1, make_concat(marks, 1, *iV));
+ clear_states(i);
+ }
+ else
+ {
+ ClassifObject * p = pParent;
+ if (intV == 0)
+ p = pParent->Add(*iV);
+ e.pObj = p;
+
+ add_type_recursive(p, i + 1, marks);
+ clear_states(i);
+ }
+
+ ++iV;
+ }
+ }
+ }
+ }
+ else
+ {
+ char const * aTry[] = { "natural", "landuse" };
+
+ while (!iK.end())
+ {
+ // let's try to add root keys
+ bool addMode = (pParent == get_root() && utils::IsInArray(aTry, *iK));
+
+ ClassifObject * p = (addMode ? pParent->Add(*iK) : pParent->Find(*iK));
+ if (p && (get_mark_value(*iK, v) == 0))
+ {
+ if (p->IsCriterion()) p = pParent;
+
+ utils::TokenizeIterator iV(v, "|");
+ while (!iV.end())
+ {
+ ClassifObject * pp = (addMode ? p->Add(*iV) : p->Find(*iV));
+ if (pp)
+ {
+ e.pObj = pp;
+
+ add_type_recursive(pp, i + 1, marks);
+ clear_states(i);
+ }
+ ++iV;
+ }
+ }
+ ++iK;
+ }
+ }
+
+ return; // processed to the end - exit
+ }
+ else if (is_draw_rule(e.name))
+ {
+ ASSERT ( i == m_elements.size()-1, ("drawing rules should be leavs") );
+
+ // process draw rule
+ if (pParent != get_root())
+ {
+ if (!marks.empty())
+ {
+ // make final mark string
+ string res;
+ for (size_t i = 0; i < marks.size(); ++i)
+ {
+ if (!res.empty()) res += '-';
+ res += marks[i];
+ }
+
+ pParent = pParent->Add(res);
+ }
+
+ vector<drule::Key> keys;
+ drule::rules().CreateRules(e.name, get_rule_type(), e.attr, keys);
+
+ // if no "layer" tag, then atoi returns 0 - it's ok for us
+ // 1000 - is a base count of rules for layer
+ int const layer = atoi(e.attr["layer"].c_str()) * drule::layer_base_priority;
+ for (size_t i = 0; i < keys.size(); ++i)
+ keys[i].SetPriority(layer + m_priority++);
+
+ for_each(keys.begin(), keys.end(), bind(&ClassifObject::AddDrawRule, pParent, _1));
+ }
+ }
+ }
+ }
+ };
+ }
+
+ void ParseOSMTypes(char const * fPath, int scale)
+ {
+ drule::rules().SetParseFile(fPath, scale);
+
+ FileReader reader(fPath);
+ ReaderSource<FileReader> source(reader);
+ OSMTypesStream stream;
+ ParseXML(source, stream);
+ }
+
+ namespace
+ {
+ bool is_skip_tag(string const & k, string const & /*v*/)
+ {
+ // skip "cycleway's" tags because they interfer to set a valid types like "highway's"
+ return (k == "created_by" || k == "description" || k == "cycleway" || k == "embankment");
+ }
+
+ template <class ToDo> typename ToDo::result_type for_each_tag(XMLElement * p, ToDo toDo)
+ {
+ typedef typename ToDo::result_type res_t;
+
+ for (size_t i = 0; i < p->childs.size(); ++i)
+ {
+ if (p->childs[i].name == "tag")
+ {
+ string const & k = p->childs[i].attrs["k"];
+ string const & v = p->childs[i].attrs["v"];
+
+ if (is_skip_tag(k, v)) continue;
+
+ // this means "no"
+ //if (get_mark_value(k, v) == -1)
+ // continue;
+
+ res_t res = toDo(k, v);
+ if (res) return res;
+ }
+ }
+ return res_t();
+ }
+
+ bool is_name_tag(string const & k)
+ {
+ return (string::npos != k.find("name"));
+ }
+
+ class do_print
+ {
+ ostream & m_s;
+ public:
+ typedef bool result_type;
+
+ do_print(ostream & s) : m_s(s) {}
+ bool operator() (string const & k, string const & v) const
+ {
+ m_s << k << " <---> " << v << endl;
+ return false;
+ }
+ };
+
+ class do_find_name
+ {
+ size_t & m_count;
+ string & m_name;
+ int32_t & m_layer;
+ public:
+ typedef bool result_type;
+
+ do_find_name(size_t & count, string & name, int32_t & layer)
+ : m_count(count), m_name(name), m_layer(layer)
+ {
+ m_count = 0;
+ m_layer = 0;
+ }
+ bool operator() (string const & k, string const & v)
+ {
+ ++m_count;
+
+ // do not call is_name_tag(k), but exactly "name" tag
+ if (k == "name" && m_name.empty())
+ m_name = v;
+
+ if (k == "layer" && m_layer == 0)
+ m_layer = atoi(v.c_str());
+
+ return false;
+ }
+ };
+
+ class do_find_obj
+ {
+ ClassifObject const * m_parent;
+ bool m_isKey;
+
+ public:
+ typedef ClassifObjectPtr result_type;
+
+ do_find_obj(ClassifObject const * p, bool isKey) : m_parent(p), m_isKey(isKey) {}
+ ClassifObjectPtr operator() (string const & k, string const & v) const
+ {
+ if (!is_name_tag(k))
+ {
+ ClassifObjectPtr p = m_parent->BinaryFind(m_isKey ? k : v);
+ if (p) return p;
+ }
+ return ClassifObjectPtr(0, 0);
+ }
+ };
+
+ class do_find_root_obj : public do_find_obj
+ {
+ typedef do_find_obj base_type;
+
+ set<string> const & m_skipTags;
+
+ public:
+ do_find_root_obj(set<string> const & skipTags)
+ : base_type(classif().GetRoot(), true), m_skipTags(skipTags)
+ {
+ }
+ ClassifObjectPtr operator() (string const & k, string const & v) const
+ {
+ if (m_skipTags.find(k) == m_skipTags.end())
+ return base_type::operator() (k, v);
+
+ return ClassifObjectPtr(0, 0);
+ }
+ };
+
+ typedef vector<ClassifObjectPtr> path_type;
+ }
+
+ ClassifObjectPtr find_object(ClassifObject const * parent, XMLElement * p, bool isKey)
+ {
+ return for_each_tag(p, do_find_obj(parent, isKey));
+ }
+
+ size_t find_name_and_count(XMLElement * p, string & name, int32_t & layer)
+ {
+ size_t count;
+ for_each_tag(p, do_find_name(count, name, layer));
+ return count;
+ }
+
+//#ifdef DEBUG
+// class debug_find_string
+// {
+// string m_comp;
+// public:
+// debug_find_string(string const & comp) : m_comp(comp) {}
+// typedef bool result_type;
+// bool operator() (string const & k, string const & v) const
+// {
+// return (k == m_comp || v == m_comp);
+// }
+// };
+//#endif
+
+ bool GetNameAndType(XMLElement * p, vector<uint32_t> & types, string & name, int32_t & layer)
+ {
+//#ifdef DEBUG
+// // code to set a breakpoint
+// if (for_each_tag(p, debug_find_string("bridge")))
+// {
+// int break_here = 0;
+// }
+//#endif
+
+ // maybe an empty feature
+ if (find_name_and_count(p, name, layer) == 0)
+ return false;
+
+ set<string> skipRootKeys;
+
+ do
+ {
+ path_type path;
+
+ // find first root object by key
+ do_find_root_obj doFindRoot(skipRootKeys);
+ ClassifObjectPtr pRoot = for_each_tag(p, doFindRoot);
+
+ // find path from root
+ ClassifObjectPtr pObj = pRoot;
+ while (pObj)
+ {
+ path.push_back(pObj);
+
+        // try to find the next object by value first
+ pObj = find_object(path.back().get(), p, false);
+ if (!pObj)
+ {
+          // if not, try to find the object by key (e.g. k = "area", v = "yes")
+ pObj = find_object(path.back().get(), p, true);
+ }
+ }
+
+ size_t const count = path.size();
+ if (count >= 1)
+ {
+ // assign type
+ uint32_t t = ftype::GetEmptyValue();
+
+ for (size_t i = 0; i < count; ++i)
+ ftype::PushValue(t, path[i].GetIndex());
+
+ // use features only with drawing rules
+ if (feature::IsDrawableAny(t))
+ types.push_back(t);
+ }
+
+ if (pRoot)
+ {
+ // save this root to skip, and try again
+ skipRootKeys.insert(pRoot->GetName());
+ }
+ else
+ break;
+
+ } while (true);
+
+ return !types.empty();
+ }
+}
diff --git a/indexer/osm2type.hpp b/indexer/osm2type.hpp
new file mode 100644
index 0000000000..6a2449c29d
--- /dev/null
+++ b/indexer/osm2type.hpp
@@ -0,0 +1,20 @@
+#pragma once
+
+#include "../base/base.hpp"
+
+#include "../std/string.hpp"
+#include "../std/vector.hpp"
+
+#include "../base/start_mem_debug.hpp"
+
+struct XMLElement;
+
+namespace ftype
+{
+ void ParseOSMTypes(char const * fPath, int scale);
+
+  /// Get the types, name and layer for a feature from its tree of tags.
+ bool GetNameAndType(XMLElement * p, vector<uint32_t> & types, string & name, int32_t & layer);
+}
+
+#include "../base/stop_mem_debug.hpp"
diff --git a/indexer/osm_decl.cpp b/indexer/osm_decl.cpp
new file mode 100644
index 0000000000..c53f1ab86a
--- /dev/null
+++ b/indexer/osm_decl.cpp
@@ -0,0 +1,88 @@
+#include "osm_decl.hpp"
+#include "classificator.hpp"
+
+#include "../base/macros.hpp"
+
+#include "../std/target_os.hpp"
+#include "../std/iostream.hpp"
+
+#include "../base/start_mem_debug.hpp"
+
+
+namespace feature
+{
+ char const * arrUnite[1][2] = { { "natural", "coastline" } };
+
+ bool NeedUnite(string const & k, string const & v)
+ {
+ for (size_t i = 0; i < ARRAY_SIZE(arrUnite); ++i)
+ if (k == arrUnite[i][0] && v == arrUnite[i][1])
+ return true;
+
+ return false;
+ }
+
+ bool NeedUnite(uint32_t type)
+ {
+ static uint32_t arrTypes[1] = { 0 };
+
+ if (arrTypes[0] == 0)
+ {
+ // initialize static array
+ for (size_t i = 0; i < ARRAY_SIZE(arrUnite); ++i)
+ {
+ size_t const count = ARRAY_SIZE(arrUnite[i]);
+ vector<string> path(count);
+ for (size_t j = 0; j < count; ++j)
+ path[j] = arrUnite[i][j];
+
+ arrTypes[i] = classif().GetTypeByPath(path);
+ }
+ }
+
+ for (size_t i = 0; i < ARRAY_SIZE(arrTypes); ++i)
+ if (arrTypes[i] == type)
+ return true;
+
+ return false;
+ }
+}
+
+void progress_policy::Begin(string const & /*name*/, size_t factor)
+{
+ m_count = 0;
+ m_factor = factor;
+//#ifndef OMIM_OS_BADA
+// cout << "Progress of " << name << " started" << endl;
+//#endif
+}
+
+void progress_policy::Inc(size_t i /* = 1 */)
+{
+ m_count += i;
+//#ifndef OMIM_OS_BADA
+// if (m_count % m_factor == 0)
+// cout << '.';
+//#endif
+}
+
+void progress_policy::End()
+{
+}
+
+string RelationElement::GetType() const
+{
+ map<string, string>::const_iterator i = tags.find("type");
+ return ((i != tags.end()) ? i->second : string());
+}
+
+bool RelationElement::FindWay(uint64_t id, string & role) const
+{
+ for (size_t i = 0; i < ways.size(); ++i)
+ if (ways[i].first == id)
+ {
+ role = ways[i].second;
+ return true;
+ }
+ return false;
+}
diff --git a/indexer/osm_decl.hpp b/indexer/osm_decl.hpp
new file mode 100644
index 0000000000..62fb770aef
--- /dev/null
+++ b/indexer/osm_decl.hpp
@@ -0,0 +1,114 @@
+#pragma once
+
+#include "std_serialization.hpp"
+
+#include "../std/utility.hpp"
+#include "../std/vector.hpp"
+#include "../std/string.hpp"
+
+
+/// Used to store all world nodes inside a temporary index file.
+/// To find a node by id, just calculate its offset inside the index file:
+/// offset_in_file = sizeof(LatLon) * node_ID
+#pragma pack (push, 1)
+struct LatLon
+{
+ double lat;
+ double lon;
+};
+
+struct LatLonPos
+{
+ uint64_t pos;
+ double lat;
+ double lon;
+};
+#pragma pack (pop)
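+
+// Reading sketch (reader is hypothetical, any reader with a positioned Read will do):
+//   LatLon ll;
+//   reader.Read(sizeof(LatLon) * nodeId, &ll, sizeof(ll));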
+
+
+#define NODES_FILE "nodes.dat"
+#define WAYS_FILE "ways.dat"
+#define RELATIONS_FILE "relations.dat"
+#define OFFSET_EXT ".offs"
+#define ID2REL_EXT ".id2rel"
+#define MAPPED_WAYS "mapped_ways.n2w"
+
+
+namespace feature
+{
+ /// @name Need to unite features.
+ //@{
+ /// @param[in] k, v Key and Value from relation tags.
+ bool NeedUnite(string const & k, string const & v);
+ /// @param[in] type Type from feature.
+ bool NeedUnite(uint32_t type);
+ //@}
+};
+
+class progress_policy
+{
+ size_t m_count;
+ size_t m_factor;
+
+public:
+ size_t GetCount() const { return m_count; }
+
+ void Begin(string const &, size_t factor);
+ void Inc(size_t i = 1);
+ void End();
+};
+
+struct WayElement
+{
+ vector<uint64_t> nodes;
+
+ bool IsValid() const { return !nodes.empty(); }
+
+ template <class ToDo> void ForEachPoint(ToDo & toDo) const
+ {
+ for (size_t i = 0; i < nodes.size(); ++i)
+ toDo(nodes[i]);
+ }
+};
+
+struct RelationElement
+{
+ typedef vector<pair<uint64_t, string> > ref_vec_t;
+ ref_vec_t nodes, ways;
+ map<string, string> tags;
+
+ bool IsValid() const { return !(nodes.empty() && ways.empty()); }
+
+ string GetType() const;
+ bool FindWay(uint64_t id, string & role) const;
+
+ template <class ToDo> void ForEachWay(ToDo & toDo) const
+ {
+ for (size_t i = 0; i < ways.size(); ++i)
+ toDo(ways[i].first, ways[i].second);
+ }
+};
+
+template <class TArchive> TArchive & operator << (TArchive & ar, WayElement const & e)
+{
+ ar << e.nodes;
+ return ar;
+}
+
+template <class TArchive> TArchive & operator >> (TArchive & ar, WayElement & e)
+{
+ ar >> e.nodes;
+ return ar;
+}
+
+template <class TArchive> TArchive & operator << (TArchive & ar, RelationElement const & e)
+{
+ ar << e.nodes << e.ways << e.tags;
+ return ar;
+}
+
+template <class TArchive> TArchive & operator >> (TArchive & ar, RelationElement & e)
+{
+ ar >> e.nodes >> e.ways >> e.tags;
+ return ar;
+}
diff --git a/indexer/point_to_int64.cpp b/indexer/point_to_int64.cpp
new file mode 100644
index 0000000000..152ba44e93
--- /dev/null
+++ b/indexer/point_to_int64.cpp
@@ -0,0 +1,58 @@
+#include "cell_id.hpp"
+
+#include "../geometry/pointu_to_uint64.hpp"
+
+#include "../base/bits.hpp"
+
+#include "../std/algorithm.hpp"
+
+#include "../base/start_mem_debug.hpp"
+
+
+#define POINT_COORD_BITS 30
+
+int64_t PointToInt64(CoordT x, CoordT y)
+{
+ if (x < MercatorBounds::minX) x = MercatorBounds::minX;
+ if (y < MercatorBounds::minY) y = MercatorBounds::minY;
+ if (x > MercatorBounds::maxX) x = MercatorBounds::maxX;
+ if (y > MercatorBounds::maxY) y = MercatorBounds::maxY;
+ uint32_t const ix = static_cast<uint32_t>(0.5 + (x - MercatorBounds::minX)
+ / (MercatorBounds::maxX - MercatorBounds::minX) * (1 << POINT_COORD_BITS));
+ uint32_t const iy = static_cast<uint32_t>(0.5 + (y - MercatorBounds::minY)
+ / (MercatorBounds::maxY - MercatorBounds::minY) * (1 << POINT_COORD_BITS));
+ int64_t res = static_cast<int64_t>(m2::PointUToUint64(m2::PointU(ix, iy)));
+ ASSERT_LESS_OR_EQUAL(ix, 1 << POINT_COORD_BITS, ());
+ ASSERT_LESS_OR_EQUAL(iy, 1 << POINT_COORD_BITS, ());
+ ASSERT_LESS_OR_EQUAL(res, 3ULL << 2 * POINT_COORD_BITS, ());
+ ASSERT_GREATER_OR_EQUAL(res, 0,
+ ("Highest bits of (ix, iy) are not used, so res should be > 0."));
+ return res;
+}
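+
+// Both coordinates are quantized to 2^POINT_COORD_BITS steps and packed into a single
+// int64 by m2::PointUToUint64 (see geometry/pointu_to_uint64.hpp); the asserts above
+// check that ix, iy and the packed result stay within the expected ranges.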
+
+CoordPointT Int64ToPoint(int64_t v)
+{
+ ASSERT_LESS_OR_EQUAL(v, 3ULL << 2 * POINT_COORD_BITS, ());
+ m2::PointU const pt = m2::Uint64ToPointU(static_cast<uint64_t>(v));
+ CoordT const fx = static_cast<CoordT>(pt.x);
+ CoordT const fy = static_cast<CoordT>(pt.y);
+ return CoordPointT(
+ fx * (MercatorBounds::maxX - MercatorBounds::minX)
+ / (1 << POINT_COORD_BITS) + MercatorBounds::minX,
+ fy * (MercatorBounds::maxY - MercatorBounds::minY)
+ / (1 << POINT_COORD_BITS) + MercatorBounds::minY);
+}
+
+pair<int64_t, int64_t> RectToInt64(m2::RectD const & r)
+{
+ int64_t const p1 = PointToInt64(r.minX(), r.minY());
+ int64_t const p2 = PointToInt64(r.maxX(), r.maxY());
+ return make_pair(p1, p2);
+}
+
+m2::RectD Int64ToRect(pair<int64_t, int64_t> const & p)
+{
+ CoordPointT const pt1 = Int64ToPoint(p.first);
+ CoordPointT const pt2 = Int64ToPoint(p.second);
+ return m2::RectD(m2::PointD(pt1.first, pt1.second), m2::PointD(pt2.first, pt2.second));
+}
diff --git a/indexer/scale_index.cpp b/indexer/scale_index.cpp
new file mode 100644
index 0000000000..e757740a08
--- /dev/null
+++ b/indexer/scale_index.cpp
@@ -0,0 +1,4 @@
+#include "scale_index.hpp"
+
+uint32_t const ScaleIndexBase::kScaleBuckets[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 };
diff --git a/indexer/scale_index.hpp b/indexer/scale_index.hpp
new file mode 100644
index 0000000000..7042f6cea8
--- /dev/null
+++ b/indexer/scale_index.hpp
@@ -0,0 +1,84 @@
+#pragma once
+
+#include "interval_index.hpp"
+#include "../coding/var_serial_vector.hpp"
+#include "../base/assert.hpp"
+#include "../base/base.hpp"
+#include "../base/macros.hpp"
+#include "../std/algorithm.hpp"
+
+class ScaleIndexBase
+{
+public:
+ enum { NUM_BUCKETS = 18 };
+
+ ScaleIndexBase()
+ {
+#ifdef DEBUG
+ for (size_t i = 0; i < ARRAY_SIZE(kScaleBuckets); ++i)
+ {
+ ASSERT_LESS(kScaleBuckets[i], static_cast<uint32_t>(NUM_BUCKETS), (i));
+ ASSERT(i == 0 || kScaleBuckets[i] >= kScaleBuckets[i-1],
+ (i, kScaleBuckets[i-1], kScaleBuckets[i]));
+ }
+#endif
+ }
+
+ static uint32_t BucketByScale(uint32_t scale)
+ {
+ ASSERT_LESS(scale, ARRAY_SIZE(kScaleBuckets), ());
+ return scale >= ARRAY_SIZE(kScaleBuckets) ? NUM_BUCKETS - 1 : kScaleBuckets[scale];
+ }
+
+ static pair<uint32_t, uint32_t> ScaleRangeForBucket(uint32_t bucket)
+ {
+ // TODO: Cache ScaleRangeForBucket in class member?
+ ASSERT_LESS(bucket, static_cast<uint32_t>(NUM_BUCKETS), ());
+ pair<uint32_t, uint32_t> res(ARRAY_SIZE(kScaleBuckets), 0);
+ for (uint32_t i = 0; i < ARRAY_SIZE(kScaleBuckets); ++i)
+ {
+ if (kScaleBuckets[i] == bucket)
+ {
+ res.first = min(res.first, i);
+ res.second = max(res.second, i + 1);
+ }
+ }
+ return res;
+ }
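+  // With the identity kScaleBuckets table from scale_index.cpp, BucketByScale(10) == 10
+  // and ScaleRangeForBucket(10) == [10, 11); a coarser table may map several scales
+  // to one bucket.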
+
+private:
+ static uint32_t const kScaleBuckets[18];
+};
+
+template <class ReaderT>
+class ScaleIndex : public ScaleIndexBase
+{
+public:
+ typedef ReaderT ReaderType;
+
+ ScaleIndex() {}
+ explicit ScaleIndex(ReaderT const & reader) { Attach(reader); }
+
+ void Attach(ReaderT const & reader)
+ {
+ m_IndexForScale.clear();
+
+ ReaderSource<ReaderT> source(reader);
+ VarSerialVectorReader<ReaderT> treesReader(source);
+ for (size_t i = 0; i < treesReader.Size(); ++i)
+ m_IndexForScale.push_back(IntervalIndexType(treesReader.SubReader(i)));
+ }
+
+ template <typename F>
+ void ForEachInIntervalAndScale(F const & f, uint64_t beg, uint64_t end, uint32_t scale) const
+ {
+ int scaleBucket = BucketByScale(scale);
+ ASSERT_LESS(scaleBucket, static_cast<int>(m_IndexForScale.size()), ());
+ for (int i = 0; i <= scaleBucket; ++i)
+ m_IndexForScale[i].ForEach(f, beg, end);
+ }
+
+private:
+ typedef IntervalIndex<uint32_t, ReaderT> IntervalIndexType;
+ vector<IntervalIndexType> m_IndexForScale;
+};
diff --git a/indexer/scale_index_builder.hpp b/indexer/scale_index_builder.hpp
new file mode 100644
index 0000000000..834b8acbc9
--- /dev/null
+++ b/indexer/scale_index_builder.hpp
@@ -0,0 +1,167 @@
+#pragma once
+#include "scale_index.hpp"
+#include "feature_visibility.hpp"
+#include "covering.hpp"
+#include "feature.hpp"
+#include "interval_index_builder.hpp"
+
+#include "../geometry/covering_stream_optimizer.hpp"
+
+#include "../coding/dd_vector.hpp"
+#include "../coding/file_sort.hpp"
+#include "../coding/var_serial_vector.hpp"
+#include "../coding/writer.hpp"
+
+#include "../base/base.hpp"
+#include "../base/logging.hpp"
+
+#include "../std/string.hpp"
+#include "../std/vector.hpp"
+#include "../std/utility.hpp"
+
+
+#pragma pack(push, 1)
+ struct CellFeaturePair
+ {
+ int64_t first;
+ uint32_t second;
+
+ CellFeaturePair() {}
+ CellFeaturePair(pair<int64_t, uint32_t> const & p) : first(p.first), second(p.second) {}
+ CellFeaturePair(int64_t f, uint32_t s) : first(f), second(s) {}
+
+ bool operator< (CellFeaturePair const & rhs) const
+ {
+ if (first == rhs.first)
+ return (second < rhs.second);
+ return (first < rhs.first);
+ }
+ };
+#pragma pack (pop)
+
+template <class SorterT>
+class FeatureCoverer
+{
+public:
+ FeatureCoverer(uint32_t bucket,
+ SorterT & sorter,
+ uint32_t & numFeatures) :
+ m_Sorter(sorter),
+ m_ScaleRange(ScaleIndexBase::ScaleRangeForBucket(bucket)),
+ m_NumFeatures(numFeatures)
+ {
+ }
+
+ void operator() (Feature const & f, uint32_t offset) const
+ {
+ if (FeatureShouldBeIndexed(f))
+ {
+ vector<int64_t> const cells = covering::CoverFeature(f);
+ for (vector<int64_t>::const_iterator it = cells.begin(); it != cells.end(); ++it)
+ m_Sorter.Add(make_pair(*it, offset));
+ ++m_NumFeatures;
+ return;
+ }
+ }
+
+ bool FeatureShouldBeIndexed(Feature const & f) const
+ {
+ uint32_t const minDrawableScale = feature::MinDrawableScaleForFeature(f);
+ return m_ScaleRange.first <= minDrawableScale && minDrawableScale < m_ScaleRange.second;
+ }
+
+private:
+ SorterT & m_Sorter;
+ pair<uint32_t, uint32_t> m_ScaleRange;
+ uint32_t & m_NumFeatures;
+};
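+
+// Note: every feature is indexed in exactly one bucket (the one whose scale range
+// contains its minimum drawable scale), so a query at scale S has to scan buckets
+// 0..BucketByScale(S); that is what ScaleIndex::ForEachInIntervalAndScale does.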
+
+template <class SinkT>
+class CellFeaturePairSinkAdapter
+{
+public:
+ explicit CellFeaturePairSinkAdapter(SinkT & sink) : m_Sink(sink) {}
+
+ void operator() (int64_t cellId, uint64_t value) const
+ {
+ CellFeaturePair cellFeaturePair(cellId, value);
+ m_Sink.Write(&cellFeaturePair, sizeof(cellFeaturePair));
+ }
+
+private:
+ SinkT & m_Sink;
+};
+
+template <class SinkT>
+class FeatureCoveringOptimizeProxySink
+{
+public:
+ FeatureCoveringOptimizeProxySink(SinkT & sink)
+ : m_Sink(sink), m_Optimizer(m_Sink, 100, 10)
+ {
+ }
+
+ ~FeatureCoveringOptimizeProxySink()
+ {
+ m_Optimizer.Flush();
+ }
+
+ void operator () (CellFeaturePair const & cellFeaturePair)
+ {
+ m_Optimizer.Add(cellFeaturePair.first, cellFeaturePair.second);
+ }
+
+private:
+ CellFeaturePairSinkAdapter<SinkT> m_Sink;
+ covering::CoveringStreamOptimizer<RectId, uint32_t, CellFeaturePairSinkAdapter<SinkT> >
+ m_Optimizer;
+};
+
+template <class FeaturesVectorT, class WriterT>
+inline void IndexScales(FeaturesVectorT const & featuresVector,
+ WriterT & writer,
+ string const & tmpFilePrefix)
+{
+ // TODO: Add global feature covering optimization.
+ // TODO: Make scale bucketing dynamic.
+ // TODO: Compute covering only once?
+
+ //typedef pair<int64_t, uint32_t> CellFeaturePair;
+ STATIC_ASSERT(sizeof(CellFeaturePair) == 12);
+
+ VarSerialVectorWriter<WriterT> recordWriter(writer, ScaleIndexBase::NUM_BUCKETS);
+ for (uint32_t bucket = 0; bucket < ScaleIndexBase::NUM_BUCKETS; ++bucket)
+ {
+ LOG(LINFO, ("Building scale index for bucket:", bucket))
+ uint32_t numFeatures = 0;
+ {
+ FileWriter cellsToFeaturesWriter(tmpFilePrefix + ".c2f.sorted");
+
+ typedef FeatureCoveringOptimizeProxySink<FileWriter> OptimizeSink;
+ OptimizeSink optimizeSink(cellsToFeaturesWriter);
+ typedef FileSorter<CellFeaturePair, OptimizeSink> SorterType;
+ SorterType sorter(1024*1024, tmpFilePrefix + ".c2f.tmp", optimizeSink);
+ /*
+ typedef FileSorter<CellFeaturePair, WriterFunctor<FileWriter> > SorterType;
+ WriterFunctor<FileWriter> out(cellsToFeaturesWriter);
+ SorterType sorter(1024*1024, tmpFilePrefix + ".c2f.tmp", out);
+ */
+ featuresVector.ForEachOffset(FeatureCoverer<SorterType>(bucket, sorter, numFeatures));
+ LOG(LINFO, ("Sorting..."));
+ sorter.SortAndFinish();
+ }
+ LOG(LINFO, ("Indexing..."));
+ {
+ FileReader reader(tmpFilePrefix + ".c2f.sorted");
+ uint64_t const numCells = reader.Size() / sizeof(CellFeaturePair);
+ DDVector<CellFeaturePair, FileReader, uint64_t> cellsToFeatures(reader, numCells);
+ LOG(LINFO, ("Being indexed", "features:", numFeatures, "cells:", numCells));
+ SubWriter<WriterT> subWriter(writer);
+ BuildIntervalIndex<5>(cellsToFeatures.begin(), cellsToFeatures.end(), subWriter);
+ }
+ FileWriter::DeleteFile(tmpFilePrefix + ".c2f.sorted");
+ LOG(LINFO, ("Indexing done."));
+ recordWriter.FinishRecord();
+ }
+ LOG(LINFO, ("All scale indexes done."));
+}
diff --git a/indexer/scales.cpp b/indexer/scales.cpp
new file mode 100644
index 0000000000..eb56ff1a99
--- /dev/null
+++ b/indexer/scales.cpp
@@ -0,0 +1,54 @@
+#include "scales.hpp"
+#include "mercator.hpp"
+
+#include "../base/math.hpp"
+
+#include "../std/algorithm.hpp"
+
+#include "../base/start_mem_debug.hpp"
+
+namespace scales
+{
+  /// @name These parameters should be tuned.
+ //@{
+ static const int initial_level = 1;
+
+ double GetM2PFactor(int level)
+ {
+ int const base_scale = 14;
+ int const factor = 1 << my::Abs(level - base_scale);
+
+ if (level < base_scale)
+ return 1 / double(factor);
+ else
+ return factor;
+ }
+ //@}
+
+ int GetScaleLevel(double ratio)
+ {
+ double const level = min(static_cast<double>(GetUpperScale()), log(ratio) / log(2.0) + initial_level);
+ return (level < 0 ? 0 : static_cast<int>(level + 0.5));
+ }
+
+ int GetScaleLevel(m2::RectD const & r)
+ {
+ // TODO: fix scale coefficients for mercator
+ double const dx = (MercatorBounds::maxX - MercatorBounds::minX) / r.SizeX();
+ double const dy = (MercatorBounds::maxY - MercatorBounds::minY) / r.SizeY();
+
+ // get the average ratio
+ return GetScaleLevel((dx + dy) / 2.0);
+ }
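+  // For example, a rect covering the whole Mercator range gives ratio 1 and thus level
+  // initial_level; each halving of the rect doubles the ratio and adds one level,
+  // up to GetUpperScale().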
+
+ double GetEpsilonForLevel(int level)
+ {
+ return (MercatorBounds::maxX - MercatorBounds::minX) / pow(2.0, double(level + 6 - initial_level));
+ }
+
+ bool IsGoodForLevel(int level, m2::RectD const & r)
+ {
+    // assume that a feature is always visible at the upper scale
+ return (level == GetUpperScale() || max(r.SizeX(), r.SizeY()) > GetEpsilonForLevel(level));
+ }
+}
diff --git a/indexer/scales.hpp b/indexer/scales.hpp
new file mode 100644
index 0000000000..acf475e02c
--- /dev/null
+++ b/indexer/scales.hpp
@@ -0,0 +1,14 @@
+#pragma once
+
+#include "../geometry/rect2d.hpp"
+
+namespace scales
+{
+ inline int GetUpperScale() { return 17; }
+
+ double GetM2PFactor(int level);
+ int GetScaleLevel(double ratio);
+ int GetScaleLevel(m2::RectD const & r);
+ double GetEpsilonForLevel(int level);
+ bool IsGoodForLevel(int level, m2::RectD const & r);
+}
diff --git a/indexer/std_serialization.hpp b/indexer/std_serialization.hpp
new file mode 100644
index 0000000000..2cb0a13d3b
--- /dev/null
+++ b/indexer/std_serialization.hpp
@@ -0,0 +1,199 @@
+#pragma once
+
+#include "../base/base.hpp"
+
+#include "../std/map.hpp"
+#include "../std/unordered_map.hpp"
+#include "../std/vector.hpp"
+#include "../std/array.hpp"
+#include "../std/utility.hpp"
+
+
+/// @name std containers serialization
+/// TArchive should be an archive class in global namespace.
+//@{
+template <class TArchive, class T1, class T2>
+TArchive & operator << (TArchive & ar, pair<T1, T2> const & t)
+{
+ ar << t.first << t.second;
+ return ar;
+}
+
+template <class TArchive, class T1, class T2>
+TArchive & operator >> (TArchive & ar, pair<T1, T2> & t)
+{
+ ar >> t.first >> t.second;
+ return ar;
+}
+
+template <class TArchive, class TCont> void save_like_map(TArchive & ar, TCont const & rMap)
+{
+ uint32_t const count = rMap.size();
+ ar << count;
+
+ for (typename TCont::const_iterator i = rMap.begin(); i != rMap.end(); ++i)
+ ar << i->first << i->second;
+}
+
+template <class TArchive, class TCont> void load_like_map(TArchive & ar, TCont & rMap)
+{
+ rMap.clear();
+
+ uint32_t count;
+ ar >> count;
+
+ while (count > 0)
+ {
+ typename TCont::key_type t1;
+ typename TCont::mapped_type t2;
+
+ ar >> t1 >> t2;
+ rMap.insert(make_pair(t1, t2));
+
+ --count;
+ }
+}
+
+template <class TArchive, class TCont> void save_like_vector(TArchive & ar, TCont const & rCont)
+{
+ uint32_t const count = rCont.size();
+ ar << count;
+
+ for (uint32_t i = 0; i < count; ++i)
+ ar << rCont[i];
+}
+
+template <class TArchive, class TCont> void load_like_vector(TArchive & ar, TCont & rCont)
+{
+ rCont.clear();
+
+ uint32_t count;
+ ar >> count;
+
+ rCont.resize(count);
+ for (uint32_t i = 0; i < count; ++i)
+ ar >> rCont[i];
+}
+
+template <class TArchive, class T1, class T2> TArchive & operator << (TArchive & ar, map<T1, T2> const & rMap)
+{
+ save_like_map(ar, rMap);
+ return ar;
+}
+
+template <class TArchive, class T1, class T2> TArchive & operator >> (TArchive & ar, map<T1, T2> & rMap)
+{
+ load_like_map(ar, rMap);
+ return ar;
+}
+
+template <class TArchive, class T1, class T2> TArchive & operator << (TArchive & ar, multimap<T1, T2> const & rMap)
+{
+ save_like_map(ar, rMap);
+ return ar;
+}
+
+template <class TArchive, class T1, class T2> TArchive & operator >> (TArchive & ar, multimap<T1, T2> & rMap)
+{
+ load_like_map(ar, rMap);
+ return ar;
+}
+
+template <class TArchive, class T1, class T2> TArchive & operator << (TArchive & ar, unordered_map<T1, T2> const & rMap)
+{
+ save_like_map(ar, rMap);
+ return ar;
+}
+
+template <class TArchive, class T1, class T2> TArchive & operator >> (TArchive & ar, unordered_map<T1, T2> & rMap)
+{
+ load_like_map(ar, rMap);
+ return ar;
+}
+
+template <class TArchive, class T> TArchive & operator << (TArchive & ar, vector<T> const & rVector)
+{
+ save_like_vector(ar, rVector);
+ return ar;
+}
+
+template <class TArchive, class T> TArchive & operator >> (TArchive & ar, vector<T> & rVector)
+{
+ load_like_vector(ar, rVector);
+ return ar;
+}
+
+template <class TArchive, class T, size_t N> TArchive & operator << (TArchive & ar, array<T, N> const & rArray)
+{
+ for (size_t i = 0; i < N; ++i)
+ ar << rArray[i];
+ return ar;
+}
+
+template <class TArchive, class T, size_t N> TArchive & operator >> (TArchive & ar, array<T, N> & rArray)
+{
+ for (size_t i = 0; i < N; ++i)
+ ar >> rArray[i];
+ return ar;
+}
+//@}
+
+namespace serial
+{
+ /// @name These functions invoke the overridden do_load for type T with its index in the array.
+ //@{
+ template <class TArchive, class T> void do_load(TArchive & ar, size_t ind, vector<T> & rVector)
+ {
+ uint32_t count;
+ ar >> count;
+
+ rVector.resize(count);
+ for (uint32_t i = 0; i < count; ++i)
+ do_load(ar, ind, rVector[i]);
+ }
+
+ template <class TArchive, class T, size_t N> void do_load(TArchive & ar, array<T, N> & rArray)
+ {
+ for (size_t i = 0; i < N; ++i)
+ do_load(ar, i, rArray[i]);
+ }
+ //@}
+
+ namespace detail
+ {
+ template <class TArchive> class save_element
+ {
+ TArchive & m_ar;
+ public:
+ save_element(TArchive & ar) : m_ar(ar) {}
+ template <class T> void operator() (T const & t, int)
+ {
+ m_ar << t;
+ }
+ };
+ template <class TArchive> class load_element
+ {
+ TArchive & m_ar;
+ public:
+ load_element(TArchive & ar) : m_ar(ar) {}
+ template <class T> void operator() (T & t, int)
+ {
+ m_ar >> t;
+ }
+ };
+ }
+
+ template <class TArchive, class TTuple>
+ void save_tuple(TArchive & ar, TTuple const & t)
+ {
+ detail::save_element<TArchive> doSave(ar);
+ for_each_tuple(t, doSave);
+ }
+
+ template <class TArchive, class TTuple>
+ void load_tuple(TArchive & ar, TTuple & t)
+ {
+ detail::load_element<TArchive> doLoad(ar);
+ for_each_tuple(t, doLoad);
+ }
+}
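
The operators above turn any archive class that streams the element types with operator&lt;&lt; / operator&gt;&gt; into a serializer for the standard containers. A usage sketch, not part of this commit; ToyArchive below is an invented in-memory stand-in for the project's real reader/writer streams, and the include path again assumes a sibling directory:

  #include "../indexer/std_serialization.hpp"

  class ToyArchive
  {
    vector<char> m_buf;
    size_t m_pos;

    void Write(void const * p, size_t n)
    {
      char const * c = static_cast<char const *>(p);
      m_buf.insert(m_buf.end(), c, c + n);
    }
    void Read(void * p, size_t n)
    {
      char * c = static_cast<char *>(p);
      for (size_t i = 0; i < n; ++i)
        c[i] = m_buf[m_pos + i];
      m_pos += n;
    }

  public:
    ToyArchive() : m_pos(0) {}

    ToyArchive & operator << (uint32_t v) { Write(&v, sizeof(v)); return *this; }
    ToyArchive & operator << (int v) { Write(&v, sizeof(v)); return *this; }
    ToyArchive & operator >> (uint32_t & v) { Read(&v, sizeof(v)); return *this; }
    ToyArchive & operator >> (int & v) { Read(&v, sizeof(v)); return *this; }
  };

  void Example()
  {
    map<uint32_t, int> src, dst;
    src[1] = 10;
    src[2] = 20;

    ToyArchive ar;
    ar << src;  // goes through save_like_map()
    ar >> dst;  // goes through load_like_map()
  }
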
diff --git a/indexer/tree_structure.hpp b/indexer/tree_structure.hpp
new file mode 100644
index 0000000000..540d6bd428
--- /dev/null
+++ b/indexer/tree_structure.hpp
@@ -0,0 +1,103 @@
+#pragma once
+
+#include "../base/assert.hpp"
+
+#include "../std/fstream.hpp"
+#include "../std/sstream.hpp"
+
+namespace tree
+{
+ namespace detail
+ {
+ inline void PrintOffset(size_t offset, ostream & s)
+ {
+ for (size_t i = 0; i < offset; ++i)
+ s << " ";
+ }
+
+ template <class ToDo>
+ void PrintTextTree(size_t offset, ostream & s, ToDo & toDo)
+ {
+ PrintOffset(offset, s);
+
+ // print name as key
+ s << toDo.Name() << " ";
+
+ // serialize object
+ toDo.Serialize(s);
+
+ size_t const count = toDo.BeginChilds();
+ bool const isEmpty = (count == 0);
+
+ // put end marker
+ s << (isEmpty ? "-" : "+") << endl;
+
+ // print children
+ if (!isEmpty)
+ {
+ offset += 4;
+
+ size_t i = 0;
+ while (i < count)
+ {
+ toDo.Start(i++);
+ PrintTextTree(offset, s, toDo);
+ toDo.End();
+ }
+
+ // end of structure
+ PrintOffset(offset, s);
+ s << "{}" << endl;
+ }
+ }
+ }
+
+ template <class ToDo>
+ void SaveTreeAsText(ostream & s, ToDo & toDo)
+ {
+ detail::PrintTextTree(0, s, toDo);
+ }
+
+ template <class ToDo>
+ bool LoadTreeAsText(istringstream & s, ToDo & toDo)
+ {
+ string name;
+ s >> name;
+ ASSERT ( !name.empty(), ("Error in classificator file") );
+ if (name == "{}") return false;
+
+ // set key name
+ toDo.Name(name);
+
+ // load object itself
+ string strkey;
+ s >> strkey;
+ while (strkey != "+" && strkey != "-")
+ {
+ toDo.Serialize(strkey);
+ s >> strkey;
+ }
+
+ // load children
+ if (strkey == "+")
+ {
+ size_t i = 0;
+ while (true)
+ {
+ toDo.Start(i++);
+ bool const isContinue = LoadTreeAsText(s, toDo);
+ toDo.End();
+
+ if (!isContinue)
+ {
+ toDo.EndChilds();
+ break;
+ }
+ }
+
+ ASSERT ( i <= 64, ("too many features at level = ", name) );
+ }
+
+ return true;
+ }
+}
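
The text format produced above is one node per line: the node name, the node's serialized tokens, then "+" when the node has children or "-" when it does not; children follow indented by four extra spaces and are terminated by a "{}" line at the children's indentation, which is exactly what LoadTreeAsText() uses as its stop marker. A sketch of the smallest possible ToDo policy for SaveTreeAsText(), not part of this commit; SimpleNode and SimpleSaver are invented for illustration, the real policies live in the classificator code:

  #include "../indexer/tree_structure.hpp"
  #include "../std/string.hpp"
  #include "../std/vector.hpp"

  struct SimpleNode
  {
    string m_name;
    vector<SimpleNode> m_childs;
  };

  class SimpleSaver
  {
    // Path from the root down to the node currently being printed.
    vector<SimpleNode const *> m_path;

  public:
    SimpleSaver(SimpleNode const & root) { m_path.push_back(&root); }

    string const & Name() const { return m_path.back()->m_name; }
    void Serialize(ostream &) const {}  // this toy tree has no per-node payload

    size_t BeginChilds() const { return m_path.back()->m_childs.size(); }
    void Start(size_t i) { m_path.push_back(&m_path.back()->m_childs[i]); }
    void End() { m_path.pop_back(); }
  };

  // Usage: SimpleSaver saver(root); tree::SaveTreeAsText(someOstream, saver);
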
diff --git a/indexer/xml_element.cpp b/indexer/xml_element.cpp
new file mode 100644
index 0000000000..0a92750f6d
--- /dev/null
+++ b/indexer/xml_element.cpp
@@ -0,0 +1,82 @@
+#include "xml_element.hpp"
+
+#include "../coding/parse_xml.hpp"
+#include "../coding/reader.hpp"
+#include "../std/stdio.hpp"
+
+#include "../std/algorithm.hpp"
+
+bool BaseOSMParser::is_our_tag(string const & name)
+{
+ return (find(m_tags.begin(), m_tags.end(), name) != m_tags.end());
+}
+
+bool BaseOSMParser::Push(string const & name)
+{
+ if (!is_our_tag(name) && (m_depth != 2))
+ return false;
+
+ ++m_depth;
+
+ if (m_depth == 1)
+ {
+ m_current = 0;
+ }
+ else if (m_depth == 2)
+ {
+ m_current = &m_element;
+ m_current->parent = 0;
+ }
+ else
+ {
+ m_current->childs.push_back(XMLElement());
+ m_current->childs.back().parent = m_current;
+ m_current = &m_current->childs.back();
+ }
+
+ if (m_depth >= 2)
+ m_current->name = name;
+ return true;
+}
+
+void BaseOSMParser::AddAttr(string const & name, string const & value)
+{
+ if (m_current)
+ m_current->attrs[name] = value;
+}
+
+void BaseOSMParser::Pop(string const &)
+{
+ --m_depth;
+
+ if (m_depth >= 2)
+ m_current = m_current->parent;
+
+ else if (m_depth == 1)
+ {
+ EmitElement(m_current);
+ m_current->Clear();
+ }
+}
+
+
+struct StdinReader
+{
+ size_t Read(char * buffer, size_t bufferSize)
+ {
+ return fread(buffer, sizeof(char), bufferSize, stdin);
+ }
+};
+
+
+void ParseXMLFromStdIn(BaseOSMParser & parser)
+{
+ StdinReader reader;
+ ParseXML(reader, parser);
+}
+
+void ParseXMLFromFile(FileReader const & reader, BaseOSMParser & parser)
+{
+ ReaderSource<FileReader> src(reader);
+ ParseXML(src, parser);
+}
diff --git a/indexer/xml_element.hpp b/indexer/xml_element.hpp
new file mode 100644
index 0000000000..314a34df36
--- /dev/null
+++ b/indexer/xml_element.hpp
@@ -0,0 +1,48 @@
+#pragma once
+#include "../coding/file_reader.hpp"
+#include "../std/string.hpp"
+#include "../std/vector.hpp"
+#include "../std/map.hpp"
+
+struct XMLElement
+{
+ string name;
+ map<string, string> attrs;
+ vector<XMLElement> childs;
+ XMLElement * parent;
+
+ void Clear()
+ {
+ name.clear();
+ attrs.clear();
+ childs.clear();
+ parent = 0;
+ }
+};
+
+class BaseOSMParser
+{
+ XMLElement m_element;
+ XMLElement * m_current;
+
+ size_t m_depth;
+
+ vector<string> m_tags;
+ bool is_our_tag(string const & name);
+
+public:
+ BaseOSMParser() : m_current(0), m_depth(0) {}
+
+ template <size_t N> void SetTags(char const * (&arr)[N]) { m_tags.assign(&arr[0], &arr[N]); }
+
+ bool Push(string const & name);
+ void AddAttr(string const & name, string const & value);
+ void Pop(string const &);
+
+protected:
+ virtual void EmitElement(XMLElement * p) = 0;
+};
+
+void ParseXMLFromStdIn(BaseOSMParser & parser);
+
+void ParseXMLFromFile(FileReader const & reader, BaseOSMParser & parser);
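
A sketch of the smallest possible BaseOSMParser subclass, not part of this commit; NodeCounter and its tag list are invented for illustration, the real parsers live in indexer_tool:

  #include "../indexer/xml_element.hpp"

  class NodeCounter : public BaseOSMParser
  {
    size_t m_count;

  public:
    NodeCounter() : m_count(0)
    {
      // Accept the root element and "node" elements; everything nested inside
      // an accepted depth-2 element is collected into XMLElement::childs.
      char const * tags[] = { "osm", "node" };
      SetTags(tags);
    }

    size_t Count() const { return m_count; }

  protected:
    // Called once for every complete depth-2 element.
    virtual void EmitElement(XMLElement * p)
    {
      if (p->name == "node")
        ++m_count;
    }
  };

  // Usage: NodeCounter counter; ParseXMLFromStdIn(counter);
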