Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/prusa3d/PrusaSlicer.git - Unnamed repository; edit the 'description' file to name the repository.
summaryrefslogtreecommitdiff
path: root/src/agg
diff options
context:
space:
mode:
Diffstat (limited to 'src/agg')
-rw-r--r--src/agg/AUTHORS2
-rw-r--r--src/agg/VERSION2
-rw-r--r--src/agg/agg_array.h1119
-rw-r--r--src/agg/agg_basics.h574
-rw-r--r--src/agg/agg_bezier_arc.h159
-rw-r--r--src/agg/agg_clip_liang_barsky.h333
-rw-r--r--src/agg/agg_color_gray.h1047
-rw-r--r--src/agg/agg_color_rgba.h1353
-rw-r--r--src/agg/agg_config.h44
-rw-r--r--src/agg/agg_conv_transform.h68
-rw-r--r--src/agg/agg_gamma_functions.h132
-rw-r--r--src/agg/agg_gamma_lut.h300
-rw-r--r--src/agg/agg_math.h437
-rw-r--r--src/agg/agg_path_storage.h1582
-rw-r--r--src/agg/agg_pixfmt_base.h97
-rw-r--r--src/agg/agg_pixfmt_gray.h738
-rw-r--r--src/agg/agg_pixfmt_rgb.h995
-rw-r--r--src/agg/agg_rasterizer_cells_aa.h741
-rw-r--r--src/agg/agg_rasterizer_scanline_aa.h481
-rw-r--r--src/agg/agg_rasterizer_scanline_aa_nogamma.h483
-rw-r--r--src/agg/agg_rasterizer_sl_clip.h351
-rw-r--r--src/agg/agg_renderer_base.h731
-rw-r--r--src/agg/agg_renderer_scanline.h854
-rw-r--r--src/agg/agg_rendering_buffer.h300
-rw-r--r--src/agg/agg_scanline_p.h329
-rw-r--r--src/agg/agg_trans_affine.h518
-rw-r--r--src/agg/copying65
27 files changed, 13835 insertions, 0 deletions
diff --git a/src/agg/AUTHORS b/src/agg/AUTHORS
new file mode 100644
index 000000000..2bb6518ec
--- /dev/null
+++ b/src/agg/AUTHORS
@@ -0,0 +1,2 @@
+Anti-Grain Geometry - Version 2.4
+Copyright (C) 2002-2005 Maxim Shemanarev (McSeem)
diff --git a/src/agg/VERSION b/src/agg/VERSION
new file mode 100644
index 000000000..c5de3e3b0
--- /dev/null
+++ b/src/agg/VERSION
@@ -0,0 +1,2 @@
+2.4
+svn revision 128 \ No newline at end of file
diff --git a/src/agg/agg_array.h b/src/agg/agg_array.h
new file mode 100644
index 000000000..8d5668384
--- /dev/null
+++ b/src/agg/agg_array.h
@@ -0,0 +1,1119 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+#ifndef AGG_ARRAY_INCLUDED
+#define AGG_ARRAY_INCLUDED
+
+#include <stddef.h>
+#include <string.h>
+#include "agg_basics.h"
+
+namespace agg
+{
+
+ //-------------------------------------------------------pod_array_adaptor
+ // Non-owning view over a caller-supplied C array, exposing the common
+ // AGG container interface (size()/operator[]/at()/value_at()).
+ // Note: at() performs no bounds checking; it is identical to operator[].
+ template<class T> class pod_array_adaptor
+ {
+ public:
+ typedef T value_type;
+ // Wraps the caller-owned buffer "array" of "size" elements; no copy is made
+ // and the adaptor never frees the buffer.
+ pod_array_adaptor(T* array, unsigned size) :
+ m_array(array), m_size(size) {}
+
+ unsigned size() const { return m_size; }
+ const T& operator [] (unsigned i) const { return m_array[i]; }
+ T& operator [] (unsigned i) { return m_array[i]; }
+ const T& at(unsigned i) const { return m_array[i]; }
+ T& at(unsigned i) { return m_array[i]; }
+ T value_at(unsigned i) const { return m_array[i]; }
+
+ private:
+ T* m_array; // borrowed, not owned
+ unsigned m_size;
+ };
+
+
+ //---------------------------------------------------------pod_auto_array
+ // Fixed-size POD array stored inline (no heap allocation); the element
+ // count "Size" is a compile-time constant.
+ template<class T, unsigned Size> class pod_auto_array
+ {
+ public:
+ typedef T value_type;
+ typedef pod_auto_array<T, Size> self_type;
+
+ pod_auto_array() {}
+ // Copies exactly Size elements from "c"; the caller must supply at
+ // least Size valid elements.
+ explicit pod_auto_array(const T* c)
+ {
+ memcpy(m_array, c, sizeof(T) * Size);
+ }
+
+ // Same contract as the pointer constructor: raw memcpy of Size elements.
+ const self_type& operator = (const T* c)
+ {
+ memcpy(m_array, c, sizeof(T) * Size);
+ return *this;
+ }
+
+ static unsigned size() { return Size; }
+ const T& operator [] (unsigned i) const { return m_array[i]; }
+ T& operator [] (unsigned i) { return m_array[i]; }
+ const T& at(unsigned i) const { return m_array[i]; }
+ T& at(unsigned i) { return m_array[i]; }
+ T value_at(unsigned i) const { return m_array[i]; }
+
+ private:
+ T m_array[Size];
+ };
+
+
+ //--------------------------------------------------------pod_auto_vector
+ // Fixed-capacity vector with inline storage: capacity "Size" is a
+ // compile-time constant, only the logical size varies at run time.
+ // Note: add()/push_back() do NOT check capacity — the caller must not
+ // exceed Size elements.
+ template<class T, unsigned Size> class pod_auto_vector
+ {
+ public:
+ typedef T value_type;
+ typedef pod_auto_vector<T, Size> self_type;
+
+ pod_auto_vector() : m_size(0) {}
+
+ // Both reset the logical size only; elements are not destroyed (POD).
+ void remove_all() { m_size = 0; }
+ void clear() { m_size = 0; }
+ void add(const T& v) { m_array[m_size++] = v; }
+ void push_back(const T& v) { m_array[m_size++] = v; }
+ // Grows the logical size without writing elements (caller fills them).
+ void inc_size(unsigned size) { m_size += size; }
+
+ unsigned size() const { return m_size; }
+ const T& operator [] (unsigned i) const { return m_array[i]; }
+ T& operator [] (unsigned i) { return m_array[i]; }
+ const T& at(unsigned i) const { return m_array[i]; }
+ T& at(unsigned i) { return m_array[i]; }
+ T value_at(unsigned i) const { return m_array[i]; }
+
+ private:
+ T m_array[Size];
+ unsigned m_size;
+ };
+
+
+ //---------------------------------------------------------------pod_array
+ // Heap-allocated POD array whose size equals its capacity (no spare
+ // room). Copying uses raw memcpy — element constructors are never run.
+ template<class T> class pod_array
+ {
+ public:
+ typedef T value_type;
+ typedef pod_array<T> self_type;
+
+ ~pod_array() { pod_allocator<T>::deallocate(m_array, m_size); }
+ pod_array() : m_array(0), m_size(0) {}
+
+ // Allocates "size" elements; contents are uninitialized.
+ pod_array(unsigned size) :
+ m_array(pod_allocator<T>::allocate(size)),
+ m_size(size)
+ {}
+
+ pod_array(const self_type& v) :
+ m_array(pod_allocator<T>::allocate(v.m_size)),
+ m_size(v.m_size)
+ {
+ memcpy(m_array, v.m_array, sizeof(T) * m_size);
+ }
+
+ // Reallocates to exactly "size" elements. Existing contents are
+ // DISCARDED (not preserved); no-op when the size is unchanged.
+ void resize(unsigned size)
+ {
+ if(size != m_size)
+ {
+ pod_allocator<T>::deallocate(m_array, m_size);
+ m_array = pod_allocator<T>::allocate(m_size = size);
+ }
+ }
+ const self_type& operator = (const self_type& v)
+ {
+ resize(v.size());
+ memcpy(m_array, v.m_array, sizeof(T) * m_size);
+ return *this;
+ }
+
+ unsigned size() const { return m_size; }
+ const T& operator [] (unsigned i) const { return m_array[i]; }
+ T& operator [] (unsigned i) { return m_array[i]; }
+ const T& at(unsigned i) const { return m_array[i]; }
+ T& at(unsigned i) { return m_array[i]; }
+ T value_at(unsigned i) const { return m_array[i]; }
+
+ const T* data() const { return m_array; }
+ T* data() { return m_array; }
+ private:
+ T* m_array;
+ unsigned m_size;
+ };
+
+
+
+ //--------------------------------------------------------------pod_vector
+ // A simple class template to store Plain Old Data, a vector
+ // of a fixed size. The data is continuous in memory.
+ // Note: add()/push_back() do NOT grow the buffer — the caller must
+ // reserve enough capacity first via capacity()/allocate().
+ //------------------------------------------------------------------------
+ template<class T> class pod_vector
+ {
+ public:
+ typedef T value_type;
+
+ ~pod_vector() { pod_allocator<T>::deallocate(m_array, m_capacity); }
+ pod_vector() : m_size(0), m_capacity(0), m_array(0) {}
+ pod_vector(unsigned cap, unsigned extra_tail=0);
+
+ // Copying
+ pod_vector(const pod_vector<T>&);
+ const pod_vector<T>& operator = (const pod_vector<T>&);
+
+ // Set new capacity. All data is lost, size is set to zero.
+ void capacity(unsigned cap, unsigned extra_tail=0);
+ unsigned capacity() const { return m_capacity; }
+
+ // Allocate n elements. All data is lost,
+ // but elements can be accessed in range 0...size-1.
+ void allocate(unsigned size, unsigned extra_tail=0);
+
+ // Resize keeping the content.
+ void resize(unsigned new_size);
+
+ // Zero-fill the first m_size elements (raw memset; POD only).
+ void zero()
+ {
+ memset(m_array, 0, sizeof(T) * m_size);
+ }
+
+ void add(const T& v) { m_array[m_size++] = v; }
+ void push_back(const T& v) { m_array[m_size++] = v; }
+ void insert_at(unsigned pos, const T& val);
+ // Grows the logical size without writing elements (caller fills them).
+ void inc_size(unsigned size) { m_size += size; }
+ unsigned size() const { return m_size; }
+ unsigned byte_size() const { return m_size * sizeof(T); }
+ void serialize(int8u* ptr) const;
+ void deserialize(const int8u* data, unsigned byte_size);
+ const T& operator [] (unsigned i) const { return m_array[i]; }
+ T& operator [] (unsigned i) { return m_array[i]; }
+ const T& at(unsigned i) const { return m_array[i]; }
+ T& at(unsigned i) { return m_array[i]; }
+ T value_at(unsigned i) const { return m_array[i]; }
+
+ const T* data() const { return m_array; }
+ T* data() { return m_array; }
+
+ // remove_all()/clear() reset the logical size only; capacity is kept.
+ void remove_all() { m_size = 0; }
+ void clear() { m_size = 0; }
+ // Truncate to "num" elements if currently larger; never grows.
+ void cut_at(unsigned num) { if(num < m_size) m_size = num; }
+
+ private:
+ unsigned m_size;
+ unsigned m_capacity;
+ T* m_array;
+ };
+
+ //------------------------------------------------------------------------
+ // Reserve at least "cap" elements (plus "extra_tail" slack). Always
+ // resets the logical size to zero; reallocates only when growing, so
+ // shrinking requests keep the existing (larger) buffer.
+ template<class T>
+ void pod_vector<T>::capacity(unsigned cap, unsigned extra_tail)
+ {
+ m_size = 0;
+ if(cap > m_capacity)
+ {
+ pod_allocator<T>::deallocate(m_array, m_capacity);
+ m_capacity = cap + extra_tail;
+ m_array = m_capacity ? pod_allocator<T>::allocate(m_capacity) : 0;
+ }
+ }
+
+ //------------------------------------------------------------------------
+ // Discard contents and make elements [0, size) addressable
+ // (uninitialized).
+ template<class T>
+ void pod_vector<T>::allocate(unsigned size, unsigned extra_tail)
+ {
+ capacity(size, extra_tail);
+ m_size = size;
+ }
+
+
+ //------------------------------------------------------------------------
+ // Resize keeping the content. NOTE(review): on the growing path this
+ // copies the old elements into a bigger buffer but updates neither
+ // m_size nor m_capacity — so size() stays unchanged and the recorded
+ // capacity no longer matches the allocation. This matches upstream
+ // AGG 2.4 behavior; callers appear to rely on inc_size()/add() after
+ // the call — confirm before "fixing".
+ template<class T>
+ void pod_vector<T>::resize(unsigned new_size)
+ {
+ if(new_size > m_size)
+ {
+ if(new_size > m_capacity)
+ {
+ T* data = pod_allocator<T>::allocate(new_size);
+ memcpy(data, m_array, m_size * sizeof(T));
+ pod_allocator<T>::deallocate(m_array, m_capacity);
+ m_array = data;
+ }
+ }
+ else
+ {
+ m_size = new_size;
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class T> pod_vector<T>::pod_vector(unsigned cap, unsigned extra_tail) :
+ m_size(0),
+ m_capacity(cap + extra_tail),
+ m_array(pod_allocator<T>::allocate(m_capacity)) {}
+
+ //------------------------------------------------------------------------
+ // Copy constructor: duplicates the full capacity allocation but only
+ // memcpy's the live elements.
+ template<class T> pod_vector<T>::pod_vector(const pod_vector<T>& v) :
+ m_size(v.m_size),
+ m_capacity(v.m_capacity),
+ m_array(v.m_capacity ? pod_allocator<T>::allocate(v.m_capacity) : 0)
+ {
+ memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
+ }
+
+ //------------------------------------------------------------------------
+ template<class T> const pod_vector<T>&
+ pod_vector<T>::operator = (const pod_vector<T>&v)
+ {
+ allocate(v.m_size);
+ if(v.m_size) memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ // Raw byte dump of the live elements into "ptr" (caller sizes the
+ // buffer via byte_size()).
+ template<class T> void pod_vector<T>::serialize(int8u* ptr) const
+ {
+ if(m_size) memcpy(ptr, m_array, m_size * sizeof(T));
+ }
+
+ //------------------------------------------------------------------------
+ // Rebuild from a raw byte dump; any trailing partial element is dropped
+ // by the integer division.
+ template<class T>
+ void pod_vector<T>::deserialize(const int8u* data, unsigned byte_size)
+ {
+ byte_size /= sizeof(T);
+ allocate(byte_size);
+ if(byte_size) memcpy(m_array, data, byte_size * sizeof(T));
+ }
+
+ //------------------------------------------------------------------------
+ // Insert "val" at "pos", shifting the tail right with memmove.
+ // Requires one free slot of spare capacity; pos >= size appends.
+ template<class T>
+ void pod_vector<T>::insert_at(unsigned pos, const T& val)
+ {
+ if(pos >= m_size)
+ {
+ m_array[m_size] = val;
+ }
+ else
+ {
+ memmove(m_array + pos + 1, m_array + pos, (m_size - pos) * sizeof(T));
+ m_array[pos] = val;
+ }
+ ++m_size;
+ }
+
+ //---------------------------------------------------------------pod_bvector
+ // A simple class template to store Plain Old Data, similar to std::deque
+ // It doesn't reallocate memory but instead, uses blocks of data of size
+ // of (1 << S), that is, power of two. The data is NOT contiguous in memory,
+ // so the only valid access method is operator [] or curr(), prev(), next()
+ //
+ // Reallocs occur only when the pool of pointers to blocks needs
+ // to be extended (it happens very rarely). You can control the value
+ // of increment to reallocate the pointer buffer. See the second constructor.
+ // By default, the increment value equals (1 << S), i.e., the block size.
+ //------------------------------------------------------------------------
+ template<class T, unsigned S=6> class pod_bvector
+ {
+ public:
+ enum block_scale_e
+ {
+ block_shift = S,
+ block_size = 1 << block_shift,
+ block_mask = block_size - 1
+ };
+
+ typedef T value_type;
+
+ ~pod_bvector();
+ pod_bvector();
+ // "block_ptr_inc" is the growth step of the block-pointer table.
+ pod_bvector(unsigned block_ptr_inc);
+
+ // Copying
+ pod_bvector(const pod_bvector<T, S>& v);
+ const pod_bvector<T, S>& operator = (const pod_bvector<T, S>& v);
+
+ // remove_all()/clear() reset the logical size but keep all blocks
+ // allocated; free_all() releases the blocks as well.
+ void remove_all() { m_size = 0; }
+ void clear() { m_size = 0; }
+ void free_all() { free_tail(0); }
+ void free_tail(unsigned size);
+ void add(const T& val);
+ void push_back(const T& val) { add(val); }
+ void modify_last(const T& val);
+ void remove_last();
+
+ int allocate_continuous_block(unsigned num_elements);
+
+ void add_array(const T* ptr, unsigned num_elem)
+ {
+ while(num_elem--)
+ {
+ add(*ptr++);
+ }
+ }
+
+ // Append from any accessor exposing size(), operator*, operator++.
+ template<class DataAccessor> void add_data(DataAccessor& data)
+ {
+ while(data.size())
+ {
+ add(*data);
+ ++data;
+ }
+ }
+
+ // Truncate to "size" elements if currently larger; blocks are kept.
+ void cut_at(unsigned size)
+ {
+ if(size < m_size) m_size = size;
+ }
+
+ unsigned size() const { return m_size; }
+
+ // Element i lives in block (i >> block_shift) at offset (i & block_mask).
+ const T& operator [] (unsigned i) const
+ {
+ return m_blocks[i >> block_shift][i & block_mask];
+ }
+
+ T& operator [] (unsigned i)
+ {
+ return m_blocks[i >> block_shift][i & block_mask];
+ }
+
+ const T& at(unsigned i) const
+ {
+ return m_blocks[i >> block_shift][i & block_mask];
+ }
+
+ T& at(unsigned i)
+ {
+ return m_blocks[i >> block_shift][i & block_mask];
+ }
+
+ T value_at(unsigned i) const
+ {
+ return m_blocks[i >> block_shift][i & block_mask];
+ }
+
+ const T& curr(unsigned idx) const
+ {
+ return (*this)[idx];
+ }
+
+ T& curr(unsigned idx)
+ {
+ return (*this)[idx];
+ }
+
+ // prev()/next() wrap around modulo size (circular access).
+ const T& prev(unsigned idx) const
+ {
+ return (*this)[(idx + m_size - 1) % m_size];
+ }
+
+ T& prev(unsigned idx)
+ {
+ return (*this)[(idx + m_size - 1) % m_size];
+ }
+
+ const T& next(unsigned idx) const
+ {
+ return (*this)[(idx + 1) % m_size];
+ }
+
+ T& next(unsigned idx)
+ {
+ return (*this)[(idx + 1) % m_size];
+ }
+
+ const T& last() const
+ {
+ return (*this)[m_size - 1];
+ }
+
+ T& last()
+ {
+ return (*this)[m_size - 1];
+ }
+
+ unsigned byte_size() const;
+ void serialize(int8u* ptr) const;
+ void deserialize(const int8u* data, unsigned byte_size);
+ void deserialize(unsigned start, const T& empty_val,
+ const int8u* data, unsigned byte_size);
+
+ // Rebuild from a byte accessor, appending element by element;
+ // data_ptr() allocates blocks on demand as m_size grows.
+ template<class ByteAccessor>
+ void deserialize(ByteAccessor data)
+ {
+ remove_all();
+ unsigned elem_size = data.size() / sizeof(T);
+
+ for(unsigned i = 0; i < elem_size; ++i)
+ {
+ int8u* ptr = (int8u*)data_ptr();
+ for(unsigned j = 0; j < sizeof(T); ++j)
+ {
+ *ptr++ = *data;
+ ++data;
+ }
+ ++m_size;
+ }
+ }
+
+ // Replace/append elements starting at "start", padding with
+ // "empty_val" if the vector is currently shorter than "start".
+ template<class ByteAccessor>
+ void deserialize(unsigned start, const T& empty_val, ByteAccessor data)
+ {
+ while(m_size < start)
+ {
+ add(empty_val);
+ }
+
+ unsigned elem_size = data.size() / sizeof(T);
+ for(unsigned i = 0; i < elem_size; ++i)
+ {
+ int8u* ptr;
+ if(start + i < m_size)
+ {
+ ptr = (int8u*)(&((*this)[start + i]));
+ }
+ else
+ {
+ ptr = (int8u*)data_ptr();
+ ++m_size;
+ }
+ for(unsigned j = 0; j < sizeof(T); ++j)
+ {
+ *ptr++ = *data;
+ ++data;
+ }
+ }
+ }
+
+ // Raw access to block "nb" (block_size elements).
+ const T* block(unsigned nb) const { return m_blocks[nb]; }
+
+ private:
+ void allocate_block(unsigned nb);
+ T* data_ptr();
+
+ unsigned m_size; // logical element count
+ unsigned m_num_blocks; // blocks currently allocated
+ unsigned m_max_blocks; // capacity of the m_blocks pointer table
+ T** m_blocks; // table of pointers to fixed-size blocks
+ unsigned m_block_ptr_inc; // growth step for the pointer table
+ };
+
+
+ //------------------------------------------------------------------------
+ // Destructor: free every data block, then the pointer table itself.
+ template<class T, unsigned S> pod_bvector<T, S>::~pod_bvector()
+ {
+ if(m_num_blocks)
+ {
+ T** blk = m_blocks + m_num_blocks - 1;
+ while(m_num_blocks--)
+ {
+ pod_allocator<T>::deallocate(*blk, block_size);
+ --blk;
+ }
+ }
+ pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
+ }
+
+
+ //------------------------------------------------------------------------
+ // Shrink to "size" elements, releasing any blocks that become entirely
+ // unused; with size == 0 the pointer table is released as well.
+ template<class T, unsigned S>
+ void pod_bvector<T, S>::free_tail(unsigned size)
+ {
+ if(size < m_size)
+ {
+ unsigned nb = (size + block_mask) >> block_shift;
+ while(m_num_blocks > nb)
+ {
+ pod_allocator<T>::deallocate(m_blocks[--m_num_blocks], block_size);
+ }
+ if(m_num_blocks == 0)
+ {
+ pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
+ m_blocks = 0;
+ m_max_blocks = 0;
+ }
+ m_size = size;
+ }
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S> pod_bvector<T, S>::pod_bvector() :
+ m_size(0),
+ m_num_blocks(0),
+ m_max_blocks(0),
+ m_blocks(0),
+ m_block_ptr_inc(block_size)
+ {
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ pod_bvector<T, S>::pod_bvector(unsigned block_ptr_inc) :
+ m_size(0),
+ m_num_blocks(0),
+ m_max_blocks(0),
+ m_blocks(0),
+ m_block_ptr_inc(block_ptr_inc)
+ {
+ }
+
+
+ //------------------------------------------------------------------------
+ // Copy constructor: duplicates the pointer table and memcpy's every
+ // allocated block in full (block_size elements each).
+ template<class T, unsigned S>
+ pod_bvector<T, S>::pod_bvector(const pod_bvector<T, S>& v) :
+ m_size(v.m_size),
+ m_num_blocks(v.m_num_blocks),
+ m_max_blocks(v.m_max_blocks),
+ m_blocks(v.m_max_blocks ?
+ pod_allocator<T*>::allocate(v.m_max_blocks) :
+ 0),
+ m_block_ptr_inc(v.m_block_ptr_inc)
+ {
+ unsigned i;
+ for(i = 0; i < v.m_num_blocks; ++i)
+ {
+ m_blocks[i] = pod_allocator<T>::allocate(block_size);
+ memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
+ }
+ }
+
+
+ //------------------------------------------------------------------------
+ // Assignment grows the block set as needed, then overwrites block
+ // contents; surplus blocks (if *this was larger) stay allocated.
+ template<class T, unsigned S>
+ const pod_bvector<T, S>&
+ pod_bvector<T, S>::operator = (const pod_bvector<T, S>& v)
+ {
+ unsigned i;
+ for(i = m_num_blocks; i < v.m_num_blocks; ++i)
+ {
+ allocate_block(i);
+ }
+ for(i = 0; i < v.m_num_blocks; ++i)
+ {
+ memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
+ }
+ m_size = v.m_size;
+ return *this;
+ }
+
+
+ //------------------------------------------------------------------------
+ // Allocate data block "nb", growing the pointer table by
+ // m_block_ptr_inc entries first when it is full.
+ template<class T, unsigned S>
+ void pod_bvector<T, S>::allocate_block(unsigned nb)
+ {
+ if(nb >= m_max_blocks)
+ {
+ T** new_blocks = pod_allocator<T*>::allocate(m_max_blocks + m_block_ptr_inc);
+
+ if(m_blocks)
+ {
+ memcpy(new_blocks,
+ m_blocks,
+ m_num_blocks * sizeof(T*));
+
+ pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
+ }
+ m_blocks = new_blocks;
+ m_max_blocks += m_block_ptr_inc;
+ }
+ m_blocks[nb] = pod_allocator<T>::allocate(block_size);
+ m_num_blocks++;
+ }
+
+
+
+ //------------------------------------------------------------------------
+ // Pointer to the slot for the NEXT element (index m_size), allocating
+ // its block on demand; does not change m_size — callers do that.
+ template<class T, unsigned S>
+ inline T* pod_bvector<T, S>::data_ptr()
+ {
+ unsigned nb = m_size >> block_shift;
+ if(nb >= m_num_blocks)
+ {
+ allocate_block(nb);
+ }
+ return m_blocks[nb] + (m_size & block_mask);
+ }
+
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ inline void pod_bvector<T, S>::add(const T& val)
+ {
+ *data_ptr() = val;
+ ++m_size;
+ }
+
+
+ //------------------------------------------------------------------------
+ // Drops the last element logically; its block is not released.
+ template<class T, unsigned S>
+ inline void pod_bvector<T, S>::remove_last()
+ {
+ if(m_size) --m_size;
+ }
+
+
+ //------------------------------------------------------------------------
+ // Overwrite the last element (or append when empty, since remove_last
+ // on an empty vector is a no-op).
+ template<class T, unsigned S>
+ void pod_bvector<T, S>::modify_last(const T& val)
+ {
+ remove_last();
+ add(val);
+ }
+
+
+ //------------------------------------------------------------------------
+ // Reserve "num_elements" CONTIGUOUS slots (must fit in one block) and
+ // return the index of the first one, or -1 if num_elements >= block_size.
+ // If the current block's tail is too small it is skipped (those slots
+ // stay allocated but unused) and a fresh block is started.
+ template<class T, unsigned S>
+ int pod_bvector<T, S>::allocate_continuous_block(unsigned num_elements)
+ {
+ if(num_elements < block_size)
+ {
+ data_ptr(); // Allocate initial block if necessary
+ unsigned rest = block_size - (m_size & block_mask);
+ unsigned index;
+ if(num_elements <= rest)
+ {
+ // The rest of the block is good, we can use it
+ //-----------------
+ index = m_size;
+ m_size += num_elements;
+ return index;
+ }
+
+ // New block
+ //---------------
+ m_size += rest;
+ data_ptr();
+ index = m_size;
+ m_size += num_elements;
+ return index;
+ }
+ return -1; // Impossible to allocate
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ unsigned pod_bvector<T, S>::byte_size() const
+ {
+ return m_size * sizeof(T);
+ }
+
+
+ //------------------------------------------------------------------------
+ // Byte-dump element by element (the storage itself is not contiguous).
+ template<class T, unsigned S>
+ void pod_bvector<T, S>::serialize(int8u* ptr) const
+ {
+ unsigned i;
+ for(i = 0; i < m_size; i++)
+ {
+ memcpy(ptr, &(*this)[i], sizeof(T));
+ ptr += sizeof(T);
+ }
+ }
+
+ //------------------------------------------------------------------------
+ // Rebuild from a raw byte dump; trailing partial element is dropped.
+ template<class T, unsigned S>
+ void pod_bvector<T, S>::deserialize(const int8u* data, unsigned byte_size)
+ {
+ remove_all();
+ byte_size /= sizeof(T);
+ for(unsigned i = 0; i < byte_size; ++i)
+ {
+ T* ptr = data_ptr();
+ memcpy(ptr, data, sizeof(T));
+ ++m_size;
+ data += sizeof(T);
+ }
+ }
+
+
+ // Replace or add a number of elements starting from "start" position
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ void pod_bvector<T, S>::deserialize(unsigned start, const T& empty_val,
+ const int8u* data, unsigned byte_size)
+ {
+ while(m_size < start)
+ {
+ add(empty_val);
+ }
+
+ byte_size /= sizeof(T);
+ for(unsigned i = 0; i < byte_size; ++i)
+ {
+ if(start + i < m_size)
+ {
+ memcpy(&((*this)[start + i]), data, sizeof(T));
+ }
+ else
+ {
+ T* ptr = data_ptr();
+ memcpy(ptr, data, sizeof(T));
+ ++m_size;
+ }
+ data += sizeof(T);
+ }
+ }
+
+ //---------------------------------------------------------block_allocator
+ // Allocator for arbitrary POD data. Most usable in different cache
+ // systems for efficient memory allocations.
+ // Memory is allocated with blocks of fixed size ("block_size" in
+ // the constructor). If required size exceeds the block size the allocator
+ // creates a new block of the required size. However, the most efficient
+ // use is when the average required size is much less than the block size.
+ // Note: there is no per-allocation free — memory is reclaimed only by
+ // remove_all() or the destructor (arena-style lifetime).
+ //------------------------------------------------------------------------
+ class block_allocator
+ {
+ // One arena block: raw byte buffer plus its size (needed to pass the
+ // correct size back to pod_allocator::deallocate).
+ struct block_type
+ {
+ int8u* data;
+ unsigned size;
+ };
+
+ public:
+ // Release every block and the block table; resets to the empty state.
+ void remove_all()
+ {
+ if(m_num_blocks)
+ {
+ block_type* blk = m_blocks + m_num_blocks - 1;
+ while(m_num_blocks--)
+ {
+ pod_allocator<int8u>::deallocate(blk->data, blk->size);
+ --blk;
+ }
+ pod_allocator<block_type>::deallocate(m_blocks, m_max_blocks);
+ }
+ m_num_blocks = 0;
+ m_max_blocks = 0;
+ m_blocks = 0;
+ m_buf_ptr = 0;
+ m_rest = 0;
+ }
+
+ ~block_allocator()
+ {
+ remove_all();
+ }
+
+ // "block_size": default size of each arena block;
+ // "block_ptr_inc": growth step of the block table.
+ block_allocator(unsigned block_size, unsigned block_ptr_inc=256-8) :
+ m_block_size(block_size),
+ m_block_ptr_inc(block_ptr_inc),
+ m_num_blocks(0),
+ m_max_blocks(0),
+ m_blocks(0),
+ m_buf_ptr(0),
+ m_rest(0)
+ {
+ }
+
+
+ // Bump-allocate "size" bytes from the current block, aligned to
+ // "alignment" bytes; starts a new block when the current one cannot
+ // satisfy the (aligned) request. Returns 0 only for size == 0.
+ int8u* allocate(unsigned size, unsigned alignment=1)
+ {
+ if(size == 0) return 0;
+ if(size <= m_rest)
+ {
+ int8u* ptr = m_buf_ptr;
+ if(alignment > 1)
+ {
+ // Pad up to the next aligned address within the block.
+ unsigned align =
+ (alignment - unsigned((size_t)ptr) % alignment) % alignment;
+
+ size += align;
+ ptr += align;
+ if(size <= m_rest)
+ {
+ m_rest -= size;
+ m_buf_ptr += size;
+ return ptr;
+ }
+ // Padding overflowed the block: open a new one and retry.
+ allocate_block(size);
+ return allocate(size - align, alignment);
+ }
+ m_rest -= size;
+ m_buf_ptr += size;
+ return ptr;
+ }
+ // Current block too small: open one big enough for the worst-case
+ // alignment padding, then retry (recursion terminates immediately).
+ allocate_block(size + alignment - 1);
+ return allocate(size, alignment);
+ }
+
+
+ private:
+ // Append a fresh block of at least m_block_size bytes and make it
+ // current; grows the block table by m_block_ptr_inc when full.
+ void allocate_block(unsigned size)
+ {
+ if(size < m_block_size) size = m_block_size;
+ if(m_num_blocks >= m_max_blocks)
+ {
+ block_type* new_blocks =
+ pod_allocator<block_type>::allocate(m_max_blocks + m_block_ptr_inc);
+
+ if(m_blocks)
+ {
+ memcpy(new_blocks,
+ m_blocks,
+ m_num_blocks * sizeof(block_type));
+ pod_allocator<block_type>::deallocate(m_blocks, m_max_blocks);
+ }
+ m_blocks = new_blocks;
+ m_max_blocks += m_block_ptr_inc;
+ }
+
+ m_blocks[m_num_blocks].size = size;
+ m_blocks[m_num_blocks].data =
+ m_buf_ptr =
+ pod_allocator<int8u>::allocate(size);
+
+ m_num_blocks++;
+ m_rest = size;
+ }
+
+ unsigned m_block_size; // default block size in bytes
+ unsigned m_block_ptr_inc; // block-table growth step
+ unsigned m_num_blocks;
+ unsigned m_max_blocks;
+ block_type* m_blocks; // table of all blocks (for freeing)
+ int8u* m_buf_ptr; // bump pointer into the current block
+ unsigned m_rest; // bytes left in the current block
+ };
+
+
+
+
+
+
+
+
+ //------------------------------------------------------------------------
+ // Sub-arrays at or below this length are finished with insertion sort
+ // inside quick_sort() instead of further partitioning.
+ enum quick_sort_threshold_e
+ {
+ quick_sort_threshold = 9
+ };
+
+
+ //-----------------------------------------------------------swap_elements
+ // Copy-based swap (AGG containers hold POD, so no move semantics needed).
+ template<class T> inline void swap_elements(T& a, T& b)
+ {
+ T temp = a;
+ a = b;
+ b = temp;
+ }
+
+
+ //--------------------------------------------------------------quick_sort
+ // In-place quicksort over any AGG-style random-access container
+ // (needs size() and operator[]). Uses an explicit stack instead of
+ // recursion; always pushing the larger partition and iterating on the
+ // smaller bounds the stack depth, so int stack[80] (40 ranges) is
+ // ample. Sub-ranges of <= quick_sort_threshold elements are finished
+ // with insertion sort. "less" is a strict-weak-order predicate.
+ template<class Array, class Less>
+ void quick_sort(Array& arr, Less less)
+ {
+ if(arr.size() < 2) return;
+
+ typename Array::value_type* e1;
+ typename Array::value_type* e2;
+
+ int stack[80]; // pending [base, limit) ranges, two ints each
+ int* top = stack;
+ int limit = arr.size();
+ int base = 0;
+
+ for(;;)
+ {
+ int len = limit - base;
+
+ int i;
+ int j;
+ int pivot;
+
+ if(len > quick_sort_threshold)
+ {
+ // we use base + len/2 as the pivot
+ pivot = base + len / 2;
+ swap_elements(arr[base], arr[pivot]);
+
+ i = base + 1;
+ j = limit - 1;
+
+ // now ensure that *i <= *base <= *j
+ // (median-of-three; also acts as a sentinel so the inner
+ // do-while scans below cannot run off the range)
+ e1 = &(arr[j]);
+ e2 = &(arr[i]);
+ if(less(*e1, *e2)) swap_elements(*e1, *e2);
+
+ e1 = &(arr[base]);
+ e2 = &(arr[i]);
+ if(less(*e1, *e2)) swap_elements(*e1, *e2);
+
+ e1 = &(arr[j]);
+ e2 = &(arr[base]);
+ if(less(*e1, *e2)) swap_elements(*e1, *e2);
+
+ // Hoare-style partition around arr[base].
+ for(;;)
+ {
+ do i++; while( less(arr[i], arr[base]) );
+ do j--; while( less(arr[base], arr[j]) );
+
+ if( i > j )
+ {
+ break;
+ }
+
+ swap_elements(arr[i], arr[j]);
+ }
+
+ // Put the pivot into its final position j.
+ swap_elements(arr[base], arr[j]);
+
+ // now, push the largest sub-array
+ if(j - base > limit - i)
+ {
+ top[0] = base;
+ top[1] = j;
+ base = i;
+ }
+ else
+ {
+ top[0] = i;
+ top[1] = limit;
+ limit = j;
+ }
+ top += 2;
+ }
+ else
+ {
+ // the sub-array is small, perform insertion sort
+ j = base;
+ i = j + 1;
+
+ for(; i < limit; j = i, i++)
+ {
+ for(; less(*(e1 = &(arr[j + 1])), *(e2 = &(arr[j]))); j--)
+ {
+ swap_elements(*e1, *e2);
+ if(j == base)
+ {
+ break;
+ }
+ }
+ }
+ // Pop the next pending range, or finish when the stack is empty.
+ if(top > stack)
+ {
+ top -= 2;
+ base = top[0];
+ limit = top[1];
+ }
+ else
+ {
+ break;
+ }
+ }
+ }
+ }
+
+
+
+
+ //------------------------------------------------------remove_duplicates
+ // Remove duplicates from a sorted array. It doesn't cut the
+ // tail of the array, it just returns the number of remaining elements.
+ // "equal" compares adjacent elements; elements past the returned count
+ // are left in an unspecified state.
+ //-----------------------------------------------------------------------
+ template<class Array, class Equal>
+ unsigned remove_duplicates(Array& arr, Equal equal)
+ {
+ if(arr.size() < 2) return arr.size();
+
+ unsigned i, j;
+ // j tracks the write position for the next unique element.
+ for(i = 1, j = 1; i < arr.size(); i++)
+ {
+ typename Array::value_type& e = arr[i];
+ if(!equal(e, arr[i - 1]))
+ {
+ arr[j++] = e;
+ }
+ }
+ return j;
+ }
+
+ //--------------------------------------------------------invert_container
+ // Reverse the order of elements in place by swapping from both ends.
+ template<class Array> void invert_container(Array& arr)
+ {
+ int i = 0;
+ int j = arr.size() - 1;
+ while(i < j)
+ {
+ swap_elements(arr[i++], arr[j--]);
+ }
+ }
+
+ //------------------------------------------------------binary_search_pos
+ // Binary search in a sorted array: returns the position where "val"
+ // would be inserted to keep the order (0..size(), akin to upper-bound
+ // style insertion point under the "less" predicate).
+ template<class Array, class Value, class Less>
+ unsigned binary_search_pos(const Array& arr, const Value& val, Less less)
+ {
+ if(arr.size() == 0) return 0;
+
+ unsigned beg = 0;
+ unsigned end = arr.size() - 1;
+
+ // Fast paths: before the first element / after the last.
+ if(less(val, arr[0])) return 0;
+ if(less(arr[end], val)) return end + 1;
+
+ // Invariant: arr[beg] <= val and val <= arr[end] (under "less").
+ while(end - beg > 1)
+ {
+ unsigned mid = (end + beg) >> 1;
+ if(less(val, arr[mid])) end = mid;
+ else beg = mid;
+ }
+
+ //if(beg <= 0 && less(val, arr[0])) return 0;
+ //if(end >= arr.size() - 1 && less(arr[end], val)) ++end;
+
+ return end;
+ }
+
+ //----------------------------------------------------------range_adaptor
+ // Non-owning view over a sub-range [start, start+size) of another
+ // AGG-style container; indices are re-based so 0 maps to "start".
+ // The underlying array must outlive the adaptor.
+ template<class Array> class range_adaptor
+ {
+ public:
+ typedef typename Array::value_type value_type;
+
+ range_adaptor(Array& array, unsigned start, unsigned size) :
+ m_array(array), m_start(start), m_size(size)
+ {}
+
+ unsigned size() const { return m_size; }
+ const value_type& operator [] (unsigned i) const { return m_array[m_start + i]; }
+ value_type& operator [] (unsigned i) { return m_array[m_start + i]; }
+ const value_type& at(unsigned i) const { return m_array[m_start + i]; }
+ value_type& at(unsigned i) { return m_array[m_start + i]; }
+ value_type value_at(unsigned i) const { return m_array[m_start + i]; }
+
+ private:
+ Array& m_array; // borrowed reference, not owned
+ unsigned m_start;
+ unsigned m_size;
+ };
+
+ // Ready-made comparison predicates for quick_sort()/binary_search_pos().
+ //---------------------------------------------------------------int_less
+ inline bool int_less(int a, int b) { return a < b; }
+
+ //------------------------------------------------------------int_greater
+ inline bool int_greater(int a, int b) { return a > b; }
+
+ //----------------------------------------------------------unsigned_less
+ inline bool unsigned_less(unsigned a, unsigned b) { return a < b; }
+
+ //-------------------------------------------------------unsigned_greater
+ inline bool unsigned_greater(unsigned a, unsigned b) { return a > b; }
+}
+
+#endif
diff --git a/src/agg/agg_basics.h b/src/agg/agg_basics.h
new file mode 100644
index 000000000..273850ba1
--- /dev/null
+++ b/src/agg/agg_basics.h
@@ -0,0 +1,574 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_BASICS_INCLUDED
+#define AGG_BASICS_INCLUDED
+
+#include <cmath>
+#include "agg_config.h"
+
+//---------------------------------------------------------AGG_CUSTOM_ALLOCATOR
+#ifdef AGG_CUSTOM_ALLOCATOR
+#include "agg_allocator.h"
+#else
+namespace agg
+{
+ // The policy of all AGG containers and memory allocation strategy
+ // in general is that no allocated data requires explicit construction.
+ // It means that the allocator can be really simple; you can even
+ // replace new/delete to malloc/free. The constructors and destructors
+ // won't be called in this case, however everything will remain working.
+ // The second argument of deallocate() is the size of the allocated
+ // block. You can use this information if you wish.
+ //------------------------------------------------------------pod_allocator
+ template<class T> struct pod_allocator
+ {
+ static T* allocate(unsigned num) { return new T [num]; }
+ static void deallocate(T* ptr, unsigned) { delete [] ptr; }
+ };
+
+ // Single object allocator. It can also be replaced with your custom
+ // allocator. The difference is that it can only allocate a single
+ // object and the constructor and destructor must be called.
+ // In AGG there is no need to allocate an array of objects with
+ // calling their constructors (only single ones). So that, if you
+ // replace these new/delete to malloc/free make sure that the in-place
+ // new is called and take care of calling the destructor too.
+ //------------------------------------------------------------obj_allocator
+ template<class T> struct obj_allocator
+ {
+ static T* allocate() { return new T; }
+ static void deallocate(T* ptr) { delete ptr; }
+ };
+}
+#endif
+
+
+//-------------------------------------------------------- Default basic types
+//
+// If the compiler has different capacity of the basic types you can redefine
+// them via the compiler command line or by generating agg_config.h that is
+// empty by default.
+//
+#ifndef AGG_INT8
+#define AGG_INT8 signed char
+#endif
+
+#ifndef AGG_INT8U
+#define AGG_INT8U unsigned char
+#endif
+
+#ifndef AGG_INT16
+#define AGG_INT16 short
+#endif
+
+#ifndef AGG_INT16U
+#define AGG_INT16U unsigned short
+#endif
+
+#ifndef AGG_INT32
+#define AGG_INT32 int
+#endif
+
+#ifndef AGG_INT32U
+#define AGG_INT32U unsigned
+#endif
+
+#ifndef AGG_INT64
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+#define AGG_INT64 signed __int64
+#else
+#define AGG_INT64 signed long long
+#endif
+#endif
+
+#ifndef AGG_INT64U
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+#define AGG_INT64U unsigned __int64
+#else
+#define AGG_INT64U unsigned long long
+#endif
+#endif
+
+//------------------------------------------------ Some fixes for MS Visual C++
+#if defined(_MSC_VER)
+#pragma warning(disable:4786) // Identifier was truncated...
+#endif
+
+#if defined(_MSC_VER)
+#define AGG_INLINE __forceinline
+#else
+#define AGG_INLINE inline
+#endif
+
+namespace agg
+{
+ //-------------------------------------------------------------------------
+ typedef AGG_INT8 int8; //----int8
+ typedef AGG_INT8U int8u; //----int8u
+ typedef AGG_INT16 int16; //----int16
+ typedef AGG_INT16U int16u; //----int16u
+ typedef AGG_INT32 int32; //----int32
+ typedef AGG_INT32U int32u; //----int32u
+ typedef AGG_INT64 int64; //----int64
+ typedef AGG_INT64U int64u; //----int64u
+
+#if defined(AGG_FISTP)
+#pragma warning(push)
+#pragma warning(disable : 4035) //Disable warning "no return value"
+ AGG_INLINE int iround(double v) //-------iround
+ {
+ int t;
+ __asm fld qword ptr [v]
+ __asm fistp dword ptr [t]
+ __asm mov eax, dword ptr [t]
+ }
+ AGG_INLINE unsigned uround(double v) //-------uround
+ {
+ unsigned t;
+ __asm fld qword ptr [v]
+ __asm fistp dword ptr [t]
+ __asm mov eax, dword ptr [t]
+ }
+#pragma warning(pop)
+ AGG_INLINE int ifloor(double v)
+ {
+ return int(floor(v));
+ }
+ AGG_INLINE unsigned ufloor(double v) //-------ufloor
+ {
+ return unsigned(floor(v));
+ }
+ AGG_INLINE int iceil(double v)
+ {
+ return int(ceil(v));
+ }
+ AGG_INLINE unsigned uceil(double v) //--------uceil
+ {
+ return unsigned(ceil(v));
+ }
+#elif defined(AGG_QIFIST)
+ AGG_INLINE int iround(double v)
+ {
+ return int(v);
+ }
+ AGG_INLINE int uround(double v)
+ {
+ return unsigned(v);
+ }
+ AGG_INLINE int ifloor(double v)
+ {
+ return int(floor(v));
+ }
+ AGG_INLINE unsigned ufloor(double v)
+ {
+ return unsigned(floor(v));
+ }
+ AGG_INLINE int iceil(double v)
+ {
+ return int(ceil(v));
+ }
+ AGG_INLINE unsigned uceil(double v)
+ {
+ return unsigned(ceil(v));
+ }
+#else
+ AGG_INLINE int iround(double v)
+ {
+ return int((v < 0.0) ? v - 0.5 : v + 0.5);
+ }
+ AGG_INLINE int uround(double v)
+ {
+ return unsigned(v + 0.5);
+ }
+ AGG_INLINE int ifloor(double v)
+ {
+ int i = int(v);
+ return i - (i > v);
+ }
+ AGG_INLINE unsigned ufloor(double v)
+ {
+ return unsigned(v);
+ }
+ AGG_INLINE int iceil(double v)
+ {
+ return int(ceil(v));
+ }
+ AGG_INLINE unsigned uceil(double v)
+ {
+ return unsigned(ceil(v));
+ }
+#endif
+
+ //---------------------------------------------------------------saturation
+ template<int Limit> struct saturation
+ {
+ AGG_INLINE static int iround(double v)
+ {
+ if(v < double(-Limit)) return -Limit;
+ if(v > double( Limit)) return Limit;
+ return agg::iround(v);
+ }
+ };
+
+ //------------------------------------------------------------------mul_one
+ template<unsigned Shift> struct mul_one
+ {
+ AGG_INLINE static unsigned mul(unsigned a, unsigned b)
+ {
+ unsigned q = a * b + (1 << (Shift-1));
+ return (q + (q >> Shift)) >> Shift;
+ }
+ };
+
+ //-------------------------------------------------------------------------
+ typedef unsigned char cover_type; //----cover_type
+ enum cover_scale_e
+ {
+ cover_shift = 8, //----cover_shift
+ cover_size = 1 << cover_shift, //----cover_size
+ cover_mask = cover_size - 1, //----cover_mask
+ cover_none = 0, //----cover_none
+ cover_full = cover_mask //----cover_full
+ };
+
+ //----------------------------------------------------poly_subpixel_scale_e
+ // These constants determine the subpixel accuracy, to be more precise,
+ // the number of bits of the fractional part of the coordinates.
+ // The possible coordinate capacity in bits can be calculated by formula:
+ // sizeof(int) * 8 - poly_subpixel_shift, i.e, for 32-bit integers and
+ // 8-bits fractional part the capacity is 24 bits.
+ enum poly_subpixel_scale_e
+ {
+ poly_subpixel_shift = 8, //----poly_subpixel_shift
+ poly_subpixel_scale = 1<<poly_subpixel_shift, //----poly_subpixel_scale
+ poly_subpixel_mask = poly_subpixel_scale-1 //----poly_subpixel_mask
+ };
+
+ //----------------------------------------------------------filling_rule_e
+ enum filling_rule_e
+ {
+ fill_non_zero,
+ fill_even_odd
+ };
+
+ //-----------------------------------------------------------------------pi
+ const double pi = 3.14159265358979323846;
+
+ //------------------------------------------------------------------deg2rad
+ inline double deg2rad(double deg)
+ {
+ return deg * pi / 180.0;
+ }
+
+ //------------------------------------------------------------------rad2deg
+ inline double rad2deg(double rad)
+ {
+ return rad * 180.0 / pi;
+ }
+
+ //----------------------------------------------------------------rect_base
+ template<class T> struct rect_base
+ {
+ typedef T value_type;
+ typedef rect_base<T> self_type;
+ T x1, y1, x2, y2;
+
+ rect_base() {}
+ rect_base(T x1_, T y1_, T x2_, T y2_) :
+ x1(x1_), y1(y1_), x2(x2_), y2(y2_) {}
+
+ void init(T x1_, T y1_, T x2_, T y2_)
+ {
+ x1 = x1_; y1 = y1_; x2 = x2_; y2 = y2_;
+ }
+
+ const self_type& normalize()
+ {
+ T t;
+ if(x1 > x2) { t = x1; x1 = x2; x2 = t; }
+ if(y1 > y2) { t = y1; y1 = y2; y2 = t; }
+ return *this;
+ }
+
+ bool clip(const self_type& r)
+ {
+ if(x2 > r.x2) x2 = r.x2;
+ if(y2 > r.y2) y2 = r.y2;
+ if(x1 < r.x1) x1 = r.x1;
+ if(y1 < r.y1) y1 = r.y1;
+ return x1 <= x2 && y1 <= y2;
+ }
+
+ bool is_valid() const
+ {
+ return x1 <= x2 && y1 <= y2;
+ }
+
+ bool hit_test(T x, T y) const
+ {
+ return (x >= x1 && x <= x2 && y >= y1 && y <= y2);
+ }
+
+ bool overlaps(const self_type& r) const
+ {
+ return !(r.x1 > x2 || r.x2 < x1
+ || r.y1 > y2 || r.y2 < y1);
+ }
+ };
+
+ //-----------------------------------------------------intersect_rectangles
+ template<class Rect>
+ inline Rect intersect_rectangles(const Rect& r1, const Rect& r2)
+ {
+ Rect r = r1;
+
+ // First process x2,y2 because the other order
+ // results in Internal Compiler Error under
+ // Microsoft Visual C++ .NET 2003 69462-335-0000007-18038 in
+ // case of "Maximize Speed" optimization option.
+ //-----------------
+ if(r.x2 > r2.x2) r.x2 = r2.x2;
+ if(r.y2 > r2.y2) r.y2 = r2.y2;
+ if(r.x1 < r2.x1) r.x1 = r2.x1;
+ if(r.y1 < r2.y1) r.y1 = r2.y1;
+ return r;
+ }
+
+
+ //---------------------------------------------------------unite_rectangles
+ template<class Rect>
+ inline Rect unite_rectangles(const Rect& r1, const Rect& r2)
+ {
+ Rect r = r1;
+ if(r.x2 < r2.x2) r.x2 = r2.x2;
+ if(r.y2 < r2.y2) r.y2 = r2.y2;
+ if(r.x1 > r2.x1) r.x1 = r2.x1;
+ if(r.y1 > r2.y1) r.y1 = r2.y1;
+ return r;
+ }
+
+ typedef rect_base<int> rect_i; //----rect_i
+ typedef rect_base<float> rect_f; //----rect_f
+ typedef rect_base<double> rect_d; //----rect_d
+
+ //---------------------------------------------------------path_commands_e
+ enum path_commands_e
+ {
+ path_cmd_stop = 0, //----path_cmd_stop
+ path_cmd_move_to = 1, //----path_cmd_move_to
+ path_cmd_line_to = 2, //----path_cmd_line_to
+ path_cmd_curve3 = 3, //----path_cmd_curve3
+ path_cmd_curve4 = 4, //----path_cmd_curve4
+ path_cmd_curveN = 5, //----path_cmd_curveN
+ path_cmd_catrom = 6, //----path_cmd_catrom
+ path_cmd_ubspline = 7, //----path_cmd_ubspline
+ path_cmd_end_poly = 0x0F, //----path_cmd_end_poly
+ path_cmd_mask = 0x0F //----path_cmd_mask
+ };
+
+ //------------------------------------------------------------path_flags_e
+ enum path_flags_e
+ {
+ path_flags_none = 0, //----path_flags_none
+ path_flags_ccw = 0x10, //----path_flags_ccw
+ path_flags_cw = 0x20, //----path_flags_cw
+ path_flags_close = 0x40, //----path_flags_close
+ path_flags_mask = 0xF0 //----path_flags_mask
+ };
+
+ //---------------------------------------------------------------is_vertex
+ inline bool is_vertex(unsigned c)
+ {
+ return c >= path_cmd_move_to && c < path_cmd_end_poly;
+ }
+
+ //--------------------------------------------------------------is_drawing
+ inline bool is_drawing(unsigned c)
+ {
+ return c >= path_cmd_line_to && c < path_cmd_end_poly;
+ }
+
+ //-----------------------------------------------------------------is_stop
+ inline bool is_stop(unsigned c)
+ {
+ return c == path_cmd_stop;
+ }
+
+ //--------------------------------------------------------------is_move_to
+ inline bool is_move_to(unsigned c)
+ {
+ return c == path_cmd_move_to;
+ }
+
+ //--------------------------------------------------------------is_line_to
+ inline bool is_line_to(unsigned c)
+ {
+ return c == path_cmd_line_to;
+ }
+
+ //----------------------------------------------------------------is_curve
+ inline bool is_curve(unsigned c)
+ {
+ return c == path_cmd_curve3 || c == path_cmd_curve4;
+ }
+
+ //---------------------------------------------------------------is_curve3
+ inline bool is_curve3(unsigned c)
+ {
+ return c == path_cmd_curve3;
+ }
+
+ //---------------------------------------------------------------is_curve4
+ inline bool is_curve4(unsigned c)
+ {
+ return c == path_cmd_curve4;
+ }
+
+ //-------------------------------------------------------------is_end_poly
+ inline bool is_end_poly(unsigned c)
+ {
+ return (c & path_cmd_mask) == path_cmd_end_poly;
+ }
+
+ //----------------------------------------------------------------is_close
+ inline bool is_close(unsigned c)
+ {
+ return (c & ~(path_flags_cw | path_flags_ccw)) ==
+ (path_cmd_end_poly | path_flags_close);
+ }
+
+ //------------------------------------------------------------is_next_poly
+ inline bool is_next_poly(unsigned c)
+ {
+ return is_stop(c) || is_move_to(c) || is_end_poly(c);
+ }
+
+ //-------------------------------------------------------------------is_cw
+ inline bool is_cw(unsigned c)
+ {
+ return (c & path_flags_cw) != 0;
+ }
+
+ //------------------------------------------------------------------is_ccw
+ inline bool is_ccw(unsigned c)
+ {
+ return (c & path_flags_ccw) != 0;
+ }
+
+ //-------------------------------------------------------------is_oriented
+ inline bool is_oriented(unsigned c)
+ {
+ return (c & (path_flags_cw | path_flags_ccw)) != 0;
+ }
+
+ //---------------------------------------------------------------is_closed
+ inline bool is_closed(unsigned c)
+ {
+ return (c & path_flags_close) != 0;
+ }
+
+ //----------------------------------------------------------get_close_flag
+ inline unsigned get_close_flag(unsigned c)
+ {
+ return c & path_flags_close;
+ }
+
+ //-------------------------------------------------------clear_orientation
+ inline unsigned clear_orientation(unsigned c)
+ {
+ return c & ~(path_flags_cw | path_flags_ccw);
+ }
+
+ //---------------------------------------------------------get_orientation
+ inline unsigned get_orientation(unsigned c)
+ {
+ return c & (path_flags_cw | path_flags_ccw);
+ }
+
+ //---------------------------------------------------------set_orientation
+ inline unsigned set_orientation(unsigned c, unsigned o)
+ {
+ return clear_orientation(c) | o;
+ }
+
+ //--------------------------------------------------------------point_base
+ template<class T> struct point_base
+ {
+ typedef T value_type;
+ T x,y;
+ point_base() {}
+ point_base(T x_, T y_) : x(x_), y(y_) {}
+ };
+ typedef point_base<int> point_i; //-----point_i
+ typedef point_base<float> point_f; //-----point_f
+ typedef point_base<double> point_d; //-----point_d
+
+ //-------------------------------------------------------------vertex_base
+ template<class T> struct vertex_base
+ {
+ typedef T value_type;
+ T x,y;
+ unsigned cmd;
+ vertex_base() {}
+ vertex_base(T x_, T y_, unsigned cmd_) : x(x_), y(y_), cmd(cmd_) {}
+ };
+ typedef vertex_base<int> vertex_i; //-----vertex_i
+ typedef vertex_base<float> vertex_f; //-----vertex_f
+ typedef vertex_base<double> vertex_d; //-----vertex_d
+
+ //----------------------------------------------------------------row_info
+ template<class T> struct row_info
+ {
+ int x1, x2;
+ T* ptr;
+ row_info() {}
+ row_info(int x1_, int x2_, T* ptr_) : x1(x1_), x2(x2_), ptr(ptr_) {}
+ };
+
+ //----------------------------------------------------------const_row_info
+ template<class T> struct const_row_info
+ {
+ int x1, x2;
+ const T* ptr;
+ const_row_info() {}
+ const_row_info(int x1_, int x2_, const T* ptr_) :
+ x1(x1_), x2(x2_), ptr(ptr_) {}
+ };
+
+ //------------------------------------------------------------is_equal_eps
+ template<class T> inline bool is_equal_eps(T v1, T v2, T epsilon)
+ {
+ bool neg1 = v1 < 0.0;
+ bool neg2 = v2 < 0.0;
+
+ if (neg1 != neg2)
+ return std::fabs(v1) < epsilon && std::fabs(v2) < epsilon;
+
+ int int1, int2;
+ std::frexp(v1, &int1);
+ std::frexp(v2, &int2);
+ int min12 = int1 < int2 ? int1 : int2;
+
+ v1 = std::ldexp(v1, -min12);
+ v2 = std::ldexp(v2, -min12);
+
+ return std::fabs(v1 - v2) < epsilon;
+ }
+}
+
+
+#endif
+
diff --git a/src/agg/agg_bezier_arc.h b/src/agg/agg_bezier_arc.h
new file mode 100644
index 000000000..cfd9308ea
--- /dev/null
+++ b/src/agg/agg_bezier_arc.h
@@ -0,0 +1,159 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Arc generator. Produces at most 4 consecutive cubic bezier curves, i.e.,
+// 4, 7, 10, or 13 vertices.
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_BEZIER_ARC_INCLUDED
+#define AGG_BEZIER_ARC_INCLUDED
+
+#include "agg_conv_transform.h"
+
+namespace agg
+{
+
+ //-----------------------------------------------------------------------
+ void arc_to_bezier(double cx, double cy, double rx, double ry,
+ double start_angle, double sweep_angle,
+ double* curve);
+
+
+ //==============================================================bezier_arc
+ //
+ // See implementation in agg_bezier_arc.cpp
+ //
+ class bezier_arc
+ {
+ public:
+ //--------------------------------------------------------------------
+ bezier_arc() : m_vertex(26), m_num_vertices(0), m_cmd(path_cmd_line_to) {}
+ bezier_arc(double x, double y,
+ double rx, double ry,
+ double start_angle,
+ double sweep_angle)
+ {
+ init(x, y, rx, ry, start_angle, sweep_angle);
+ }
+
+ //--------------------------------------------------------------------
+ void init(double x, double y,
+ double rx, double ry,
+ double start_angle,
+ double sweep_angle);
+
+ //--------------------------------------------------------------------
+ void rewind(unsigned)
+ {
+ m_vertex = 0;
+ }
+
+ //--------------------------------------------------------------------
+ unsigned vertex(double* x, double* y)
+ {
+ if(m_vertex >= m_num_vertices) return path_cmd_stop;
+ *x = m_vertices[m_vertex];
+ *y = m_vertices[m_vertex + 1];
+ m_vertex += 2;
+ return (m_vertex == 2) ? unsigned(path_cmd_move_to) : m_cmd;
+ }
+
+ // Supplementary functions. num_vertices() actually returns doubled
+ // number of vertices. That is, for 1 vertex it returns 2.
+ //--------------------------------------------------------------------
+ unsigned num_vertices() const { return m_num_vertices; }
+ const double* vertices() const { return m_vertices; }
+ double* vertices() { return m_vertices; }
+
+ private:
+ unsigned m_vertex;
+ unsigned m_num_vertices;
+ double m_vertices[26];
+ unsigned m_cmd;
+ };
+
+
+
+ //==========================================================bezier_arc_svg
+ // Compute an SVG-style bezier arc.
+ //
+ // Computes an elliptical arc from (x1, y1) to (x2, y2). The size and
+ // orientation of the ellipse are defined by two radii (rx, ry)
+ // and an x-axis-rotation, which indicates how the ellipse as a whole
+ // is rotated relative to the current coordinate system. The center
+ // (cx, cy) of the ellipse is calculated automatically to satisfy the
+ // constraints imposed by the other parameters.
+ // large-arc-flag and sweep-flag contribute to the automatic calculations
+ // and help determine how the arc is drawn.
+ class bezier_arc_svg
+ {
+ public:
+ //--------------------------------------------------------------------
+ bezier_arc_svg() : m_arc(), m_radii_ok(false) {}
+
+ bezier_arc_svg(double x1, double y1,
+ double rx, double ry,
+ double angle,
+ bool large_arc_flag,
+ bool sweep_flag,
+ double x2, double y2) :
+ m_arc(), m_radii_ok(false)
+ {
+ init(x1, y1, rx, ry, angle, large_arc_flag, sweep_flag, x2, y2);
+ }
+
+ //--------------------------------------------------------------------
+ void init(double x1, double y1,
+ double rx, double ry,
+ double angle,
+ bool large_arc_flag,
+ bool sweep_flag,
+ double x2, double y2);
+
+ //--------------------------------------------------------------------
+ bool radii_ok() const { return m_radii_ok; }
+
+ //--------------------------------------------------------------------
+ void rewind(unsigned)
+ {
+ m_arc.rewind(0);
+ }
+
+ //--------------------------------------------------------------------
+ unsigned vertex(double* x, double* y)
+ {
+ return m_arc.vertex(x, y);
+ }
+
+ // Supplementary functions. num_vertices() actually returns doubled
+ // number of vertices. That is, for 1 vertex it returns 2.
+ //--------------------------------------------------------------------
+ unsigned num_vertices() const { return m_arc.num_vertices(); }
+ const double* vertices() const { return m_arc.vertices(); }
+ double* vertices() { return m_arc.vertices(); }
+
+ private:
+ bezier_arc m_arc;
+ bool m_radii_ok;
+ };
+
+
+
+
+}
+
+
+#endif
diff --git a/src/agg/agg_clip_liang_barsky.h b/src/agg/agg_clip_liang_barsky.h
new file mode 100644
index 000000000..4b5fedbab
--- /dev/null
+++ b/src/agg/agg_clip_liang_barsky.h
@@ -0,0 +1,333 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Liang-Barsky clipping
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_CLIP_LIANG_BARSKY_INCLUDED
+#define AGG_CLIP_LIANG_BARSKY_INCLUDED
+
+#include "agg_basics.h"
+
+namespace agg
+{
+
+ //------------------------------------------------------------------------
+ enum clipping_flags_e
+ {
+ clipping_flags_x1_clipped = 4,
+ clipping_flags_x2_clipped = 1,
+ clipping_flags_y1_clipped = 8,
+ clipping_flags_y2_clipped = 2,
+ clipping_flags_x_clipped = clipping_flags_x1_clipped | clipping_flags_x2_clipped,
+ clipping_flags_y_clipped = clipping_flags_y1_clipped | clipping_flags_y2_clipped
+ };
+
+ //----------------------------------------------------------clipping_flags
+ // Determine the clipping code of the vertex according to the
+ // Cyrus-Beck line clipping algorithm
+ //
+ // | |
+ // 0110 | 0010 | 0011
+ // | |
+ // -------+--------+-------- clip_box.y2
+ // | |
+ // 0100 | 0000 | 0001
+ // | |
+ // -------+--------+-------- clip_box.y1
+ // | |
+ // 1100 | 1000 | 1001
+ // | |
+ // clip_box.x1 clip_box.x2
+ //
+ //
+ template<class T>
+ inline unsigned clipping_flags(T x, T y, const rect_base<T>& clip_box)
+ {
+ return (x > clip_box.x2) |
+ ((y > clip_box.y2) << 1) |
+ ((x < clip_box.x1) << 2) |
+ ((y < clip_box.y1) << 3);
+ }
+
+ //--------------------------------------------------------clipping_flags_x
+ template<class T>
+ inline unsigned clipping_flags_x(T x, const rect_base<T>& clip_box)
+ {
+ return (x > clip_box.x2) | ((x < clip_box.x1) << 2);
+ }
+
+
+ //--------------------------------------------------------clipping_flags_y
+ template<class T>
+ inline unsigned clipping_flags_y(T y, const rect_base<T>& clip_box)
+ {
+ return ((y > clip_box.y2) << 1) | ((y < clip_box.y1) << 3);
+ }
+
+
+ //-------------------------------------------------------clip_liang_barsky
+ template<class T>
+ inline unsigned clip_liang_barsky(T x1, T y1, T x2, T y2,
+ const rect_base<T>& clip_box,
+ T* x, T* y)
+ {
+ const double nearzero = 1e-30;
+
+ double deltax = x2 - x1;
+ double deltay = y2 - y1;
+ double xin;
+ double xout;
+ double yin;
+ double yout;
+ double tinx;
+ double tiny;
+ double toutx;
+ double touty;
+ double tin1;
+ double tin2;
+ double tout1;
+ unsigned np = 0;
+
+ if(deltax == 0.0)
+ {
+ // bump off of the vertical
+ deltax = (x1 > clip_box.x1) ? -nearzero : nearzero;
+ }
+
+ if(deltay == 0.0)
+ {
+ // bump off of the horizontal
+ deltay = (y1 > clip_box.y1) ? -nearzero : nearzero;
+ }
+
+ if(deltax > 0.0)
+ {
+ // points to right
+ xin = clip_box.x1;
+ xout = clip_box.x2;
+ }
+ else
+ {
+ xin = clip_box.x2;
+ xout = clip_box.x1;
+ }
+
+ if(deltay > 0.0)
+ {
+ // points up
+ yin = clip_box.y1;
+ yout = clip_box.y2;
+ }
+ else
+ {
+ yin = clip_box.y2;
+ yout = clip_box.y1;
+ }
+
+ tinx = (xin - x1) / deltax;
+ tiny = (yin - y1) / deltay;
+
+ if (tinx < tiny)
+ {
+ // hits x first
+ tin1 = tinx;
+ tin2 = tiny;
+ }
+ else
+ {
+ // hits y first
+ tin1 = tiny;
+ tin2 = tinx;
+ }
+
+ if(tin1 <= 1.0)
+ {
+ if(0.0 < tin1)
+ {
+ *x++ = (T)xin;
+ *y++ = (T)yin;
+ ++np;
+ }
+
+ if(tin2 <= 1.0)
+ {
+ toutx = (xout - x1) / deltax;
+ touty = (yout - y1) / deltay;
+
+ tout1 = (toutx < touty) ? toutx : touty;
+
+ if(tin2 > 0.0 || tout1 > 0.0)
+ {
+ if(tin2 <= tout1)
+ {
+ if(tin2 > 0.0)
+ {
+ if(tinx > tiny)
+ {
+ *x++ = (T)xin;
+ *y++ = (T)(y1 + tinx * deltay);
+ }
+ else
+ {
+ *x++ = (T)(x1 + tiny * deltax);
+ *y++ = (T)yin;
+ }
+ ++np;
+ }
+
+ if(tout1 < 1.0)
+ {
+ if(toutx < touty)
+ {
+ *x++ = (T)xout;
+ *y++ = (T)(y1 + toutx * deltay);
+ }
+ else
+ {
+ *x++ = (T)(x1 + touty * deltax);
+ *y++ = (T)yout;
+ }
+ }
+ else
+ {
+ *x++ = x2;
+ *y++ = y2;
+ }
+ ++np;
+ }
+ else
+ {
+ if(tinx > tiny)
+ {
+ *x++ = (T)xin;
+ *y++ = (T)yout;
+ }
+ else
+ {
+ *x++ = (T)xout;
+ *y++ = (T)yin;
+ }
+ ++np;
+ }
+ }
+ }
+ }
+ return np;
+ }
+
+
+ //----------------------------------------------------------------------------
+ template<class T>
+ bool clip_move_point(T x1, T y1, T x2, T y2,
+ const rect_base<T>& clip_box,
+ T* x, T* y, unsigned flags)
+ {
+ T bound;
+
+ if(flags & clipping_flags_x_clipped)
+ {
+ if(x1 == x2)
+ {
+ return false;
+ }
+ bound = (flags & clipping_flags_x1_clipped) ? clip_box.x1 : clip_box.x2;
+ *y = (T)(double(bound - x1) * (y2 - y1) / (x2 - x1) + y1);
+ *x = bound;
+ }
+
+ flags = clipping_flags_y(*y, clip_box);
+ if(flags & clipping_flags_y_clipped)
+ {
+ if(y1 == y2)
+ {
+ return false;
+ }
+ bound = (flags & clipping_flags_y1_clipped) ? clip_box.y1 : clip_box.y2;
+ *x = (T)(double(bound - y1) * (x2 - x1) / (y2 - y1) + x1);
+ *y = bound;
+ }
+ return true;
+ }
+
+ //-------------------------------------------------------clip_line_segment
+ // Returns: ret >= 4 - Fully clipped
+ // (ret & 1) != 0 - First point has been moved
+ // (ret & 2) != 0 - Second point has been moved
+ //
+ template<class T>
+ unsigned clip_line_segment(T* x1, T* y1, T* x2, T* y2,
+ const rect_base<T>& clip_box)
+ {
+ unsigned f1 = clipping_flags(*x1, *y1, clip_box);
+ unsigned f2 = clipping_flags(*x2, *y2, clip_box);
+ unsigned ret = 0;
+
+ if((f2 | f1) == 0)
+ {
+ // Fully visible
+ return 0;
+ }
+
+ if((f1 & clipping_flags_x_clipped) != 0 &&
+ (f1 & clipping_flags_x_clipped) == (f2 & clipping_flags_x_clipped))
+ {
+ // Fully clipped
+ return 4;
+ }
+
+ if((f1 & clipping_flags_y_clipped) != 0 &&
+ (f1 & clipping_flags_y_clipped) == (f2 & clipping_flags_y_clipped))
+ {
+ // Fully clipped
+ return 4;
+ }
+
+ T tx1 = *x1;
+ T ty1 = *y1;
+ T tx2 = *x2;
+ T ty2 = *y2;
+ if(f1)
+ {
+ if(!clip_move_point(tx1, ty1, tx2, ty2, clip_box, x1, y1, f1))
+ {
+ return 4;
+ }
+ if(*x1 == *x2 && *y1 == *y2)
+ {
+ return 4;
+ }
+ ret |= 1;
+ }
+ if(f2)
+ {
+ if(!clip_move_point(tx1, ty1, tx2, ty2, clip_box, x2, y2, f2))
+ {
+ return 4;
+ }
+ if(*x1 == *x2 && *y1 == *y2)
+ {
+ return 4;
+ }
+ ret |= 2;
+ }
+ return ret;
+ }
+
+
+}
+
+
+#endif
diff --git a/src/agg/agg_color_gray.h b/src/agg/agg_color_gray.h
new file mode 100644
index 000000000..f66588c11
--- /dev/null
+++ b/src/agg/agg_color_gray.h
@@ -0,0 +1,1047 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for high precision colors has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+//
+// color types gray8, gray16
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_COLOR_GRAY_INCLUDED
+#define AGG_COLOR_GRAY_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_color_rgba.h"
+
+namespace agg
+{
+
+ //===================================================================gray8
+ template<class Colorspace>
+ struct gray8T
+ {
+ typedef int8u value_type;
+ typedef int32u calc_type;
+ typedef int32 long_type;
+ enum base_scale_e
+ {
+ base_shift = 8,
+ base_scale = 1 << base_shift,
+ base_mask = base_scale - 1,
+ base_MSB = 1 << (base_shift - 1)
+ };
+ typedef gray8T self_type;
+
+ value_type v;
+ value_type a;
+
+ static value_type luminance(const rgba& c)
+ {
+ // Calculate grayscale value as per ITU-R BT.709.
+ return value_type(uround((0.2126 * c.r + 0.7152 * c.g + 0.0722 * c.b) * base_mask));
+ }
+
+ static value_type luminance(const rgba8& c)
+ {
+ // Calculate grayscale value as per ITU-R BT.709.
+ return value_type((55u * c.r + 184u * c.g + 18u * c.b) >> 8);
+ }
+
+ static void convert(gray8T<linear>& dst, const gray8T<sRGB>& src)
+ {
+ dst.v = sRGB_conv<value_type>::rgb_from_sRGB(src.v);
+ dst.a = src.a;
+ }
+
+ static void convert(gray8T<sRGB>& dst, const gray8T<linear>& src)
+ {
+ dst.v = sRGB_conv<value_type>::rgb_to_sRGB(src.v);
+ dst.a = src.a;
+ }
+
+ static void convert(gray8T<linear>& dst, const rgba8& src)
+ {
+ dst.v = luminance(src);
+ dst.a = src.a;
+ }
+
+ static void convert(gray8T<linear>& dst, const srgba8& src)
+ {
+ // The RGB weights are only valid for linear values.
+ convert(dst, rgba8(src));
+ }
+
+ static void convert(gray8T<sRGB>& dst, const rgba8& src)
+ {
+ dst.v = sRGB_conv<value_type>::rgb_to_sRGB(luminance(src));
+ dst.a = src.a;
+ }
+
+ static void convert(gray8T<sRGB>& dst, const srgba8& src)
+ {
+ // The RGB weights are only valid for linear values.
+ convert(dst, rgba8(src));
+ }
+
+ //--------------------------------------------------------------------
+ gray8T() {}
+
+ //--------------------------------------------------------------------
+ explicit gray8T(unsigned v_, unsigned a_ = base_mask) :
+ v(int8u(v_)), a(int8u(a_)) {}
+
+ //--------------------------------------------------------------------
+ gray8T(const self_type& c, unsigned a_) :
+ v(c.v), a(value_type(a_)) {}
+
+ //--------------------------------------------------------------------
+ gray8T(const rgba& c) :
+ v(luminance(c)),
+ a(value_type(uround(c.a * base_mask))) {}
+
+ //--------------------------------------------------------------------
+ template<class T>
+ gray8T(const gray8T<T>& c)
+ {
+ convert(*this, c);
+ }
+
+ //--------------------------------------------------------------------
+ template<class T>
+ gray8T(const rgba8T<T>& c)
+ {
+ convert(*this, c);
+ }
+
+ //--------------------------------------------------------------------
+ template<class T>
+ T convert_from_sRGB() const
+ {
+ typename T::value_type y = sRGB_conv<typename T::value_type>::rgb_from_sRGB(v);
+ return T(y, y, y, sRGB_conv<typename T::value_type>::alpha_from_sRGB(a));
+ }
+
+ template<class T>
+ T convert_to_sRGB() const
+ {
+ typename T::value_type y = sRGB_conv<typename T::value_type>::rgb_to_sRGB(v);
+ return T(y, y, y, sRGB_conv<typename T::value_type>::alpha_to_sRGB(a));
+ }
+
+ //--------------------------------------------------------------------
+ rgba8 make_rgba8(const linear&) const
+ {
+ return rgba8(v, v, v, a);
+ }
+
+ rgba8 make_rgba8(const sRGB&) const
+ {
+ return convert_from_sRGB<srgba8>();
+ }
+
+ operator rgba8() const
+ {
+ return make_rgba8(Colorspace());
+ }
+
+ //--------------------------------------------------------------------
+ srgba8 make_srgba8(const linear&) const
+ {
+ return convert_to_sRGB<rgba8>();
+ }
+
+ srgba8 make_srgba8(const sRGB&) const
+ {
+ return srgba8(v, v, v, a);
+ }
+
+ operator srgba8() const
+ {
+ return make_rgba8(Colorspace());
+ }
+
+ //--------------------------------------------------------------------
+ rgba16 make_rgba16(const linear&) const
+ {
+ rgba16::value_type rgb = (v << 8) | v;
+ return rgba16(rgb, rgb, rgb, (a << 8) | a);
+ }
+
+ rgba16 make_rgba16(const sRGB&) const
+ {
+ return convert_from_sRGB<rgba16>();
+ }
+
+ operator rgba16() const
+ {
+ return make_rgba16(Colorspace());
+ }
+
+ //--------------------------------------------------------------------
+ rgba32 make_rgba32(const linear&) const
+ {
+ rgba32::value_type v32 = v / 255.0f;
+ return rgba32(v32, v32, v32, a / 255.0f);
+ }
+
+ rgba32 make_rgba32(const sRGB&) const
+ {
+ return convert_from_sRGB<rgba32>();
+ }
+
+ operator rgba32() const
+ {
+ return make_rgba32(Colorspace());
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE double to_double(value_type a)
+ {
+ return double(a) / base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type from_double(double a)
+ {
+ return value_type(uround(a * base_mask));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type empty_value()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type full_value()
+ {
+ return base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_transparent() const
+ {
+ return a == 0;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_opaque() const
+ {
+ return a == base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, exact over int8u.
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
+ {
+ calc_type t = a * b + base_MSB;
+ return value_type(((t >> base_shift) + t) >> base_shift);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
+ {
+ if (a * b == 0)
+ {
+ return 0;
+ }
+ else if (a >= b)
+ {
+ return base_mask;
+ }
+ else return value_type((a * base_mask + (b >> 1)) / b);
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downscale(T a)
+ {
+ return a >> base_shift;
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downshift(T a, unsigned n)
+ {
+ return a >> n;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, exact over int8u.
+ // Specifically for multiplying a color component by a cover.
+ static AGG_INLINE value_type mult_cover(value_type a, value_type b)
+ {
+ return multiply(a, b);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
+ {
+ return multiply(b, a);
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a, assuming q is premultiplied by a.
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
+ {
+ return p + q - multiply(p, a);
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a.
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
+ {
+ int t = (q - p) * a + base_MSB - (p > q);
+ return value_type(p + (((t >> base_shift) + t) >> base_shift));
+ }
+
+ //--------------------------------------------------------------------
+ self_type& clear()
+ {
+ v = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ else if (a_ > 1) a = 1;
+ else a = (value_type)uround(a_ * double(base_mask));
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return double(a) / double(base_mask);
+ }
+
+ //--------------------------------------------------------------------
+ self_type& premultiply()
+ {
+ if (a < base_mask)
+ {
+ if (a == 0) v = 0;
+ else v = multiply(v, a);
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& demultiply()
+ {
+ if (a < base_mask)
+ {
+ if (a == 0)
+ {
+ v = 0;
+ }
+ else
+ {
+ calc_type v_ = (calc_type(v) * base_mask) / a;
+ v = value_type((v_ > base_mask) ? (value_type)base_mask : v_);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type gradient(self_type c, double k) const
+ {
+ self_type ret;
+ calc_type ik = uround(k * base_scale);
+ ret.v = lerp(v, c.v, ik);
+ ret.a = lerp(a, c.a, ik);
+ return ret;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void add(const self_type& c, unsigned cover)
+ {
+ calc_type cv, ca;
+ if (cover == cover_mask)
+ {
+ if (c.a == base_mask)
+ {
+ *this = c;
+ return;
+ }
+ else
+ {
+ cv = v + c.v;
+ ca = a + c.a;
+ }
+ }
+ else
+ {
+ cv = v + mult_cover(c.v, cover);
+ ca = a + mult_cover(c.a, cover);
+ }
+ v = (value_type)((cv > calc_type(base_mask)) ? calc_type(base_mask) : cv);
+ a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
+ }
+
+ //--------------------------------------------------------------------
+ static self_type no_color() { return self_type(0,0); }
+ };
+
+ typedef gray8T<linear> gray8;
+ typedef gray8T<sRGB> sgray8;
+
+
+ //==================================================================gray16
+ struct gray16
+ {
+ typedef int16u value_type;
+ typedef int32u calc_type;
+ typedef int64 long_type;
+ enum base_scale_e
+ {
+ base_shift = 16,
+ base_scale = 1 << base_shift,
+ base_mask = base_scale - 1,
+ base_MSB = 1 << (base_shift - 1)
+ };
+ typedef gray16 self_type;
+
+ value_type v;
+ value_type a;
+
+ static value_type luminance(const rgba& c)
+ {
+ // Calculate grayscale value as per ITU-R BT.709.
+ return value_type(uround((0.2126 * c.r + 0.7152 * c.g + 0.0722 * c.b) * base_mask));
+ }
+
+ static value_type luminance(const rgba16& c)
+ {
+ // Calculate grayscale value as per ITU-R BT.709.
+ return value_type((13933u * c.r + 46872u * c.g + 4732u * c.b) >> 16);
+ }
+
+ static value_type luminance(const rgba8& c)
+ {
+ return luminance(rgba16(c));
+ }
+
+ static value_type luminance(const srgba8& c)
+ {
+ return luminance(rgba16(c));
+ }
+
+ static value_type luminance(const rgba32& c)
+ {
+ return luminance(rgba(c));
+ }
+
+ //--------------------------------------------------------------------
+ gray16() {}
+
+ //--------------------------------------------------------------------
+ explicit gray16(unsigned v_, unsigned a_ = base_mask) :
+ v(int16u(v_)), a(int16u(a_)) {}
+
+ //--------------------------------------------------------------------
+ gray16(const self_type& c, unsigned a_) :
+ v(c.v), a(value_type(a_)) {}
+
+ //--------------------------------------------------------------------
+ gray16(const rgba& c) :
+ v(luminance(c)),
+ a((value_type)uround(c.a * double(base_mask))) {}
+
+ //--------------------------------------------------------------------
+ gray16(const rgba8& c) :
+ v(luminance(c)),
+ a((value_type(c.a) << 8) | c.a) {}
+
+ //--------------------------------------------------------------------
+ gray16(const srgba8& c) :
+ v(luminance(c)),
+ a((value_type(c.a) << 8) | c.a) {}
+
+ //--------------------------------------------------------------------
+ gray16(const rgba16& c) :
+ v(luminance(c)),
+ a(c.a) {}
+
+ //--------------------------------------------------------------------
+ gray16(const gray8& c) :
+ v((value_type(c.v) << 8) | c.v),
+ a((value_type(c.a) << 8) | c.a) {}
+
+ //--------------------------------------------------------------------
+ gray16(const sgray8& c) :
+ v(sRGB_conv<value_type>::rgb_from_sRGB(c.v)),
+ a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
+
+ //--------------------------------------------------------------------
+ operator rgba8() const
+ {
+ return rgba8(v >> 8, v >> 8, v >> 8, a >> 8);
+ }
+
+ //--------------------------------------------------------------------
+ operator srgba8() const
+ {
+ value_type y = sRGB_conv<value_type>::rgb_to_sRGB(v);
+ return srgba8(y, y, y, sRGB_conv<value_type>::alpha_to_sRGB(a));
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba16() const
+ {
+ return rgba16(v, v, v, a);
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba32() const
+ {
+ rgba32::value_type v32 = v / 65535.0f;
+ return rgba32(v32, v32, v32, a / 65535.0f);
+ }
+
+ //--------------------------------------------------------------------
+ operator gray8() const
+ {
+ return gray8(v >> 8, a >> 8);
+ }
+
+ //--------------------------------------------------------------------
+ operator sgray8() const
+ {
+ return sgray8(
+ sRGB_conv<value_type>::rgb_to_sRGB(v),
+ sRGB_conv<value_type>::alpha_to_sRGB(a));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE double to_double(value_type a)
+ {
+ return double(a) / base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type from_double(double a)
+ {
+ return value_type(uround(a * base_mask));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type empty_value()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type full_value()
+ {
+ return base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_transparent() const
+ {
+ return a == 0;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_opaque() const
+ {
+ return a == base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, exact over int16u.
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
+ {
+ calc_type t = a * b + base_MSB;
+ return value_type(((t >> base_shift) + t) >> base_shift);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
+ {
+ if (a * b == 0)
+ {
+ return 0;
+ }
+ else if (a >= b)
+ {
+ return base_mask;
+ }
+ else return value_type((a * base_mask + (b >> 1)) / b);
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downscale(T a)
+ {
+ return a >> base_shift;
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downshift(T a, unsigned n)
+ {
+ return a >> n;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, almost exact over int16u.
+ // Specifically for multiplying a color component by a cover.
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
+ {
+ return multiply(a, b << 8 | b);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
+ {
+ return mult_cover(b, a) >> 8;
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a, assuming q is premultiplied by a.
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
+ {
+ return p + q - multiply(p, a);
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a.
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
+ {
+ int t = (q - p) * a + base_MSB - (p > q);
+ return value_type(p + (((t >> base_shift) + t) >> base_shift));
+ }
+
+ //--------------------------------------------------------------------
+ self_type& clear()
+ {
+ v = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ else if(a_ > 1) a = 1;
+ else a = (value_type)uround(a_ * double(base_mask));
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return double(a) / double(base_mask);
+ }
+
+
+ //--------------------------------------------------------------------
+ self_type& premultiply()
+ {
+ if (a < base_mask)
+ {
+ if(a == 0) v = 0;
+ else v = multiply(v, a);
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& demultiply()
+ {
+ if (a < base_mask)
+ {
+ if (a == 0)
+ {
+ v = 0;
+ }
+ else
+ {
+ calc_type v_ = (calc_type(v) * base_mask) / a;
+ v = (v_ > base_mask) ? value_type(base_mask) : value_type(v_);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type gradient(self_type c, double k) const
+ {
+ self_type ret;
+ calc_type ik = uround(k * base_scale);
+ ret.v = lerp(v, c.v, ik);
+ ret.a = lerp(a, c.a, ik);
+ return ret;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void add(const self_type& c, unsigned cover)
+ {
+ calc_type cv, ca;
+ if (cover == cover_mask)
+ {
+ if (c.a == base_mask)
+ {
+ *this = c;
+ return;
+ }
+ else
+ {
+ cv = v + c.v;
+ ca = a + c.a;
+ }
+ }
+ else
+ {
+ cv = v + mult_cover(c.v, cover);
+ ca = a + mult_cover(c.a, cover);
+ }
+ v = (value_type)((cv > calc_type(base_mask)) ? calc_type(base_mask) : cv);
+ a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
+ }
+
+ //--------------------------------------------------------------------
+ static self_type no_color() { return self_type(0,0); }
+ };
+
+
+ //===================================================================gray32
+ struct gray32
+ {
+ typedef float value_type;
+ typedef double calc_type;
+ typedef double long_type;
+ typedef gray32 self_type;
+
+ value_type v;
+ value_type a;
+
+ // Calculate grayscale value as per ITU-R BT.709.
+ static value_type luminance(double r, double g, double b)
+ {
+ return value_type(0.2126 * r + 0.7152 * g + 0.0722 * b);
+ }
+
+ static value_type luminance(const rgba& c)
+ {
+ return luminance(c.r, c.g, c.b);
+ }
+
+ static value_type luminance(const rgba32& c)
+ {
+ return luminance(c.r, c.g, c.b);
+ }
+
+ static value_type luminance(const rgba8& c)
+ {
+ return luminance(c.r / 255.0, c.g / 255.0, c.g / 255.0);
+ }
+
+ static value_type luminance(const rgba16& c)
+ {
+ return luminance(c.r / 65535.0, c.g / 65535.0, c.g / 65535.0);
+ }
+
+ //--------------------------------------------------------------------
+ gray32() {}
+
+ //--------------------------------------------------------------------
+ explicit gray32(value_type v_, value_type a_ = 1) :
+ v(v_), a(a_) {}
+
+ //--------------------------------------------------------------------
+ gray32(const self_type& c, value_type a_) :
+ v(c.v), a(a_) {}
+
+ //--------------------------------------------------------------------
+ gray32(const rgba& c) :
+ v(luminance(c)),
+ a(value_type(c.a)) {}
+
+ //--------------------------------------------------------------------
+ gray32(const rgba8& c) :
+ v(luminance(c)),
+ a(value_type(c.a / 255.0)) {}
+
+ //--------------------------------------------------------------------
+ gray32(const srgba8& c) :
+ v(luminance(rgba32(c))),
+ a(value_type(c.a / 255.0)) {}
+
+ //--------------------------------------------------------------------
+ gray32(const rgba16& c) :
+ v(luminance(c)),
+ a(value_type(c.a / 65535.0)) {}
+
+ //--------------------------------------------------------------------
+ gray32(const rgba32& c) :
+ v(luminance(c)),
+ a(value_type(c.a)) {}
+
+ //--------------------------------------------------------------------
+ gray32(const gray8& c) :
+ v(value_type(c.v / 255.0)),
+ a(value_type(c.a / 255.0)) {}
+
+ //--------------------------------------------------------------------
+ gray32(const sgray8& c) :
+ v(sRGB_conv<value_type>::rgb_from_sRGB(c.v)),
+ a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
+
+ //--------------------------------------------------------------------
+ gray32(const gray16& c) :
+ v(value_type(c.v / 65535.0)),
+ a(value_type(c.a / 65535.0)) {}
+
+ //--------------------------------------------------------------------
+ operator rgba() const
+ {
+ return rgba(v, v, v, a);
+ }
+
+ //--------------------------------------------------------------------
+ operator gray8() const
+ {
+ return gray8(uround(v * 255.0), uround(a * 255.0));
+ }
+
+ //--------------------------------------------------------------------
+ operator sgray8() const
+ {
+ // Return (non-premultiplied) sRGB values.
+ return sgray8(
+ sRGB_conv<value_type>::rgb_to_sRGB(v),
+ sRGB_conv<value_type>::alpha_to_sRGB(a));
+ }
+
+ //--------------------------------------------------------------------
+ operator gray16() const
+ {
+ return gray16(uround(v * 65535.0), uround(a * 65535.0));
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba8() const
+ {
+ rgba8::value_type y = uround(v * 255.0);
+ return rgba8(y, y, y, uround(a * 255.0));
+ }
+
+ //--------------------------------------------------------------------
+ operator srgba8() const
+ {
+ srgba8::value_type y = sRGB_conv<value_type>::rgb_to_sRGB(v);
+ return srgba8(y, y, y, sRGB_conv<value_type>::alpha_to_sRGB(a));
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba16() const
+ {
+ rgba16::value_type y = uround(v * 65535.0);
+ return rgba16(y, y, y, uround(a * 65535.0));
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba32() const
+ {
+ return rgba32(v, v, v, a);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE double to_double(value_type a)
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type from_double(double a)
+ {
+ return value_type(a);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type empty_value()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type full_value()
+ {
+ return 1;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_transparent() const
+ {
+ return a <= 0;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_opaque() const
+ {
+ return a >= 1;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type invert(value_type x)
+ {
+ return 1 - x;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
+ {
+ return value_type(a * b);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
+ {
+ return (b == 0) ? 0 : value_type(a / b);
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downscale(T a)
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downshift(T a, unsigned n)
+ {
+ return n > 0 ? a / (1 << n) : a;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
+ {
+ return value_type(a * b / cover_mask);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
+ {
+ return cover_type(uround(a * b));
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a, assuming q is premultiplied by a.
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
+ {
+ return (1 - a) * p + q; // more accurate than "p + q - p * a"
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a.
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
+ {
+ // The form "p + a * (q - p)" avoids a multiplication, but may produce an
+ // inaccurate result. For example, "p + (q - p)" may not be exactly equal
+ // to q. Therefore, stick to the basic expression, which at least produces
+ // the correct result at either extreme.
+ return (1 - a) * p + a * q;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& clear()
+ {
+ v = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ else if (a_ > 1) a = 1;
+ else a = value_type(a_);
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return a;
+ }
+
+
+ //--------------------------------------------------------------------
+ self_type& premultiply()
+ {
+ if (a < 0) v = 0;
+ else if(a < 1) v *= a;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& demultiply()
+ {
+ if (a < 0) v = 0;
+ else if (a < 1) v /= a;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type gradient(self_type c, double k) const
+ {
+ return self_type(
+ value_type(v + (c.v - v) * k),
+ value_type(a + (c.a - a) * k));
+ }
+
+ //--------------------------------------------------------------------
+ static self_type no_color() { return self_type(0,0); }
+ };
+}
+
+
+
+
+#endif
diff --git a/src/agg/agg_color_rgba.h b/src/agg/agg_color_rgba.h
new file mode 100644
index 000000000..ff33a1179
--- /dev/null
+++ b/src/agg/agg_color_rgba.h
@@ -0,0 +1,1353 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+//
+// Adaptation for high precision colors has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_COLOR_RGBA_INCLUDED
+#define AGG_COLOR_RGBA_INCLUDED
+
+#include <math.h>
+#include "agg_basics.h"
+#include "agg_gamma_lut.h"
+
+namespace agg
+{
+ // Supported component orders for RGB and RGBA pixel formats
+ //=======================================================================
+ struct order_rgb { enum rgb_e { R=0, G=1, B=2, N=3 }; };
+ struct order_bgr { enum bgr_e { B=0, G=1, R=2, N=3 }; };
+ struct order_rgba { enum rgba_e { R=0, G=1, B=2, A=3, N=4 }; };
+ struct order_argb { enum argb_e { A=0, R=1, G=2, B=3, N=4 }; };
+ struct order_abgr { enum abgr_e { A=0, B=1, G=2, R=3, N=4 }; };
+ struct order_bgra { enum bgra_e { B=0, G=1, R=2, A=3, N=4 }; };
+
+ // Colorspace tag types.
+ struct linear {};
+ struct sRGB {};
+
+ //====================================================================rgba
+ struct rgba
+ {
+ typedef double value_type;
+
+ double r;
+ double g;
+ double b;
+ double a;
+
+ //--------------------------------------------------------------------
+ rgba() {}
+
+ //--------------------------------------------------------------------
+ rgba(double r_, double g_, double b_, double a_=1.0) :
+ r(r_), g(g_), b(b_), a(a_) {}
+
+ //--------------------------------------------------------------------
+ rgba(const rgba& c, double a_) : r(c.r), g(c.g), b(c.b), a(a_) {}
+
+ //--------------------------------------------------------------------
+ rgba& clear()
+ {
+ r = g = b = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ rgba& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ rgba& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ else if (a_ > 1) a = 1;
+ else a = a_;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ rgba& premultiply()
+ {
+ r *= a;
+ g *= a;
+ b *= a;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ rgba& premultiply(double a_)
+ {
+ if (a <= 0 || a_ <= 0)
+ {
+ r = g = b = a = 0;
+ }
+ else
+ {
+ a_ /= a;
+ r *= a_;
+ g *= a_;
+ b *= a_;
+ a = a_;
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ rgba& demultiply()
+ {
+ if (a == 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ double a_ = 1.0 / a;
+ r *= a_;
+ g *= a_;
+ b *= a_;
+ }
+ return *this;
+ }
+
+
+ //--------------------------------------------------------------------
+ rgba gradient(rgba c, double k) const
+ {
+ rgba ret;
+ ret.r = r + (c.r - r) * k;
+ ret.g = g + (c.g - g) * k;
+ ret.b = b + (c.b - b) * k;
+ ret.a = a + (c.a - a) * k;
+ return ret;
+ }
+
+ rgba& operator+=(const rgba& c)
+ {
+ r += c.r;
+ g += c.g;
+ b += c.b;
+ a += c.a;
+ return *this;
+ }
+
+ rgba& operator*=(double k)
+ {
+ r *= k;
+ g *= k;
+ b *= k;
+ a *= k;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ static rgba no_color() { return rgba(0,0,0,0); }
+
+ //--------------------------------------------------------------------
+ static rgba from_wavelength(double wl, double gamma = 1.0);
+
+ //--------------------------------------------------------------------
+ explicit rgba(double wavelen, double gamma=1.0)
+ {
+ *this = from_wavelength(wavelen, gamma);
+ }
+
+ };
+
+ inline rgba operator+(const rgba& a, const rgba& b)
+ {
+ return rgba(a) += b;
+ }
+
+ inline rgba operator*(const rgba& a, double b)
+ {
+ return rgba(a) *= b;
+ }
+
+ //------------------------------------------------------------------------
+ inline rgba rgba::from_wavelength(double wl, double gamma)
+ {
+ rgba t(0.0, 0.0, 0.0);
+
+ if (wl >= 380.0 && wl <= 440.0)
+ {
+ t.r = -1.0 * (wl - 440.0) / (440.0 - 380.0);
+ t.b = 1.0;
+ }
+ else if (wl >= 440.0 && wl <= 490.0)
+ {
+ t.g = (wl - 440.0) / (490.0 - 440.0);
+ t.b = 1.0;
+ }
+ else if (wl >= 490.0 && wl <= 510.0)
+ {
+ t.g = 1.0;
+ t.b = -1.0 * (wl - 510.0) / (510.0 - 490.0);
+ }
+ else if (wl >= 510.0 && wl <= 580.0)
+ {
+ t.r = (wl - 510.0) / (580.0 - 510.0);
+ t.g = 1.0;
+ }
+ else if (wl >= 580.0 && wl <= 645.0)
+ {
+ t.r = 1.0;
+ t.g = -1.0 * (wl - 645.0) / (645.0 - 580.0);
+ }
+ else if (wl >= 645.0 && wl <= 780.0)
+ {
+ t.r = 1.0;
+ }
+
+ double s = 1.0;
+ if (wl > 700.0) s = 0.3 + 0.7 * (780.0 - wl) / (780.0 - 700.0);
+ else if (wl < 420.0) s = 0.3 + 0.7 * (wl - 380.0) / (420.0 - 380.0);
+
+ t.r = pow(t.r * s, gamma);
+ t.g = pow(t.g * s, gamma);
+ t.b = pow(t.b * s, gamma);
+ return t;
+ }
+
+ inline rgba rgba_pre(double r, double g, double b, double a)
+ {
+ return rgba(r, g, b, a).premultiply();
+ }
+
+
+ //===================================================================rgba8
+ template<class Colorspace>
+ struct rgba8T
+ {
+ typedef int8u value_type;
+ typedef int32u calc_type;
+ typedef int32 long_type;
+ enum base_scale_e
+ {
+ base_shift = 8,
+ base_scale = 1 << base_shift,
+ base_mask = base_scale - 1,
+ base_MSB = 1 << (base_shift - 1)
+ };
+ typedef rgba8T self_type;
+
+
+ value_type r;
+ value_type g;
+ value_type b;
+ value_type a;
+
+ static void convert(rgba8T<linear>& dst, const rgba8T<sRGB>& src)
+ {
+ dst.r = sRGB_conv<value_type>::rgb_from_sRGB(src.r);
+ dst.g = sRGB_conv<value_type>::rgb_from_sRGB(src.g);
+ dst.b = sRGB_conv<value_type>::rgb_from_sRGB(src.b);
+ dst.a = src.a;
+ }
+
+ static void convert(rgba8T<sRGB>& dst, const rgba8T<linear>& src)
+ {
+ dst.r = sRGB_conv<value_type>::rgb_to_sRGB(src.r);
+ dst.g = sRGB_conv<value_type>::rgb_to_sRGB(src.g);
+ dst.b = sRGB_conv<value_type>::rgb_to_sRGB(src.b);
+ dst.a = src.a;
+ }
+
+ static void convert(rgba8T<linear>& dst, const rgba& src)
+ {
+ dst.r = value_type(uround(src.r * base_mask));
+ dst.g = value_type(uround(src.g * base_mask));
+ dst.b = value_type(uround(src.b * base_mask));
+ dst.a = value_type(uround(src.a * base_mask));
+ }
+
+ static void convert(rgba8T<sRGB>& dst, const rgba& src)
+ {
+ // Use the "float" table.
+ dst.r = sRGB_conv<float>::rgb_to_sRGB(float(src.r));
+ dst.g = sRGB_conv<float>::rgb_to_sRGB(float(src.g));
+ dst.b = sRGB_conv<float>::rgb_to_sRGB(float(src.b));
+ dst.a = sRGB_conv<float>::alpha_to_sRGB(float(src.a));
+ }
+
+ static void convert(rgba& dst, const rgba8T<linear>& src)
+ {
+ dst.r = src.r / 255.0;
+ dst.g = src.g / 255.0;
+ dst.b = src.b / 255.0;
+ dst.a = src.a / 255.0;
+ }
+
+ static void convert(rgba& dst, const rgba8T<sRGB>& src)
+ {
+ // Use the "float" table.
+ dst.r = sRGB_conv<float>::rgb_from_sRGB(src.r);
+ dst.g = sRGB_conv<float>::rgb_from_sRGB(src.g);
+ dst.b = sRGB_conv<float>::rgb_from_sRGB(src.b);
+ dst.a = sRGB_conv<float>::alpha_from_sRGB(src.a);
+ }
+
+ //--------------------------------------------------------------------
+ rgba8T() {}
+
+ //--------------------------------------------------------------------
+ rgba8T(unsigned r_, unsigned g_, unsigned b_, unsigned a_ = base_mask) :
+ r(value_type(r_)),
+ g(value_type(g_)),
+ b(value_type(b_)),
+ a(value_type(a_)) {}
+
+ //--------------------------------------------------------------------
+ rgba8T(const rgba& c)
+ {
+ convert(*this, c);
+ }
+
+ //--------------------------------------------------------------------
+ rgba8T(const self_type& c, unsigned a_) :
+ r(c.r), g(c.g), b(c.b), a(value_type(a_)) {}
+
+ //--------------------------------------------------------------------
+ template<class T>
+ rgba8T(const rgba8T<T>& c)
+ {
+ convert(*this, c);
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba() const
+ {
+ rgba c;
+ convert(c, *this);
+ return c;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE double to_double(value_type a)
+ {
+ return double(a) / base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type from_double(double a)
+ {
+ return value_type(uround(a * base_mask));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type empty_value()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type full_value()
+ {
+ return base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_transparent() const
+ {
+ return a == 0;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_opaque() const
+ {
+ return a == base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type invert(value_type x)
+ {
+ return base_mask - x;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, exact over int8u.
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
+ {
+ calc_type t = a * b + base_MSB;
+ return value_type(((t >> base_shift) + t) >> base_shift);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
+ {
+ if (a * b == 0)
+ {
+ return 0;
+ }
+ else if (a >= b)
+ {
+ return base_mask;
+ }
+ else return value_type((a * base_mask + (b >> 1)) / b);
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downscale(T a)
+ {
+ return a >> base_shift;
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downshift(T a, unsigned n)
+ {
+ return a >> n;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, exact over int8u.
+ // Specifically for multiplying a color component by a cover.
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
+ {
+ return multiply(a, b);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
+ {
+ return multiply(b, a);
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a, assuming q is premultiplied by a.
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
+ {
+ return p + q - multiply(p, a);
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a.
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
+ {
+ int t = (q - p) * a + base_MSB - (p > q);
+ return value_type(p + (((t >> base_shift) + t) >> base_shift));
+ }
+
+ //--------------------------------------------------------------------
+ self_type& clear()
+ {
+ r = g = b = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ else if (a_ > 1) a = 1;
+ else a = (value_type)uround(a_ * double(base_mask));
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return double(a) / double(base_mask);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& premultiply()
+ {
+ if (a != base_mask)
+ {
+ if (a == 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ r = multiply(r, a);
+ g = multiply(g, a);
+ b = multiply(b, a);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& premultiply(unsigned a_)
+ {
+ if (a != base_mask || a_ < base_mask)
+ {
+ if (a == 0 || a_ == 0)
+ {
+ r = g = b = a = 0;
+ }
+ else
+ {
+ calc_type r_ = (calc_type(r) * a_) / a;
+ calc_type g_ = (calc_type(g) * a_) / a;
+ calc_type b_ = (calc_type(b) * a_) / a;
+ r = value_type((r_ > a_) ? a_ : r_);
+ g = value_type((g_ > a_) ? a_ : g_);
+ b = value_type((b_ > a_) ? a_ : b_);
+ a = value_type(a_);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& demultiply()
+ {
+ if (a < base_mask)
+ {
+ if (a == 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ calc_type r_ = (calc_type(r) * base_mask) / a;
+ calc_type g_ = (calc_type(g) * base_mask) / a;
+ calc_type b_ = (calc_type(b) * base_mask) / a;
+ r = value_type((r_ > calc_type(base_mask)) ? calc_type(base_mask) : r_);
+ g = value_type((g_ > calc_type(base_mask)) ? calc_type(base_mask) : g_);
+ b = value_type((b_ > calc_type(base_mask)) ? calc_type(base_mask) : b_);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type gradient(const self_type& c, double k) const
+ {
+ self_type ret;
+ calc_type ik = uround(k * base_mask);
+ ret.r = lerp(r, c.r, ik);
+ ret.g = lerp(g, c.g, ik);
+ ret.b = lerp(b, c.b, ik);
+ ret.a = lerp(a, c.a, ik);
+ return ret;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void add(const self_type& c, unsigned cover)
+ {
+ calc_type cr, cg, cb, ca;
+ if (cover == cover_mask)
+ {
+ if (c.a == base_mask)
+ {
+ *this = c;
+ return;
+ }
+ else
+ {
+ cr = r + c.r;
+ cg = g + c.g;
+ cb = b + c.b;
+ ca = a + c.a;
+ }
+ }
+ else
+ {
+ cr = r + mult_cover(c.r, cover);
+ cg = g + mult_cover(c.g, cover);
+ cb = b + mult_cover(c.b, cover);
+ ca = a + mult_cover(c.a, cover);
+ }
+ r = (value_type)((cr > calc_type(base_mask)) ? calc_type(base_mask) : cr);
+ g = (value_type)((cg > calc_type(base_mask)) ? calc_type(base_mask) : cg);
+ b = (value_type)((cb > calc_type(base_mask)) ? calc_type(base_mask) : cb);
+ a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLUT>
+ AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma)
+ {
+ r = gamma.dir(r);
+ g = gamma.dir(g);
+ b = gamma.dir(b);
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLUT>
+ AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma)
+ {
+ r = gamma.inv(r);
+ g = gamma.inv(g);
+ b = gamma.inv(b);
+ }
+
+ //--------------------------------------------------------------------
+ static self_type no_color() { return self_type(0,0,0,0); }
+
+ //--------------------------------------------------------------------
+ static self_type from_wavelength(double wl, double gamma = 1.0)
+ {
+ return self_type(rgba::from_wavelength(wl, gamma));
+ }
+ };
+
+ typedef rgba8T<linear> rgba8;
+ typedef rgba8T<sRGB> srgba8;
+
+
+ //-------------------------------------------------------------rgb8_packed
+ inline rgba8 rgb8_packed(unsigned v)
+ {
+ return rgba8((v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF);
+ }
+
+ //-------------------------------------------------------------bgr8_packed
+ inline rgba8 bgr8_packed(unsigned v)
+ {
+ return rgba8(v & 0xFF, (v >> 8) & 0xFF, (v >> 16) & 0xFF);
+ }
+
+ //------------------------------------------------------------argb8_packed
+ inline rgba8 argb8_packed(unsigned v)
+ {
+ return rgba8((v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF, v >> 24);
+ }
+
+ //---------------------------------------------------------rgba8_gamma_dir
+ template<class GammaLUT>
+ rgba8 rgba8_gamma_dir(rgba8 c, const GammaLUT& gamma)
+ {
+ return rgba8(gamma.dir(c.r), gamma.dir(c.g), gamma.dir(c.b), c.a);
+ }
+
+ //---------------------------------------------------------rgba8_gamma_inv
+ template<class GammaLUT>
+ rgba8 rgba8_gamma_inv(rgba8 c, const GammaLUT& gamma)
+ {
+ return rgba8(gamma.inv(c.r), gamma.inv(c.g), gamma.inv(c.b), c.a);
+ }
+
+
+
+ //==================================================================rgba16
+ struct rgba16
+ {
+ typedef int16u value_type;
+ typedef int32u calc_type;
+ typedef int64 long_type;
+ enum base_scale_e
+ {
+ base_shift = 16,
+ base_scale = 1 << base_shift,
+ base_mask = base_scale - 1,
+ base_MSB = 1 << (base_shift - 1)
+ };
+ typedef rgba16 self_type;
+
+ value_type r;
+ value_type g;
+ value_type b;
+ value_type a;
+
+ //--------------------------------------------------------------------
+ rgba16() {}
+
+ //--------------------------------------------------------------------
+ rgba16(unsigned r_, unsigned g_, unsigned b_, unsigned a_=base_mask) :
+ r(value_type(r_)),
+ g(value_type(g_)),
+ b(value_type(b_)),
+ a(value_type(a_)) {}
+
+ //--------------------------------------------------------------------
+ rgba16(const self_type& c, unsigned a_) :
+ r(c.r), g(c.g), b(c.b), a(value_type(a_)) {}
+
+ //--------------------------------------------------------------------
+ rgba16(const rgba& c) :
+ r((value_type)uround(c.r * double(base_mask))),
+ g((value_type)uround(c.g * double(base_mask))),
+ b((value_type)uround(c.b * double(base_mask))),
+ a((value_type)uround(c.a * double(base_mask))) {}
+
+ //--------------------------------------------------------------------
+ rgba16(const rgba8& c) :
+ r(value_type((value_type(c.r) << 8) | c.r)),
+ g(value_type((value_type(c.g) << 8) | c.g)),
+ b(value_type((value_type(c.b) << 8) | c.b)),
+ a(value_type((value_type(c.a) << 8) | c.a)) {}
+
+ //--------------------------------------------------------------------
+ rgba16(const srgba8& c) :
+ r(sRGB_conv<value_type>::rgb_from_sRGB(c.r)),
+ g(sRGB_conv<value_type>::rgb_from_sRGB(c.g)),
+ b(sRGB_conv<value_type>::rgb_from_sRGB(c.b)),
+ a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
+
+ //--------------------------------------------------------------------
+ operator rgba() const
+ {
+ return rgba(
+ r / 65535.0,
+ g / 65535.0,
+ b / 65535.0,
+ a / 65535.0);
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba8() const
+ {
+ return rgba8(r >> 8, g >> 8, b >> 8, a >> 8);
+ }
+
+ //--------------------------------------------------------------------
+ operator srgba8() const
+ {
+ // Return (non-premultiplied) sRGB values.
+ return srgba8(
+ sRGB_conv<value_type>::rgb_to_sRGB(r),
+ sRGB_conv<value_type>::rgb_to_sRGB(g),
+ sRGB_conv<value_type>::rgb_to_sRGB(b),
+ sRGB_conv<value_type>::alpha_to_sRGB(a));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE double to_double(value_type a)
+ {
+ return double(a) / base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type from_double(double a)
+ {
+ return value_type(uround(a * base_mask));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type empty_value()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type full_value()
+ {
+ return base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_transparent() const
+ {
+ return a == 0;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_opaque() const
+ {
+ return a == base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type invert(value_type x)
+ {
+ return base_mask - x;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, exact over int16u.
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
+ {
+ calc_type t = a * b + base_MSB;
+ return value_type(((t >> base_shift) + t) >> base_shift);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
+ {
+ if (a * b == 0)
+ {
+ return 0;
+ }
+ else if (a >= b)
+ {
+ return base_mask;
+ }
+ else return value_type((a * base_mask + (b >> 1)) / b);
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downscale(T a)
+ {
+ return a >> base_shift;
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downshift(T a, unsigned n)
+ {
+ return a >> n;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, almost exact over int16u.
+ // Specifically for multiplying a color component by a cover.
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
+ {
+ return multiply(a, (b << 8) | b);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
+ {
+ return multiply((a << 8) | a, b) >> 8;
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a, assuming q is premultiplied by a.
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
+ {
+ return p + q - multiply(p, a);
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a.
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
+ {
+ int t = (q - p) * a + base_MSB - (p > q);
+ return value_type(p + (((t >> base_shift) + t) >> base_shift));
+ }
+
+ //--------------------------------------------------------------------
+ self_type& clear()
+ {
+ r = g = b = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ if (a_ > 1) a = 1;
+ a = value_type(uround(a_ * double(base_mask)));
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return double(a) / double(base_mask);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& premultiply()
+ {
+ if (a != base_mask)
+ {
+ if (a == 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ r = multiply(r, a);
+ g = multiply(g, a);
+ b = multiply(b, a);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& premultiply(unsigned a_)
+ {
+ if (a < base_mask || a_ < base_mask)
+ {
+ if (a == 0 || a_ == 0)
+ {
+ r = g = b = a = 0;
+ }
+ else
+ {
+ calc_type r_ = (calc_type(r) * a_) / a;
+ calc_type g_ = (calc_type(g) * a_) / a;
+ calc_type b_ = (calc_type(b) * a_) / a;
+ r = value_type((r_ > a_) ? a_ : r_);
+ g = value_type((g_ > a_) ? a_ : g_);
+ b = value_type((b_ > a_) ? a_ : b_);
+ a = value_type(a_);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& demultiply()
+ {
+ if (a < base_mask)
+ {
+ if (a == 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ calc_type r_ = (calc_type(r) * base_mask) / a;
+ calc_type g_ = (calc_type(g) * base_mask) / a;
+ calc_type b_ = (calc_type(b) * base_mask) / a;
+ r = value_type((r_ > calc_type(base_mask)) ? calc_type(base_mask) : r_);
+ g = value_type((g_ > calc_type(base_mask)) ? calc_type(base_mask) : g_);
+ b = value_type((b_ > calc_type(base_mask)) ? calc_type(base_mask) : b_);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type gradient(const self_type& c, double k) const
+ {
+ self_type ret;
+ calc_type ik = uround(k * base_mask);
+ ret.r = lerp(r, c.r, ik);
+ ret.g = lerp(g, c.g, ik);
+ ret.b = lerp(b, c.b, ik);
+ ret.a = lerp(a, c.a, ik);
+ return ret;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void add(const self_type& c, unsigned cover)
+ {
+ calc_type cr, cg, cb, ca;
+ if (cover == cover_mask)
+ {
+ if (c.a == base_mask)
+ {
+ *this = c;
+ return;
+ }
+ else
+ {
+ cr = r + c.r;
+ cg = g + c.g;
+ cb = b + c.b;
+ ca = a + c.a;
+ }
+ }
+ else
+ {
+ cr = r + mult_cover(c.r, cover);
+ cg = g + mult_cover(c.g, cover);
+ cb = b + mult_cover(c.b, cover);
+ ca = a + mult_cover(c.a, cover);
+ }
+ r = (value_type)((cr > calc_type(base_mask)) ? calc_type(base_mask) : cr);
+ g = (value_type)((cg > calc_type(base_mask)) ? calc_type(base_mask) : cg);
+ b = (value_type)((cb > calc_type(base_mask)) ? calc_type(base_mask) : cb);
+ a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLUT>
+ AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma)
+ {
+ r = gamma.dir(r);
+ g = gamma.dir(g);
+ b = gamma.dir(b);
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLUT>
+ AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma)
+ {
+ r = gamma.inv(r);
+ g = gamma.inv(g);
+ b = gamma.inv(b);
+ }
+
+ //--------------------------------------------------------------------
+ static self_type no_color() { return self_type(0,0,0,0); }
+
+ //--------------------------------------------------------------------
+ static self_type from_wavelength(double wl, double gamma = 1.0)
+ {
+ return self_type(rgba::from_wavelength(wl, gamma));
+ }
+ };
+
+
+ //------------------------------------------------------rgba16_gamma_dir
+ template<class GammaLUT>
+ rgba16 rgba16_gamma_dir(rgba16 c, const GammaLUT& gamma)
+ {
+ return rgba16(gamma.dir(c.r), gamma.dir(c.g), gamma.dir(c.b), c.a);
+ }
+
+ //------------------------------------------------------rgba16_gamma_inv
+ template<class GammaLUT>
+ rgba16 rgba16_gamma_inv(rgba16 c, const GammaLUT& gamma)
+ {
+ return rgba16(gamma.inv(c.r), gamma.inv(c.g), gamma.inv(c.b), c.a);
+ }
+
+ //====================================================================rgba32
+ struct rgba32
+ {
+ typedef float value_type;
+ typedef double calc_type;
+ typedef double long_type;
+ typedef rgba32 self_type;
+
+ value_type r;
+ value_type g;
+ value_type b;
+ value_type a;
+
+ //--------------------------------------------------------------------
+ rgba32() {}
+
+ //--------------------------------------------------------------------
+ rgba32(value_type r_, value_type g_, value_type b_, value_type a_= 1) :
+ r(r_), g(g_), b(b_), a(a_) {}
+
+ //--------------------------------------------------------------------
+ rgba32(const self_type& c, float a_) :
+ r(c.r), g(c.g), b(c.b), a(a_) {}
+
+ //--------------------------------------------------------------------
+ rgba32(const rgba& c) :
+ r(value_type(c.r)), g(value_type(c.g)), b(value_type(c.b)), a(value_type(c.a)) {}
+
+ //--------------------------------------------------------------------
+ rgba32(const rgba8& c) :
+ r(value_type(c.r / 255.0)),
+ g(value_type(c.g / 255.0)),
+ b(value_type(c.b / 255.0)),
+ a(value_type(c.a / 255.0)) {}
+
+ //--------------------------------------------------------------------
+ rgba32(const srgba8& c) :
+ r(sRGB_conv<value_type>::rgb_from_sRGB(c.r)),
+ g(sRGB_conv<value_type>::rgb_from_sRGB(c.g)),
+ b(sRGB_conv<value_type>::rgb_from_sRGB(c.b)),
+ a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
+
+ //--------------------------------------------------------------------
+ rgba32(const rgba16& c) :
+ r(value_type(c.r / 65535.0)),
+ g(value_type(c.g / 65535.0)),
+ b(value_type(c.b / 65535.0)),
+ a(value_type(c.a / 65535.0)) {}
+
+ //--------------------------------------------------------------------
+ operator rgba() const
+ {
+ return rgba(r, g, b, a);
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba8() const
+ {
+ return rgba8(
+ uround(r * 255.0),
+ uround(g * 255.0),
+ uround(b * 255.0),
+ uround(a * 255.0));
+ }
+
+ //--------------------------------------------------------------------
+ operator srgba8() const
+ {
+ return srgba8(
+ sRGB_conv<value_type>::rgb_to_sRGB(r),
+ sRGB_conv<value_type>::rgb_to_sRGB(g),
+ sRGB_conv<value_type>::rgb_to_sRGB(b),
+ sRGB_conv<value_type>::alpha_to_sRGB(a));
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba16() const
+ {
+ return rgba8(
+ uround(r * 65535.0),
+ uround(g * 65535.0),
+ uround(b * 65535.0),
+ uround(a * 65535.0));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE double to_double(value_type a)
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type from_double(double a)
+ {
+ return value_type(a);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type empty_value()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type full_value()
+ {
+ return 1;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_transparent() const
+ {
+ return a <= 0;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_opaque() const
+ {
+ return a >= 1;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type invert(value_type x)
+ {
+ return 1 - x;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
+ {
+ return value_type(a * b);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
+ {
+ return (b == 0) ? 0 : value_type(a / b);
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downscale(T a)
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downshift(T a, unsigned n)
+ {
+ return n > 0 ? a / (1 << n) : a;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
+ {
+ return value_type(a * b / cover_mask);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
+ {
+ return cover_type(uround(a * b));
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a, assuming q is premultiplied by a.
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
+ {
+ return (1 - a) * p + q; // more accurate than "p + q - p * a"
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a.
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
+ {
+ // The form "p + a * (q - p)" avoids a multiplication, but may produce an
+ // inaccurate result. For example, "p + (q - p)" may not be exactly equal
+ // to q. Therefore, stick to the basic expression, which at least produces
+ // the correct result at either extreme.
+ return (1 - a) * p + a * q;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& clear()
+ {
+ r = g = b = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ else if (a_ > 1) a = 1;
+ else a = value_type(a_);
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& premultiply()
+ {
+ if (a < 1)
+ {
+ if (a <= 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ r *= a;
+ g *= a;
+ b *= a;
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& demultiply()
+ {
+ if (a < 1)
+ {
+ if (a <= 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ r /= a;
+ g /= a;
+ b /= a;
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type gradient(const self_type& c, double k) const
+ {
+ self_type ret;
+ ret.r = value_type(r + (c.r - r) * k);
+ ret.g = value_type(g + (c.g - g) * k);
+ ret.b = value_type(b + (c.b - b) * k);
+ ret.a = value_type(a + (c.a - a) * k);
+ return ret;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void add(const self_type& c, unsigned cover)
+ {
+ if (cover == cover_mask)
+ {
+ if (c.is_opaque())
+ {
+ *this = c;
+ return;
+ }
+ else
+ {
+ r += c.r;
+ g += c.g;
+ b += c.b;
+ a += c.a;
+ }
+ }
+ else
+ {
+ r += mult_cover(c.r, cover);
+ g += mult_cover(c.g, cover);
+ b += mult_cover(c.b, cover);
+ a += mult_cover(c.a, cover);
+ }
+ if (a > 1) a = 1;
+ if (r > a) r = a;
+ if (g > a) g = a;
+ if (b > a) b = a;
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLUT>
+ AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma)
+ {
+ r = gamma.dir(r);
+ g = gamma.dir(g);
+ b = gamma.dir(b);
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLUT>
+ AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma)
+ {
+ r = gamma.inv(r);
+ g = gamma.inv(g);
+ b = gamma.inv(b);
+ }
+
+ //--------------------------------------------------------------------
+ static self_type no_color() { return self_type(0,0,0,0); }
+
+ //--------------------------------------------------------------------
+ static self_type from_wavelength(double wl, double gamma = 1)
+ {
+ return self_type(rgba::from_wavelength(wl, gamma));
+ }
+ };
+}
+
+
+
+#endif
diff --git a/src/agg/agg_config.h b/src/agg/agg_config.h
new file mode 100644
index 000000000..fa1dae2ba
--- /dev/null
+++ b/src/agg/agg_config.h
@@ -0,0 +1,44 @@
+#ifndef AGG_CONFIG_INCLUDED
+#define AGG_CONFIG_INCLUDED
+
+// This file can be used to redefine certain data types.
+
+//---------------------------------------
+// 1. Default basic types such as:
+//
+// AGG_INT8
+// AGG_INT8U
+// AGG_INT16
+// AGG_INT16U
+// AGG_INT32
+// AGG_INT32U
+// AGG_INT64
+// AGG_INT64U
+//
+// Just replace this file with new defines if necessary.
+// For example, if your compiler doesn't have a 64 bit integer type
+// you can still use AGG if you define the follows:
+//
+// #define AGG_INT64 int
+// #define AGG_INT64U unsigned
+//
+// It will result in overflow in 16 bit-per-component image/pattern resampling
+// but it won't result any crash and the rest of the library will remain
+// fully functional.
+
+
+//---------------------------------------
+// 2. Default rendering_buffer type. Can be:
+//
+// Provides faster access for massive pixel operations,
+// such as blur, image filtering:
+// #define AGG_RENDERING_BUFFER row_ptr_cache<int8u>
+//
+// Provides cheaper creation and destruction (no mem allocs):
+// #define AGG_RENDERING_BUFFER row_accessor<int8u>
+//
+// You can still use both of them simultaneously in your applications
+// This #define is used only for default rendering_buffer type,
+// in short hand typedefs like pixfmt_rgba32.
+
+#endif
diff --git a/src/agg/agg_conv_transform.h b/src/agg/agg_conv_transform.h
new file mode 100644
index 000000000..0c88a245b
--- /dev/null
+++ b/src/agg/agg_conv_transform.h
@@ -0,0 +1,68 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// class conv_transform
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_CONV_TRANSFORM_INCLUDED
+#define AGG_CONV_TRANSFORM_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_trans_affine.h"
+
+namespace agg
+{
+
+ //----------------------------------------------------------conv_transform
+ template<class VertexSource, class Transformer=trans_affine> class conv_transform
+ {
+ public:
+ conv_transform(VertexSource& source, Transformer& tr) :
+ m_source(&source), m_trans(&tr) {}
+ void attach(VertexSource& source) { m_source = &source; }
+
+ void rewind(unsigned path_id)
+ {
+ m_source->rewind(path_id);
+ }
+
+ unsigned vertex(double* x, double* y)
+ {
+ unsigned cmd = m_source->vertex(x, y);
+ if(is_vertex(cmd))
+ {
+ m_trans->transform(x, y);
+ }
+ return cmd;
+ }
+
+ void transformer(Transformer& tr)
+ {
+ m_trans = &tr;
+ }
+
+ private:
+ conv_transform(const conv_transform<VertexSource>&);
+ const conv_transform<VertexSource>&
+ operator = (const conv_transform<VertexSource>&);
+
+ VertexSource* m_source;
+ Transformer* m_trans;
+ };
+
+
+}
+
+#endif
diff --git a/src/agg/agg_gamma_functions.h b/src/agg/agg_gamma_functions.h
new file mode 100644
index 000000000..5d720daa9
--- /dev/null
+++ b/src/agg/agg_gamma_functions.h
@@ -0,0 +1,132 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_GAMMA_FUNCTIONS_INCLUDED
+#define AGG_GAMMA_FUNCTIONS_INCLUDED
+
+#include <math.h>
+#include "agg_basics.h"
+
+namespace agg
+{
+ //===============================================================gamma_none
+ struct gamma_none
+ {
+ double operator()(double x) const { return x; }
+ };
+
+
+ //==============================================================gamma_power
+ class gamma_power
+ {
+ public:
+ gamma_power() : m_gamma(1.0) {}
+ gamma_power(double g) : m_gamma(g) {}
+
+ void gamma(double g) { m_gamma = g; }
+ double gamma() const { return m_gamma; }
+
+ double operator() (double x) const
+ {
+ return pow(x, m_gamma);
+ }
+
+ private:
+ double m_gamma;
+ };
+
+
+ //==========================================================gamma_threshold
+ class gamma_threshold
+ {
+ public:
+ gamma_threshold() : m_threshold(0.5) {}
+ gamma_threshold(double t) : m_threshold(t) {}
+
+ void threshold(double t) { m_threshold = t; }
+ double threshold() const { return m_threshold; }
+
+ double operator() (double x) const
+ {
+ return (x < m_threshold) ? 0.0 : 1.0;
+ }
+
+ private:
+ double m_threshold;
+ };
+
+
+ //============================================================gamma_linear
+ class gamma_linear
+ {
+ public:
+ gamma_linear() : m_start(0.0), m_end(1.0) {}
+ gamma_linear(double s, double e) : m_start(s), m_end(e) {}
+
+ void set(double s, double e) { m_start = s; m_end = e; }
+ void start(double s) { m_start = s; }
+ void end(double e) { m_end = e; }
+ double start() const { return m_start; }
+ double end() const { return m_end; }
+
+ double operator() (double x) const
+ {
+ if(x < m_start) return 0.0;
+ if(x > m_end) return 1.0;
+ return (x - m_start) / (m_end - m_start);
+ }
+
+ private:
+ double m_start;
+ double m_end;
+ };
+
+
+ //==========================================================gamma_multiply
+ class gamma_multiply
+ {
+ public:
+ gamma_multiply() : m_mul(1.0) {}
+ gamma_multiply(double v) : m_mul(v) {}
+
+ void value(double v) { m_mul = v; }
+ double value() const { return m_mul; }
+
+ double operator() (double x) const
+ {
+ double y = x * m_mul;
+ if(y > 1.0) y = 1.0;
+ return y;
+ }
+
+ private:
+ double m_mul;
+ };
+
+ inline double sRGB_to_linear(double x)
+ {
+ return (x <= 0.04045) ? (x / 12.92) : pow((x + 0.055) / (1.055), 2.4);
+ }
+
+ inline double linear_to_sRGB(double x)
+ {
+ return (x <= 0.0031308) ? (x * 12.92) : (1.055 * pow(x, 1 / 2.4) - 0.055);
+ }
+}
+
+#endif
+
+
+
diff --git a/src/agg/agg_gamma_lut.h b/src/agg/agg_gamma_lut.h
new file mode 100644
index 000000000..e30873632
--- /dev/null
+++ b/src/agg/agg_gamma_lut.h
@@ -0,0 +1,300 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_GAMMA_LUT_INCLUDED
+#define AGG_GAMMA_LUT_INCLUDED
+
+#include <math.h>
+#include "agg_basics.h"
+#include "agg_gamma_functions.h"
+
+namespace agg
+{
+ template<class LoResT=int8u,
+ class HiResT=int8u,
+ unsigned GammaShift=8,
+ unsigned HiResShift=8> class gamma_lut
+ {
+ public:
+ typedef gamma_lut<LoResT, HiResT, GammaShift, HiResShift> self_type;
+
+ enum gamma_scale_e
+ {
+ gamma_shift = GammaShift,
+ gamma_size = 1 << gamma_shift,
+ gamma_mask = gamma_size - 1
+ };
+
+ enum hi_res_scale_e
+ {
+ hi_res_shift = HiResShift,
+ hi_res_size = 1 << hi_res_shift,
+ hi_res_mask = hi_res_size - 1
+ };
+
+ ~gamma_lut()
+ {
+ pod_allocator<LoResT>::deallocate(m_inv_gamma, hi_res_size);
+ pod_allocator<HiResT>::deallocate(m_dir_gamma, gamma_size);
+ }
+
+ gamma_lut() :
+ m_gamma(1.0),
+ m_dir_gamma(pod_allocator<HiResT>::allocate(gamma_size)),
+ m_inv_gamma(pod_allocator<LoResT>::allocate(hi_res_size))
+ {
+ unsigned i;
+ for(i = 0; i < gamma_size; i++)
+ {
+ m_dir_gamma[i] = HiResT(i << (hi_res_shift - gamma_shift));
+ }
+
+ for(i = 0; i < hi_res_size; i++)
+ {
+ m_inv_gamma[i] = LoResT(i >> (hi_res_shift - gamma_shift));
+ }
+ }
+
+ gamma_lut(double g) :
+ m_gamma(1.0),
+ m_dir_gamma(pod_allocator<HiResT>::allocate(gamma_size)),
+ m_inv_gamma(pod_allocator<LoResT>::allocate(hi_res_size))
+ {
+ gamma(g);
+ }
+
+ void gamma(double g)
+ {
+ m_gamma = g;
+
+ unsigned i;
+ for(i = 0; i < gamma_size; i++)
+ {
+ m_dir_gamma[i] = (HiResT)
+ uround(pow(i / double(gamma_mask), m_gamma) * double(hi_res_mask));
+ }
+
+ double inv_g = 1.0 / g;
+ for(i = 0; i < hi_res_size; i++)
+ {
+ m_inv_gamma[i] = (LoResT)
+ uround(pow(i / double(hi_res_mask), inv_g) * double(gamma_mask));
+ }
+ }
+
+ double gamma() const
+ {
+ return m_gamma;
+ }
+
+ HiResT dir(LoResT v) const
+ {
+ return m_dir_gamma[unsigned(v)];
+ }
+
+ LoResT inv(HiResT v) const
+ {
+ return m_inv_gamma[unsigned(v)];
+ }
+
+ private:
+ gamma_lut(const self_type&);
+ const self_type& operator = (const self_type&);
+
+ double m_gamma;
+ HiResT* m_dir_gamma;
+ LoResT* m_inv_gamma;
+ };
+
+ //
+ // sRGB support classes
+ //
+
+ // sRGB_lut - implements sRGB conversion for the various types.
+ // Base template is undefined, specializations are provided below.
+ template<class LinearType>
+ class sRGB_lut;
+
+ template<>
+ class sRGB_lut<float>
+ {
+ public:
+ sRGB_lut()
+ {
+ // Generate lookup tables.
+ for (int i = 0; i <= 255; ++i)
+ {
+ m_dir_table[i] = float(sRGB_to_linear(i / 255.0));
+ }
+ for (int i = 0; i <= 65535; ++i)
+ {
+ m_inv_table[i] = uround(255.0 * linear_to_sRGB(i / 65535.0));
+ }
+ }
+
+ float dir(int8u v) const
+ {
+ return m_dir_table[v];
+ }
+
+ int8u inv(float v) const
+ {
+ return m_inv_table[int16u(0.5 + v * 65535)];
+ }
+
+ private:
+ float m_dir_table[256];
+ int8u m_inv_table[65536];
+ };
+
+ template<>
+ class sRGB_lut<int16u>
+ {
+ public:
+ sRGB_lut()
+ {
+ // Generate lookup tables.
+ for (int i = 0; i <= 255; ++i)
+ {
+ m_dir_table[i] = uround(65535.0 * sRGB_to_linear(i / 255.0));
+ }
+ for (int i = 0; i <= 65535; ++i)
+ {
+ m_inv_table[i] = uround(255.0 * linear_to_sRGB(i / 65535.0));
+ }
+ }
+
+ int16u dir(int8u v) const
+ {
+ return m_dir_table[v];
+ }
+
+ int8u inv(int16u v) const
+ {
+ return m_inv_table[v];
+ }
+
+ private:
+ int16u m_dir_table[256];
+ int8u m_inv_table[65536];
+ };
+
+ template<>
+ class sRGB_lut<int8u>
+ {
+ public:
+ sRGB_lut()
+ {
+ // Generate lookup tables.
+ for (int i = 0; i <= 255; ++i)
+ {
+ m_dir_table[i] = uround(255.0 * sRGB_to_linear(i / 255.0));
+ m_inv_table[i] = uround(255.0 * linear_to_sRGB(i / 255.0));
+ }
+ }
+
+ int8u dir(int8u v) const
+ {
+ return m_dir_table[v];
+ }
+
+ int8u inv(int8u v) const
+ {
+ return m_inv_table[v];
+ }
+
+ private:
+ int8u m_dir_table[256];
+ int8u m_inv_table[256];
+ };
+
+ // Common base class for sRGB_conv objects. Defines an internal
+ // sRGB_lut object so that users don't have to.
+ template<class T>
+ class sRGB_conv_base
+ {
+ public:
+ static T rgb_from_sRGB(int8u x)
+ {
+ return lut.dir(x);
+ }
+
+ static int8u rgb_to_sRGB(T x)
+ {
+ return lut.inv(x);
+ }
+
+ private:
+ static sRGB_lut<T> lut;
+ };
+
+ // Definition of sRGB_conv_base::lut. Due to the fact that this a template,
+ // we don't need to place the definition in a cpp file. Hurrah.
+ template<class T>
+ sRGB_lut<T> sRGB_conv_base<T>::lut;
+
+ // Wrapper for sRGB-linear conversion.
+ // Base template is undefined, specializations are provided below.
+ template<class T>
+ class sRGB_conv;
+
+ template<>
+ class sRGB_conv<float> : public sRGB_conv_base<float>
+ {
+ public:
+ static float alpha_from_sRGB(int8u x)
+ {
+ static const double y = 1 / 255.0;
+ return float(x * y);
+ }
+
+ static int8u alpha_to_sRGB(float x)
+ {
+ return int8u(0.5 + x * 255);
+ }
+ };
+
+ template<>
+ class sRGB_conv<int16u> : public sRGB_conv_base<int16u>
+ {
+ public:
+ static int16u alpha_from_sRGB(int8u x)
+ {
+ return (x << 8) | x;
+ }
+
+ static int8u alpha_to_sRGB(int16u x)
+ {
+ return x >> 8;
+ }
+ };
+
+ template<>
+ class sRGB_conv<int8u> : public sRGB_conv_base<int8u>
+ {
+ public:
+ static int8u alpha_from_sRGB(int8u x)
+ {
+ return x;
+ }
+
+ static int8u alpha_to_sRGB(int8u x)
+ {
+ return x;
+ }
+ };
+}
+
+#endif
diff --git a/src/agg/agg_math.h b/src/agg/agg_math.h
new file mode 100644
index 000000000..2ec49cf3f
--- /dev/null
+++ b/src/agg/agg_math.h
@@ -0,0 +1,437 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+// Bessel function (besj) was adapted for use in AGG library by Andy Wilk
+// Contact: castor.vulgaris@gmail.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_MATH_INCLUDED
+#define AGG_MATH_INCLUDED
+
+#include <math.h>
+#include "agg_basics.h"
+
+namespace agg
+{
+
+ //------------------------------------------------------vertex_dist_epsilon
+ // Coinciding points maximal distance (Epsilon)
+ const double vertex_dist_epsilon = 1e-14;
+
+ //-----------------------------------------------------intersection_epsilon
+ // See calc_intersection
+ const double intersection_epsilon = 1.0e-30;
+
+ //------------------------------------------------------------cross_product
+ AGG_INLINE double cross_product(double x1, double y1,
+ double x2, double y2,
+ double x, double y)
+ {
+ return (x - x2) * (y2 - y1) - (y - y2) * (x2 - x1);
+ }
+
+ //--------------------------------------------------------point_in_triangle
+ AGG_INLINE bool point_in_triangle(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x, double y)
+ {
+ bool cp1 = cross_product(x1, y1, x2, y2, x, y) < 0.0;
+ bool cp2 = cross_product(x2, y2, x3, y3, x, y) < 0.0;
+ bool cp3 = cross_product(x3, y3, x1, y1, x, y) < 0.0;
+ return cp1 == cp2 && cp2 == cp3 && cp3 == cp1;
+ }
+
+ //-----------------------------------------------------------calc_distance
+ AGG_INLINE double calc_distance(double x1, double y1, double x2, double y2)
+ {
+ double dx = x2-x1;
+ double dy = y2-y1;
+ return sqrt(dx * dx + dy * dy);
+ }
+
+ //--------------------------------------------------------calc_sq_distance
+ AGG_INLINE double calc_sq_distance(double x1, double y1, double x2, double y2)
+ {
+ double dx = x2-x1;
+ double dy = y2-y1;
+ return dx * dx + dy * dy;
+ }
+
+ //------------------------------------------------calc_line_point_distance
+ AGG_INLINE double calc_line_point_distance(double x1, double y1,
+ double x2, double y2,
+ double x, double y)
+ {
+ double dx = x2-x1;
+ double dy = y2-y1;
+ double d = sqrt(dx * dx + dy * dy);
+ if(d < vertex_dist_epsilon)
+ {
+ return calc_distance(x1, y1, x, y);
+ }
+ return ((x - x2) * dy - (y - y2) * dx) / d;
+ }
+
+ //-------------------------------------------------------calc_line_point_u
+ AGG_INLINE double calc_segment_point_u(double x1, double y1,
+ double x2, double y2,
+ double x, double y)
+ {
+ double dx = x2 - x1;
+ double dy = y2 - y1;
+
+ if(dx == 0 && dy == 0)
+ {
+ return 0;
+ }
+
+ double pdx = x - x1;
+ double pdy = y - y1;
+
+ return (pdx * dx + pdy * dy) / (dx * dx + dy * dy);
+ }
+
+ //---------------------------------------------calc_line_point_sq_distance
+ AGG_INLINE double calc_segment_point_sq_distance(double x1, double y1,
+ double x2, double y2,
+ double x, double y,
+ double u)
+ {
+ if(u <= 0)
+ {
+ return calc_sq_distance(x, y, x1, y1);
+ }
+ else
+ if(u >= 1)
+ {
+ return calc_sq_distance(x, y, x2, y2);
+ }
+ return calc_sq_distance(x, y, x1 + u * (x2 - x1), y1 + u * (y2 - y1));
+ }
+
+ //---------------------------------------------calc_line_point_sq_distance
+ AGG_INLINE double calc_segment_point_sq_distance(double x1, double y1,
+ double x2, double y2,
+ double x, double y)
+ {
+ return
+ calc_segment_point_sq_distance(
+ x1, y1, x2, y2, x, y,
+ calc_segment_point_u(x1, y1, x2, y2, x, y));
+ }
+
+ //-------------------------------------------------------calc_intersection
+ AGG_INLINE bool calc_intersection(double ax, double ay, double bx, double by,
+ double cx, double cy, double dx, double dy,
+ double* x, double* y)
+ {
+ double num = (ay-cy) * (dx-cx) - (ax-cx) * (dy-cy);
+ double den = (bx-ax) * (dy-cy) - (by-ay) * (dx-cx);
+ if(fabs(den) < intersection_epsilon) return false;
+ double r = num / den;
+ *x = ax + r * (bx-ax);
+ *y = ay + r * (by-ay);
+ return true;
+ }
+
+ //-----------------------------------------------------intersection_exists
+ AGG_INLINE bool intersection_exists(double x1, double y1, double x2, double y2,
+ double x3, double y3, double x4, double y4)
+ {
+ // It's less expensive but you can't control the
+ // boundary conditions: Less or LessEqual
+ double dx1 = x2 - x1;
+ double dy1 = y2 - y1;
+ double dx2 = x4 - x3;
+ double dy2 = y4 - y3;
+ return ((x3 - x2) * dy1 - (y3 - y2) * dx1 < 0.0) !=
+ ((x4 - x2) * dy1 - (y4 - y2) * dx1 < 0.0) &&
+ ((x1 - x4) * dy2 - (y1 - y4) * dx2 < 0.0) !=
+ ((x2 - x4) * dy2 - (y2 - y4) * dx2 < 0.0);
+
+ // It's is more expensive but more flexible
+ // in terms of boundary conditions.
+ //--------------------
+ //double den = (x2-x1) * (y4-y3) - (y2-y1) * (x4-x3);
+ //if(fabs(den) < intersection_epsilon) return false;
+ //double nom1 = (x4-x3) * (y1-y3) - (y4-y3) * (x1-x3);
+ //double nom2 = (x2-x1) * (y1-y3) - (y2-y1) * (x1-x3);
+ //double ua = nom1 / den;
+ //double ub = nom2 / den;
+ //return ua >= 0.0 && ua <= 1.0 && ub >= 0.0 && ub <= 1.0;
+ }
+
+ //--------------------------------------------------------calc_orthogonal
+ AGG_INLINE void calc_orthogonal(double thickness,
+ double x1, double y1,
+ double x2, double y2,
+ double* x, double* y)
+ {
+ double dx = x2 - x1;
+ double dy = y2 - y1;
+ double d = sqrt(dx*dx + dy*dy);
+ *x = thickness * dy / d;
+ *y = -thickness * dx / d;
+ }
+
+ //--------------------------------------------------------dilate_triangle
+ AGG_INLINE void dilate_triangle(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double *x, double* y,
+ double d)
+ {
+ double dx1=0.0;
+ double dy1=0.0;
+ double dx2=0.0;
+ double dy2=0.0;
+ double dx3=0.0;
+ double dy3=0.0;
+ double loc = cross_product(x1, y1, x2, y2, x3, y3);
+ if(fabs(loc) > intersection_epsilon)
+ {
+ if(cross_product(x1, y1, x2, y2, x3, y3) > 0.0)
+ {
+ d = -d;
+ }
+ calc_orthogonal(d, x1, y1, x2, y2, &dx1, &dy1);
+ calc_orthogonal(d, x2, y2, x3, y3, &dx2, &dy2);
+ calc_orthogonal(d, x3, y3, x1, y1, &dx3, &dy3);
+ }
+ *x++ = x1 + dx1; *y++ = y1 + dy1;
+ *x++ = x2 + dx1; *y++ = y2 + dy1;
+ *x++ = x2 + dx2; *y++ = y2 + dy2;
+ *x++ = x3 + dx2; *y++ = y3 + dy2;
+ *x++ = x3 + dx3; *y++ = y3 + dy3;
+ *x++ = x1 + dx3; *y++ = y1 + dy3;
+ }
+
+ //------------------------------------------------------calc_triangle_area
+ AGG_INLINE double calc_triangle_area(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3)
+ {
+ return (x1*y2 - x2*y1 + x2*y3 - x3*y2 + x3*y1 - x1*y3) * 0.5;
+ }
+
+ //-------------------------------------------------------calc_polygon_area
+ template<class Storage> double calc_polygon_area(const Storage& st)
+ {
+ unsigned i;
+ double sum = 0.0;
+ double x = st[0].x;
+ double y = st[0].y;
+ double xs = x;
+ double ys = y;
+
+ for(i = 1; i < st.size(); i++)
+ {
+ const typename Storage::value_type& v = st[i];
+ sum += x * v.y - y * v.x;
+ x = v.x;
+ y = v.y;
+ }
+ return (sum + x * ys - y * xs) * 0.5;
+ }
+
+ //------------------------------------------------------------------------
+ // Tables for fast sqrt
+ extern int16u g_sqrt_table[1024];
+ extern int8 g_elder_bit_table[256];
+
+
+ //---------------------------------------------------------------fast_sqrt
+ //Fast integer Sqrt - really fast: no cycles, divisions or multiplications
+ #if defined(_MSC_VER)
+ #pragma warning(push)
+ #pragma warning(disable : 4035) //Disable warning "no return value"
+ #endif
+ AGG_INLINE unsigned fast_sqrt(unsigned val)
+ {
+ #if defined(_M_IX86) && defined(_MSC_VER) && !defined(AGG_NO_ASM)
+ //For Ix86 family processors this assembler code is used.
+ //The key command here is bsr - determination the number of the most
+ //significant bit of the value. For other processors
+ //(and maybe compilers) the pure C "#else" section is used.
+ __asm
+ {
+ mov ebx, val
+ mov edx, 11
+ bsr ecx, ebx
+ sub ecx, 9
+ jle less_than_9_bits
+ shr ecx, 1
+ adc ecx, 0
+ sub edx, ecx
+ shl ecx, 1
+ shr ebx, cl
+ less_than_9_bits:
+ xor eax, eax
+ mov ax, g_sqrt_table[ebx*2]
+ mov ecx, edx
+ shr eax, cl
+ }
+ #else
+
+ //This code is actually pure C and portable to most
+ //arcitectures including 64bit ones.
+ unsigned t = val;
+ int bit=0;
+ unsigned shift = 11;
+
+ //The following piece of code is just an emulation of the
+ //Ix86 assembler command "bsr" (see above). However on old
+ //Intels (like Intel MMX 233MHz) this code is about twice
+ //faster (sic!) then just one "bsr". On PIII and PIV the
+ //bsr is optimized quite well.
+ bit = t >> 24;
+ if(bit)
+ {
+ bit = g_elder_bit_table[bit] + 24;
+ }
+ else
+ {
+ bit = (t >> 16) & 0xFF;
+ if(bit)
+ {
+ bit = g_elder_bit_table[bit] + 16;
+ }
+ else
+ {
+ bit = (t >> 8) & 0xFF;
+ if(bit)
+ {
+ bit = g_elder_bit_table[bit] + 8;
+ }
+ else
+ {
+ bit = g_elder_bit_table[t];
+ }
+ }
+ }
+
+ //This code calculates the sqrt.
+ bit -= 9;
+ if(bit > 0)
+ {
+ bit = (bit >> 1) + (bit & 1);
+ shift -= bit;
+ val >>= (bit << 1);
+ }
+ return g_sqrt_table[val] >> shift;
+ #endif
+ }
+ #if defined(_MSC_VER)
+ #pragma warning(pop)
+ #endif
+
+
+
+
+ //--------------------------------------------------------------------besj
+ // Function BESJ calculates Bessel function of first kind of order n
+ // Arguments:
+ // n - an integer (>=0), the order
+ // x - value at which the Bessel function is required
+ //--------------------
+ // C++ Mathematical Library
+ // Convereted from equivalent FORTRAN library
+ // Converetd by Gareth Walker for use by course 392 computational project
+ // All functions tested and yield the same results as the corresponding
+ // FORTRAN versions.
+ //
+ // If you have any problems using these functions please report them to
+ // M.Muldoon@UMIST.ac.uk
+ //
+ // Documentation available on the web
+ // http://www.ma.umist.ac.uk/mrm/Teaching/392/libs/392.html
+ // Version 1.0 8/98
+ // 29 October, 1999
+ //--------------------
+ // Adapted for use in AGG library by Andy Wilk (castor.vulgaris@gmail.com)
+ //------------------------------------------------------------------------
+ inline double besj(double x, int n)
+ {
+ if(n < 0)
+ {
+ return 0;
+ }
+ double d = 1E-6;
+ double b = 0;
+ if(fabs(x) <= d)
+ {
+ if(n != 0) return 0;
+ return 1;
+ }
+ double b1 = 0; // b1 is the value from the previous iteration
+ // Set up a starting order for recurrence
+ int m1 = (int)fabs(x) + 6;
+ if(fabs(x) > 5)
+ {
+ m1 = (int)(fabs(1.4 * x + 60 / x));
+ }
+ int m2 = (int)(n + 2 + fabs(x) / 4);
+ if (m1 > m2)
+ {
+ m2 = m1;
+ }
+
+ // Apply recurrence down from curent max order
+ for(;;)
+ {
+ double c3 = 0;
+ double c2 = 1E-30;
+ double c4 = 0;
+ int m8 = 1;
+ if (m2 / 2 * 2 == m2)
+ {
+ m8 = -1;
+ }
+ int imax = m2 - 2;
+ for (int i = 1; i <= imax; i++)
+ {
+ double c6 = 2 * (m2 - i) * c2 / x - c3;
+ c3 = c2;
+ c2 = c6;
+ if(m2 - i - 1 == n)
+ {
+ b = c6;
+ }
+ m8 = -1 * m8;
+ if (m8 > 0)
+ {
+ c4 = c4 + 2 * c6;
+ }
+ }
+ double c6 = 2 * c2 / x - c3;
+ if(n == 0)
+ {
+ b = c6;
+ }
+ c4 += c6;
+ b /= c4;
+ if(fabs(b - b1) < d)
+ {
+ return b;
+ }
+ b1 = b;
+ m2 += 3;
+ }
+ }
+
+}
+
+
+#endif
diff --git a/src/agg/agg_path_storage.h b/src/agg/agg_path_storage.h
new file mode 100644
index 000000000..f55c89957
--- /dev/null
+++ b/src/agg/agg_path_storage.h
@@ -0,0 +1,1582 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_PATH_STORAGE_INCLUDED
+#define AGG_PATH_STORAGE_INCLUDED
+
+#include <string.h>
+#include <math.h>
+#include "agg_math.h"
+#include "agg_array.h"
+#include "agg_bezier_arc.h"
+
+namespace agg
+{
+
+
+ //----------------------------------------------------vertex_block_storage
+ // Compact storage for path vertices: coordinates and per-vertex command
+ // bytes are kept in fixed-size blocks (block_size vertices each) that
+ // are allocated on demand, so the container grows without relocating
+ // already-stored data.
+ template<class T, unsigned BlockShift=8, unsigned BlockPool=256>
+ class vertex_block_storage
+ {
+ public:
+ // Allocation parameters
+ enum block_scale_e
+ {
+ block_shift = BlockShift,
+ block_size = 1 << block_shift, // vertices per block
+ block_mask = block_size - 1,
+ block_pool = BlockPool // pointer-table growth increment
+ };
+
+ typedef T value_type;
+ typedef vertex_block_storage<T, BlockShift, BlockPool> self_type;
+
+ ~vertex_block_storage();
+ vertex_block_storage();
+ vertex_block_storage(const self_type& v);
+ const self_type& operator = (const self_type& ps);
+
+ // remove_all() forgets the vertices but keeps the allocated blocks;
+ // free_all() additionally returns all memory to the allocator.
+ void remove_all();
+ void free_all();
+
+ void add_vertex(double x, double y, unsigned cmd);
+ void modify_vertex(unsigned idx, double x, double y);
+ void modify_vertex(unsigned idx, double x, double y, unsigned cmd);
+ void modify_command(unsigned idx, unsigned cmd);
+ void swap_vertices(unsigned v1, unsigned v2);
+
+ unsigned last_command() const;
+ unsigned last_vertex(double* x, double* y) const;
+ unsigned prev_vertex(double* x, double* y) const;
+
+ double last_x() const;
+ double last_y() const;
+
+ unsigned total_vertices() const;
+ unsigned vertex(unsigned idx, double* x, double* y) const;
+ unsigned command(unsigned idx) const;
+
+ private:
+ void allocate_block(unsigned nb);
+ int8u* storage_ptrs(T** xy_ptr);
+
+ private:
+ unsigned m_total_vertices; // number of vertices currently stored
+ unsigned m_total_blocks; // number of allocated data blocks
+ unsigned m_max_blocks; // capacity of the pointer tables below
+ T** m_coord_blocks; // per-block coordinate arrays (x,y pairs)
+ int8u** m_cmd_blocks; // per-block command byte arrays
+ };
+
+
+ //------------------------------------------------------------------------
+ // Free every data block and the block-pointer table, resetting the
+ // container to its empty, unallocated state. Each coordinate block was
+ // allocated together with its command bytes (see allocate_block), so a
+ // single deallocate releases both.
+ template<class T, unsigned S, unsigned P>
+ void vertex_block_storage<T,S,P>::free_all()
+ {
+ if(m_total_blocks)
+ {
+ T** coord_blk = m_coord_blocks + m_total_blocks - 1;
+ while(m_total_blocks--)
+ {
+ pod_allocator<T>::deallocate(
+ *coord_blk,
+ block_size * 2 +
+ block_size / (sizeof(T) / sizeof(unsigned char)));
+ --coord_blk;
+ }
+ pod_allocator<T*>::deallocate(m_coord_blocks, m_max_blocks * 2);
+ m_total_blocks = 0;
+ m_max_blocks = 0;
+ m_coord_blocks = 0;
+ m_cmd_blocks = 0;
+ m_total_vertices = 0;
+ }
+ }
+
+ //------------------------------------------------------------------------
+ // Destructor: releases all storage.
+ template<class T, unsigned S, unsigned P>
+ vertex_block_storage<T,S,P>::~vertex_block_storage()
+ {
+ free_all();
+ }
+
+ //------------------------------------------------------------------------
+ // Default constructor: starts empty, with no blocks allocated.
+ template<class T, unsigned S, unsigned P>
+ vertex_block_storage<T,S,P>::vertex_block_storage() :
+ m_total_vertices(0),
+ m_total_blocks(0),
+ m_max_blocks(0),
+ m_coord_blocks(0),
+ m_cmd_blocks(0)
+ {
+ }
+
+ //------------------------------------------------------------------------
+ // Copy constructor: deep-copies by delegating to operator=.
+ template<class T, unsigned S, unsigned P>
+ vertex_block_storage<T,S,P>::vertex_block_storage(const vertex_block_storage<T,S,P>& v) :
+ m_total_vertices(0),
+ m_total_blocks(0),
+ m_max_blocks(0),
+ m_coord_blocks(0),
+ m_cmd_blocks(0)
+ {
+ *this = v;
+ }
+
+ //------------------------------------------------------------------------
+ // Assignment: clears the vertex count (keeping any allocated blocks for
+ // reuse) and re-adds every vertex of the source, one by one.
+ template<class T, unsigned S, unsigned P>
+ const vertex_block_storage<T,S,P>&
+ vertex_block_storage<T,S,P>::operator = (const vertex_block_storage<T,S,P>& v)
+ {
+ remove_all();
+ unsigned i;
+ for(i = 0; i < v.total_vertices(); i++)
+ {
+ double x, y;
+ unsigned cmd = v.vertex(i, &x, &y);
+ add_vertex(x, y, cmd);
+ }
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ // Logically empty the container; allocated blocks are retained so
+ // subsequent add_vertex() calls can reuse them without reallocating.
+ template<class T, unsigned S, unsigned P>
+ inline void vertex_block_storage<T,S,P>::remove_all()
+ {
+ m_total_vertices = 0;
+ }
+
+ //------------------------------------------------------------------------
+ // Append one vertex: store its command byte and (x, y) coordinates at
+ // the next free slot (allocating a new block if the current one is full).
+ template<class T, unsigned S, unsigned P>
+ inline void vertex_block_storage<T,S,P>::add_vertex(double x, double y,
+ unsigned cmd)
+ {
+ T* coord_ptr = 0;
+ *storage_ptrs(&coord_ptr) = (int8u)cmd;
+ coord_ptr[0] = T(x);
+ coord_ptr[1] = T(y);
+ m_total_vertices++;
+ }
+
+ //------------------------------------------------------------------------
+ // Overwrite the coordinates of vertex idx (command left unchanged).
+ template<class T, unsigned S, unsigned P>
+ inline void vertex_block_storage<T,S,P>::modify_vertex(unsigned idx,
+ double x, double y)
+ {
+ T* pv = m_coord_blocks[idx >> block_shift] + ((idx & block_mask) << 1);
+ pv[0] = T(x);
+ pv[1] = T(y);
+ }
+
+ //------------------------------------------------------------------------
+ // Overwrite both the coordinates and the command of vertex idx.
+ template<class T, unsigned S, unsigned P>
+ inline void vertex_block_storage<T,S,P>::modify_vertex(unsigned idx,
+ double x, double y,
+ unsigned cmd)
+ {
+ unsigned block = idx >> block_shift;
+ unsigned offset = idx & block_mask;
+ T* pv = m_coord_blocks[block] + (offset << 1);
+ pv[0] = T(x);
+ pv[1] = T(y);
+ m_cmd_blocks[block][offset] = (int8u)cmd;
+ }
+
+ //------------------------------------------------------------------------
+ // Overwrite only the command byte of vertex idx.
+ template<class T, unsigned S, unsigned P>
+ inline void vertex_block_storage<T,S,P>::modify_command(unsigned idx,
+ unsigned cmd)
+ {
+ m_cmd_blocks[idx >> block_shift][idx & block_mask] = (int8u)cmd;
+ }
+
+ //------------------------------------------------------------------------
+ // Exchange vertices v1 and v2, coordinates and commands alike.
+ template<class T, unsigned S, unsigned P>
+ inline void vertex_block_storage<T,S,P>::swap_vertices(unsigned v1, unsigned v2)
+ {
+ unsigned b1 = v1 >> block_shift;
+ unsigned b2 = v2 >> block_shift;
+ unsigned o1 = v1 & block_mask;
+ unsigned o2 = v2 & block_mask;
+ T* pv1 = m_coord_blocks[b1] + (o1 << 1);
+ T* pv2 = m_coord_blocks[b2] + (o2 << 1);
+ T val;
+ val = pv1[0]; pv1[0] = pv2[0]; pv2[0] = val;
+ val = pv1[1]; pv1[1] = pv2[1]; pv2[1] = val;
+ int8u cmd = m_cmd_blocks[b1][o1];
+ m_cmd_blocks[b1][o1] = m_cmd_blocks[b2][o2];
+ m_cmd_blocks[b2][o2] = cmd;
+ }
+
+ //------------------------------------------------------------------------
+ // Command of the most recently added vertex, or path_cmd_stop if empty.
+ template<class T, unsigned S, unsigned P>
+ inline unsigned vertex_block_storage<T,S,P>::last_command() const
+ {
+ if(m_total_vertices) return command(m_total_vertices - 1);
+ return path_cmd_stop;
+ }
+
+ //------------------------------------------------------------------------
+ // Fetch the last vertex into *x,*y and return its command;
+ // returns path_cmd_stop (outputs untouched) when empty.
+ template<class T, unsigned S, unsigned P>
+ inline unsigned vertex_block_storage<T,S,P>::last_vertex(double* x, double* y) const
+ {
+ if(m_total_vertices) return vertex(m_total_vertices - 1, x, y);
+ return path_cmd_stop;
+ }
+
+ //------------------------------------------------------------------------
+ // Fetch the next-to-last vertex, or path_cmd_stop if fewer than two.
+ template<class T, unsigned S, unsigned P>
+ inline unsigned vertex_block_storage<T,S,P>::prev_vertex(double* x, double* y) const
+ {
+ if(m_total_vertices > 1) return vertex(m_total_vertices - 2, x, y);
+ return path_cmd_stop;
+ }
+
+ //------------------------------------------------------------------------
+ // X coordinate of the last vertex, or 0.0 if the container is empty.
+ template<class T, unsigned S, unsigned P>
+ inline double vertex_block_storage<T,S,P>::last_x() const
+ {
+ if(m_total_vertices)
+ {
+ unsigned idx = m_total_vertices - 1;
+ return m_coord_blocks[idx >> block_shift][(idx & block_mask) << 1];
+ }
+ return 0.0;
+ }
+
+ //------------------------------------------------------------------------
+ // Y coordinate of the last vertex, or 0.0 if the container is empty.
+ template<class T, unsigned S, unsigned P>
+ inline double vertex_block_storage<T,S,P>::last_y() const
+ {
+ if(m_total_vertices)
+ {
+ unsigned idx = m_total_vertices - 1;
+ return m_coord_blocks[idx >> block_shift][((idx & block_mask) << 1) + 1];
+ }
+ return 0.0;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline unsigned vertex_block_storage<T,S,P>::total_vertices() const
+ {
+ return m_total_vertices;
+ }
+
+ //------------------------------------------------------------------------
+ // Random access: fetch coordinates of vertex idx and return its command.
+ // No bounds checking is performed.
+ template<class T, unsigned S, unsigned P>
+ inline unsigned vertex_block_storage<T,S,P>::vertex(unsigned idx,
+ double* x, double* y) const
+ {
+ unsigned nb = idx >> block_shift;
+ const T* pv = m_coord_blocks[nb] + ((idx & block_mask) << 1);
+ *x = pv[0];
+ *y = pv[1];
+ return m_cmd_blocks[nb][idx & block_mask];
+ }
+
+ //------------------------------------------------------------------------
+ // Command byte of vertex idx (no bounds checking).
+ template<class T, unsigned S, unsigned P>
+ inline unsigned vertex_block_storage<T,S,P>::command(unsigned idx) const
+ {
+ return m_cmd_blocks[idx >> block_shift][idx & block_mask];
+ }
+
+ //------------------------------------------------------------------------
+ // Allocate data block nb. The block-pointer table grows in increments
+ // of block_pool when necessary; the coordinate and command pointer
+ // tables share one allocation (command pointers sit right after the
+ // coordinate pointers), just as each data block stores its command
+ // bytes after the coordinate area.
+ template<class T, unsigned S, unsigned P>
+ void vertex_block_storage<T,S,P>::allocate_block(unsigned nb)
+ {
+ if(nb >= m_max_blocks)
+ {
+ T** new_coords =
+ pod_allocator<T*>::allocate((m_max_blocks + block_pool) * 2);
+
+ unsigned char** new_cmds =
+ (unsigned char**)(new_coords + m_max_blocks + block_pool);
+
+ if(m_coord_blocks)
+ {
+ memcpy(new_coords,
+ m_coord_blocks,
+ m_max_blocks * sizeof(T*));
+
+ memcpy(new_cmds,
+ m_cmd_blocks,
+ m_max_blocks * sizeof(unsigned char*));
+
+ pod_allocator<T*>::deallocate(m_coord_blocks, m_max_blocks * 2);
+ }
+ m_coord_blocks = new_coords;
+ m_cmd_blocks = new_cmds;
+ m_max_blocks += block_pool;
+ }
+ m_coord_blocks[nb] =
+ pod_allocator<T>::allocate(block_size * 2 +
+ block_size / (sizeof(T) / sizeof(unsigned char)));
+
+ m_cmd_blocks[nb] =
+ (unsigned char*)(m_coord_blocks[nb] + block_size * 2);
+
+ m_total_blocks++;
+ }
+
+ //------------------------------------------------------------------------
+ // Return pointers to the storage slots of the next vertex: the command
+ // byte pointer is the return value, the coordinate pointer goes to
+ // *xy_ptr. Allocates a fresh block when the current one is full.
+ template<class T, unsigned S, unsigned P>
+ int8u* vertex_block_storage<T,S,P>::storage_ptrs(T** xy_ptr)
+ {
+ unsigned nb = m_total_vertices >> block_shift;
+ if(nb >= m_total_blocks)
+ {
+ allocate_block(nb);
+ }
+ *xy_ptr = m_coord_blocks[nb] + ((m_total_vertices & block_mask) << 1);
+ return m_cmd_blocks[nb] + (m_total_vertices & block_mask);
+ }
+
+
+
+
+ //-----------------------------------------------------poly_plain_adaptor
+ // Vertex-source adaptor over a plain coordinate array laid out as
+ // x0,y0,x1,y1,... The first point is reported as move_to, the rest as
+ // line_to; when "closed" is set an end_poly|close command is emitted
+ // once before the final path_cmd_stop. The array is not owned.
+ template<class T> class poly_plain_adaptor
+ {
+ public:
+ typedef T value_type;
+
+ poly_plain_adaptor() :
+ m_data(0),
+ m_ptr(0),
+ m_end(0),
+ m_closed(false),
+ m_stop(false)
+ {}
+
+ poly_plain_adaptor(const T* data, unsigned num_points, bool closed) :
+ m_data(data),
+ m_ptr(data),
+ m_end(data + num_points * 2),
+ m_closed(closed),
+ m_stop(false)
+ {}
+
+ // Re-target the adaptor at another coordinate array.
+ void init(const T* data, unsigned num_points, bool closed)
+ {
+ m_data = data;
+ m_ptr = data;
+ m_end = data + num_points * 2;
+ m_closed = closed;
+ m_stop = false;
+ }
+
+ void rewind(unsigned)
+ {
+ m_ptr = m_data;
+ m_stop = false;
+ }
+
+ unsigned vertex(double* x, double* y)
+ {
+ if(m_ptr < m_end)
+ {
+ bool first = m_ptr == m_data;
+ *x = *m_ptr++;
+ *y = *m_ptr++;
+ return first ? path_cmd_move_to : path_cmd_line_to;
+ }
+ *x = *y = 0.0;
+ if(m_closed && !m_stop)
+ {
+ m_stop = true;
+ return path_cmd_end_poly | path_flags_close;
+ }
+ return path_cmd_stop;
+ }
+
+ private:
+ const T* m_data; // start of the coordinate array
+ const T* m_ptr; // current read position
+ const T* m_end; // one past the last coordinate
+ bool m_closed; // emit a closing end_poly command?
+ bool m_stop; // close command already emitted
+ };
+
+
+
+
+
+ //-------------------------------------------------poly_container_adaptor
+ // Vertex-source adaptor over a container of vertex objects exposing
+ // size(), operator[] and .x/.y members. Traverses front to back:
+ // first element as move_to, the rest as line_to, with an optional
+ // closing end_poly|close command. The container is not owned.
+ template<class Container> class poly_container_adaptor
+ {
+ public:
+ typedef typename Container::value_type vertex_type;
+
+ poly_container_adaptor() :
+ m_container(0),
+ m_index(0),
+ m_closed(false),
+ m_stop(false)
+ {}
+
+ poly_container_adaptor(const Container& data, bool closed) :
+ m_container(&data),
+ m_index(0),
+ m_closed(closed),
+ m_stop(false)
+ {}
+
+ void init(const Container& data, bool closed)
+ {
+ m_container = &data;
+ m_index = 0;
+ m_closed = closed;
+ m_stop = false;
+ }
+
+ void rewind(unsigned)
+ {
+ m_index = 0;
+ m_stop = false;
+ }
+
+ unsigned vertex(double* x, double* y)
+ {
+ if(m_index < m_container->size())
+ {
+ bool first = m_index == 0;
+ const vertex_type& v = (*m_container)[m_index++];
+ *x = v.x;
+ *y = v.y;
+ return first ? path_cmd_move_to : path_cmd_line_to;
+ }
+ *x = *y = 0.0;
+ if(m_closed && !m_stop)
+ {
+ m_stop = true;
+ return path_cmd_end_poly | path_flags_close;
+ }
+ return path_cmd_stop;
+ }
+
+ private:
+ const Container* m_container; // borrowed vertex container
+ unsigned m_index; // next element to emit
+ bool m_closed; // emit a closing end_poly command?
+ bool m_stop; // close command already emitted
+ };
+
+
+
+ //-----------------------------------------poly_container_reverse_adaptor
+ // Same as poly_container_adaptor but traverses the container back to
+ // front (last element becomes the move_to). Note the default-constructed
+ // state uses index -1, i.e. exhausted until init() is called.
+ template<class Container> class poly_container_reverse_adaptor
+ {
+ public:
+ typedef typename Container::value_type vertex_type;
+
+ poly_container_reverse_adaptor() :
+ m_container(0),
+ m_index(-1),
+ m_closed(false),
+ m_stop(false)
+ {}
+
+ poly_container_reverse_adaptor(Container& data, bool closed) :
+ m_container(&data),
+ m_index(-1),
+ m_closed(closed),
+ m_stop(false)
+ {}
+
+ void init(Container& data, bool closed)
+ {
+ m_container = &data;
+ m_index = m_container->size() - 1;
+ m_closed = closed;
+ m_stop = false;
+ }
+
+ void rewind(unsigned)
+ {
+ m_index = m_container->size() - 1;
+ m_stop = false;
+ }
+
+ unsigned vertex(double* x, double* y)
+ {
+ if(m_index >= 0)
+ {
+ bool first = m_index == int(m_container->size() - 1);
+ const vertex_type& v = (*m_container)[m_index--];
+ *x = v.x;
+ *y = v.y;
+ return first ? path_cmd_move_to : path_cmd_line_to;
+ }
+ *x = *y = 0.0;
+ if(m_closed && !m_stop)
+ {
+ m_stop = true;
+ return path_cmd_end_poly | path_flags_close;
+ }
+ return path_cmd_stop;
+ }
+
+ private:
+ Container* m_container; // borrowed vertex container
+ int m_index; // next element to emit (counts down)
+ bool m_closed; // emit a closing end_poly command?
+ bool m_stop; // close command already emitted
+ };
+
+
+
+
+
+ //--------------------------------------------------------line_adaptor
+ // Vertex source for a single line segment: wraps a two-point coordinate
+ // array in a poly_plain_adaptor, yielding move_to(x1,y1), line_to(x2,y2).
+ class line_adaptor
+ {
+ public:
+ typedef double value_type;
+
+ line_adaptor() : m_line(m_coord, 2, false) {}
+ line_adaptor(double x1, double y1, double x2, double y2) :
+ m_line(m_coord, 2, false)
+ {
+ m_coord[0] = x1;
+ m_coord[1] = y1;
+ m_coord[2] = x2;
+ m_coord[3] = y2;
+ }
+
+ // Replace the segment endpoints and rewind for re-traversal.
+ void init(double x1, double y1, double x2, double y2)
+ {
+ m_coord[0] = x1;
+ m_coord[1] = y1;
+ m_coord[2] = x2;
+ m_coord[3] = y2;
+ m_line.rewind(0);
+ }
+
+ void rewind(unsigned)
+ {
+ m_line.rewind(0);
+ }
+
+ unsigned vertex(double* x, double* y)
+ {
+ return m_line.vertex(x, y);
+ }
+
+ private:
+ double m_coord[4]; // x1, y1, x2, y2
+ poly_plain_adaptor<double> m_line; // adaptor over m_coord
+ };
+
+
+
+
+
+
+
+
+
+
+
+
+
+ //---------------------------------------------------------------path_base
+ // A container to store vertices with their flags.
+ // A path consists of a number of contours separated with "move_to"
+ // commands. The path storage can keep and maintain more than one
+ // path.
+ // To navigate to the beginning of a particular path, use rewind(path_id);
+ // Where path_id is what start_new_path() returns. So, when you call
+ // start_new_path() you need to store its return value somewhere else
+ // to navigate to the path afterwards.
+ //
+ // See also: vertex_source concept
+ //------------------------------------------------------------------------
+ template<class VertexContainer> class path_base
+ {
+ public:
+ typedef VertexContainer container_type;
+ typedef path_base<VertexContainer> self_type;
+
+ //--------------------------------------------------------------------
+ path_base() : m_vertices(), m_iterator(0) {}
+ void remove_all() { m_vertices.remove_all(); m_iterator = 0; }
+ void free_all() { m_vertices.free_all(); m_iterator = 0; }
+
+ // Make path functions
+ //--------------------------------------------------------------------
+ unsigned start_new_path();
+
+ void move_to(double x, double y);
+ void move_rel(double dx, double dy);
+
+ void line_to(double x, double y);
+ void line_rel(double dx, double dy);
+
+ void hline_to(double x);
+ void hline_rel(double dx);
+
+ void vline_to(double y);
+ void vline_rel(double dy);
+
+ // SVG-style elliptical arc (see the arc_to definition below).
+ void arc_to(double rx, double ry,
+ double angle,
+ bool large_arc_flag,
+ bool sweep_flag,
+ double x, double y);
+
+ void arc_rel(double rx, double ry,
+ double angle,
+ bool large_arc_flag,
+ bool sweep_flag,
+ double dx, double dy);
+
+ // Quadratic Bezier segments; the two-argument overloads reflect the
+ // previous control point ("smooth" curve).
+ void curve3(double x_ctrl, double y_ctrl,
+ double x_to, double y_to);
+
+ void curve3_rel(double dx_ctrl, double dy_ctrl,
+ double dx_to, double dy_to);
+
+ void curve3(double x_to, double y_to);
+
+ void curve3_rel(double dx_to, double dy_to);
+
+ // Cubic Bezier segments; the four-argument overloads reflect the
+ // previous control point ("smooth" curve).
+ void curve4(double x_ctrl1, double y_ctrl1,
+ double x_ctrl2, double y_ctrl2,
+ double x_to, double y_to);
+
+ void curve4_rel(double dx_ctrl1, double dy_ctrl1,
+ double dx_ctrl2, double dy_ctrl2,
+ double dx_to, double dy_to);
+
+ void curve4(double x_ctrl2, double y_ctrl2,
+ double x_to, double y_to);
+
+ void curve4_rel(double x_ctrl2, double y_ctrl2,
+ double x_to, double y_to);
+
+
+ void end_poly(unsigned flags = path_flags_close);
+ void close_polygon(unsigned flags = path_flags_none);
+
+ // Accessors
+ //--------------------------------------------------------------------
+ const container_type& vertices() const { return m_vertices; }
+ container_type& vertices() { return m_vertices; }
+
+ unsigned total_vertices() const;
+
+ void rel_to_abs(double* x, double* y) const;
+
+ unsigned last_vertex(double* x, double* y) const;
+ unsigned prev_vertex(double* x, double* y) const;
+
+ double last_x() const;
+ double last_y() const;
+
+ unsigned vertex(unsigned idx, double* x, double* y) const;
+ unsigned command(unsigned idx) const;
+
+ void modify_vertex(unsigned idx, double x, double y);
+ void modify_vertex(unsigned idx, double x, double y, unsigned cmd);
+ void modify_command(unsigned idx, unsigned cmd);
+
+ // VertexSource interface
+ //--------------------------------------------------------------------
+ void rewind(unsigned path_id);
+ unsigned vertex(double* x, double* y);
+
+ // Arrange the orientation of a polygon, all polygons in a path,
+ // or in all paths. After calling arrange_orientations() or
+ // arrange_orientations_all_paths(), all the polygons will have
+ // the same orientation, i.e. path_flags_cw or path_flags_ccw
+ //--------------------------------------------------------------------
+ unsigned arrange_polygon_orientation(unsigned start, path_flags_e orientation);
+ unsigned arrange_orientations(unsigned path_id, path_flags_e orientation);
+ void arrange_orientations_all_paths(path_flags_e orientation);
+ void invert_polygon(unsigned start);
+
+ // Flip all vertices horizontally or vertically,
+ // between x1 and x2, or between y1 and y2 respectively
+ //--------------------------------------------------------------------
+ void flip_x(double x1, double x2);
+ void flip_y(double y1, double y2);
+
+ // Concatenate path. The path is added as is.
+ //--------------------------------------------------------------------
+ template<class VertexSource>
+ void concat_path(VertexSource& vs, unsigned path_id = 0)
+ {
+ double x, y;
+ unsigned cmd;
+ vs.rewind(path_id);
+ while(!is_stop(cmd = vs.vertex(&x, &y)))
+ {
+ m_vertices.add_vertex(x, y, cmd);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Join path. The path is joined with the existing one, that is,
+ // it behaves as if the pen of a plotter was always down (drawing)
+ template<class VertexSource>
+ void join_path(VertexSource& vs, unsigned path_id = 0)
+ {
+ double x, y;
+ unsigned cmd;
+ vs.rewind(path_id);
+ cmd = vs.vertex(&x, &y);
+ if(!is_stop(cmd))
+ {
+ if(is_vertex(cmd))
+ {
+ double x0, y0;
+ unsigned cmd0 = last_vertex(&x0, &y0);
+ if(is_vertex(cmd0))
+ {
+ // Skip the leading vertex when it coincides with the
+ // current path end; otherwise continue with a line_to.
+ if(calc_distance(x, y, x0, y0) > vertex_dist_epsilon)
+ {
+ if(is_move_to(cmd)) cmd = path_cmd_line_to;
+ m_vertices.add_vertex(x, y, cmd);
+ }
+ }
+ else
+ {
+ if(is_stop(cmd0))
+ {
+ cmd = path_cmd_move_to;
+ }
+ else
+ {
+ if(is_move_to(cmd)) cmd = path_cmd_line_to;
+ }
+ m_vertices.add_vertex(x, y, cmd);
+ }
+ }
+ // All subsequent move_to commands become line_to ("pen down").
+ while(!is_stop(cmd = vs.vertex(&x, &y)))
+ {
+ m_vertices.add_vertex(x, y, is_move_to(cmd) ?
+ unsigned(path_cmd_line_to) :
+ cmd);
+ }
+ }
+ }
+
+ // Concatenate polygon/polyline.
+ //--------------------------------------------------------------------
+ template<class T> void concat_poly(const T* data,
+ unsigned num_points,
+ bool closed)
+ {
+ poly_plain_adaptor<T> poly(data, num_points, closed);
+ concat_path(poly);
+ }
+
+ // Join polygon/polyline continuously.
+ //--------------------------------------------------------------------
+ template<class T> void join_poly(const T* data,
+ unsigned num_points,
+ bool closed)
+ {
+ poly_plain_adaptor<T> poly(data, num_points, closed);
+ join_path(poly);
+ }
+
+ //--------------------------------------------------------------------
+ void translate(double dx, double dy, unsigned path_id=0);
+ void translate_all_paths(double dx, double dy);
+
+ //--------------------------------------------------------------------
+ // Apply "trans" to every vertex from path_id up to the next stop
+ // command (or the end of the container).
+ template<class Trans>
+ void transform(const Trans& trans, unsigned path_id=0)
+ {
+ unsigned num_ver = m_vertices.total_vertices();
+ for(; path_id < num_ver; path_id++)
+ {
+ double x, y;
+ unsigned cmd = m_vertices.vertex(path_id, &x, &y);
+ if(is_stop(cmd)) break;
+ if(is_vertex(cmd))
+ {
+ trans.transform(&x, &y);
+ m_vertices.modify_vertex(path_id, x, y);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Apply "trans" to every vertex of every stored path.
+ template<class Trans>
+ void transform_all_paths(const Trans& trans)
+ {
+ unsigned idx;
+ unsigned num_ver = m_vertices.total_vertices();
+ for(idx = 0; idx < num_ver; idx++)
+ {
+ double x, y;
+ if(is_vertex(m_vertices.vertex(idx, &x, &y)))
+ {
+ trans.transform(&x, &y);
+ m_vertices.modify_vertex(idx, x, y);
+ }
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ // If the end points of a path are very, very close then make them
+ // exactly equal so that the stroke converter is not confused.
+ // Returns the index of the first vertex of the next path.
+ //--------------------------------------------------------------------
+ unsigned align_path(unsigned idx = 0)
+ {
+ if (idx >= total_vertices() || !is_move_to(command(idx)))
+ {
+ return total_vertices();
+ }
+
+ // Remember the start point (the last of any leading move_to's).
+ double start_x, start_y;
+ for (; idx < total_vertices() && is_move_to(command(idx)); ++idx)
+ {
+ vertex(idx, &start_x, &start_y);
+ }
+ while (idx < total_vertices() && is_drawing(command(idx)))
+ ++idx;
+
+ // Snap the last drawing vertex onto the start point if close.
+ double x, y;
+ if (is_drawing(vertex(idx - 1, &x, &y)) &&
+ is_equal_eps(x, start_x, 1e-8) &&
+ is_equal_eps(y, start_y, 1e-8))
+ {
+ modify_vertex(idx - 1, start_x, start_y);
+ }
+
+ // Skip the remaining non-move_to commands of this path.
+ while (idx < total_vertices() && !is_move_to(command(idx)))
+ ++idx;
+ return idx;
+ }
+
+ void align_all_paths()
+ {
+ for (unsigned i = 0; i < total_vertices(); i = align_path(i));
+ }
+
+
+ private:
+ unsigned perceive_polygon_orientation(unsigned start, unsigned end);
+ void invert_polygon(unsigned start, unsigned end);
+
+ VertexContainer m_vertices; // vertex/command storage
+ unsigned m_iterator; // current position of the vertex() iterator
+ };
+
+ //------------------------------------------------------------------------
+ // Begin a new path: terminate the current one with a stop command (if
+ // the last command is not already a stop) and return the index of the
+ // first vertex of the new path, usable later with rewind().
+ template<class VC>
+ unsigned path_base<VC>::start_new_path()
+ {
+ if(!is_stop(m_vertices.last_command()))
+ {
+ m_vertices.add_vertex(0.0, 0.0, path_cmd_stop);
+ }
+ return m_vertices.total_vertices();
+ }
+
+
+ //------------------------------------------------------------------------
+ // Convert relative coordinates to absolute by adding the coordinates of
+ // the last vertex, when there is one.
+ template<class VC>
+ inline void path_base<VC>::rel_to_abs(double* x, double* y) const
+ {
+ if(m_vertices.total_vertices())
+ {
+ double x2;
+ double y2;
+ if(is_vertex(m_vertices.last_vertex(&x2, &y2)))
+ {
+ *x += x2;
+ *y += y2;
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ // Append a move_to vertex at the absolute position (x, y).
+ template<class VC>
+ inline void path_base<VC>::move_to(double x, double y)
+ {
+ m_vertices.add_vertex(x, y, path_cmd_move_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::move_rel(double dx, double dy)
+ {
+ rel_to_abs(&dx, &dy);
+ m_vertices.add_vertex(dx, dy, path_cmd_move_to);
+ }
+
+ //------------------------------------------------------------------------
+ // Append a line_to vertex at the absolute position (x, y).
+ template<class VC>
+ inline void path_base<VC>::line_to(double x, double y)
+ {
+ m_vertices.add_vertex(x, y, path_cmd_line_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::line_rel(double dx, double dy)
+ {
+ rel_to_abs(&dx, &dy);
+ m_vertices.add_vertex(dx, dy, path_cmd_line_to);
+ }
+
+ //------------------------------------------------------------------------
+ // Horizontal line to absolute x (y stays at the last vertex's y).
+ template<class VC>
+ inline void path_base<VC>::hline_to(double x)
+ {
+ m_vertices.add_vertex(x, last_y(), path_cmd_line_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::hline_rel(double dx)
+ {
+ double dy = 0;
+ rel_to_abs(&dx, &dy);
+ m_vertices.add_vertex(dx, dy, path_cmd_line_to);
+ }
+
+ //------------------------------------------------------------------------
+ // Vertical line to absolute y (x stays at the last vertex's x).
+ template<class VC>
+ inline void path_base<VC>::vline_to(double y)
+ {
+ m_vertices.add_vertex(last_x(), y, path_cmd_line_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::vline_rel(double dy)
+ {
+ double dx = 0;
+ rel_to_abs(&dx, &dy);
+ m_vertices.add_vertex(dx, dy, path_cmd_line_to);
+ }
+
+ //------------------------------------------------------------------------
+ // SVG-style elliptical arc from the current point to (x, y). Degenerate
+ // radii fall back to a straight line; coincident endpoints drop the
+ // segment entirely; otherwise the arc is approximated with Bezier
+ // segments (bezier_arc_svg) joined onto the path. Without a current
+ // vertex this degenerates to a plain move_to.
+ template<class VC>
+ void path_base<VC>::arc_to(double rx, double ry,
+ double angle,
+ bool large_arc_flag,
+ bool sweep_flag,
+ double x, double y)
+ {
+ if(m_vertices.total_vertices() && is_vertex(m_vertices.last_command()))
+ {
+ const double epsilon = 1e-30;
+ double x0 = 0.0;
+ double y0 = 0.0;
+ m_vertices.last_vertex(&x0, &y0);
+
+ rx = fabs(rx);
+ ry = fabs(ry);
+
+ // Ensure radii are valid
+ //-------------------------
+ if(rx < epsilon || ry < epsilon)
+ {
+ line_to(x, y);
+ return;
+ }
+
+ if(calc_distance(x0, y0, x, y) < epsilon)
+ {
+ // If the endpoints (x, y) and (x0, y0) are identical, then this
+ // is equivalent to omitting the elliptical arc segment entirely.
+ return;
+ }
+ bezier_arc_svg a(x0, y0, rx, ry, angle, large_arc_flag, sweep_flag, x, y);
+ if(a.radii_ok())
+ {
+ join_path(a);
+ }
+ else
+ {
+ line_to(x, y);
+ }
+ }
+ else
+ {
+ move_to(x, y);
+ }
+ }
+
+ //------------------------------------------------------------------------
+ // Relative-coordinate version of arc_to().
+ template<class VC>
+ void path_base<VC>::arc_rel(double rx, double ry,
+ double angle,
+ bool large_arc_flag,
+ bool sweep_flag,
+ double dx, double dy)
+ {
+ rel_to_abs(&dx, &dy);
+ arc_to(rx, ry, angle, large_arc_flag, sweep_flag, dx, dy);
+ }
+
+ //------------------------------------------------------------------------
+ // Quadratic Bezier: append control point and end point as curve3 vertices.
+ template<class VC>
+ void path_base<VC>::curve3(double x_ctrl, double y_ctrl,
+ double x_to, double y_to)
+ {
+ m_vertices.add_vertex(x_ctrl, y_ctrl, path_cmd_curve3);
+ m_vertices.add_vertex(x_to, y_to, path_cmd_curve3);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::curve3_rel(double dx_ctrl, double dy_ctrl,
+ double dx_to, double dy_to)
+ {
+ rel_to_abs(&dx_ctrl, &dy_ctrl);
+ rel_to_abs(&dx_to, &dy_to);
+ m_vertices.add_vertex(dx_ctrl, dy_ctrl, path_cmd_curve3);
+ m_vertices.add_vertex(dx_to, dy_to, path_cmd_curve3);
+ }
+
+ //------------------------------------------------------------------------
+ // "Smooth" quadratic Bezier: the control point is the reflection of the
+ // previous control point about the current point (or the current point
+ // itself when the previous segment was not a curve).
+ template<class VC>
+ void path_base<VC>::curve3(double x_to, double y_to)
+ {
+ double x0;
+ double y0;
+ if(is_vertex(m_vertices.last_vertex(&x0, &y0)))
+ {
+ double x_ctrl;
+ double y_ctrl;
+ unsigned cmd = m_vertices.prev_vertex(&x_ctrl, &y_ctrl);
+ if(is_curve(cmd))
+ {
+ x_ctrl = x0 + x0 - x_ctrl;
+ y_ctrl = y0 + y0 - y_ctrl;
+ }
+ else
+ {
+ x_ctrl = x0;
+ y_ctrl = y0;
+ }
+ curve3(x_ctrl, y_ctrl, x_to, y_to);
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::curve3_rel(double dx_to, double dy_to)
+ {
+ rel_to_abs(&dx_to, &dy_to);
+ curve3(dx_to, dy_to);
+ }
+
+ //------------------------------------------------------------------------
+ // Cubic Bezier: two control points plus the end point as curve4 vertices.
+ template<class VC>
+ void path_base<VC>::curve4(double x_ctrl1, double y_ctrl1,
+ double x_ctrl2, double y_ctrl2,
+ double x_to, double y_to)
+ {
+ m_vertices.add_vertex(x_ctrl1, y_ctrl1, path_cmd_curve4);
+ m_vertices.add_vertex(x_ctrl2, y_ctrl2, path_cmd_curve4);
+ m_vertices.add_vertex(x_to, y_to, path_cmd_curve4);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::curve4_rel(double dx_ctrl1, double dy_ctrl1,
+ double dx_ctrl2, double dy_ctrl2,
+ double dx_to, double dy_to)
+ {
+ rel_to_abs(&dx_ctrl1, &dy_ctrl1);
+ rel_to_abs(&dx_ctrl2, &dy_ctrl2);
+ rel_to_abs(&dx_to, &dy_to);
+ m_vertices.add_vertex(dx_ctrl1, dy_ctrl1, path_cmd_curve4);
+ m_vertices.add_vertex(dx_ctrl2, dy_ctrl2, path_cmd_curve4);
+ m_vertices.add_vertex(dx_to, dy_to, path_cmd_curve4);
+ }
+
+ //------------------------------------------------------------------------
+ // "Smooth" cubic Bezier: the first control point is the reflection of
+ // the previous control point about the current point.
+ template<class VC>
+ void path_base<VC>::curve4(double x_ctrl2, double y_ctrl2,
+ double x_to, double y_to)
+ {
+ double x0;
+ double y0;
+ if(is_vertex(last_vertex(&x0, &y0)))
+ {
+ double x_ctrl1;
+ double y_ctrl1;
+ unsigned cmd = prev_vertex(&x_ctrl1, &y_ctrl1);
+ if(is_curve(cmd))
+ {
+ x_ctrl1 = x0 + x0 - x_ctrl1;
+ y_ctrl1 = y0 + y0 - y_ctrl1;
+ }
+ else
+ {
+ x_ctrl1 = x0;
+ y_ctrl1 = y0;
+ }
+ curve4(x_ctrl1, y_ctrl1, x_ctrl2, y_ctrl2, x_to, y_to);
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::curve4_rel(double dx_ctrl2, double dy_ctrl2,
+ double dx_to, double dy_to)
+ {
+ rel_to_abs(&dx_ctrl2, &dy_ctrl2);
+ rel_to_abs(&dx_to, &dy_to);
+ curve4(dx_ctrl2, dy_ctrl2, dx_to, dy_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::end_poly(unsigned flags)
+ {
+ if(is_vertex(m_vertices.last_command()))
+ {
+ m_vertices.add_vertex(0.0, 0.0, path_cmd_end_poly | flags);
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::close_polygon(unsigned flags)
+ {
+ end_poly(path_flags_close | flags);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline unsigned path_base<VC>::total_vertices() const
+ {
+ return m_vertices.total_vertices();
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline unsigned path_base<VC>::last_vertex(double* x, double* y) const
+ {
+ return m_vertices.last_vertex(x, y);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline unsigned path_base<VC>::prev_vertex(double* x, double* y) const
+ {
+ return m_vertices.prev_vertex(x, y);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline double path_base<VC>::last_x() const
+ {
+ return m_vertices.last_x();
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline double path_base<VC>::last_y() const
+ {
+ return m_vertices.last_y();
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline unsigned path_base<VC>::vertex(unsigned idx, double* x, double* y) const
+ {
+ return m_vertices.vertex(idx, x, y);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline unsigned path_base<VC>::command(unsigned idx) const
+ {
+ return m_vertices.command(idx);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::modify_vertex(unsigned idx, double x, double y)
+ {
+ m_vertices.modify_vertex(idx, x, y);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::modify_vertex(unsigned idx, double x, double y, unsigned cmd)
+ {
+ m_vertices.modify_vertex(idx, x, y, cmd);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::modify_command(unsigned idx, unsigned cmd)
+ {
+ m_vertices.modify_command(idx, cmd);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::rewind(unsigned path_id)
+ {
+ m_iterator = path_id;
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline unsigned path_base<VC>::vertex(double* x, double* y)
+ {
+ if(m_iterator >= m_vertices.total_vertices()) return path_cmd_stop;
+ return m_vertices.vertex(m_iterator++, x, y);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ unsigned path_base<VC>::perceive_polygon_orientation(unsigned start,
+ unsigned end)
+ {
+ // Calculate signed area (double area to be exact)
+ //---------------------
+ unsigned np = end - start;
+ double area = 0.0;
+ unsigned i;
+ for(i = 0; i < np; i++)
+ {
+ double x1, y1, x2, y2;
+ m_vertices.vertex(start + i, &x1, &y1);
+ m_vertices.vertex(start + (i + 1) % np, &x2, &y2);
+ area += x1 * y2 - y1 * x2;
+ }
+ return (area < 0.0) ? path_flags_cw : path_flags_ccw;
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::invert_polygon(unsigned start, unsigned end)
+ {
+ unsigned i;
+ unsigned tmp_cmd = m_vertices.command(start);
+
+ --end; // Make "end" inclusive
+
+ // Shift all commands to one position
+ for(i = start; i < end; i++)
+ {
+ m_vertices.modify_command(i, m_vertices.command(i + 1));
+ }
+
+ // Assign starting command to the ending command
+ m_vertices.modify_command(end, tmp_cmd);
+
+ // Reverse the polygon
+ while(end > start)
+ {
+ m_vertices.swap_vertices(start++, end--);
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::invert_polygon(unsigned start)
+ {
+ // Skip all non-vertices at the beginning
+ while(start < m_vertices.total_vertices() &&
+ !is_vertex(m_vertices.command(start))) ++start;
+
+ // Skip all insignificant move_to
+ while(start+1 < m_vertices.total_vertices() &&
+ is_move_to(m_vertices.command(start)) &&
+ is_move_to(m_vertices.command(start+1))) ++start;
+
+ // Find the last vertex
+ unsigned end = start + 1;
+ while(end < m_vertices.total_vertices() &&
+ !is_next_poly(m_vertices.command(end))) ++end;
+
+ invert_polygon(start, end);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ unsigned path_base<VC>::arrange_polygon_orientation(unsigned start,
+ path_flags_e orientation)
+ {
+ if(orientation == path_flags_none) return start;
+
+ // Skip all non-vertices at the beginning
+ while(start < m_vertices.total_vertices() &&
+ !is_vertex(m_vertices.command(start))) ++start;
+
+ // Skip all insignificant move_to
+ while(start+1 < m_vertices.total_vertices() &&
+ is_move_to(m_vertices.command(start)) &&
+ is_move_to(m_vertices.command(start+1))) ++start;
+
+ // Find the last vertex
+ unsigned end = start + 1;
+ while(end < m_vertices.total_vertices() &&
+ !is_next_poly(m_vertices.command(end))) ++end;
+
+ if(end - start > 2)
+ {
+ if(perceive_polygon_orientation(start, end) != unsigned(orientation))
+ {
+ // Invert polygon, set orientation flag, and skip all end_poly
+ invert_polygon(start, end);
+ unsigned cmd;
+ while(end < m_vertices.total_vertices() &&
+ is_end_poly(cmd = m_vertices.command(end)))
+ {
+ m_vertices.modify_command(end++, set_orientation(cmd, orientation));
+ }
+ }
+ }
+ return end;
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ unsigned path_base<VC>::arrange_orientations(unsigned start,
+ path_flags_e orientation)
+ {
+ if(orientation != path_flags_none)
+ {
+ while(start < m_vertices.total_vertices())
+ {
+ start = arrange_polygon_orientation(start, orientation);
+ if(is_stop(m_vertices.command(start)))
+ {
+ ++start;
+ break;
+ }
+ }
+ }
+ return start;
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::arrange_orientations_all_paths(path_flags_e orientation)
+ {
+ if(orientation != path_flags_none)
+ {
+ unsigned start = 0;
+ while(start < m_vertices.total_vertices())
+ {
+ start = arrange_orientations(start, orientation);
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::flip_x(double x1, double x2)
+ {
+ unsigned i;
+ double x, y;
+ for(i = 0; i < m_vertices.total_vertices(); i++)
+ {
+ unsigned cmd = m_vertices.vertex(i, &x, &y);
+ if(is_vertex(cmd))
+ {
+ m_vertices.modify_vertex(i, x2 - x + x1, y);
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::flip_y(double y1, double y2)
+ {
+ unsigned i;
+ double x, y;
+ for(i = 0; i < m_vertices.total_vertices(); i++)
+ {
+ unsigned cmd = m_vertices.vertex(i, &x, &y);
+ if(is_vertex(cmd))
+ {
+ m_vertices.modify_vertex(i, x, y2 - y + y1);
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::translate(double dx, double dy, unsigned path_id)
+ {
+ unsigned num_ver = m_vertices.total_vertices();
+ for(; path_id < num_ver; path_id++)
+ {
+ double x, y;
+ unsigned cmd = m_vertices.vertex(path_id, &x, &y);
+ if(is_stop(cmd)) break;
+ if(is_vertex(cmd))
+ {
+ x += dx;
+ y += dy;
+ m_vertices.modify_vertex(path_id, x, y);
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::translate_all_paths(double dx, double dy)
+ {
+ unsigned idx;
+ unsigned num_ver = m_vertices.total_vertices();
+ for(idx = 0; idx < num_ver; idx++)
+ {
+ double x, y;
+ if(is_vertex(m_vertices.vertex(idx, &x, &y)))
+ {
+ x += dx;
+ y += dy;
+ m_vertices.modify_vertex(idx, x, y);
+ }
+ }
+ }
+
+ //-----------------------------------------------------vertex_stl_storage
+ template<class Container> class vertex_stl_storage
+ {
+ public:
+ typedef typename Container::value_type vertex_type;
+ typedef typename vertex_type::value_type value_type;
+
+ void remove_all() { m_vertices.clear(); }
+ void free_all() { m_vertices.clear(); }
+
+ void add_vertex(double x, double y, unsigned cmd)
+ {
+ m_vertices.push_back(vertex_type(value_type(x),
+ value_type(y),
+ int8u(cmd)));
+ }
+
+ void modify_vertex(unsigned idx, double x, double y)
+ {
+ vertex_type& v = m_vertices[idx];
+ v.x = value_type(x);
+ v.y = value_type(y);
+ }
+
+ void modify_vertex(unsigned idx, double x, double y, unsigned cmd)
+ {
+ vertex_type& v = m_vertices[idx];
+ v.x = value_type(x);
+ v.y = value_type(y);
+ v.cmd = int8u(cmd);
+ }
+
+ void modify_command(unsigned idx, unsigned cmd)
+ {
+ m_vertices[idx].cmd = int8u(cmd);
+ }
+
+ void swap_vertices(unsigned v1, unsigned v2)
+ {
+ vertex_type t = m_vertices[v1];
+ m_vertices[v1] = m_vertices[v2];
+ m_vertices[v2] = t;
+ }
+
+ unsigned last_command() const
+ {
+ return m_vertices.size() ?
+ m_vertices[m_vertices.size() - 1].cmd :
+ path_cmd_stop;
+ }
+
+ unsigned last_vertex(double* x, double* y) const
+ {
+ if(m_vertices.size() == 0)
+ {
+ *x = *y = 0.0;
+ return path_cmd_stop;
+ }
+ return vertex(m_vertices.size() - 1, x, y);
+ }
+
+ unsigned prev_vertex(double* x, double* y) const
+ {
+ if(m_vertices.size() < 2)
+ {
+ *x = *y = 0.0;
+ return path_cmd_stop;
+ }
+ return vertex(m_vertices.size() - 2, x, y);
+ }
+
+ double last_x() const
+ {
+ return m_vertices.size() ? m_vertices[m_vertices.size() - 1].x : 0.0;
+ }
+
+ double last_y() const
+ {
+ return m_vertices.size() ? m_vertices[m_vertices.size() - 1].y : 0.0;
+ }
+
+ unsigned total_vertices() const
+ {
+ return m_vertices.size();
+ }
+
+ unsigned vertex(unsigned idx, double* x, double* y) const
+ {
+ const vertex_type& v = m_vertices[idx];
+ *x = v.x;
+ *y = v.y;
+ return v.cmd;
+ }
+
+ unsigned command(unsigned idx) const
+ {
+ return m_vertices[idx].cmd;
+ }
+
+ private:
+ Container m_vertices;
+ };
+
+ //-----------------------------------------------------------path_storage
+ typedef path_base<vertex_block_storage<double> > path_storage;
+
+ // Example of declarations path_storage with pod_bvector as a container
+ //-----------------------------------------------------------------------
+ //typedef path_base<vertex_stl_storage<pod_bvector<vertex_d> > > path_storage;
+
+}
+
+
+
+// Example of declarations path_storage with std::vector as a container
+//---------------------------------------------------------------------------
+//#include <vector>
+//namespace agg
+//{
+// typedef path_base<vertex_stl_storage<std::vector<vertex_d> > > stl_path_storage;
+//}
+
+
+
+
+#endif
diff --git a/src/agg/agg_pixfmt_base.h b/src/agg/agg_pixfmt_base.h
new file mode 100644
index 000000000..57ae19cfe
--- /dev/null
+++ b/src/agg/agg_pixfmt_base.h
@@ -0,0 +1,97 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_PIXFMT_BASE_INCLUDED
+#define AGG_PIXFMT_BASE_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_color_gray.h"
+#include "agg_color_rgba.h"
+
+namespace agg
+{
+    // Empty tag types used for compile-time dispatch on a pixel format's
+    // category (see pixfmt_category typedefs in the pixel format classes).
+    struct pixfmt_gray_tag
+    {
+    };
+
+    struct pixfmt_rgb_tag
+    {
+    };
+
+    struct pixfmt_rgba_tag
+    {
+    };
+
+ //--------------------------------------------------------------blender_base
+ template<class ColorT, class Order = void>
+ struct blender_base
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+
+ static rgba get(value_type r, value_type g, value_type b, value_type a, cover_type cover = cover_full)
+ {
+ if (cover > cover_none)
+ {
+ rgba c(
+ color_type::to_double(r),
+ color_type::to_double(g),
+ color_type::to_double(b),
+ color_type::to_double(a));
+
+ if (cover < cover_full)
+ {
+ double x = double(cover) / cover_full;
+ c.r *= x;
+ c.g *= x;
+ c.b *= x;
+ c.a *= x;
+ }
+
+ return c;
+ }
+ else return rgba::no_color();
+ }
+
+ static rgba get(const value_type* p, cover_type cover = cover_full)
+ {
+ return get(
+ p[order_type::R],
+ p[order_type::G],
+ p[order_type::B],
+ p[order_type::A],
+ cover);
+ }
+
+ static void set(value_type* p, value_type r, value_type g, value_type b, value_type a)
+ {
+ p[order_type::R] = r;
+ p[order_type::G] = g;
+ p[order_type::B] = b;
+ p[order_type::A] = a;
+ }
+
+ static void set(value_type* p, const rgba& c)
+ {
+ p[order_type::R] = color_type::from_double(c.r);
+ p[order_type::G] = color_type::from_double(c.g);
+ p[order_type::B] = color_type::from_double(c.b);
+ p[order_type::A] = color_type::from_double(c.a);
+ }
+ };
+}
+
+#endif
diff --git a/src/agg/agg_pixfmt_gray.h b/src/agg/agg_pixfmt_gray.h
new file mode 100644
index 000000000..d03dc8650
--- /dev/null
+++ b/src/agg/agg_pixfmt_gray.h
@@ -0,0 +1,738 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for high precision colors has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_PIXFMT_GRAY_INCLUDED
+#define AGG_PIXFMT_GRAY_INCLUDED
+
+#include <string.h>
+#include "agg_pixfmt_base.h"
+#include "agg_rendering_buffer.h"
+
+namespace agg
+{
+
+ //============================================================blender_gray
+ template<class ColorT> struct blender_gray
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ // Blend pixels using the non-premultiplied form of Alvy-Ray Smith's
+ // compositing function. Since the render buffer is opaque we skip the
+ // initial premultiply and final demultiply.
+
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cv, value_type alpha, cover_type cover)
+ {
+ blend_pix(p, cv, color_type::mult_cover(alpha, cover));
+ }
+
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cv, value_type alpha)
+ {
+ *p = color_type::lerp(*p, cv, alpha);
+ }
+ };
+
+
+ //======================================================blender_gray_pre
+ template<class ColorT> struct blender_gray_pre
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ // Blend pixels using the premultiplied form of Alvy-Ray Smith's
+ // compositing function.
+
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cv, value_type alpha, cover_type cover)
+ {
+ blend_pix(p, color_type::mult_cover(cv, cover), color_type::mult_cover(alpha, cover));
+ }
+
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cv, value_type alpha)
+ {
+ *p = color_type::prelerp(*p, cv, alpha);
+ }
+ };
+
+
+
+ //=====================================================apply_gamma_dir_gray
+ template<class ColorT, class GammaLut> class apply_gamma_dir_gray
+ {
+ public:
+ typedef typename ColorT::value_type value_type;
+
+ apply_gamma_dir_gray(const GammaLut& gamma) : m_gamma(gamma) {}
+
+ AGG_INLINE void operator () (value_type* p)
+ {
+ *p = m_gamma.dir(*p);
+ }
+
+ private:
+ const GammaLut& m_gamma;
+ };
+
+
+
+ //=====================================================apply_gamma_inv_gray
+ template<class ColorT, class GammaLut> class apply_gamma_inv_gray
+ {
+ public:
+ typedef typename ColorT::value_type value_type;
+
+ apply_gamma_inv_gray(const GammaLut& gamma) : m_gamma(gamma) {}
+
+ AGG_INLINE void operator () (value_type* p)
+ {
+ *p = m_gamma.inv(*p);
+ }
+
+ private:
+ const GammaLut& m_gamma;
+ };
+
+
+
+ //=================================================pixfmt_alpha_blend_gray
+ template<class Blender, class RenBuf, unsigned Step = 1, unsigned Offset = 0>
+ class pixfmt_alpha_blend_gray
+ {
+ public:
+ typedef pixfmt_gray_tag pixfmt_category;
+ typedef RenBuf rbuf_type;
+ typedef typename rbuf_type::row_data row_data;
+ typedef Blender blender_type;
+ typedef typename blender_type::color_type color_type;
+ typedef int order_type; // A fake one
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ enum
+ {
+ num_components = 1,
+ pix_width = sizeof(value_type) * Step,
+ pix_step = Step,
+ pix_offset = Offset,
+ };
+ struct pixel_type
+ {
+ value_type c[num_components];
+
+ void set(value_type v)
+ {
+ c[0] = v;
+ }
+
+ void set(const color_type& color)
+ {
+ set(color.v);
+ }
+
+ void get(value_type& v) const
+ {
+ v = c[0];
+ }
+
+ color_type get() const
+ {
+ return color_type(c[0]);
+ }
+
+ pixel_type* next()
+ {
+ return (pixel_type*)(c + pix_step);
+ }
+
+ const pixel_type* next() const
+ {
+ return (const pixel_type*)(c + pix_step);
+ }
+
+ pixel_type* advance(int n)
+ {
+ return (pixel_type*)(c + n * pix_step);
+ }
+
+ const pixel_type* advance(int n) const
+ {
+ return (const pixel_type*)(c + n * pix_step);
+ }
+ };
+
+ private:
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p,
+ value_type v, value_type a,
+ unsigned cover)
+ {
+ blender_type::blend_pix(p->c, v, a, cover);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p, value_type v, value_type a)
+ {
+ blender_type::blend_pix(p->c, v, a);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p, const color_type& c, unsigned cover)
+ {
+ blender_type::blend_pix(p->c, c.v, c.a, cover);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p, const color_type& c)
+ {
+ blender_type::blend_pix(p->c, c.v, c.a);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover)
+ {
+ if (!c.is_transparent())
+ {
+ if (c.is_opaque() && cover == cover_mask)
+ {
+ p->set(c);
+ }
+ else
+ {
+ blend_pix(p, c, cover);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c)
+ {
+ if (!c.is_transparent())
+ {
+ if (c.is_opaque())
+ {
+ p->set(c);
+ }
+ else
+ {
+ blend_pix(p, c);
+ }
+ }
+ }
+
+ public:
+ //--------------------------------------------------------------------
+ explicit pixfmt_alpha_blend_gray(rbuf_type& rb) :
+ m_rbuf(&rb)
+ {}
+ void attach(rbuf_type& rb) { m_rbuf = &rb; }
+ //--------------------------------------------------------------------
+
+ template<class PixFmt>
+ bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2)
+ {
+ rect_i r(x1, y1, x2, y2);
+ if (r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1)))
+ {
+ int stride = pixf.stride();
+ m_rbuf->attach(pixf.pix_ptr(r.x1, stride < 0 ? r.y2 : r.y1),
+ (r.x2 - r.x1) + 1,
+ (r.y2 - r.y1) + 1,
+ stride);
+ return true;
+ }
+ return false;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE unsigned width() const { return m_rbuf->width(); }
+ AGG_INLINE unsigned height() const { return m_rbuf->height(); }
+ AGG_INLINE int stride() const { return m_rbuf->stride(); }
+
+ //--------------------------------------------------------------------
+ int8u* row_ptr(int y) { return m_rbuf->row_ptr(y); }
+ const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); }
+ row_data row(int y) const { return m_rbuf->row(y); }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE int8u* pix_ptr(int x, int y)
+ {
+ return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset);
+ }
+
+ AGG_INLINE const int8u* pix_ptr(int x, int y) const
+ {
+ return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset);
+ }
+
+ // Return pointer to pixel value, forcing row to be allocated.
+ AGG_INLINE pixel_type* pix_value_ptr(int x, int y, unsigned len)
+ {
+ return (pixel_type*)(m_rbuf->row_ptr(x, y, len) + sizeof(value_type) * (x * pix_step + pix_offset));
+ }
+
+ // Return pointer to pixel value, or null if row not allocated.
+ AGG_INLINE const pixel_type* pix_value_ptr(int x, int y) const
+ {
+ int8u* p = m_rbuf->row_ptr(y);
+ return p ? (pixel_type*)(p + sizeof(value_type) * (x * pix_step + pix_offset)) : 0;
+ }
+
+ // Get pixel pointer from raw buffer pointer.
+ AGG_INLINE static pixel_type* pix_value_ptr(void* p)
+ {
+ return (pixel_type*)((value_type*)p + pix_offset);
+ }
+
+ // Get pixel pointer from raw buffer pointer.
+ AGG_INLINE static const pixel_type* pix_value_ptr(const void* p)
+ {
+ return (const pixel_type*)((const value_type*)p + pix_offset);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE static void write_plain_color(void* p, color_type c)
+ {
+ // Grayscale formats are implicitly premultiplied.
+ c.premultiply();
+ pix_value_ptr(p)->set(c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE static color_type read_plain_color(const void* p)
+ {
+ return pix_value_ptr(p)->get();
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE static void make_pix(int8u* p, const color_type& c)
+ {
+ ((pixel_type*)p)->set(c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE color_type pixel(int x, int y) const
+ {
+ if (const pixel_type* p = pix_value_ptr(x, y))
+ {
+ return p->get();
+ }
+ return color_type::no_color();
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_pixel(int x, int y, const color_type& c)
+ {
+ pix_value_ptr(x, y, 1)->set(c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover)
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y, 1), c, cover);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_hline(int x, int y,
+ unsigned len,
+ const color_type& c)
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+ do
+ {
+ p->set(c);
+ p = p->next();
+ }
+ while(--len);
+ }
+
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_vline(int x, int y,
+ unsigned len,
+ const color_type& c)
+ {
+ do
+ {
+ pix_value_ptr(x, y++, 1)->set(c);
+ }
+ while (--len);
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_hline(int x, int y,
+ unsigned len,
+ const color_type& c,
+ int8u cover)
+ {
+ if (!c.is_transparent())
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ if (c.is_opaque() && cover == cover_mask)
+ {
+ do
+ {
+ p->set(c);
+ p = p->next();
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ blend_pix(p, c, cover);
+ p = p->next();
+ }
+ while (--len);
+ }
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_vline(int x, int y,
+ unsigned len,
+ const color_type& c,
+ int8u cover)
+ {
+ if (!c.is_transparent())
+ {
+ if (c.is_opaque() && cover == cover_mask)
+ {
+ do
+ {
+ pix_value_ptr(x, y++, 1)->set(c);
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ blend_pix(pix_value_ptr(x, y++, 1), c, cover);
+ }
+ while (--len);
+ }
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_solid_hspan(int x, int y,
+ unsigned len,
+ const color_type& c,
+ const int8u* covers)
+ {
+ if (!c.is_transparent())
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ do
+ {
+ if (c.is_opaque() && *covers == cover_mask)
+ {
+ p->set(c);
+ }
+ else
+ {
+ blend_pix(p, c, *covers);
+ }
+ p = p->next();
+ ++covers;
+ }
+ while (--len);
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_solid_vspan(int x, int y,
+ unsigned len,
+ const color_type& c,
+ const int8u* covers)
+ {
+ if (!c.is_transparent())
+ {
+ do
+ {
+ pixel_type* p = pix_value_ptr(x, y++, 1);
+
+ if (c.is_opaque() && *covers == cover_mask)
+ {
+ p->set(c);
+ }
+ else
+ {
+ blend_pix(p, c, *covers);
+ }
+ ++covers;
+ }
+ while (--len);
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void copy_color_hspan(int x, int y,
+ unsigned len,
+ const color_type* colors)
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ do
+ {
+ p->set(*colors++);
+ p = p->next();
+ }
+ while (--len);
+ }
+
+
+ //--------------------------------------------------------------------
+ void copy_color_vspan(int x, int y,
+ unsigned len,
+ const color_type* colors)
+ {
+ do
+ {
+ pix_value_ptr(x, y++, 1)->set(*colors++);
+ }
+ while (--len);
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_color_hspan(int x, int y,
+ unsigned len,
+ const color_type* colors,
+ const int8u* covers,
+ int8u cover)
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ if (covers)
+ {
+ do
+ {
+ copy_or_blend_pix(p, *colors++, *covers++);
+ p = p->next();
+ }
+ while (--len);
+ }
+ else
+ {
+ if (cover == cover_mask)
+ {
+ do
+ {
+ copy_or_blend_pix(p, *colors++);
+ p = p->next();
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ copy_or_blend_pix(p, *colors++, cover);
+ p = p->next();
+ }
+ while (--len);
+ }
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_color_vspan(int x, int y,
+ unsigned len,
+ const color_type* colors,
+ const int8u* covers,
+ int8u cover)
+ {
+ if (covers)
+ {
+ do
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, *covers++);
+ }
+ while (--len);
+ }
+ else
+ {
+ if (cover == cover_mask)
+ {
+ do
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++);
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, cover);
+ }
+ while (--len);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ template<class Function> void for_each_pixel(Function f)
+ {
+ unsigned y;
+ for (y = 0; y < height(); ++y)
+ {
+ row_data r = m_rbuf->row(y);
+ if (r.ptr)
+ {
+ unsigned len = r.x2 - r.x1 + 1;
+ pixel_type* p = pix_value_ptr(r.x1, y, len);
+ do
+ {
+ f(p->c);
+ p = p->next();
+ }
+ while (--len);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLut> void apply_gamma_dir(const GammaLut& g)
+ {
+ for_each_pixel(apply_gamma_dir_gray<color_type, GammaLut>(g));
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLut> void apply_gamma_inv(const GammaLut& g)
+ {
+ for_each_pixel(apply_gamma_inv_gray<color_type, GammaLut>(g));
+ }
+
+ //--------------------------------------------------------------------
+ template<class RenBuf2>
+ void copy_from(const RenBuf2& from,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len)
+ {
+ if (const int8u* p = from.row_ptr(ysrc))
+ {
+ memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width,
+ p + xsrc * pix_width,
+ len * pix_width);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Blend from single color, using grayscale surface as alpha channel.
+ template<class SrcPixelFormatRenderer>
+ void blend_from_color(const SrcPixelFormatRenderer& from,
+ const color_type& color,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len,
+ int8u cover)
+ {
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+ typedef typename SrcPixelFormatRenderer::color_type src_color_type;
+
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+ {
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+ do
+ {
+ copy_or_blend_pix(pdst, color, src_color_type::scale_cover(cover, psrc->c[0]));
+ psrc = psrc->next();
+ pdst = pdst->next();
+ }
+ while (--len);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Blend from color table, using grayscale surface as indexes into table.
+ // Obviously, this only works for integer value types.
+ template<class SrcPixelFormatRenderer>
+ void blend_from_lut(const SrcPixelFormatRenderer& from,
+ const color_type* color_lut,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len,
+ int8u cover)
+ {
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+ {
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+ do
+ {
+ copy_or_blend_pix(pdst, color_lut[psrc->c[0]], cover);
+ psrc = psrc->next();
+ pdst = pdst->next();
+ }
+ while (--len);
+ }
+ }
+
+ private:
+ rbuf_type* m_rbuf;
+ };
+
+    // Concrete blender instantiations for 8/16/32-bit linear and sRGB gray.
+    typedef blender_gray<gray8> blender_gray8;
+    typedef blender_gray<sgray8> blender_sgray8;
+    typedef blender_gray<gray16> blender_gray16;
+    typedef blender_gray<gray32> blender_gray32;
+
+    typedef blender_gray_pre<gray8> blender_gray8_pre;
+    typedef blender_gray_pre<sgray8> blender_sgray8_pre;
+    typedef blender_gray_pre<gray16> blender_gray16_pre;
+    typedef blender_gray_pre<gray32> blender_gray32_pre;
+
+    // Ready-to-use pixel formats over the default rendering_buffer.
+    typedef pixfmt_alpha_blend_gray<blender_gray8, rendering_buffer> pixfmt_gray8;
+    typedef pixfmt_alpha_blend_gray<blender_sgray8, rendering_buffer> pixfmt_sgray8;
+    typedef pixfmt_alpha_blend_gray<blender_gray16, rendering_buffer> pixfmt_gray16;
+    typedef pixfmt_alpha_blend_gray<blender_gray32, rendering_buffer> pixfmt_gray32;
+
+    typedef pixfmt_alpha_blend_gray<blender_gray8_pre, rendering_buffer> pixfmt_gray8_pre;
+    typedef pixfmt_alpha_blend_gray<blender_sgray8_pre, rendering_buffer> pixfmt_sgray8_pre;
+    typedef pixfmt_alpha_blend_gray<blender_gray16_pre, rendering_buffer> pixfmt_gray16_pre;
+    typedef pixfmt_alpha_blend_gray<blender_gray32_pre, rendering_buffer> pixfmt_gray32_pre;
+}
+
+#endif
+
diff --git a/src/agg/agg_pixfmt_rgb.h b/src/agg/agg_pixfmt_rgb.h
new file mode 100644
index 000000000..6fa8772ce
--- /dev/null
+++ b/src/agg/agg_pixfmt_rgb.h
@@ -0,0 +1,995 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for high precision colors has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_PIXFMT_RGB_INCLUDED
+#define AGG_PIXFMT_RGB_INCLUDED
+
+#include <string.h>
+#include "agg_pixfmt_base.h"
+#include "agg_rendering_buffer.h"
+
+namespace agg
+{
+
+    //=====================================================apply_gamma_dir_rgb
+    // Unary pixel functor (for use with for_each_pixel): runs the forward
+    // ("dir") gamma transfer of the supplied LUT over the three color
+    // channels of one pixel, in place. The LUT is borrowed by reference,
+    // not owned.
+    template<class ColorT, class Order, class GammaLut> class apply_gamma_dir_rgb
+    {
+    public:
+        typedef typename ColorT::value_type value_type;
+
+        apply_gamma_dir_rgb(const GammaLut& gamma) : m_lut(gamma) {}
+
+        AGG_INLINE void operator () (value_type* p)
+        {
+            // The three channel slots are distinct, so order is irrelevant.
+            p[Order::B] = m_lut.dir(p[Order::B]);
+            p[Order::G] = m_lut.dir(p[Order::G]);
+            p[Order::R] = m_lut.dir(p[Order::R]);
+        }
+
+    private:
+        const GammaLut& m_lut;
+    };
+
+
+
+    //=====================================================apply_gamma_inv_rgb
+    // Counterpart of apply_gamma_dir_rgb: applies the inverse ("inv") gamma
+    // transfer of the supplied LUT to the three color channels of one pixel,
+    // in place. The LUT is borrowed by reference, not owned.
+    template<class ColorT, class Order, class GammaLut> class apply_gamma_inv_rgb
+    {
+    public:
+        typedef typename ColorT::value_type value_type;
+
+        apply_gamma_inv_rgb(const GammaLut& gamma) : m_lut(gamma) {}
+
+        AGG_INLINE void operator () (value_type* p)
+        {
+            // The three channel slots are distinct, so order is irrelevant.
+            p[Order::B] = m_lut.inv(p[Order::B]);
+            p[Order::G] = m_lut.inv(p[Order::G]);
+            p[Order::R] = m_lut.inv(p[Order::R]);
+        }
+
+    private:
+        const GammaLut& m_lut;
+    };
+
+
+    //=========================================================blender_rgb
+    // Non-premultiplied blender policy for opaque RGB buffers. ColorT
+    // supplies the channel arithmetic (lerp, mult_cover); Order maps the
+    // logical R/G/B channels onto their physical positions in the pixel.
+    template<class ColorT, class Order>
+    struct blender_rgb
+    {
+        typedef ColorT color_type;
+        typedef Order order_type;
+        typedef typename color_type::value_type value_type;
+        typedef typename color_type::calc_type calc_type;
+        typedef typename color_type::long_type long_type;
+
+        // Blend pixels using the non-premultiplied form of Alvy-Ray Smith's
+        // compositing function. Since the render buffer is opaque we skip the
+        // initial premultiply and final demultiply.
+
+        //--------------------------------------------------------------------
+        // Cover-weighted variant: the effective alpha is the source alpha
+        // scaled by the anti-aliasing coverage value.
+        static AGG_INLINE void blend_pix(value_type* p,
+            value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
+        {
+            blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover));
+        }
+
+        //--------------------------------------------------------------------
+        // Interpolates each channel toward the source value by alpha via
+        // color_type::lerp. Alpha itself is not stored -- the buffer has no
+        // alpha slot.
+        static AGG_INLINE void blend_pix(value_type* p,
+            value_type cr, value_type cg, value_type cb, value_type alpha)
+        {
+            p[Order::R] = color_type::lerp(p[Order::R], cr, alpha);
+            p[Order::G] = color_type::lerp(p[Order::G], cg, alpha);
+            p[Order::B] = color_type::lerp(p[Order::B], cb, alpha);
+        }
+    };
+
+    //======================================================blender_rgb_pre
+    // Premultiplied-alpha blender policy for opaque RGB buffers; same shape
+    // as blender_rgb but the source channels are expected premultiplied.
+    template<class ColorT, class Order>
+    struct blender_rgb_pre
+    {
+        typedef ColorT color_type;
+        typedef Order order_type;
+        typedef typename color_type::value_type value_type;
+        typedef typename color_type::calc_type calc_type;
+        typedef typename color_type::long_type long_type;
+
+        // Blend pixels using the premultiplied form of Alvy-Ray Smith's
+        // compositing function.
+
+        //--------------------------------------------------------------------
+        // Cover-weighted variant: coverage scales the (premultiplied) source
+        // channels as well as alpha before the plain blend below.
+        static AGG_INLINE void blend_pix(value_type* p,
+            value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
+        {
+            blend_pix(p,
+                color_type::mult_cover(cr, cover),
+                color_type::mult_cover(cg, cover),
+                color_type::mult_cover(cb, cover),
+                color_type::mult_cover(alpha, cover));
+        }
+
+        //--------------------------------------------------------------------
+        // Combines each channel via color_type::prelerp (the premultiplied
+        // blend equation lives in the color type, see agg_color_rgba.h).
+        static AGG_INLINE void blend_pix(value_type* p,
+            value_type cr, value_type cg, value_type cb, value_type alpha)
+        {
+            p[Order::R] = color_type::prelerp(p[Order::R], cr, alpha);
+            p[Order::G] = color_type::prelerp(p[Order::G], cg, alpha);
+            p[Order::B] = color_type::prelerp(p[Order::B], cb, alpha);
+        }
+    };
+
+    //===================================================blender_rgb_gamma
+    // Gamma-correct blender: channels are converted to gamma-decoded space
+    // with Gamma::dir(), linearly blended there, and converted back with
+    // Gamma::inv().
+    // NOTE: m_gamma starts out null -- gamma() must be called before any
+    // blend_pix() call, otherwise the pointer is dereferenced unset.
+    template<class ColorT, class Order, class Gamma>
+    class blender_rgb_gamma : public blender_base<ColorT, Order>
+    {
+    public:
+        typedef ColorT color_type;
+        typedef Order order_type;
+        typedef Gamma gamma_type;
+        typedef typename color_type::value_type value_type;
+        typedef typename color_type::calc_type calc_type;
+        typedef typename color_type::long_type long_type;
+
+        //--------------------------------------------------------------------
+        blender_rgb_gamma() : m_gamma(0) {}
+        // Stores a pointer to the caller-owned LUT; 'g' must outlive this
+        // blender.
+        void gamma(const gamma_type& g) { m_gamma = &g; }
+
+        //--------------------------------------------------------------------
+        // Cover-weighted variant: fold coverage into alpha, then blend.
+        AGG_INLINE void blend_pix(value_type* p,
+            value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
+        {
+            blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover));
+        }
+
+        //--------------------------------------------------------------------
+        AGG_INLINE void blend_pix(value_type* p,
+            value_type cr, value_type cg, value_type cb, value_type alpha)
+        {
+            // Decode destination channels into linear (gamma-dir) space.
+            calc_type r = m_gamma->dir(p[Order::R]);
+            calc_type g = m_gamma->dir(p[Order::G]);
+            calc_type b = m_gamma->dir(p[Order::B]);
+            // Lerp in linear space (downscale removes the alpha scaling),
+            // then re-encode with the inverse gamma.
+            p[Order::R] = m_gamma->inv(color_type::downscale((m_gamma->dir(cr) - r) * alpha) + r);
+            p[Order::G] = m_gamma->inv(color_type::downscale((m_gamma->dir(cg) - g) * alpha) + g);
+            p[Order::B] = m_gamma->inv(color_type::downscale((m_gamma->dir(cb) - b) * alpha) + b);
+        }
+
+    private:
+        const gamma_type* m_gamma;
+    };
+
+
+    //==================================================pixfmt_alpha_blend_rgb
+    // Pixel format adaptor for opaque RGB rendering buffers. A pixel occupies
+    // 'Step' value_type slots beginning at slot 'Offset', so one template
+    // covers packed formats (Step=3: rgb24/48/96) as well as padded ones
+    // (Step=4: rgbx32, xrgb32, ...). All per-pixel arithmetic is delegated to
+    // the Blender policy; this class handles addressing and iteration only.
+    // NOTE: all the hline/vline/span routines assume len > 0 (AGG's do/while
+    // idiom).
+    template<class Blender, class RenBuf, unsigned Step, unsigned Offset = 0>
+    class pixfmt_alpha_blend_rgb
+    {
+    public:
+        typedef pixfmt_rgb_tag pixfmt_category;
+        typedef RenBuf rbuf_type;
+        typedef Blender blender_type;
+        typedef typename rbuf_type::row_data row_data;
+        typedef typename blender_type::color_type color_type;
+        typedef typename blender_type::order_type order_type;
+        typedef typename color_type::value_type value_type;
+        typedef typename color_type::calc_type calc_type;
+        enum
+        {
+            num_components = 3,
+            pix_step = Step,
+            pix_offset = Offset,
+            pix_width = sizeof(value_type) * pix_step
+        };
+        // View of a single pixel inside the buffer; c[] aliases the buffer
+        // storage directly.
+        struct pixel_type
+        {
+            value_type c[num_components];
+
+            void set(value_type r, value_type g, value_type b)
+            {
+                c[order_type::R] = r;
+                c[order_type::G] = g;
+                c[order_type::B] = b;
+            }
+
+            void set(const color_type& color)
+            {
+                set(color.r, color.g, color.b);
+            }
+
+            void get(value_type& r, value_type& g, value_type& b) const
+            {
+                r = c[order_type::R];
+                g = c[order_type::G];
+                b = c[order_type::B];
+            }
+
+            color_type get() const
+            {
+                return color_type(
+                    c[order_type::R],
+                    c[order_type::G],
+                    c[order_type::B]);
+            }
+
+            // Step to the adjacent pixel (pix_step value slots away).
+            pixel_type* next()
+            {
+                return (pixel_type*)(c + pix_step);
+            }
+
+            const pixel_type* next() const
+            {
+                return (const pixel_type*)(c + pix_step);
+            }
+
+            pixel_type* advance(int n)
+            {
+                return (pixel_type*)(c + n * pix_step);
+            }
+
+            const pixel_type* advance(int n) const
+            {
+                return (const pixel_type*)(c + n * pix_step);
+            }
+        };
+
+    private:
+        //--------------------------------------------------------------------
+        AGG_INLINE void blend_pix(pixel_type* p,
+            value_type r, value_type g, value_type b, value_type a,
+            unsigned cover)
+        {
+            m_blender.blend_pix(p->c, r, g, b, a, cover);
+        }
+
+        //--------------------------------------------------------------------
+        AGG_INLINE void blend_pix(pixel_type* p,
+            value_type r, value_type g, value_type b, value_type a)
+        {
+            m_blender.blend_pix(p->c, r, g, b, a);
+        }
+
+        //--------------------------------------------------------------------
+        AGG_INLINE void blend_pix(pixel_type* p, const color_type& c, unsigned cover)
+        {
+            m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a, cover);
+        }
+
+        //--------------------------------------------------------------------
+        AGG_INLINE void blend_pix(pixel_type* p, const color_type& c)
+        {
+            m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a);
+        }
+
+        //--------------------------------------------------------------------
+        // Fully transparent colors are skipped; opaque color at full cover
+        // degenerates to a plain store.
+        AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover)
+        {
+            if (!c.is_transparent())
+            {
+                if (c.is_opaque() && cover == cover_mask)
+                {
+                    p->set(c);
+                }
+                else
+                {
+                    blend_pix(p, c, cover);
+                }
+            }
+        }
+
+        //--------------------------------------------------------------------
+        AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c)
+        {
+            if (!c.is_transparent())
+            {
+                if (c.is_opaque())
+                {
+                    p->set(c);
+                }
+                else
+                {
+                    blend_pix(p, c);
+                }
+            }
+        }
+
+    public:
+        //--------------------------------------------------------------------
+        explicit pixfmt_alpha_blend_rgb(rbuf_type& rb) :
+            m_rbuf(&rb)
+        {}
+        void attach(rbuf_type& rb) { m_rbuf = &rb; }
+
+        //--------------------------------------------------------------------
+        // Attach to a sub-rectangle of another pixel format's buffer.
+        // Returns false if the rectangle does not intersect pixf.
+        template<class PixFmt>
+        bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2)
+        {
+            rect_i r(x1, y1, x2, y2);
+            if (r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1)))
+            {
+                int stride = pixf.stride();
+                // For bottom-up buffers (negative stride) the first row in
+                // memory is y2, not y1.
+                m_rbuf->attach(pixf.pix_ptr(r.x1, stride < 0 ? r.y2 : r.y1),
+                               (r.x2 - r.x1) + 1,
+                               (r.y2 - r.y1) + 1,
+                               stride);
+                return true;
+            }
+            return false;
+        }
+
+        //--------------------------------------------------------------------
+        Blender& blender() { return m_blender; }
+
+        //--------------------------------------------------------------------
+        AGG_INLINE unsigned width()  const { return m_rbuf->width();  }
+        AGG_INLINE unsigned height() const { return m_rbuf->height(); }
+        AGG_INLINE int      stride() const { return m_rbuf->stride(); }
+
+        //--------------------------------------------------------------------
+        AGG_INLINE       int8u* row_ptr(int y)       { return m_rbuf->row_ptr(y); }
+        AGG_INLINE const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); }
+        AGG_INLINE row_data     row(int y)     const { return m_rbuf->row(y); }
+
+        //--------------------------------------------------------------------
+        AGG_INLINE int8u* pix_ptr(int x, int y)
+        {
+            return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset);
+        }
+
+        AGG_INLINE const int8u* pix_ptr(int x, int y) const
+        {
+            return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset);
+        }
+
+        // Return pointer to pixel value, forcing row to be allocated.
+        AGG_INLINE pixel_type* pix_value_ptr(int x, int y, unsigned len)
+        {
+            return (pixel_type*)(m_rbuf->row_ptr(x, y, len) + sizeof(value_type) * (x * pix_step + pix_offset));
+        }
+
+        // Return pointer to pixel value, or null if row not allocated.
+        AGG_INLINE const pixel_type* pix_value_ptr(int x, int y) const
+        {
+            int8u* p = m_rbuf->row_ptr(y);
+            return p ? (pixel_type*)(p + sizeof(value_type) * (x * pix_step + pix_offset)) : 0;
+        }
+
+        // Get pixel pointer from raw buffer pointer.
+        AGG_INLINE static pixel_type* pix_value_ptr(void* p)
+        {
+            return (pixel_type*)((value_type*)p + pix_offset);
+        }
+
+        // Get pixel pointer from raw buffer pointer.
+        AGG_INLINE static const pixel_type* pix_value_ptr(const void* p)
+        {
+            return (const pixel_type*)((const value_type*)p + pix_offset);
+        }
+
+        //--------------------------------------------------------------------
+        AGG_INLINE static void write_plain_color(void* p, color_type c)
+        {
+            // RGB formats are implicitly premultiplied.
+            c.premultiply();
+            pix_value_ptr(p)->set(c);
+        }
+
+        //--------------------------------------------------------------------
+        AGG_INLINE static color_type read_plain_color(const void* p)
+        {
+            return pix_value_ptr(p)->get();
+        }
+
+        //--------------------------------------------------------------------
+        AGG_INLINE static void make_pix(int8u* p, const color_type& c)
+        {
+            ((pixel_type*)p)->set(c);
+        }
+
+        //--------------------------------------------------------------------
+        // Read one pixel; no_color() if the row was never allocated.
+        AGG_INLINE color_type pixel(int x, int y) const
+        {
+            if (const pixel_type* p = pix_value_ptr(x, y))
+            {
+                return p->get();
+            }
+            return color_type::no_color();
+        }
+
+        //--------------------------------------------------------------------
+        AGG_INLINE void copy_pixel(int x, int y, const color_type& c)
+        {
+            pix_value_ptr(x, y, 1)->set(c);
+        }
+
+        //--------------------------------------------------------------------
+        AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover)
+        {
+            copy_or_blend_pix(pix_value_ptr(x, y, 1), c, cover);
+        }
+
+        //--------------------------------------------------------------------
+        // Unconditionally store 'c' into 'len' horizontal pixels.
+        AGG_INLINE void copy_hline(int x, int y,
+                                   unsigned len,
+                                   const color_type& c)
+        {
+            pixel_type* p = pix_value_ptr(x, y, len);
+            do
+            {
+                p->set(c);
+                p = p->next();
+            }
+            while (--len);
+        }
+
+
+        //--------------------------------------------------------------------
+        // Unconditionally store 'c' into 'len' vertical pixels.
+        AGG_INLINE void copy_vline(int x, int y,
+                                   unsigned len,
+                                   const color_type& c)
+        {
+            do
+            {
+                pix_value_ptr(x, y++, 1)->set(c);
+            }
+            while (--len);
+        }
+
+        //--------------------------------------------------------------------
+        // Blend a horizontal run of a single color at constant coverage.
+        void blend_hline(int x, int y,
+                         unsigned len,
+                         const color_type& c,
+                         int8u cover)
+        {
+            if (!c.is_transparent())
+            {
+                pixel_type* p = pix_value_ptr(x, y, len);
+
+                // Opaque color at full coverage: plain fill, no blending.
+                if (c.is_opaque() && cover == cover_mask)
+                {
+                    do
+                    {
+                        p->set(c);
+                        p = p->next();
+                    }
+                    while (--len);
+                }
+                else
+                {
+                    do
+                    {
+                        blend_pix(p, c, cover);
+                        p = p->next();
+                    }
+                    while (--len);
+                }
+            }
+        }
+
+
+        //--------------------------------------------------------------------
+        // Blend a vertical run of a single color at constant coverage.
+        void blend_vline(int x, int y,
+                         unsigned len,
+                         const color_type& c,
+                         int8u cover)
+        {
+            if (!c.is_transparent())
+            {
+                if (c.is_opaque() && cover == cover_mask)
+                {
+                    do
+                    {
+                        pix_value_ptr(x, y++, 1)->set(c);
+                    }
+                    while (--len);
+                }
+                else
+                {
+                    do
+                    {
+                        blend_pix(pix_value_ptr(x, y++, 1), c, cover);
+                    }
+                    while (--len);
+                }
+            }
+        }
+
+        //--------------------------------------------------------------------
+        // Blend a horizontal run of one color with per-pixel coverage.
+        void blend_solid_hspan(int x, int y,
+                               unsigned len,
+                               const color_type& c,
+                               const int8u* covers)
+        {
+            if (!c.is_transparent())
+            {
+                pixel_type* p = pix_value_ptr(x, y, len);
+
+                do
+                {
+                    if (c.is_opaque() && *covers == cover_mask)
+                    {
+                        p->set(c);
+                    }
+                    else
+                    {
+                        blend_pix(p, c, *covers);
+                    }
+                    p = p->next();
+                    ++covers;
+                }
+                while (--len);
+            }
+        }
+
+
+        //--------------------------------------------------------------------
+        // Blend a vertical run of one color with per-pixel coverage.
+        void blend_solid_vspan(int x, int y,
+                               unsigned len,
+                               const color_type& c,
+                               const int8u* covers)
+        {
+            if (!c.is_transparent())
+            {
+                do
+                {
+                    pixel_type* p = pix_value_ptr(x, y++, 1);
+
+                    if (c.is_opaque() && *covers == cover_mask)
+                    {
+                        p->set(c);
+                    }
+                    else
+                    {
+                        blend_pix(p, c, *covers);
+                    }
+                    ++covers;
+                }
+                while (--len);
+            }
+        }
+
+        //--------------------------------------------------------------------
+        // Unconditionally copy an array of colors into a horizontal run.
+        void copy_color_hspan(int x, int y,
+                              unsigned len,
+                              const color_type* colors)
+        {
+            pixel_type* p = pix_value_ptr(x, y, len);
+
+            do
+            {
+                p->set(*colors++);
+                p = p->next();
+            }
+            while (--len);
+        }
+
+
+        //--------------------------------------------------------------------
+        // Unconditionally copy an array of colors into a vertical run.
+        void copy_color_vspan(int x, int y,
+                              unsigned len,
+                              const color_type* colors)
+        {
+            do
+            {
+                pix_value_ptr(x, y++, 1)->set(*colors++);
+            }
+            while (--len);
+        }
+
+        //--------------------------------------------------------------------
+        // Blend an array of colors into a horizontal run. If 'covers' is
+        // non-null it supplies per-pixel coverage, otherwise the constant
+        // 'cover' is used for the whole run.
+        void blend_color_hspan(int x, int y,
+                               unsigned len,
+                               const color_type* colors,
+                               const int8u* covers,
+                               int8u cover)
+        {
+            pixel_type* p = pix_value_ptr(x, y, len);
+
+            if (covers)
+            {
+                do
+                {
+                    copy_or_blend_pix(p, *colors++, *covers++);
+                    p = p->next();
+                }
+                while (--len);
+            }
+            else
+            {
+                if (cover == cover_mask)
+                {
+                    do
+                    {
+                        copy_or_blend_pix(p, *colors++);
+                        p = p->next();
+                    }
+                    while (--len);
+                }
+                else
+                {
+                    do
+                    {
+                        copy_or_blend_pix(p, *colors++, cover);
+                        p = p->next();
+                    }
+                    while (--len);
+                }
+            }
+        }
+
+        //--------------------------------------------------------------------
+        // Vertical counterpart of blend_color_hspan.
+        void blend_color_vspan(int x, int y,
+                               unsigned len,
+                               const color_type* colors,
+                               const int8u* covers,
+                               int8u cover)
+        {
+            if (covers)
+            {
+                do
+                {
+                    copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, *covers++);
+                }
+                while (--len);
+            }
+            else
+            {
+                if (cover == cover_mask)
+                {
+                    do
+                    {
+                        copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++);
+                    }
+                    while (--len);
+                }
+                else
+                {
+                    do
+                    {
+                        copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, cover);
+                    }
+                    while (--len);
+                }
+            }
+        }
+
+        //--------------------------------------------------------------------
+        // Apply functor 'f' to every allocated pixel of the buffer.
+        template<class Function> void for_each_pixel(Function f)
+        {
+            for (unsigned y = 0; y < height(); ++y)
+            {
+                row_data r = m_rbuf->row(y);
+                if (r.ptr)
+                {
+                    unsigned len = r.x2 - r.x1 + 1;
+                    pixel_type* p = pix_value_ptr(r.x1, y, len);
+                    do
+                    {
+                        f(p->c);
+                        p = p->next();
+                    }
+                    while (--len);
+                }
+            }
+        }
+
+        //--------------------------------------------------------------------
+        template<class GammaLut> void apply_gamma_dir(const GammaLut& g)
+        {
+            for_each_pixel(apply_gamma_dir_rgb<color_type, order_type, GammaLut>(g));
+        }
+
+        //--------------------------------------------------------------------
+        template<class GammaLut> void apply_gamma_inv(const GammaLut& g)
+        {
+            for_each_pixel(apply_gamma_inv_rgb<color_type, order_type, GammaLut>(g));
+        }
+
+        //--------------------------------------------------------------------
+        // Raw byte copy of 'len' pixels from another buffer of the same
+        // layout; memmove keeps overlapping source/destination safe.
+        template<class RenBuf2>
+        void copy_from(const RenBuf2& from,
+                       int xdst, int ydst,
+                       int xsrc, int ysrc,
+                       unsigned len)
+        {
+            if (const int8u* p = from.row_ptr(ysrc))
+            {
+                memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width,
+                        p + xsrc * pix_width,
+                        len * pix_width);
+            }
+        }
+
+        //--------------------------------------------------------------------
+        // Blend from an RGBA surface.
+        template<class SrcPixelFormatRenderer>
+        void blend_from(const SrcPixelFormatRenderer& from,
+                        int xdst, int ydst,
+                        int xsrc, int ysrc,
+                        unsigned len,
+                        int8u cover)
+        {
+            typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+            typedef typename SrcPixelFormatRenderer::order_type src_order;
+
+            if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+            {
+                pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+                if (cover == cover_mask)
+                {
+                    do
+                    {
+                        value_type alpha = psrc->c[src_order::A];
+                        // Bug fix: the comparison was inverted
+                        // ('alpha <= color_type::empty_value()'), which
+                        // skipped every visible source pixel and made the
+                        // nested full_value() test unreachable. Process
+                        // pixels that are NOT fully transparent.
+                        if (alpha > color_type::empty_value())
+                        {
+                            if (alpha >= color_type::full_value())
+                            {
+                                // Fully opaque source: straight channel copy.
+                                pdst->c[order_type::R] = psrc->c[src_order::R];
+                                pdst->c[order_type::G] = psrc->c[src_order::G];
+                                pdst->c[order_type::B] = psrc->c[src_order::B];
+                            }
+                            else
+                            {
+                                blend_pix(pdst,
+                                    psrc->c[src_order::R],
+                                    psrc->c[src_order::G],
+                                    psrc->c[src_order::B],
+                                    alpha);
+                            }
+                        }
+                        psrc = psrc->next();
+                        pdst = pdst->next();
+                    }
+                    while (--len);
+                }
+                else
+                {
+                    do
+                    {
+                        copy_or_blend_pix(pdst, psrc->get(), cover);
+                        psrc = psrc->next();
+                        pdst = pdst->next();
+                    }
+                    while (--len);
+                }
+            }
+        }
+
+        //--------------------------------------------------------------------
+        // Blend from single color, using grayscale surface as alpha channel.
+        template<class SrcPixelFormatRenderer>
+        void blend_from_color(const SrcPixelFormatRenderer& from,
+                              const color_type& color,
+                              int xdst, int ydst,
+                              int xsrc, int ysrc,
+                              unsigned len,
+                              int8u cover)
+        {
+            typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+            typedef typename SrcPixelFormatRenderer::color_type src_color_type;
+
+            if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+            {
+                pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+                do
+                {
+                    // The gray value scales the constant coverage per pixel.
+                    copy_or_blend_pix(pdst, color, src_color_type::scale_cover(cover, psrc->c[0]));
+                    psrc = psrc->next();
+                    pdst = pdst->next();
+                }
+                while (--len);
+            }
+        }
+
+        //--------------------------------------------------------------------
+        // Blend from color table, using grayscale surface as indexes into table.
+        // Obviously, this only works for integer value types.
+        template<class SrcPixelFormatRenderer>
+        void blend_from_lut(const SrcPixelFormatRenderer& from,
+                            const color_type* color_lut,
+                            int xdst, int ydst,
+                            int xsrc, int ysrc,
+                            unsigned len,
+                            int8u cover)
+        {
+            typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+
+            if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+            {
+                pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+                if (cover == cover_mask)
+                {
+                    do
+                    {
+                        const color_type& color = color_lut[psrc->c[0]];
+                        blend_pix(pdst, color);
+                        psrc = psrc->next();
+                        pdst = pdst->next();
+                    }
+                    while (--len);
+                }
+                else
+                {
+                    do
+                    {
+                        copy_or_blend_pix(pdst, color_lut[psrc->c[0]], cover);
+                        psrc = psrc->next();
+                        pdst = pdst->next();
+                    }
+                    while (--len);
+                }
+            }
+        }
+
+    private:
+        rbuf_type* m_rbuf;
+        Blender    m_blender;
+    };
+
+    //-----------------------------------------------------------------------
+    // Ready-made blender instantiations: plain (non-premultiplied) and
+    // premultiplied ("_pre"), linear (rgba*) and sRGB-typed (srgba8) colors,
+    // in RGB and BGR channel orders, 8/16/32 bits per channel.
+    typedef blender_rgb<rgba8, order_rgb> blender_rgb24;
+    typedef blender_rgb<rgba8, order_bgr> blender_bgr24;
+    typedef blender_rgb<srgba8, order_rgb> blender_srgb24;
+    typedef blender_rgb<srgba8, order_bgr> blender_sbgr24;
+    typedef blender_rgb<rgba16, order_rgb> blender_rgb48;
+    typedef blender_rgb<rgba16, order_bgr> blender_bgr48;
+    typedef blender_rgb<rgba32, order_rgb> blender_rgb96;
+    typedef blender_rgb<rgba32, order_bgr> blender_bgr96;
+
+    typedef blender_rgb_pre<rgba8, order_rgb> blender_rgb24_pre;
+    typedef blender_rgb_pre<rgba8, order_bgr> blender_bgr24_pre;
+    typedef blender_rgb_pre<srgba8, order_rgb> blender_srgb24_pre;
+    typedef blender_rgb_pre<srgba8, order_bgr> blender_sbgr24_pre;
+    typedef blender_rgb_pre<rgba16, order_rgb> blender_rgb48_pre;
+    typedef blender_rgb_pre<rgba16, order_bgr> blender_bgr48_pre;
+    typedef blender_rgb_pre<rgba32, order_rgb> blender_rgb96_pre;
+    typedef blender_rgb_pre<rgba32, order_bgr> blender_bgr96_pre;
+
+    // Packed pixel formats: 3 value slots per pixel (Step = 3).
+    typedef pixfmt_alpha_blend_rgb<blender_rgb24, rendering_buffer, 3> pixfmt_rgb24;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr24, rendering_buffer, 3> pixfmt_bgr24;
+    typedef pixfmt_alpha_blend_rgb<blender_srgb24, rendering_buffer, 3> pixfmt_srgb24;
+    typedef pixfmt_alpha_blend_rgb<blender_sbgr24, rendering_buffer, 3> pixfmt_sbgr24;
+    typedef pixfmt_alpha_blend_rgb<blender_rgb48, rendering_buffer, 3> pixfmt_rgb48;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr48, rendering_buffer, 3> pixfmt_bgr48;
+    typedef pixfmt_alpha_blend_rgb<blender_rgb96, rendering_buffer, 3> pixfmt_rgb96;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr96, rendering_buffer, 3> pixfmt_bgr96;
+
+    typedef pixfmt_alpha_blend_rgb<blender_rgb24_pre, rendering_buffer, 3> pixfmt_rgb24_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr24_pre, rendering_buffer, 3> pixfmt_bgr24_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_srgb24_pre, rendering_buffer, 3> pixfmt_srgb24_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_sbgr24_pre, rendering_buffer, 3> pixfmt_sbgr24_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_rgb48_pre, rendering_buffer, 3> pixfmt_rgb48_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr48_pre, rendering_buffer, 3> pixfmt_bgr48_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_rgb96_pre, rendering_buffer, 3> pixfmt_rgb96_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr96_pre, rendering_buffer, 3> pixfmt_bgr96_pre;
+
+    // Padded pixel formats: 4 value slots per pixel (Step = 4); the unused
+    // slot sits after (Offset = 0) or before (Offset = 1) the color channels.
+    typedef pixfmt_alpha_blend_rgb<blender_rgb24, rendering_buffer, 4, 0> pixfmt_rgbx32;
+    typedef pixfmt_alpha_blend_rgb<blender_rgb24, rendering_buffer, 4, 1> pixfmt_xrgb32;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr24, rendering_buffer, 4, 1> pixfmt_xbgr32;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr24, rendering_buffer, 4, 0> pixfmt_bgrx32;
+    typedef pixfmt_alpha_blend_rgb<blender_srgb24, rendering_buffer, 4, 0> pixfmt_srgbx32;
+    typedef pixfmt_alpha_blend_rgb<blender_srgb24, rendering_buffer, 4, 1> pixfmt_sxrgb32;
+    typedef pixfmt_alpha_blend_rgb<blender_sbgr24, rendering_buffer, 4, 1> pixfmt_sxbgr32;
+    typedef pixfmt_alpha_blend_rgb<blender_sbgr24, rendering_buffer, 4, 0> pixfmt_sbgrx32;
+    typedef pixfmt_alpha_blend_rgb<blender_rgb48, rendering_buffer, 4, 0> pixfmt_rgbx64;
+    typedef pixfmt_alpha_blend_rgb<blender_rgb48, rendering_buffer, 4, 1> pixfmt_xrgb64;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr48, rendering_buffer, 4, 1> pixfmt_xbgr64;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr48, rendering_buffer, 4, 0> pixfmt_bgrx64;
+    typedef pixfmt_alpha_blend_rgb<blender_rgb96, rendering_buffer, 4, 0> pixfmt_rgbx128;
+    typedef pixfmt_alpha_blend_rgb<blender_rgb96, rendering_buffer, 4, 1> pixfmt_xrgb128;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr96, rendering_buffer, 4, 1> pixfmt_xbgr128;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr96, rendering_buffer, 4, 0> pixfmt_bgrx128;
+
+    typedef pixfmt_alpha_blend_rgb<blender_rgb24_pre, rendering_buffer, 4, 0> pixfmt_rgbx32_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_rgb24_pre, rendering_buffer, 4, 1> pixfmt_xrgb32_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr24_pre, rendering_buffer, 4, 1> pixfmt_xbgr32_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr24_pre, rendering_buffer, 4, 0> pixfmt_bgrx32_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_srgb24_pre, rendering_buffer, 4, 0> pixfmt_srgbx32_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_srgb24_pre, rendering_buffer, 4, 1> pixfmt_sxrgb32_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_sbgr24_pre, rendering_buffer, 4, 1> pixfmt_sxbgr32_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_sbgr24_pre, rendering_buffer, 4, 0> pixfmt_sbgrx32_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_rgb48_pre, rendering_buffer, 4, 0> pixfmt_rgbx64_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_rgb48_pre, rendering_buffer, 4, 1> pixfmt_xrgb64_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr48_pre, rendering_buffer, 4, 1> pixfmt_xbgr64_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr48_pre, rendering_buffer, 4, 0> pixfmt_bgrx64_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_rgb96_pre, rendering_buffer, 4, 0> pixfmt_rgbx128_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_rgb96_pre, rendering_buffer, 4, 1> pixfmt_xrgb128_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr96_pre, rendering_buffer, 4, 1> pixfmt_xbgr128_pre;
+    typedef pixfmt_alpha_blend_rgb<blender_bgr96_pre, rendering_buffer, 4, 0> pixfmt_bgrx128_pre;
+
+
+    //-----------------------------------------------------pixfmt_rgb24_gamma
+    // Convenience 24-bit RGB pixel format with gamma-corrected blending.
+    // The blender stores a pointer to 'g', so the LUT must outlive this
+    // object.
+    template<class Gamma> class pixfmt_rgb24_gamma :
+        public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_rgb, Gamma>, rendering_buffer, 3>
+    {
+    public:
+        pixfmt_rgb24_gamma(rendering_buffer& rb, const Gamma& g) :
+            pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_rgb, Gamma>, rendering_buffer, 3>(rb)
+        {
+            this->blender().gamma(g);
+        }
+    };
+
+    //-----------------------------------------------------pixfmt_srgb24_gamma
+    // As pixfmt_rgb24_gamma but with the srgba8 color type. The blender
+    // stores a pointer to 'g', so the LUT must outlive this object.
+    template<class Gamma> class pixfmt_srgb24_gamma :
+        public pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_rgb, Gamma>, rendering_buffer, 3>
+    {
+    public:
+        pixfmt_srgb24_gamma(rendering_buffer& rb, const Gamma& g) :
+            pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_rgb, Gamma>, rendering_buffer, 3>(rb)
+        {
+            this->blender().gamma(g);
+        }
+    };
+
+    //-----------------------------------------------------pixfmt_bgr24_gamma
+    // BGR channel-order variant of pixfmt_rgb24_gamma. The blender stores a
+    // pointer to 'g', so the LUT must outlive this object.
+    template<class Gamma> class pixfmt_bgr24_gamma :
+        public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_bgr, Gamma>, rendering_buffer, 3>
+    {
+    public:
+        pixfmt_bgr24_gamma(rendering_buffer& rb, const Gamma& g) :
+            pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_bgr, Gamma>, rendering_buffer, 3>(rb)
+        {
+            this->blender().gamma(g);
+        }
+    };
+
+    //-----------------------------------------------------pixfmt_sbgr24_gamma
+    // BGR channel-order variant with the srgba8 color type. The blender
+    // stores a pointer to 'g', so the LUT must outlive this object.
+    template<class Gamma> class pixfmt_sbgr24_gamma :
+        public pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_bgr, Gamma>, rendering_buffer, 3>
+    {
+    public:
+        pixfmt_sbgr24_gamma(rendering_buffer& rb, const Gamma& g) :
+            pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_bgr, Gamma>, rendering_buffer, 3>(rb)
+        {
+            this->blender().gamma(g);
+        }
+    };
+
+    //-----------------------------------------------------pixfmt_rgb48_gamma
+    // 16-bit-per-channel RGB variant with gamma-corrected blending. The
+    // blender stores a pointer to 'g', so the LUT must outlive this object.
+    template<class Gamma> class pixfmt_rgb48_gamma :
+        public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_rgb, Gamma>, rendering_buffer, 3>
+    {
+    public:
+        pixfmt_rgb48_gamma(rendering_buffer& rb, const Gamma& g) :
+            pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_rgb, Gamma>, rendering_buffer, 3>(rb)
+        {
+            this->blender().gamma(g);
+        }
+    };
+
+    //-----------------------------------------------------pixfmt_bgr48_gamma
+    // 16-bit-per-channel BGR variant with gamma-corrected blending. The
+    // blender stores a pointer to 'g', so the LUT must outlive this object.
+    template<class Gamma> class pixfmt_bgr48_gamma :
+        public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_bgr, Gamma>, rendering_buffer, 3>
+    {
+    public:
+        pixfmt_bgr48_gamma(rendering_buffer& rb, const Gamma& g) :
+            pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_bgr, Gamma>, rendering_buffer, 3>(rb)
+        {
+            this->blender().gamma(g);
+        }
+    };
+
+}
+
+#endif
+
diff --git a/src/agg/agg_rasterizer_cells_aa.h b/src/agg/agg_rasterizer_cells_aa.h
new file mode 100644
index 000000000..1147148fa
--- /dev/null
+++ b/src/agg/agg_rasterizer_cells_aa.h
@@ -0,0 +1,741 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+//
+// The author gratefully acknowledges the support of David Turner,
+// Robert Wilhelm, and Werner Lemberg - the authors of the FreeType
+// library - in producing this work. See http://www.freetype.org for details.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for 32-bit screen coordinates has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_RASTERIZER_CELLS_AA_INCLUDED
+#define AGG_RASTERIZER_CELLS_AA_INCLUDED
+
+#include <string.h>
+#include <cstdlib>
+#include <limits>
+#include "agg_math.h"
+#include "agg_array.h"
+
+
+namespace agg
+{
+
+ //-----------------------------------------------------rasterizer_cells_aa
+ // An internal class that implements the main rasterization algorithm.
+ // Used in the rasterizer. Should not be used directly.
+ template<class Cell> class rasterizer_cells_aa
+ {
+ enum cell_block_scale_e
+ {
+ cell_block_shift = 12,
+ cell_block_size = 1 << cell_block_shift, // 4096 cells per allocation block
+ cell_block_mask = cell_block_size - 1,
+ cell_block_pool = 256, // growth increment of the block-pointer directory
+ cell_block_limit = 1024 // hard cap on allocated blocks; cells past it are dropped (see add_curr_cell)
+ };
+
+ struct sorted_y // one Y bucket: the slice [start, start+num) of m_sorted_cells
+ {
+ unsigned start;
+ unsigned num;
+ };
+
+ public:
+ typedef Cell cell_type;
+ typedef rasterizer_cells_aa<Cell> self_type;
+
+ ~rasterizer_cells_aa();
+ rasterizer_cells_aa();
+
+ void reset();
+ void style(const cell_type& style_cell);
+ void line(int x1, int y1, int x2, int y2); // accumulate coverage for a segment in subpixel coords
+
+ int min_x() const { return m_min_x; } // bounding box in integer cell coordinates
+ int min_y() const { return m_min_y; }
+ int max_x() const { return m_max_x; }
+ int max_y() const { return m_max_y; }
+
+ void sort_cells();
+
+ unsigned total_cells() const
+ {
+ return m_num_cells;
+ }
+
+ unsigned scanline_num_cells(unsigned y) const // valid only after sort_cells(); y must be in [min_y, max_y]
+ {
+ return m_sorted_y[y - m_min_y].num;
+ }
+
+ const cell_type* const* scanline_cells(unsigned y) const // cells of row y, sorted by x; valid only after sort_cells()
+ {
+ return m_sorted_cells.data() + m_sorted_y[y - m_min_y].start;
+ }
+
+ bool sorted() const { return m_sorted; }
+
+ private:
+ rasterizer_cells_aa(const self_type&); // non-copyable: owns raw cell blocks
+ const self_type& operator = (const self_type&);
+
+ void set_curr_cell(int x, int y);
+ void add_curr_cell();
+ void render_hline(int ey, int x1, int y1, int x2, int y2);
+ void allocate_block();
+
+ private:
+ unsigned m_num_blocks; // blocks allocated so far
+ unsigned m_max_blocks; // capacity of the m_cells directory
+ unsigned m_curr_block; // next block to hand out on overflow
+ unsigned m_num_cells; // total cells stored
+ cell_type** m_cells; // directory of cell blocks
+ cell_type* m_curr_cell_ptr; // write cursor inside the current block
+ pod_vector<cell_type*> m_sorted_cells; // Y/X-sorted cell pointers, filled by sort_cells()
+ pod_vector<sorted_y> m_sorted_y; // per-scanline buckets into m_sorted_cells
+ cell_type m_curr_cell; // accumulator for the cell currently being rasterized
+ cell_type m_style_cell;
+ int m_min_x; // bounding box of everything rasterized so far
+ int m_min_y;
+ int m_max_x;
+ int m_max_y;
+ bool m_sorted; // true once sort_cells() ran; reset() clears it
+ };
+
+
+
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ rasterizer_cells_aa<Cell>::~rasterizer_cells_aa()
+ {
+ if(m_num_blocks) // free every allocated cell block, then the block directory itself
+ {
+ cell_type** ptr = m_cells + m_num_blocks - 1;
+ while(m_num_blocks--)
+ {
+ pod_allocator<cell_type>::deallocate(*ptr, cell_block_size);
+ ptr--;
+ }
+ pod_allocator<cell_type*>::deallocate(m_cells, m_max_blocks);
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ rasterizer_cells_aa<Cell>::rasterizer_cells_aa() :
+ m_num_blocks(0),
+ m_max_blocks(0),
+ m_curr_block(0),
+ m_num_cells(0),
+ m_cells(0), // no storage allocated until the first cell is added
+ m_curr_cell_ptr(0),
+ m_sorted_cells(),
+ m_sorted_y(),
+ m_min_x(std::numeric_limits<int>::max()), // inverted-extreme sentinels: first line() shrinks them into a real bbox
+ m_min_y(std::numeric_limits<int>::max()),
+ m_max_x(std::numeric_limits<int>::min()),
+ m_max_y(std::numeric_limits<int>::min()),
+ m_sorted(false)
+ {
+ m_style_cell.initial();
+ m_curr_cell.initial();
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ void rasterizer_cells_aa<Cell>::reset()
+ { // discard all accumulated cells and the bbox; allocated blocks are kept for reuse (m_curr_block rewinds to 0)
+ m_num_cells = 0;
+ m_curr_block = 0;
+ m_curr_cell.initial();
+ m_style_cell.initial();
+ m_sorted = false;
+ m_min_x = std::numeric_limits<int>::max(); // back to empty-bbox sentinels
+ m_min_y = std::numeric_limits<int>::max();
+ m_max_x = std::numeric_limits<int>::min();
+ m_max_y = std::numeric_limits<int>::min();
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ AGG_INLINE void rasterizer_cells_aa<Cell>::add_curr_cell()
+ { // flush the accumulator cell into storage (only if it carries any coverage)
+ if(m_curr_cell.area | m_curr_cell.cover)
+ {
+ if((m_num_cells & cell_block_mask) == 0) // current block full, or none allocated yet
+ {
+ if(m_num_blocks >= cell_block_limit) return; // memory cap reached: silently drop the cell
+ allocate_block();
+ }
+ *m_curr_cell_ptr++ = m_curr_cell;
+ ++m_num_cells;
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ AGG_INLINE void rasterizer_cells_aa<Cell>::set_curr_cell(int x, int y)
+ { // retarget the accumulator to cell (x,y); flushes the previous cell if the target (or style) changed
+ if(m_curr_cell.not_equal(x, y, m_style_cell))
+ {
+ add_curr_cell();
+ m_curr_cell.style(m_style_cell);
+ m_curr_cell.x = x;
+ m_curr_cell.y = y;
+ m_curr_cell.cover = 0; // start a fresh accumulation for this cell
+ m_curr_cell.area = 0;
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ AGG_INLINE void rasterizer_cells_aa<Cell>::render_hline(int ey,
+ int x1, int y1,
+ int x2, int y2)
+ { // accumulate coverage for the piece of a segment inside scanline row ey; x1/x2 are subpixel x coords, y1/y2 subpixel y fractions within the row
+ int ex1 = x1 >> poly_subpixel_shift; // integer cell columns
+ int ex2 = x2 >> poly_subpixel_shift;
+ int fx1 = x1 & poly_subpixel_mask; // subpixel x remainders inside those cells
+ int fx2 = x2 & poly_subpixel_mask;
+
+ int delta, p, first;
+ long long dx;
+ int incr, lift, mod, rem;
+
+ //trivial case. Happens often
+ if(y1 == y2)
+ {
+ set_curr_cell(ex2, ey); // purely horizontal move: no coverage, just reposition the accumulator
+ return;
+ }
+
+ //everything is located in a single cell. That is easy!
+ if(ex1 == ex2)
+ {
+ delta = y2 - y1;
+ m_curr_cell.cover += delta;
+ m_curr_cell.area += (fx1 + fx2) * delta; // trapezoid: (left + right x-fraction) * dy
+ return;
+ }
+
+ //ok, we'll have to render a run of adjacent cells on the same
+ //hline...
+ p = (poly_subpixel_scale - fx1) * (y2 - y1); // y advance while crossing the first (partial) cell
+ first = poly_subpixel_scale;
+ incr = 1;
+
+ dx = (long long)x2 - (long long)x1;
+
+ if(dx < 0) // walking right-to-left: mirror the setup
+ {
+ p = fx1 * (y2 - y1);
+ first = 0;
+ incr = -1;
+ dx = -dx;
+ }
+
+ delta = (int)(p / dx);
+ mod = (int)(p % dx);
+
+ if(mod < 0) // force floor division (C++ '/' truncates toward zero)
+ {
+ delta--;
+ mod += static_cast<int>(dx);
+ }
+
+ m_curr_cell.cover += delta; // first (partial) cell
+ m_curr_cell.area += (fx1 + first) * delta;
+
+ ex1 += incr;
+ set_curr_cell(ex1, ey);
+ y1 += delta;
+
+ if(ex1 != ex2)
+ {
+ p = poly_subpixel_scale * (y2 - y1 + delta); // fixed y advance per full cell, kept as quotient + remainder
+ lift = (int)(p / dx);
+ rem = (int)(p % dx);
+
+ if (rem < 0) // floor division again
+ {
+ lift--;
+ rem += static_cast<int>(dx);
+ }
+
+ mod -= static_cast<int>(dx);
+
+ while (ex1 != ex2) // interior cells: Bresenham-style remainder accumulation
+ {
+ delta = lift;
+ mod += rem;
+ if(mod >= 0)
+ {
+ mod -= static_cast<int>(dx);
+ delta++;
+ }
+
+ m_curr_cell.cover += delta;
+ m_curr_cell.area += poly_subpixel_scale * delta; // a full cell width is crossed
+ y1 += delta;
+ ex1 += incr;
+ set_curr_cell(ex1, ey);
+ }
+ }
+ delta = y2 - y1; // last (partial) cell takes whatever y remains
+ m_curr_cell.cover += delta;
+ m_curr_cell.area += (fx2 + poly_subpixel_scale - first) * delta;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ AGG_INLINE void rasterizer_cells_aa<Cell>::style(const cell_type& style_cell)
+ { // set the style applied to subsequently accumulated cells (see set_curr_cell)
+ m_style_cell.style(style_cell);
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ void rasterizer_cells_aa<Cell>::line(int x1, int y1, int x2, int y2)
+ { // rasterize one segment given in poly_subpixel (fixed-point) coordinates, accumulating cover/area into cells and growing the bbox
+ enum dx_limit_e { dx_limit = 16384 << poly_subpixel_shift };
+
+ long long dx = (long long)x2 - (long long)x1;
+
+ if(dx >= dx_limit || dx <= -dx_limit) // split over-long segments in half so the DDA arithmetic stays in range
+ {
+ int cx = (int)(((long long)x1 + (long long)x2) >> 1);
+ int cy = (int)(((long long)y1 + (long long)y2) >> 1);
+ line(x1, y1, cx, cy);
+ line(cx, cy, x2, y2); return; // FIX: must return here - the code below would otherwise render the whole un-split segment a second time, double-counting its coverage (fixed the same way in later AGG copies, e.g. matplotlib's agg24-svn)
+ }
+
+ long long dy = (long long)y2 - (long long)y1;
+ int ex1 = x1 >> poly_subpixel_shift; // integer cell coordinates of the endpoints
+ int ex2 = x2 >> poly_subpixel_shift;
+ int ey1 = y1 >> poly_subpixel_shift;
+ int ey2 = y2 >> poly_subpixel_shift;
+ int fy1 = y1 & poly_subpixel_mask; // subpixel y fractions within those rows
+ int fy2 = y2 & poly_subpixel_mask;
+
+ int x_from, x_to;
+ int rem, mod, lift, delta, first, incr;
+ long long p;
+
+ if(ex1 < m_min_x) m_min_x = ex1; // grow the bounding box to cover both endpoints
+ if(ex1 > m_max_x) m_max_x = ex1;
+ if(ey1 < m_min_y) m_min_y = ey1;
+ if(ey1 > m_max_y) m_max_y = ey1;
+ if(ex2 < m_min_x) m_min_x = ex2;
+ if(ex2 > m_max_x) m_max_x = ex2;
+ if(ey2 < m_min_y) m_min_y = ey2;
+ if(ey2 > m_max_y) m_max_y = ey2;
+
+ set_curr_cell(ex1, ey1);
+
+ //everything is on a single hline
+ if(ey1 == ey2)
+ {
+ render_hline(ey1, x1, fy1, x2, fy2);
+ return;
+ }
+
+ //Vertical line - we have to calculate start and end cells,
+ //and then - the common values of the area and coverage for
+ //all cells of the line. We know exactly there's only one
+ //cell, so, we don't have to call render_hline().
+ incr = 1;
+ if(dx == 0)
+ {
+ int ex = x1 >> poly_subpixel_shift;
+ int two_fx = (x1 - (ex << poly_subpixel_shift)) << 1; // doubled x-fraction, constant for the whole column
+ int area;
+
+ first = poly_subpixel_scale;
+ if(dy < 0) // walking bottom-up: mirror the setup
+ {
+ first = 0;
+ incr = -1;
+ }
+
+ x_from = x1;
+
+ //render_hline(ey1, x_from, fy1, x_from, first);
+ delta = first - fy1; // partial coverage in the starting row
+ m_curr_cell.cover += delta;
+ m_curr_cell.area += two_fx * delta;
+
+ ey1 += incr;
+ set_curr_cell(ex, ey1);
+
+ delta = first + first - poly_subpixel_scale; // full-row coverage (+scale or -scale depending on direction)
+ area = two_fx * delta;
+ while(ey1 != ey2) // interior rows all get the same precomputed cover/area
+ {
+ //render_hline(ey1, x_from, poly_subpixel_scale - first, x_from, first);
+ m_curr_cell.cover = delta;
+ m_curr_cell.area = area;
+ ey1 += incr;
+ set_curr_cell(ex, ey1);
+ }
+ //render_hline(ey1, x_from, poly_subpixel_scale - first, x_from, fy2);
+ delta = fy2 - poly_subpixel_scale + first; // partial coverage in the final row
+ m_curr_cell.cover += delta;
+ m_curr_cell.area += two_fx * delta;
+ return;
+ }
+
+ //ok, we have to render several hlines
+ p = (poly_subpixel_scale - fy1) * dx; // x advance while crossing the first row
+ first = poly_subpixel_scale;
+
+ if(dy < 0) // walking bottom-up: mirror the setup
+ {
+ p = fy1 * dx;
+ first = 0;
+ incr = -1;
+ dy = -dy;
+ }
+
+ delta = (int)(p / dy);
+ mod = (int)(p % dy);
+
+ if(mod < 0) // force floor division (C++ '/' truncates toward zero)
+ {
+ delta--;
+ mod += static_cast<int>(dy);
+ }
+
+ x_from = x1 + delta;
+ render_hline(ey1, x1, fy1, x_from, first); // first (partial) row
+
+ ey1 += incr;
+ set_curr_cell(x_from >> poly_subpixel_shift, ey1);
+
+ if(ey1 != ey2)
+ {
+ p = poly_subpixel_scale * dx; // fixed x advance per full row, kept as quotient + remainder
+ lift = (int)(p / dy);
+ rem = (int)(p % dy);
+
+ if(rem < 0) // floor division again
+ {
+ lift--;
+ rem += static_cast<int>(dy);
+ }
+ mod -= static_cast<int>(dy);
+
+ while(ey1 != ey2) // interior rows: Bresenham-style remainder accumulation
+ {
+ delta = lift;
+ mod += rem;
+ if (mod >= 0)
+ {
+ mod -= static_cast<int>(dy);
+ delta++;
+ }
+
+ x_to = x_from + delta;
+ render_hline(ey1, x_from, poly_subpixel_scale - first, x_to, first);
+ x_from = x_to;
+
+ ey1 += incr;
+ set_curr_cell(x_from >> poly_subpixel_shift, ey1);
+ }
+ }
+ render_hline(ey1, x_from, poly_subpixel_scale - first, x2, fy2); // last (partial) row
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ void rasterizer_cells_aa<Cell>::allocate_block()
+ { // point m_curr_cell_ptr at a fresh block of cell_block_size cells, allocating (and growing the directory) as needed
+ if(m_curr_block >= m_num_blocks) // no previously-allocated block to reuse
+ {
+ if(m_num_blocks >= m_max_blocks) // directory full: grow it by cell_block_pool entries
+ {
+ cell_type** new_cells =
+ pod_allocator<cell_type*>::allocate(m_max_blocks +
+ cell_block_pool);
+
+ if(m_cells)
+ {
+ memcpy(new_cells, m_cells, m_max_blocks * sizeof(cell_type*));
+ pod_allocator<cell_type*>::deallocate(m_cells, m_max_blocks);
+ }
+ m_cells = new_cells;
+ m_max_blocks += cell_block_pool;
+ }
+
+ m_cells[m_num_blocks++] =
+ pod_allocator<cell_type>::allocate(cell_block_size);
+
+ }
+ m_curr_cell_ptr = m_cells[m_curr_block++];
+ }
+
+
+
+ //------------------------------------------------------------------------
+ template <class T> static AGG_INLINE void swap_cells(T* a, T* b) // swap two values through a temporary; used by qsort_cells on cell pointers
+ {
+ T temp = *a;
+ *a = *b;
+ *b = temp;
+ }
+
+
+ //------------------------------------------------------------------------
+ enum
+ {
+ qsort_threshold = 9 // sub-arrays at or below this size are finished with insertion sort in qsort_cells
+ };
+
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ void qsort_cells(Cell** start, unsigned num)
+ { // sort an array of cell pointers by ascending x using an iterative quicksort (median-of-three pivot) with an explicit stack; small runs fall back to insertion sort
+ Cell** stack[80]; // 40 pending sub-ranges (two pointers each); the larger half is pushed, so depth stays logarithmic
+ Cell*** top;
+ Cell** limit;
+ Cell** base;
+
+ limit = start + num;
+ base = start;
+ top = stack;
+
+ for (;;)
+ {
+ int len = int(limit - base);
+
+ Cell** i;
+ Cell** j;
+ Cell** pivot;
+
+ if(len > qsort_threshold)
+ {
+ // we use base + len/2 as the pivot
+ pivot = base + len / 2;
+ swap_cells(base, pivot); // pivot parked at base
+
+ i = base + 1;
+ j = limit - 1;
+
+ // now ensure that *i <= *base <= *j
+ if((*j)->x < (*i)->x)
+ {
+ swap_cells(i, j);
+ }
+
+ if((*base)->x < (*i)->x)
+ {
+ swap_cells(base, i);
+ }
+
+ if((*j)->x < (*base)->x)
+ {
+ swap_cells(base, j);
+ }
+
+ for(;;) // Hoare-style partition around the pivot value
+ {
+ int x = (*base)->x;
+ do i++; while( (*i)->x < x );
+ do j--; while( x < (*j)->x );
+
+ if(i > j)
+ {
+ break;
+ }
+
+ swap_cells(i, j);
+ }
+
+ swap_cells(base, j); // move the pivot into its final slot
+
+ // now, push the largest sub-array
+ if(j - base > limit - i)
+ {
+ top[0] = base;
+ top[1] = j;
+ base = i; // continue iterating on the smaller right half
+ }
+ else
+ {
+ top[0] = i;
+ top[1] = limit;
+ limit = j; // continue iterating on the smaller left half
+ }
+ top += 2;
+ }
+ else
+ {
+ // the sub-array is small, perform insertion sort
+ j = base;
+ i = j + 1;
+
+ for(; i < limit; j = i, i++)
+ {
+ for(; j[1]->x < (*j)->x; j--) // sink element j+1 leftwards to its place
+ {
+ swap_cells(j + 1, j);
+ if (j == base)
+ {
+ break;
+ }
+ }
+ }
+
+ if(top > stack) // resume a deferred sub-range, or finish
+ {
+ top -= 2;
+ base = top[0];
+ limit = top[1];
+ }
+ else
+ {
+ break;
+ }
+ }
+ }
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ void rasterizer_cells_aa<Cell>::sort_cells()
+ { // build m_sorted_cells/m_sorted_y: counting-sort all cells into per-Y buckets, then quicksort each bucket by X; idempotent until reset()
+ if(m_sorted) return; //Perform sort only the first time.
+
+ add_curr_cell(); // flush the accumulator, then park it on sentinel coordinates
+ m_curr_cell.x = std::numeric_limits<int>::max();
+ m_curr_cell.y = std::numeric_limits<int>::max();
+ m_curr_cell.cover = 0;
+ m_curr_cell.area = 0;
+
+ if(m_num_cells == 0) return;
+
+// DBG: Check to see if min/max works well.
+//for(unsigned nc = 0; nc < m_num_cells; nc++)
+//{
+// cell_type* cell = m_cells[nc >> cell_block_shift] + (nc & cell_block_mask);
+// if(cell->x < m_min_x ||
+// cell->y < m_min_y ||
+// cell->x > m_max_x ||
+// cell->y > m_max_y)
+// {
+// cell = cell; // Breakpoint here
+// }
+//}
+ // Allocate the array of cell pointers
+ m_sorted_cells.allocate(m_num_cells, 16);
+
+ // Allocate and zero the Y array
+ m_sorted_y.allocate(m_max_y - m_min_y + 1, 16); // one bucket per scanline in the bbox
+ m_sorted_y.zero();
+
+ // Create the Y-histogram (count the numbers of cells for each Y)
+ cell_type** block_ptr = m_cells;
+ cell_type* cell_ptr;
+ unsigned nb = m_num_cells;
+ unsigned i;
+ while(nb) // walk the cells block by block
+ {
+ cell_ptr = *block_ptr++;
+ i = (nb > cell_block_size) ? unsigned(cell_block_size) : nb; // cells in this block (last one may be partial)
+ nb -= i;
+ while(i--)
+ {
+ m_sorted_y[cell_ptr->y - m_min_y].start++; // 'start' temporarily holds the count
+ ++cell_ptr;
+ }
+ }
+
+ // Convert the Y-histogram into the array of starting indexes
+ unsigned start = 0;
+ for(i = 0; i < m_sorted_y.size(); i++)
+ {
+ unsigned v = m_sorted_y[i].start;
+ m_sorted_y[i].start = start; // prefix sum turns counts into offsets
+ start += v;
+ }
+
+ // Fill the cell pointer array sorted by Y
+ block_ptr = m_cells;
+ nb = m_num_cells;
+ while(nb)
+ {
+ cell_ptr = *block_ptr++;
+ i = (nb > cell_block_size) ? unsigned(cell_block_size) : nb;
+ nb -= i;
+ while(i--)
+ {
+ sorted_y& curr_y = m_sorted_y[cell_ptr->y - m_min_y];
+ m_sorted_cells[curr_y.start + curr_y.num] = cell_ptr; // scatter into the bucket
+ ++curr_y.num;
+ ++cell_ptr;
+ }
+ }
+
+ // Finally arrange the X-arrays
+ for(i = 0; i < m_sorted_y.size(); i++)
+ {
+ const sorted_y& curr_y = m_sorted_y[i];
+ if(curr_y.num)
+ {
+ qsort_cells(m_sorted_cells.data() + curr_y.start, curr_y.num);
+ }
+ }
+ m_sorted = true;
+ }
+
+
+
+ //------------------------------------------------------scanline_hit_test
+ class scanline_hit_test
+ { // minimal scanline consumer: records whether any emitted cell/span covers column m_x; used by rasterizer hit_test()
+ public:
+ scanline_hit_test(int x) : m_x(x), m_hit(false) {}
+
+ void reset_spans() {}
+ void finalize(int) {}
+ void add_cell(int x, int) // coverage value is ignored; any cell at m_x counts as a hit
+ {
+ if(m_x == x) m_hit = true;
+ }
+ void add_span(int x, int len, int)
+ {
+ if(m_x >= x && m_x < x+len) m_hit = true; // half-open span [x, x+len)
+ }
+ unsigned num_spans() const { return 1; } // always non-zero so sweep_scanline() stops after one scanline
+ bool hit() const { return m_hit; }
+
+ private:
+ int m_x;
+ bool m_hit;
+ };
+
+
+}
+
+#endif
diff --git a/src/agg/agg_rasterizer_scanline_aa.h b/src/agg/agg_rasterizer_scanline_aa.h
new file mode 100644
index 000000000..ffc2ddf94
--- /dev/null
+++ b/src/agg/agg_rasterizer_scanline_aa.h
@@ -0,0 +1,481 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+//
+// The author gratefully acknowledges the support of David Turner,
+// Robert Wilhelm, and Werner Lemberg - the authors of the FreeType
+// library - in producing this work. See http://www.freetype.org for details.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for 32-bit screen coordinates has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_RASTERIZER_SCANLINE_AA_INCLUDED
+#define AGG_RASTERIZER_SCANLINE_AA_INCLUDED
+
+#include "agg_rasterizer_cells_aa.h"
+#include "agg_rasterizer_sl_clip.h"
+#include "agg_rasterizer_scanline_aa_nogamma.h"
+#include "agg_gamma_functions.h"
+
+
+namespace agg
+{
+ //==================================================rasterizer_scanline_aa
+ // Polygon rasterizer that is used to render filled polygons with
+ // high-quality Anti-Aliasing. Internally, by default, the class uses
+ // integer coordinates in format 24.8, i.e. 24 bits for integer part
+ // and 8 bits for fractional - see poly_subpixel_shift. This class can be
+ // used in the following way:
+ //
+ // 1. filling_rule(filling_rule_e ft) - optional.
+ //
+ // 2. gamma() - optional.
+ //
+ // 3. reset()
+ //
+ // 4. move_to(x, y) / line_to(x, y) - make the polygon. One can create
+ // more than one contour, but each contour must consist of at least 3
+ // vertices, i.e. move_to(x1, y1); line_to(x2, y2); line_to(x3, y3);
+ // is the absolute minimum of vertices that define a triangle.
+ // The algorithm does not check either the number of vertices nor
+ // coincidence of their coordinates, but in the worst case it just
+ // won't draw anything.
+ // The order of the vertices (clockwise or counterclockwise)
+ // is important when using the non-zero filling rule (fill_non_zero).
+ // In this case the vertex order of all the contours must be the same
+ // if you want your intersecting polygons to be without "holes".
+ // You actually can use different vertices order. If the contours do not
+ // intersect each other the order is not important anyway. If they do,
+ // contours with the same vertex order will be rendered without "holes"
+ // while the intersecting contours with different orders will have "holes".
+ //
+ // filling_rule() and gamma() can be called anytime before "sweeping".
+ //------------------------------------------------------------------------
+ template<class Clip=rasterizer_sl_clip_int> class rasterizer_scanline_aa
+ {
+ enum status // current state of path construction
+ {
+ status_initial,
+ status_move_to,
+ status_line_to,
+ status_closed
+ };
+
+ public:
+ typedef Clip clip_type;
+ typedef typename Clip::conv_type conv_type;
+ typedef typename Clip::coord_type coord_type;
+
+ enum aa_scale_e
+ {
+ aa_shift = 8, // 8-bit anti-aliasing: 256 coverage levels
+ aa_scale = 1 << aa_shift,
+ aa_mask = aa_scale - 1,
+ aa_scale2 = aa_scale * 2, // doubled range used by the even-odd rule
+ aa_mask2 = aa_scale2 - 1
+ };
+
+ //--------------------------------------------------------------------
+ rasterizer_scanline_aa() : // default construction: non-zero fill, identity gamma table
+ m_outline(),
+ m_clipper(),
+ m_filling_rule(fill_non_zero),
+ m_auto_close(true),
+ m_start_x(0),
+ m_start_y(0),
+ m_status(status_initial)
+ {
+ int i;
+ for(i = 0; i < aa_scale; i++) m_gamma[i] = i; // identity mapping: coverage passes through unchanged
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaF>
+ rasterizer_scanline_aa(const GammaF& gamma_function) :
+ m_outline(),
+ m_clipper(m_outline), // NOTE(review): unlike the default ctor this passes m_outline to the clipper - verify clip_type has such a constructor
+ m_filling_rule(fill_non_zero),
+ m_auto_close(true),
+ m_start_x(0),
+ m_start_y(0),
+ m_status(status_initial)
+ {
+ gamma(gamma_function);
+ }
+
+ //--------------------------------------------------------------------
+ void reset();
+ void reset_clipping();
+ void clip_box(double x1, double y1, double x2, double y2);
+ void filling_rule(filling_rule_e filling_rule);
+ void auto_close(bool flag) { m_auto_close = flag; } // if true, contours are closed automatically before sweeping
+
+ //--------------------------------------------------------------------
+ template<class GammaF> void gamma(const GammaF& gamma_function) // rebuild the 256-entry coverage-correction LUT from a functor on [0,1]
+ {
+ int i;
+ for(i = 0; i < aa_scale; i++)
+ {
+ m_gamma[i] = uround(gamma_function(double(i) / aa_mask) * aa_mask);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ unsigned apply_gamma(unsigned cover) const // map a raw 0..aa_mask coverage through the gamma LUT
+ {
+ return m_gamma[cover];
+ }
+
+ //--------------------------------------------------------------------
+ void move_to(int x, int y);
+ void line_to(int x, int y);
+ void move_to_d(double x, double y);
+ void line_to_d(double x, double y);
+ void close_polygon();
+ void add_vertex(double x, double y, unsigned cmd);
+
+ void edge(int x1, int y1, int x2, int y2);
+ void edge_d(double x1, double y1, double x2, double y2);
+
+ //-------------------------------------------------------------------
+ template<class VertexSource>
+ void add_path(VertexSource& vs, unsigned path_id=0) // feed a whole vertex-source path into the rasterizer
+ {
+ double x;
+ double y;
+
+ unsigned cmd;
+ vs.rewind(path_id);
+ if(m_outline.sorted()) reset(); // adding after a sweep implicitly starts a new picture
+ while(!is_stop(cmd = vs.vertex(&x, &y)))
+ {
+ add_vertex(x, y, cmd);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ int min_x() const { return m_outline.min_x(); } // bounding box in integer pixel coordinates
+ int min_y() const { return m_outline.min_y(); }
+ int max_x() const { return m_outline.max_x(); }
+ int max_y() const { return m_outline.max_y(); }
+
+ //--------------------------------------------------------------------
+ void sort();
+ bool rewind_scanlines();
+ bool navigate_scanline(int y);
+
+ //--------------------------------------------------------------------
+ AGG_INLINE unsigned calculate_alpha(int area) const // convert an accumulated subpixel area into a gamma-corrected 0..aa_mask alpha
+ {
+ int cover = area >> (poly_subpixel_shift*2 + 1 - aa_shift); // rescale area to coverage units
+
+ if(cover < 0) cover = -cover;
+ if(m_filling_rule == fill_even_odd)
+ {
+ cover &= aa_mask2; // wrap and reflect for the even-odd rule
+ if(cover > aa_scale)
+ {
+ cover = aa_scale2 - cover;
+ }
+ }
+ if(cover > aa_mask) cover = aa_mask; // clamp to full coverage
+ return m_gamma[cover];
+ }
+
+ //--------------------------------------------------------------------
+ template<class Scanline> bool sweep_scanline(Scanline& sl) // emit the next non-empty scanline into sl; false when past max_y
+ {
+ for(;;)
+ {
+ if(m_scan_y > m_outline.max_y()) return false;
+ sl.reset_spans();
+ unsigned num_cells = m_outline.scanline_num_cells(m_scan_y);
+ const cell_aa* const* cells = m_outline.scanline_cells(m_scan_y);
+ int cover = 0; // running winding coverage across the scanline
+
+ while(num_cells)
+ {
+ const cell_aa* cur_cell = *cells;
+ int x = cur_cell->x;
+ int area = cur_cell->area;
+ unsigned alpha;
+
+ cover += cur_cell->cover;
+
+ //accumulate all cells with the same X
+ while(--num_cells)
+ {
+ cur_cell = *++cells;
+ if(cur_cell->x != x) break;
+ area += cur_cell->area;
+ cover += cur_cell->cover;
+ }
+
+ if(area) // the cell itself is partially covered
+ {
+ alpha = calculate_alpha((cover << (poly_subpixel_shift + 1)) - area);
+ if(alpha)
+ {
+ sl.add_cell(x, alpha);
+ }
+ x++;
+ }
+
+ if(num_cells && cur_cell->x > x) // interior gap up to the next cell: uniform coverage
+ {
+ alpha = calculate_alpha(cover << (poly_subpixel_shift + 1));
+ if(alpha)
+ {
+ sl.add_span(x, cur_cell->x - x, alpha);
+ }
+ }
+ }
+
+ if(sl.num_spans()) break; // found a scanline that produced output
+ ++m_scan_y;
+ }
+
+ sl.finalize(m_scan_y);
+ ++m_scan_y;
+ return true;
+ }
+
+ //--------------------------------------------------------------------
+ bool hit_test(int tx, int ty);
+
+
+ private:
+ //--------------------------------------------------------------------
+ // Disable copying
+ rasterizer_scanline_aa(const rasterizer_scanline_aa<Clip>&);
+ const rasterizer_scanline_aa<Clip>&
+ operator = (const rasterizer_scanline_aa<Clip>&);
+
+ private:
+ rasterizer_cells_aa<cell_aa> m_outline; // the cell accumulator doing the actual rasterization
+ clip_type m_clipper; // clips segments before they reach m_outline
+ int m_gamma[aa_scale]; // coverage-correction lookup table
+ filling_rule_e m_filling_rule;
+ bool m_auto_close;
+ coord_type m_start_x; // start of the current contour, for close_polygon()
+ coord_type m_start_y;
+ unsigned m_status;
+ int m_scan_y; // next scanline to sweep; set by rewind_scanlines()/navigate_scanline()
+ };
+
+
+
+
+
+
+
+
+
+
+
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::reset()
+ { // discard all accumulated cells and start a new picture
+ m_outline.reset();
+ m_status = status_initial;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::filling_rule(filling_rule_e filling_rule)
+ { // select non-zero or even-odd filling; may be changed any time before sweeping
+ m_filling_rule = filling_rule;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::clip_box(double x1, double y1,
+ double x2, double y2)
+ { // set the clip rectangle (user coordinates); resets any accumulated path first
+ reset();
+ m_clipper.clip_box(conv_type::upscale(x1), conv_type::upscale(y1),
+ conv_type::upscale(x2), conv_type::upscale(y2));
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::reset_clipping()
+ { // remove the clip rectangle; also resets any accumulated path
+ reset();
+ m_clipper.reset_clipping();
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::close_polygon()
+ { // close the current contour back to its starting point (no-op unless at least one line_to happened)
+ if(m_status == status_line_to)
+ {
+ m_clipper.line_to(m_outline, m_start_x, m_start_y);
+ m_status = status_closed;
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::move_to(int x, int y)
+ { // begin a new contour at integer (subpixel) coordinates; remembers the start for close_polygon()
+ if(m_outline.sorted()) reset(); // drawing after a sweep starts a new picture
+ if(m_auto_close) close_polygon();
+ m_clipper.move_to(m_start_x = conv_type::downscale(x),
+ m_start_y = conv_type::downscale(y));
+ m_status = status_move_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::line_to(int x, int y)
+ { // add a clipped edge to the current contour, integer (subpixel) coordinates
+ m_clipper.line_to(m_outline,
+ conv_type::downscale(x),
+ conv_type::downscale(y));
+ m_status = status_line_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::move_to_d(double x, double y)
+ { // begin a new contour at double coordinates (upscaled to subpixels by the clipper's converter)
+ if(m_outline.sorted()) reset(); // drawing after a sweep starts a new picture
+ if(m_auto_close) close_polygon();
+ m_clipper.move_to(m_start_x = conv_type::upscale(x),
+ m_start_y = conv_type::upscale(y));
+ m_status = status_move_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::line_to_d(double x, double y)
+ { // add a clipped edge to the current contour, double coordinates
+ m_clipper.line_to(m_outline,
+ conv_type::upscale(x),
+ conv_type::upscale(y));
+ m_status = status_line_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::add_vertex(double x, double y, unsigned cmd)
+ { // dispatch a single path command (move/line/close) coming from a vertex source
+ if(is_move_to(cmd))
+ {
+ move_to_d(x, y);
+ }
+ else
+ if(is_vertex(cmd))
+ {
+ line_to_d(x, y);
+ }
+ else
+ if(is_close(cmd))
+ {
+ close_polygon();
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::edge(int x1, int y1, int x2, int y2)
+ { // rasterize one free-standing edge (not part of the current contour), integer coordinates
+ if(m_outline.sorted()) reset();
+ m_clipper.move_to(conv_type::downscale(x1), conv_type::downscale(y1));
+ m_clipper.line_to(m_outline,
+ conv_type::downscale(x2),
+ conv_type::downscale(y2));
+ m_status = status_move_to; // note: does not update m_start_x/y, so close_polygon() will not reference this edge
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::edge_d(double x1, double y1,
+ double x2, double y2)
+ { // rasterize one free-standing edge, double coordinates
+ if(m_outline.sorted()) reset();
+ m_clipper.move_to(conv_type::upscale(x1), conv_type::upscale(y1));
+ m_clipper.line_to(m_outline,
+ conv_type::upscale(x2),
+ conv_type::upscale(y2));
+ m_status = status_move_to; // note: does not update m_start_x/y, so close_polygon() will not reference this edge
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::sort()
+ { // finalize the outline (auto-close) and sort its cells for sweeping
+ if(m_auto_close) close_polygon();
+ m_outline.sort_cells();
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ AGG_INLINE bool rasterizer_scanline_aa<Clip>::rewind_scanlines()
+ { // prepare for sweeping from the top; returns false if nothing was rasterized
+ if(m_auto_close) close_polygon();
+ m_outline.sort_cells();
+ if(m_outline.total_cells() == 0)
+ {
+ return false;
+ }
+ m_scan_y = m_outline.min_y(); // start sweeping at the first scanline of the bbox
+ return true;
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ AGG_INLINE bool rasterizer_scanline_aa<Clip>::navigate_scanline(int y)
+ { // position the sweep at scanline y; false if y is outside the rasterized area (or nothing was rasterized)
+ if(m_auto_close) close_polygon();
+ m_outline.sort_cells();
+ if(m_outline.total_cells() == 0 ||
+ y < m_outline.min_y() ||
+ y > m_outline.max_y())
+ {
+ return false;
+ }
+ m_scan_y = y;
+ return true;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ bool rasterizer_scanline_aa<Clip>::hit_test(int tx, int ty)
+ { // true if pixel (tx, ty) receives any coverage from the rasterized shape
+ if(!navigate_scanline(ty)) return false;
+ scanline_hit_test sl(tx); // throwaway scanline consumer that just records a hit at column tx
+ sweep_scanline(sl);
+ return sl.hit();
+ }
+
+
+
+}
+
+
+
+#endif
+
diff --git a/src/agg/agg_rasterizer_scanline_aa_nogamma.h b/src/agg/agg_rasterizer_scanline_aa_nogamma.h
new file mode 100644
index 000000000..9a809aa5a
--- /dev/null
+++ b/src/agg/agg_rasterizer_scanline_aa_nogamma.h
@@ -0,0 +1,483 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+//
+// The author gratefully acknowledges the support of David Turner,
+// Robert Wilhelm, and Werner Lemberg - the authors of the FreeType
+// library - in producing this work. See http://www.freetype.org for details.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for 32-bit screen coordinates has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_RASTERIZER_SCANLINE_AA_NOGAMMA_INCLUDED
+#define AGG_RASTERIZER_SCANLINE_AA_NOGAMMA_INCLUDED
+
+#include <limits>
+#include "agg_rasterizer_cells_aa.h"
+#include "agg_rasterizer_sl_clip.h"
+
+
+namespace agg
+{
+
+
+ //-----------------------------------------------------------------cell_aa
+ // A pixel cell. There're no constructors defined and it was done
+ // intentionally in order to avoid extra overhead when allocating an
+ // array of cells.
+    struct cell_aa
+    {
+        int x;     // cell column (pixel x)
+        int y;     // cell row (pixel y)
+        int cover; // coverage value, summed across cells during the sweep
+        int area;  // area contribution used for partial-pixel alpha
+
+        void initial()
+        { // Reset to a sentinel state that compares unequal to any real cell.
+            x = std::numeric_limits<int>::max();
+            y = std::numeric_limits<int>::max();
+            cover = 0;
+            area = 0;
+        }
+
+        void style(const cell_aa&) {} // no per-cell style data in this cell type
+
+        int not_equal(int ex, int ey, const cell_aa&) const
+        { // Non-zero iff (ex, ey) differs from this cell's coordinates.
+            return ((unsigned)ex - (unsigned)x) | ((unsigned)ey - (unsigned)y);
+        }
+    };
+
+
+ //==================================================rasterizer_scanline_aa_nogamma
+ // Polygon rasterizer that is used to render filled polygons with
+ // high-quality Anti-Aliasing. Internally, by default, the class uses
+ // integer coordinates in format 24.8, i.e. 24 bits for integer part
+ // and 8 bits for fractional - see poly_subpixel_shift. This class can be
+ // used in the following way:
+ //
+ // 1. filling_rule(filling_rule_e ft) - optional.
+ //
+ // 2. gamma() - optional.
+ //
+ // 3. reset()
+ //
+ // 4. move_to(x, y) / line_to(x, y) - make the polygon. One can create
+ // more than one contour, but each contour must consist of at least 3
+ // vertices, i.e. move_to(x1, y1); line_to(x2, y2); line_to(x3, y3);
+ // is the absolute minimum of vertices that define a triangle.
+ // The algorithm does not check either the number of vertices nor
+ // coincidence of their coordinates, but in the worst case it just
+ // won't draw anything.
+    // The order of the vertices (clockwise or counterclockwise)
+ // is important when using the non-zero filling rule (fill_non_zero).
+ // In this case the vertex order of all the contours must be the same
+ // if you want your intersecting polygons to be without "holes".
+ // You actually can use different vertices order. If the contours do not
+ // intersect each other the order is not important anyway. If they do,
+ // contours with the same vertex order will be rendered without "holes"
+ // while the intersecting contours with different orders will have "holes".
+ //
+ // filling_rule() and gamma() can be called anytime before "sweeping".
+ //------------------------------------------------------------------------
+    template<class Clip=rasterizer_sl_clip_int> class rasterizer_scanline_aa_nogamma
+    {
+        enum status // path-construction state machine
+        {
+            status_initial,
+            status_move_to,
+            status_line_to,
+            status_closed
+        };
+
+    public:
+        typedef Clip clip_type;
+        typedef typename Clip::conv_type conv_type;
+        typedef typename Clip::coord_type coord_type;
+
+        enum aa_scale_e
+        {
+            aa_shift = 8, // 8 bits of coverage -> 256 anti-aliasing levels
+            aa_scale = 1 << aa_shift,
+            aa_mask = aa_scale - 1,
+            aa_scale2 = aa_scale * 2, // doubled range used by the even-odd rule
+            aa_mask2 = aa_scale2 - 1
+        };
+
+        //--------------------------------------------------------------------
+        rasterizer_scanline_aa_nogamma() :
+            m_outline(),
+            m_clipper(),
+            m_filling_rule(fill_non_zero),
+            m_auto_close(true),
+            m_start_x(0),
+            m_start_y(0),
+            m_status(status_initial)
+        {
+        }
+
+        //--------------------------------------------------------------------
+        void reset();
+        void reset_clipping();
+        void clip_box(double x1, double y1, double x2, double y2);
+        void filling_rule(filling_rule_e filling_rule);
+        void auto_close(bool flag) { m_auto_close = flag; }
+
+        //--------------------------------------------------------------------
+        unsigned apply_gamma(unsigned cover) const
+        { // Identity: the "nogamma" variant applies no gamma lookup table.
+            return cover;
+        }
+
+        //--------------------------------------------------------------------
+        void move_to(int x, int y);
+        void line_to(int x, int y);
+        void move_to_d(double x, double y);
+        void line_to_d(double x, double y);
+        void close_polygon();
+        void add_vertex(double x, double y, unsigned cmd);
+
+        void edge(int x1, int y1, int x2, int y2);
+        void edge_d(double x1, double y1, double x2, double y2);
+
+        //-------------------------------------------------------------------
+        template<class VertexSource>
+        void add_path(VertexSource& vs, unsigned path_id=0)
+        { // Feed a whole path from a vertex source, command by command.
+            double x;
+            double y;
+
+            unsigned cmd;
+            vs.rewind(path_id);
+            if(m_outline.sorted()) reset(); // previous shape was swept: start fresh
+            while(!is_stop(cmd = vs.vertex(&x, &y)))
+            {
+                add_vertex(x, y, cmd);
+            }
+        }
+
+        //--------------------------------------------------------------------
+        int min_x() const { return m_outline.min_x(); }
+        int min_y() const { return m_outline.min_y(); }
+        int max_x() const { return m_outline.max_x(); }
+        int max_y() const { return m_outline.max_y(); }
+
+        //--------------------------------------------------------------------
+        void sort();
+        bool rewind_scanlines();
+        bool navigate_scanline(int y);
+
+        //--------------------------------------------------------------------
+        AGG_INLINE unsigned calculate_alpha(int area) const
+        { // Convert an accumulated area value to a coverage in [0, aa_mask].
+            int cover = area >> (poly_subpixel_shift*2 + 1 - aa_shift); // scale area down to aa units
+
+            if(cover < 0) cover = -cover; // winding direction does not matter here
+            if(m_filling_rule == fill_even_odd)
+            {
+                cover &= aa_mask2; // even-odd: fold coverage back into range
+                if(cover > aa_scale)
+                {
+                    cover = aa_scale2 - cover;
+                }
+            }
+            if(cover > aa_mask) cover = aa_mask; // clamp to full coverage
+            return cover;
+        }
+
+        //--------------------------------------------------------------------
+        template<class Scanline> bool sweep_scanline(Scanline& sl)
+        { // Emit spans of the next non-empty scanline into sl; false when done.
+            for(;;)
+            {
+                if(m_scan_y > m_outline.max_y()) return false; // swept past the bottom
+                sl.reset_spans();
+                unsigned num_cells = m_outline.scanline_num_cells(m_scan_y);
+                const cell_aa* const* cells = m_outline.scanline_cells(m_scan_y);
+                int cover = 0; // running coverage across the scanline
+
+                while(num_cells)
+                {
+                    const cell_aa* cur_cell = *cells;
+                    int x = cur_cell->x;
+                    int area = cur_cell->area;
+                    unsigned alpha;
+
+                    cover += cur_cell->cover;
+
+                    //accumulate all cells with the same X
+                    while(--num_cells)
+                    {
+                        cur_cell = *++cells;
+                        if(cur_cell->x != x) break;
+                        area += cur_cell->area;
+                        cover += cur_cell->cover;
+                    }
+
+                    if(area) // partially covered pixel at x
+                    {
+                        alpha = calculate_alpha((cover << (poly_subpixel_shift + 1)) - area);
+                        if(alpha)
+                        {
+                            sl.add_cell(x, alpha);
+                        }
+                        x++;
+                    }
+
+                    if(num_cells && cur_cell->x > x) // solid run between cells
+                    {
+                        alpha = calculate_alpha(cover << (poly_subpixel_shift + 1));
+                        if(alpha)
+                        {
+                            sl.add_span(x, cur_cell->x - x, alpha);
+                        }
+                    }
+                }
+
+                if(sl.num_spans()) break; // produced something: deliver it
+                ++m_scan_y; // empty scanline: keep searching downward
+            }
+
+            sl.finalize(m_scan_y);
+            ++m_scan_y; // next call continues on the following scanline
+            return true;
+        }
+
+        //--------------------------------------------------------------------
+        bool hit_test(int tx, int ty);
+
+
+    private:
+        //--------------------------------------------------------------------
+        // Disable copying
+        rasterizer_scanline_aa_nogamma(const rasterizer_scanline_aa_nogamma<Clip>&);
+        const rasterizer_scanline_aa_nogamma<Clip>&
+        operator = (const rasterizer_scanline_aa_nogamma<Clip>&);
+
+    private:
+        rasterizer_cells_aa<cell_aa> m_outline; // cell accumulator / sorter
+        clip_type m_clipper;                    // clips segments before rasterizing
+        filling_rule_e m_filling_rule;          // non-zero or even-odd
+        bool m_auto_close;                      // close contours implicitly
+        coord_type m_start_x;                   // contour start, for close_polygon()
+        coord_type m_start_y;
+        unsigned m_status;                      // one of the status enum values
+        int m_scan_y;                           // current sweep scanline
+    };
+
+
+
+
+
+
+
+
+
+
+
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    void rasterizer_scanline_aa_nogamma<Clip>::reset()
+    { // Discard all accumulated cells and restart path construction.
+        m_outline.reset();
+        m_status = status_initial;
+    }
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    void rasterizer_scanline_aa_nogamma<Clip>::filling_rule(filling_rule_e filling_rule)
+    { // Select non-zero or even-odd filling (consumed by calculate_alpha).
+        m_filling_rule = filling_rule;
+    }
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    void rasterizer_scanline_aa_nogamma<Clip>::clip_box(double x1, double y1,
+                                                        double x2, double y2)
+    { // Set the clipping rectangle (double coords); discards the current shape.
+        reset();
+        m_clipper.clip_box(conv_type::upscale(x1), conv_type::upscale(y1),
+                           conv_type::upscale(x2), conv_type::upscale(y2));
+    }
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    void rasterizer_scanline_aa_nogamma<Clip>::reset_clipping()
+    { // Disable clipping and discard the current shape.
+        reset();
+        m_clipper.reset_clipping();
+    }
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    void rasterizer_scanline_aa_nogamma<Clip>::close_polygon()
+    { // Close the current contour with a line back to its starting point.
+        if(m_status == status_line_to) // only if at least one line was added
+        {
+            m_clipper.line_to(m_outline, m_start_x, m_start_y);
+            m_status = status_closed;
+        }
+    }
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    void rasterizer_scanline_aa_nogamma<Clip>::move_to(int x, int y)
+    { // Start a new contour; coordinates are integer subpixel units.
+        if(m_outline.sorted()) reset(); // previous shape was swept: start fresh
+        if(m_auto_close) close_polygon();
+        m_clipper.move_to(m_start_x = conv_type::downscale(x),
+                          m_start_y = conv_type::downscale(y)); // remember start for close_polygon()
+        m_status = status_move_to;
+    }
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    void rasterizer_scanline_aa_nogamma<Clip>::line_to(int x, int y)
+    { // Add a line from the current point; integer subpixel coordinates.
+        m_clipper.line_to(m_outline,
+                          conv_type::downscale(x),
+                          conv_type::downscale(y));
+        m_status = status_line_to;
+    }
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    void rasterizer_scanline_aa_nogamma<Clip>::move_to_d(double x, double y)
+    { // Start a new contour; double coordinates (converted by upscale()).
+        if(m_outline.sorted()) reset(); // previous shape was swept: start fresh
+        if(m_auto_close) close_polygon();
+        m_clipper.move_to(m_start_x = conv_type::upscale(x),
+                          m_start_y = conv_type::upscale(y)); // remember start for close_polygon()
+        m_status = status_move_to;
+    }
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    void rasterizer_scanline_aa_nogamma<Clip>::line_to_d(double x, double y)
+    { // Add a line from the current point; double coordinates.
+        m_clipper.line_to(m_outline,
+                          conv_type::upscale(x),
+                          conv_type::upscale(y));
+        m_status = status_line_to;
+    }
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    void rasterizer_scanline_aa_nogamma<Clip>::add_vertex(double x, double y, unsigned cmd)
+    { // Dispatch one path command (move / line / close) from a vertex source.
+        if(is_move_to(cmd))
+        {
+            move_to_d(x, y);
+        }
+        else
+        if(is_vertex(cmd))
+        {
+            line_to_d(x, y);
+        }
+        else
+        if(is_close(cmd))
+        {
+            close_polygon();
+        }
+    }
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    void rasterizer_scanline_aa_nogamma<Clip>::edge(int x1, int y1, int x2, int y2)
+    { // Add one free-standing edge; integer subpixel coordinates.
+        if(m_outline.sorted()) reset(); // cells already swept -> start a new shape
+        m_clipper.move_to(conv_type::downscale(x1), conv_type::downscale(y1));
+        m_clipper.line_to(m_outline,
+                          conv_type::downscale(x2),
+                          conv_type::downscale(y2));
+        m_status = status_move_to;
+    }
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    void rasterizer_scanline_aa_nogamma<Clip>::edge_d(double x1, double y1,
+                                                      double x2, double y2)
+    { // Add one free-standing edge, coordinates given as doubles.
+        if(m_outline.sorted()) reset(); // cells already swept -> start a new shape
+        m_clipper.move_to(conv_type::upscale(x1), conv_type::upscale(y1));
+        m_clipper.line_to(m_outline,
+                          conv_type::upscale(x2),
+                          conv_type::upscale(y2)); // upscale(): double -> subpixel units
+        m_status = status_move_to;
+    }
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    void rasterizer_scanline_aa_nogamma<Clip>::sort()
+    { // Prepare for sweeping: close the open contour and sort accumulated cells.
+        if(m_auto_close) close_polygon();
+        m_outline.sort_cells();
+    }
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    AGG_INLINE bool rasterizer_scanline_aa_nogamma<Clip>::rewind_scanlines()
+    { // Finalize the outline and position at the first scanline; false if empty.
+        if(m_auto_close) close_polygon();
+        m_outline.sort_cells();
+        if(m_outline.total_cells() == 0)
+        {
+            return false; // nothing was rasterized
+        }
+        m_scan_y = m_outline.min_y(); // sweeping starts at the topmost cell row
+        return true;
+    }
+
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    AGG_INLINE bool rasterizer_scanline_aa_nogamma<Clip>::navigate_scanline(int y)
+    { // Position the sweep at an arbitrary scanline y; false if y is outside.
+        if(m_auto_close) close_polygon();
+        m_outline.sort_cells();
+        if(m_outline.total_cells() == 0 ||
+           y < m_outline.min_y() ||
+           y > m_outline.max_y())
+        {
+            return false; // empty outline, or y outside the vertical extent
+        }
+        m_scan_y = y;
+        return true;
+    }
+
+    //------------------------------------------------------------------------
+    template<class Clip>
+    bool rasterizer_scanline_aa_nogamma<Clip>::hit_test(int tx, int ty)
+    { // True if the rasterized shape covers the pixel at (tx, ty).
+        if(!navigate_scanline(ty)) return false; // ty is outside the shape
+        scanline_hit_test sl(tx);
+        sweep_scanline(sl); // sweep just this one scanline into the hit-test sink
+        return sl.hit();
+    }
+
+
+
+}
+
+
+
+#endif
+
diff --git a/src/agg/agg_rasterizer_sl_clip.h b/src/agg/agg_rasterizer_sl_clip.h
new file mode 100644
index 000000000..3a7f3a103
--- /dev/null
+++ b/src/agg/agg_rasterizer_sl_clip.h
@@ -0,0 +1,351 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+#ifndef AGG_RASTERIZER_SL_CLIP_INCLUDED
+#define AGG_RASTERIZER_SL_CLIP_INCLUDED
+
+#include "agg_clip_liang_barsky.h"
+
+namespace agg
+{
+ //--------------------------------------------------------poly_max_coord_e
+    enum poly_max_coord_e
+    {
+        poly_max_coord = (1 << 30) - 1 //----poly_max_coord (saturation limit for coordinates)
+    };
+
+ //------------------------------------------------------------ras_conv_int
+    struct ras_conv_int
+    {
+        typedef int coord_type;
+        static AGG_INLINE int mul_div(double a, double b, double c)
+        { // a*b/c rounded to nearest int (used for clip intersections).
+            return iround(a * b / c);
+        }
+        static int xi(int v) { return v; } // coords are already subpixel ints
+        static int yi(int v) { return v; }
+        static int upscale(double v) { return iround(v * poly_subpixel_scale); } // double -> subpixel fixed point
+        static int downscale(int v) { return v; } // int input is already subpixel
+    };
+
+ //--------------------------------------------------------ras_conv_int_sat
+    struct ras_conv_int_sat
+    {
+        typedef int coord_type;
+        static AGG_INLINE int mul_div(double a, double b, double c)
+        { // a*b/c rounded, saturated to +/-poly_max_coord.
+            return saturation<poly_max_coord>::iround(a * b / c);
+        }
+        static int xi(int v) { return v; }
+        static int yi(int v) { return v; }
+        static int upscale(double v)
+        { // double -> subpixel units, saturated to avoid integer overflow
+            return saturation<poly_max_coord>::iround(v * poly_subpixel_scale);
+        }
+        static int downscale(int v) { return v; }
+    };
+
+ //---------------------------------------------------------ras_conv_int_3x
+    struct ras_conv_int_3x
+    {
+        typedef int coord_type;
+        static AGG_INLINE int mul_div(double a, double b, double c)
+        {
+            return iround(a * b / c);
+        }
+        static int xi(int v) { return v * 3; } // X tripled: 3x horizontal resolution (presumably LCD sub-pixel output)
+        static int yi(int v) { return v; }
+        static int upscale(double v) { return iround(v * poly_subpixel_scale); }
+        static int downscale(int v) { return v; }
+    };
+
+ //-----------------------------------------------------------ras_conv_dbl
+    struct ras_conv_dbl
+    {
+        typedef double coord_type;
+        static AGG_INLINE double mul_div(double a, double b, double c)
+        { // kept in doubles; no rounding until xi()/yi()
+            return a * b / c;
+        }
+        static int xi(double v) { return iround(v * poly_subpixel_scale); } // round only when emitting
+        static int yi(double v) { return iround(v * poly_subpixel_scale); }
+        static double upscale(double v) { return v; } // coords stay as doubles
+        static double downscale(int v) { return v / double(poly_subpixel_scale); } // subpixel int -> double
+    };
+
+ //--------------------------------------------------------ras_conv_dbl_3x
+    struct ras_conv_dbl_3x
+    {
+        typedef double coord_type;
+        static AGG_INLINE double mul_div(double a, double b, double c)
+        { // kept in doubles; no rounding until xi()/yi()
+            return a * b / c;
+        }
+        static int xi(double v) { return iround(v * poly_subpixel_scale * 3); } // X tripled (3x horizontal resolution)
+        static int yi(double v) { return iround(v * poly_subpixel_scale); }
+        static double upscale(double v) { return v; }
+        static double downscale(int v) { return v / double(poly_subpixel_scale); }
+    };
+
+
+
+
+
+ //------------------------------------------------------rasterizer_sl_clip
+    template<class Conv> class rasterizer_sl_clip
+    {
+    public:
+        typedef Conv conv_type;
+        typedef typename Conv::coord_type coord_type;
+        typedef rect_base<coord_type> rect_type;
+
+        //--------------------------------------------------------------------
+        rasterizer_sl_clip() :
+            m_clip_box(0,0,0,0),
+            m_x1(0),
+            m_y1(0),
+            m_f1(0),
+            m_clipping(false)
+        {}
+
+        //--------------------------------------------------------------------
+        void reset_clipping()
+        { // Pass segments through untouched until clip_box() is called again.
+            m_clipping = false;
+        }
+
+        //--------------------------------------------------------------------
+        void clip_box(coord_type x1, coord_type y1, coord_type x2, coord_type y2)
+        { // Set and enable the clipping rectangle (normalized to x1<=x2, y1<=y2).
+            m_clip_box = rect_type(x1, y1, x2, y2);
+            m_clip_box.normalize();
+            m_clipping = true;
+        }
+
+        //--------------------------------------------------------------------
+        void move_to(coord_type x1, coord_type y1)
+        { // Set the current point and cache its clipping flags.
+            m_x1 = x1;
+            m_y1 = y1;
+            if(m_clipping) m_f1 = clipping_flags(x1, y1, m_clip_box);
+        }
+
+    private:
+        //------------------------------------------------------------------------
+        template<class Rasterizer>
+        AGG_INLINE void line_clip_y(Rasterizer& ras,
+                                    coord_type x1, coord_type y1,
+                                    coord_type x2, coord_type y2,
+                                    unsigned f1, unsigned f2) const
+        { // Clip a segment against the top/bottom edges only, then emit it.
+            f1 &= 10; // keep only the Y flags (2: y > clip.y2, 8: y < clip.y1)
+            f2 &= 10;
+            if((f1 | f2) == 0)
+            {
+                // Fully visible
+                ras.line(Conv::xi(x1), Conv::yi(y1), Conv::xi(x2), Conv::yi(y2));
+            }
+            else
+            {
+                if(f1 == f2)
+                {
+                    // Invisible by Y
+                    return;
+                }
+
+                coord_type tx1 = x1;
+                coord_type ty1 = y1;
+                coord_type tx2 = x2;
+                coord_type ty2 = y2;
+
+                if(f1 & 8) // y1 < clip.y1
+                {
+                    tx1 = x1 + Conv::mul_div(m_clip_box.y1-y1, x2-x1, y2-y1);
+                    ty1 = m_clip_box.y1;
+                }
+
+                if(f1 & 2) // y1 > clip.y2
+                {
+                    tx1 = x1 + Conv::mul_div(m_clip_box.y2-y1, x2-x1, y2-y1);
+                    ty1 = m_clip_box.y2;
+                }
+
+                if(f2 & 8) // y2 < clip.y1
+                {
+                    tx2 = x1 + Conv::mul_div(m_clip_box.y1-y1, x2-x1, y2-y1);
+                    ty2 = m_clip_box.y1;
+                }
+
+                if(f2 & 2) // y2 > clip.y2
+                {
+                    tx2 = x1 + Conv::mul_div(m_clip_box.y2-y1, x2-x1, y2-y1);
+                    ty2 = m_clip_box.y2;
+                }
+                ras.line(Conv::xi(tx1), Conv::yi(ty1),
+                         Conv::xi(tx2), Conv::yi(ty2));
+            }
+        }
+
+
+    public:
+        //--------------------------------------------------------------------
+        template<class Rasterizer>
+        void line_to(Rasterizer& ras, coord_type x2, coord_type y2)
+        { // Clip the segment (current point -> (x2,y2)) and feed it to ras.
+            if(m_clipping)
+            {
+                unsigned f2 = clipping_flags(x2, y2, m_clip_box);
+
+                if((m_f1 & 10) == (f2 & 10) && (m_f1 & 10) != 0)
+                {
+                    // Invisible by Y
+                    m_x1 = x2;
+                    m_y1 = y2;
+                    m_f1 = f2;
+                    return;
+                }
+
+                coord_type x1 = m_x1;
+                coord_type y1 = m_y1;
+                unsigned f1 = m_f1;
+                coord_type y3, y4;
+                unsigned f3, f4;
+
+                switch(((f1 & 5) << 1) | (f2 & 5)) // combine X flags of both ends (1: x > clip.x2, 4: x < clip.x1)
+                {
+                case 0: // Visible by X
+                    line_clip_y(ras, x1, y1, x2, y2, f1, f2);
+                    break;
+
+                case 1: // x2 > clip.x2
+                    y3 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
+                    f3 = clipping_flags_y(y3, m_clip_box);
+                    line_clip_y(ras, x1, y1, m_clip_box.x2, y3, f1, f3);
+                    line_clip_y(ras, m_clip_box.x2, y3, m_clip_box.x2, y2, f3, f2);
+                    break;
+
+                case 2: // x1 > clip.x2
+                    y3 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
+                    f3 = clipping_flags_y(y3, m_clip_box);
+                    line_clip_y(ras, m_clip_box.x2, y1, m_clip_box.x2, y3, f1, f3);
+                    line_clip_y(ras, m_clip_box.x2, y3, x2, y2, f3, f2);
+                    break;
+
+                case 3: // x1 > clip.x2 && x2 > clip.x2
+                    line_clip_y(ras, m_clip_box.x2, y1, m_clip_box.x2, y2, f1, f2);
+                    break;
+
+                case 4: // x2 < clip.x1
+                    y3 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
+                    f3 = clipping_flags_y(y3, m_clip_box);
+                    line_clip_y(ras, x1, y1, m_clip_box.x1, y3, f1, f3);
+                    line_clip_y(ras, m_clip_box.x1, y3, m_clip_box.x1, y2, f3, f2);
+                    break;
+
+                case 6: // x1 > clip.x2 && x2 < clip.x1
+                    y3 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
+                    y4 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
+                    f3 = clipping_flags_y(y3, m_clip_box);
+                    f4 = clipping_flags_y(y4, m_clip_box);
+                    line_clip_y(ras, m_clip_box.x2, y1, m_clip_box.x2, y3, f1, f3);
+                    line_clip_y(ras, m_clip_box.x2, y3, m_clip_box.x1, y4, f3, f4);
+                    line_clip_y(ras, m_clip_box.x1, y4, m_clip_box.x1, y2, f4, f2);
+                    break;
+
+                case 8: // x1 < clip.x1
+                    y3 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
+                    f3 = clipping_flags_y(y3, m_clip_box);
+                    line_clip_y(ras, m_clip_box.x1, y1, m_clip_box.x1, y3, f1, f3);
+                    line_clip_y(ras, m_clip_box.x1, y3, x2, y2, f3, f2);
+                    break;
+
+                case 9: // x1 < clip.x1 && x2 > clip.x2
+                    y3 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
+                    y4 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
+                    f3 = clipping_flags_y(y3, m_clip_box);
+                    f4 = clipping_flags_y(y4, m_clip_box);
+                    line_clip_y(ras, m_clip_box.x1, y1, m_clip_box.x1, y3, f1, f3);
+                    line_clip_y(ras, m_clip_box.x1, y3, m_clip_box.x2, y4, f3, f4);
+                    line_clip_y(ras, m_clip_box.x2, y4, m_clip_box.x2, y2, f4, f2);
+                    break;
+
+                case 12: // x1 < clip.x1 && x2 < clip.x1
+                    line_clip_y(ras, m_clip_box.x1, y1, m_clip_box.x1, y2, f1, f2);
+                    break;
+                }
+                m_f1 = f2;
+            }
+            else
+            {
+                ras.line(Conv::xi(m_x1), Conv::yi(m_y1),
+                         Conv::xi(x2), Conv::yi(y2));
+            }
+            m_x1 = x2; // (x2,y2) becomes the current point
+            m_y1 = y2;
+        }
+
+
+    private:
+        rect_type m_clip_box;  // active clipping rectangle (normalized)
+        coord_type m_x1;       // current point
+        coord_type m_y1;
+        unsigned m_f1;         // clipping flags of the current point
+        bool m_clipping;       // false -> pass segments through unclipped
+    };
+
+
+
+
+ //---------------------------------------------------rasterizer_sl_no_clip
+    class rasterizer_sl_no_clip
+    {
+    public:
+        typedef ras_conv_int conv_type;
+        typedef int coord_type;
+
+        rasterizer_sl_no_clip() : m_x1(0), m_y1(0) {}
+
+        void reset_clipping() {} // nothing to do: clipping is disabled
+        void clip_box(coord_type, coord_type, coord_type, coord_type) {} // ignored
+        void move_to(coord_type x1, coord_type y1) { m_x1 = x1; m_y1 = y1; } // remember the current point
+
+        template<class Rasterizer>
+        void line_to(Rasterizer& ras, coord_type x2, coord_type y2)
+        { // Forward the segment to the rasterizer unclipped.
+            ras.line(m_x1, m_y1, x2, y2);
+            m_x1 = x2;
+            m_y1 = y2;
+        }
+
+    private:
+        int m_x1, m_y1; // current point
+    };
+
+
+ // -----rasterizer_sl_clip_int
+ // -----rasterizer_sl_clip_int_sat
+ // -----rasterizer_sl_clip_int_3x
+ // -----rasterizer_sl_clip_dbl
+ // -----rasterizer_sl_clip_dbl_3x
+ //------------------------------------------------------------------------
+    typedef rasterizer_sl_clip<ras_conv_int>     rasterizer_sl_clip_int;     // int subpixel coords
+    typedef rasterizer_sl_clip<ras_conv_int_sat> rasterizer_sl_clip_int_sat; // int coords, overflow-saturated
+    typedef rasterizer_sl_clip<ras_conv_int_3x>  rasterizer_sl_clip_int_3x;  // int coords, 3x horizontal
+    typedef rasterizer_sl_clip<ras_conv_dbl>     rasterizer_sl_clip_dbl;     // double coords
+    typedef rasterizer_sl_clip<ras_conv_dbl_3x>  rasterizer_sl_clip_dbl_3x;  // double coords, 3x horizontal
+
+
+}
+
+#endif
diff --git a/src/agg/agg_renderer_base.h b/src/agg/agg_renderer_base.h
new file mode 100644
index 000000000..527c62f78
--- /dev/null
+++ b/src/agg/agg_renderer_base.h
@@ -0,0 +1,731 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// class renderer_base
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_RENDERER_BASE_INCLUDED
+#define AGG_RENDERER_BASE_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_rendering_buffer.h"
+
+namespace agg
+{
+
+ //-----------------------------------------------------------renderer_base
+ template<class PixelFormat> class renderer_base
+ {
+ public:
+ typedef PixelFormat pixfmt_type;
+ typedef typename pixfmt_type::color_type color_type;
+ typedef typename pixfmt_type::row_data row_data;
+
+        //--------------------------------------------------------------------
+        renderer_base() : m_ren(0), m_clip_box(1, 1, 0, 0) {} // (1,1,0,0): degenerate box, everything clipped
+        explicit renderer_base(pixfmt_type& ren) :
+            m_ren(&ren),
+            m_clip_box(0, 0, ren.width() - 1, ren.height() - 1) // clip to the whole buffer
+        {}
+        void attach(pixfmt_type& ren)
+        { // Rebind to another pixel-format adaptor; resets clipping to full area.
+            m_ren = &ren;
+            m_clip_box = rect_i(0, 0, ren.width() - 1, ren.height() - 1);
+        }
+
+        //--------------------------------------------------------------------
+        const pixfmt_type& ren() const { return *m_ren; }
+        pixfmt_type& ren() { return *m_ren; }
+
+        //--------------------------------------------------------------------
+        unsigned width() const { return m_ren->width(); }
+        unsigned height() const { return m_ren->height(); }
+
+        //--------------------------------------------------------------------
+        bool clip_box(int x1, int y1, int x2, int y2)
+        { // Set the clip box, intersected with the buffer; false if empty result.
+            rect_i cb(x1, y1, x2, y2);
+            cb.normalize();
+            if(cb.clip(rect_i(0, 0, width() - 1, height() - 1)))
+            {
+                m_clip_box = cb;
+                return true;
+            }
+            m_clip_box.x1 = 1; // degenerate box: all drawing is rejected
+            m_clip_box.y1 = 1;
+            m_clip_box.x2 = 0;
+            m_clip_box.y2 = 0;
+            return false;
+        }
+
+        //--------------------------------------------------------------------
+        void reset_clipping(bool visibility)
+        { // visibility=true -> clip to full buffer; false -> clip everything out.
+            if(visibility)
+            {
+                m_clip_box.x1 = 0;
+                m_clip_box.y1 = 0;
+                m_clip_box.x2 = width() - 1;
+                m_clip_box.y2 = height() - 1;
+            }
+            else
+            {
+                m_clip_box.x1 = 1; // degenerate box: all drawing is rejected
+                m_clip_box.y1 = 1;
+                m_clip_box.x2 = 0;
+                m_clip_box.y2 = 0;
+            }
+        }
+
+        //--------------------------------------------------------------------
+        void clip_box_naked(int x1, int y1, int x2, int y2)
+        { // Set the clip box without normalizing or bounding to the buffer.
+            m_clip_box.x1 = x1;
+            m_clip_box.y1 = y1;
+            m_clip_box.x2 = x2;
+            m_clip_box.y2 = y2;
+        }
+
+        //--------------------------------------------------------------------
+        bool inbox(int x, int y) const
+        { // True if (x, y) lies inside the current clip box (inclusive).
+            return x >= m_clip_box.x1 && y >= m_clip_box.y1 &&
+                   x <= m_clip_box.x2 && y <= m_clip_box.y2;
+        }
+
+        //--------------------------------------------------------------------
+        const rect_i& clip_box() const { return m_clip_box; }
+        int xmin() const { return m_clip_box.x1; }
+        int ymin() const { return m_clip_box.y1; }
+        int xmax() const { return m_clip_box.x2; }
+        int ymax() const { return m_clip_box.y2; }
+
+        //--------------------------------------------------------------------
+        const rect_i& bounding_clip_box() const { return m_clip_box; }
+        int bounding_xmin() const { return m_clip_box.x1; }
+        int bounding_ymin() const { return m_clip_box.y1; }
+        int bounding_xmax() const { return m_clip_box.x2; }
+        int bounding_ymax() const { return m_clip_box.y2; }
+
+        //--------------------------------------------------------------------
+        void clear(const color_type& c)
+        { // Overwrite the whole buffer with c, ignoring the clip box.
+            unsigned y;
+            if(width())
+            {
+                for(y = 0; y < height(); y++)
+                {
+                    m_ren->copy_hline(0, y, width(), c);
+                }
+            }
+        }
+
+
+        //--------------------------------------------------------------------
+        void fill(const color_type& c)
+        { // Blend c over the whole buffer at full cover, ignoring the clip box.
+            unsigned y;
+            if(width())
+            {
+                for(y = 0; y < height(); y++)
+                {
+                    m_ren->blend_hline(0, y, width(), c, cover_mask);
+                }
+            }
+        }
+
+        //--------------------------------------------------------------------
+        void copy_pixel(int x, int y, const color_type& c)
+        { // Overwrite one pixel, clipped.
+            if(inbox(x, y))
+            {
+                m_ren->copy_pixel(x, y, c);
+            }
+        }
+
+        //--------------------------------------------------------------------
+        void blend_pixel(int x, int y, const color_type& c, cover_type cover)
+        { // Blend one pixel with the given coverage, clipped.
+            if(inbox(x, y))
+            {
+                m_ren->blend_pixel(x, y, c, cover);
+            }
+        }
+
+        //--------------------------------------------------------------------
+        color_type pixel(int x, int y) const
+        { // Read one pixel; no_color() when (x, y) is outside the clip box.
+            return inbox(x, y) ?
+                   m_ren->pixel(x, y) :
+                   color_type::no_color();
+        }
+
+        //--------------------------------------------------------------------
+        void copy_hline(int x1, int y, int x2, const color_type& c)
+        { // Overwrite a horizontal run [x1, x2] at row y, clipped.
+            if(x1 > x2) { int t = x2; x2 = x1; x1 = t; } // accept endpoints in any order
+            if(y > ymax()) return;
+            if(y < ymin()) return;
+            if(x1 > xmax()) return;
+            if(x2 < xmin()) return;
+
+            if(x1 < xmin()) x1 = xmin(); // trim to the clip box
+            if(x2 > xmax()) x2 = xmax();
+
+            m_ren->copy_hline(x1, y, x2 - x1 + 1, c);
+        }
+
+        //--------------------------------------------------------------------
+        void copy_vline(int x, int y1, int y2, const color_type& c)
+        { // Overwrite a vertical run [y1, y2] at column x, clipped.
+            if(y1 > y2) { int t = y2; y2 = y1; y1 = t; } // accept endpoints in any order
+            if(x > xmax()) return;
+            if(x < xmin()) return;
+            if(y1 > ymax()) return;
+            if(y2 < ymin()) return;
+
+            if(y1 < ymin()) y1 = ymin(); // trim to the clip box
+            if(y2 > ymax()) y2 = ymax();
+
+            m_ren->copy_vline(x, y1, y2 - y1 + 1, c);
+        }
+
+        //--------------------------------------------------------------------
+        void blend_hline(int x1, int y, int x2,
+                         const color_type& c, cover_type cover)
+        { // Blend a horizontal run [x1, x2] at row y with constant cover, clipped.
+            if(x1 > x2) { int t = x2; x2 = x1; x1 = t; }
+            if(y > ymax()) return;
+            if(y < ymin()) return;
+            if(x1 > xmax()) return;
+            if(x2 < xmin()) return;
+
+            if(x1 < xmin()) x1 = xmin();
+            if(x2 > xmax()) x2 = xmax();
+
+            m_ren->blend_hline(x1, y, x2 - x1 + 1, c, cover);
+        }
+
+        //--------------------------------------------------------------------
+        void blend_vline(int x, int y1, int y2,
+                         const color_type& c, cover_type cover)
+        { // Blend a vertical run [y1, y2] at column x with constant cover, clipped.
+            if(y1 > y2) { int t = y2; y2 = y1; y1 = t; }
+            if(x > xmax()) return;
+            if(x < xmin()) return;
+            if(y1 > ymax()) return;
+            if(y2 < ymin()) return;
+
+            if(y1 < ymin()) y1 = ymin();
+            if(y2 > ymax()) y2 = ymax();
+
+            m_ren->blend_vline(x, y1, y2 - y1 + 1, c, cover);
+        }
+
+
+        //--------------------------------------------------------------------
+        void copy_bar(int x1, int y1, int x2, int y2, const color_type& c)
+        { // Overwrite a filled rectangle, clipped to the clip box.
+            rect_i rc(x1, y1, x2, y2);
+            rc.normalize();
+            if(rc.clip(clip_box()))
+            {
+                int y;
+                for(y = rc.y1; y <= rc.y2; y++)
+                {
+                    m_ren->copy_hline(rc.x1, y, unsigned(rc.x2 - rc.x1 + 1), c);
+                }
+            }
+        }
+
+        //--------------------------------------------------------------------
+        void blend_bar(int x1, int y1, int x2, int y2,
+                       const color_type& c, cover_type cover)
+        { // Blend a filled rectangle with constant cover, clipped.
+            rect_i rc(x1, y1, x2, y2);
+            rc.normalize();
+            if(rc.clip(clip_box()))
+            {
+                int y;
+                for(y = rc.y1; y <= rc.y2; y++)
+                {
+                    m_ren->blend_hline(rc.x1,
+                                       y,
+                                       unsigned(rc.x2 - rc.x1 + 1),
+                                       c,
+                                       cover);
+                }
+            }
+        }
+
+        //--------------------------------------------------------------------
+        void blend_solid_hspan(int x, int y, int len,
+                               const color_type& c,
+                               const cover_type* covers)
+        { // Blend a horizontal span of one color with per-pixel covers, clipped.
+            if(y > ymax()) return;
+            if(y < ymin()) return;
+
+            if(x < xmin())
+            {
+                len -= xmin() - x; // trim on the left...
+                if(len <= 0) return;
+                covers += xmin() - x; // ...and skip the clipped covers
+                x = xmin();
+            }
+            if(x + len > xmax())
+            {
+                len = xmax() - x + 1; // trim on the right
+                if(len <= 0) return;
+            }
+            m_ren->blend_solid_hspan(x, y, len, c, covers);
+        }
+
+        //--------------------------------------------------------------------
+        void blend_solid_vspan(int x, int y, int len,
+                               const color_type& c,
+                               const cover_type* covers)
+        { // Blend a vertical span of one color with per-pixel covers, clipped.
+            if(x > xmax()) return;
+            if(x < xmin()) return;
+
+            if(y < ymin())
+            {
+                len -= ymin() - y; // trim at the top...
+                if(len <= 0) return;
+                covers += ymin() - y; // ...and skip the clipped covers
+                y = ymin();
+            }
+            if(y + len > ymax())
+            {
+                len = ymax() - y + 1; // trim at the bottom
+                if(len <= 0) return;
+            }
+            m_ren->blend_solid_vspan(x, y, len, c, covers);
+        }
+
+
+ //--------------------------------------------------------------------
+ // Copy (no blending) a horizontal span of per-pixel colors,
+ // clipping the span (and the colors pointer) to the clip box.
+ void copy_color_hspan(int x, int y, int len, const color_type* colors)
+ {
+ if(y > ymax()) return;
+ if(y < ymin()) return;
+
+ if(x < xmin())
+ {
+ // Advance past the clipped-off left part.
+ int d = xmin() - x;
+ len -= d;
+ if(len <= 0) return;
+ colors += d;
+ x = xmin();
+ }
+ if(x + len > xmax())
+ {
+ len = xmax() - x + 1;
+ if(len <= 0) return;
+ }
+ m_ren->copy_color_hspan(x, y, len, colors);
+ }
+
+
+ //--------------------------------------------------------------------
+ // Copy (no blending) a vertical span of per-pixel colors,
+ // clipping the span (and the colors pointer) to the clip box.
+ void copy_color_vspan(int x, int y, int len, const color_type* colors)
+ {
+ if(x > xmax()) return;
+ if(x < xmin()) return;
+
+ if(y < ymin())
+ {
+ // Advance past the clipped-off top part.
+ int d = ymin() - y;
+ len -= d;
+ if(len <= 0) return;
+ colors += d;
+ y = ymin();
+ }
+ if(y + len > ymax())
+ {
+ len = ymax() - y + 1;
+ if(len <= 0) return;
+ }
+ m_ren->copy_color_vspan(x, y, len, colors);
+ }
+
+
+ //--------------------------------------------------------------------
+ // Blend a horizontal span of per-pixel colors. "covers" may be 0,
+ // in which case the scalar "cover" applies to the whole span.
+ void blend_color_hspan(int x, int y, int len,
+ const color_type* colors,
+ const cover_type* covers,
+ cover_type cover = agg::cover_full)
+ {
+ if(y > ymax()) return;
+ if(y < ymin()) return;
+
+ if(x < xmin())
+ {
+ // Advance colors (and covers when present) past the
+ // clipped-off left part.
+ int d = xmin() - x;
+ len -= d;
+ if(len <= 0) return;
+ if(covers) covers += d;
+ colors += d;
+ x = xmin();
+ }
+ if(x + len > xmax())
+ {
+ len = xmax() - x + 1;
+ if(len <= 0) return;
+ }
+ m_ren->blend_color_hspan(x, y, len, colors, covers, cover);
+ }
+
+ //--------------------------------------------------------------------
+ // Blend a vertical span of per-pixel colors. "covers" may be 0,
+ // in which case the scalar "cover" applies to the whole span.
+ void blend_color_vspan(int x, int y, int len,
+ const color_type* colors,
+ const cover_type* covers,
+ cover_type cover = agg::cover_full)
+ {
+ if(x > xmax()) return;
+ if(x < xmin()) return;
+
+ if(y < ymin())
+ {
+ // Advance colors (and covers when present) past the
+ // clipped-off top part.
+ int d = ymin() - y;
+ len -= d;
+ if(len <= 0) return;
+ if(covers) covers += d;
+ colors += d;
+ y = ymin();
+ }
+ if(y + len > ymax())
+ {
+ len = ymax() - y + 1;
+ if(len <= 0) return;
+ }
+ m_ren->blend_color_vspan(x, y, len, colors, covers, cover);
+ }
+
+ //--------------------------------------------------------------------
+ // Mutually clip a destination rectangle (against the clip box) and
+ // a source rectangle (against the source buffer extents wsrc/hsrc).
+ // Both rectangles are adjusted in place and kept aligned with each
+ // other. Returns a rect whose x2/y2 hold the resulting copy width
+ // and height (x1/y1 are always 0).
+ rect_i clip_rect_area(rect_i& dst, rect_i& src, int wsrc, int hsrc) const
+ {
+ rect_i rc(0,0,0,0);
+ rect_i cb = clip_box();
+ // Make the clip box exclusive on the right/bottom, matching the
+ // half-open convention the rest of this routine works in.
+ ++cb.x2;
+ ++cb.y2;
+
+ // Clamp the source to the source buffer, shifting dst to match.
+ if(src.x1 < 0)
+ {
+ dst.x1 -= src.x1;
+ src.x1 = 0;
+ }
+ if(src.y1 < 0)
+ {
+ dst.y1 -= src.y1;
+ src.y1 = 0;
+ }
+
+ if(src.x2 > wsrc) src.x2 = wsrc;
+ if(src.y2 > hsrc) src.y2 = hsrc;
+
+ // Clamp the destination to the clip box, shifting src to match.
+ if(dst.x1 < cb.x1)
+ {
+ src.x1 += cb.x1 - dst.x1;
+ dst.x1 = cb.x1;
+ }
+ if(dst.y1 < cb.y1)
+ {
+ src.y1 += cb.y1 - dst.y1;
+ dst.y1 = cb.y1;
+ }
+
+ if(dst.x2 > cb.x2) dst.x2 = cb.x2;
+ if(dst.y2 > cb.y2) dst.y2 = cb.y2;
+
+ // The effective size is the smaller of the two clipped extents.
+ rc.x2 = dst.x2 - dst.x1;
+ rc.y2 = dst.y2 - dst.y1;
+
+ if(rc.x2 > src.x2 - src.x1) rc.x2 = src.x2 - src.x1;
+ if(rc.y2 > src.y2 - src.y1) rc.y2 = src.y2 - src.y1;
+ return rc;
+ }
+
+ //--------------------------------------------------------------------
+ // Copy a rectangular region from another rendering buffer into this
+ // one, offset by (dx, dy) and clipped on both sides. rect_src_ptr
+ // selects a sub-rectangle of the source (inclusive coordinates);
+ // when 0 the whole source is copied.
+ template<class RenBuf>
+ void copy_from(const RenBuf& src,
+ const rect_i* rect_src_ptr = 0,
+ int dx = 0,
+ int dy = 0)
+ {
+ rect_i rsrc(0, 0, src.width(), src.height());
+ if(rect_src_ptr)
+ {
+ // Convert the inclusive source rect to half-open form.
+ rsrc.x1 = rect_src_ptr->x1;
+ rsrc.y1 = rect_src_ptr->y1;
+ rsrc.x2 = rect_src_ptr->x2 + 1;
+ rsrc.y2 = rect_src_ptr->y2 + 1;
+ }
+
+ // Version with xdst, ydst (absolute positioning)
+ //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
+
+ // Version with dx, dy (relative positioning)
+ rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
+
+ rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
+
+ if(rc.x2 > 0)
+ {
+ int incy = 1;
+ // When copying downwards within a possibly overlapping
+ // buffer, walk the rows bottom-up so source rows are read
+ // before they are overwritten.
+ if(rdst.y1 > rsrc.y1)
+ {
+ rsrc.y1 += rc.y2 - 1;
+ rdst.y1 += rc.y2 - 1;
+ incy = -1;
+ }
+ while(rc.y2 > 0)
+ {
+ m_ren->copy_from(src,
+ rdst.x1, rdst.y1,
+ rsrc.x1, rsrc.y1,
+ rc.x2);
+ rdst.y1 += incy;
+ rsrc.y1 += incy;
+ --rc.y2;
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Blend a rectangular region of another pixel-format renderer into
+ // this one, offset by (dx, dy), clipped both to the clip box and to
+ // each source row's valid extent (row_data may describe a partial
+ // row). "cover" scales the blend for the whole region.
+ template<class SrcPixelFormatRenderer>
+ void blend_from(const SrcPixelFormatRenderer& src,
+ const rect_i* rect_src_ptr = 0,
+ int dx = 0,
+ int dy = 0,
+ cover_type cover = agg::cover_full)
+ {
+ rect_i rsrc(0, 0, src.width(), src.height());
+ if(rect_src_ptr)
+ {
+ // Convert the inclusive source rect to half-open form.
+ rsrc.x1 = rect_src_ptr->x1;
+ rsrc.y1 = rect_src_ptr->y1;
+ rsrc.x2 = rect_src_ptr->x2 + 1;
+ rsrc.y2 = rect_src_ptr->y2 + 1;
+ }
+
+ // Version with xdst, ydst (absolute positioning)
+ //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
+
+ // Version with dx, dy (relative positioning)
+ rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
+ rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
+
+ if(rc.x2 > 0)
+ {
+ int incy = 1;
+ // Walk rows bottom-up when blending downwards, in case the
+ // source and destination overlap.
+ if(rdst.y1 > rsrc.y1)
+ {
+ rsrc.y1 += rc.y2 - 1;
+ rdst.y1 += rc.y2 - 1;
+ incy = -1;
+ }
+ while(rc.y2 > 0)
+ {
+ typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1);
+ if(rw.ptr)
+ {
+ int x1src = rsrc.x1;
+ int x1dst = rdst.x1;
+ int len = rc.x2;
+ // Clip the span to the row's valid range [rw.x1, rw.x2].
+ if(rw.x1 > x1src)
+ {
+ x1dst += rw.x1 - x1src;
+ len -= rw.x1 - x1src;
+ x1src = rw.x1;
+ }
+ if(len > 0)
+ {
+ if(x1src + len-1 > rw.x2)
+ {
+ len -= x1src + len - rw.x2 - 1;
+ }
+ if(len > 0)
+ {
+ m_ren->blend_from(src,
+ x1dst, rdst.y1,
+ x1src, rsrc.y1,
+ len,
+ cover);
+ }
+ }
+ }
+ rdst.y1 += incy;
+ rsrc.y1 += incy;
+ --rc.y2;
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Like blend_from(), but blends the fixed "color" everywhere,
+ // modulated by the source's pixel values (the source acts as a
+ // mask). Same clipping rules as blend_from().
+ template<class SrcPixelFormatRenderer>
+ void blend_from_color(const SrcPixelFormatRenderer& src,
+ const color_type& color,
+ const rect_i* rect_src_ptr = 0,
+ int dx = 0,
+ int dy = 0,
+ cover_type cover = agg::cover_full)
+ {
+ rect_i rsrc(0, 0, src.width(), src.height());
+ if(rect_src_ptr)
+ {
+ // Convert the inclusive source rect to half-open form.
+ rsrc.x1 = rect_src_ptr->x1;
+ rsrc.y1 = rect_src_ptr->y1;
+ rsrc.x2 = rect_src_ptr->x2 + 1;
+ rsrc.y2 = rect_src_ptr->y2 + 1;
+ }
+
+ // Version with xdst, ydst (absolute positioning)
+ //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
+
+ // Version with dx, dy (relative positioning)
+ rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
+ rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
+
+ if(rc.x2 > 0)
+ {
+ int incy = 1;
+ // Walk rows bottom-up when blending downwards (overlap safety).
+ if(rdst.y1 > rsrc.y1)
+ {
+ rsrc.y1 += rc.y2 - 1;
+ rdst.y1 += rc.y2 - 1;
+ incy = -1;
+ }
+ while(rc.y2 > 0)
+ {
+ typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1);
+ if(rw.ptr)
+ {
+ int x1src = rsrc.x1;
+ int x1dst = rdst.x1;
+ int len = rc.x2;
+ // Clip the span to the row's valid range [rw.x1, rw.x2].
+ if(rw.x1 > x1src)
+ {
+ x1dst += rw.x1 - x1src;
+ len -= rw.x1 - x1src;
+ x1src = rw.x1;
+ }
+ if(len > 0)
+ {
+ if(x1src + len-1 > rw.x2)
+ {
+ len -= x1src + len - rw.x2 - 1;
+ }
+ if(len > 0)
+ {
+ m_ren->blend_from_color(src,
+ color,
+ x1dst, rdst.y1,
+ x1src, rsrc.y1,
+ len,
+ cover);
+ }
+ }
+ }
+ rdst.y1 += incy;
+ rsrc.y1 += incy;
+ --rc.y2;
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Like blend_from(), but source pixel values are mapped through the
+ // color lookup table "color_lut" before blending. Same clipping
+ // rules as blend_from().
+ template<class SrcPixelFormatRenderer>
+ void blend_from_lut(const SrcPixelFormatRenderer& src,
+ const color_type* color_lut,
+ const rect_i* rect_src_ptr = 0,
+ int dx = 0,
+ int dy = 0,
+ cover_type cover = agg::cover_full)
+ {
+ rect_i rsrc(0, 0, src.width(), src.height());
+ if(rect_src_ptr)
+ {
+ // Convert the inclusive source rect to half-open form.
+ rsrc.x1 = rect_src_ptr->x1;
+ rsrc.y1 = rect_src_ptr->y1;
+ rsrc.x2 = rect_src_ptr->x2 + 1;
+ rsrc.y2 = rect_src_ptr->y2 + 1;
+ }
+
+ // Version with xdst, ydst (absolute positioning)
+ //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
+
+ // Version with dx, dy (relative positioning)
+ rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
+ rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
+
+ if(rc.x2 > 0)
+ {
+ int incy = 1;
+ // Walk rows bottom-up when blending downwards (overlap safety).
+ if(rdst.y1 > rsrc.y1)
+ {
+ rsrc.y1 += rc.y2 - 1;
+ rdst.y1 += rc.y2 - 1;
+ incy = -1;
+ }
+ while(rc.y2 > 0)
+ {
+ typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1);
+ if(rw.ptr)
+ {
+ int x1src = rsrc.x1;
+ int x1dst = rdst.x1;
+ int len = rc.x2;
+ // Clip the span to the row's valid range [rw.x1, rw.x2].
+ if(rw.x1 > x1src)
+ {
+ x1dst += rw.x1 - x1src;
+ len -= rw.x1 - x1src;
+ x1src = rw.x1;
+ }
+ if(len > 0)
+ {
+ if(x1src + len-1 > rw.x2)
+ {
+ len -= x1src + len - rw.x2 - 1;
+ }
+ if(len > 0)
+ {
+ m_ren->blend_from_lut(src,
+ color_lut,
+ x1dst, rdst.y1,
+ x1src, rsrc.y1,
+ len,
+ cover);
+ }
+ }
+ }
+ rdst.y1 += incy;
+ rsrc.y1 += incy;
+ --rc.y2;
+ }
+ }
+ }
+
+ private:
+ pixfmt_type* m_ren;
+ rect_i m_clip_box;
+ };
+
+
+}
+
+#endif
diff --git a/src/agg/agg_renderer_scanline.h b/src/agg/agg_renderer_scanline.h
new file mode 100644
index 000000000..311e9f739
--- /dev/null
+++ b/src/agg/agg_renderer_scanline.h
@@ -0,0 +1,854 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_RENDERER_SCANLINE_INCLUDED
+#define AGG_RENDERER_SCANLINE_INCLUDED
+
+#include <limits>
+#include <cstdlib>
+#include "agg_basics.h"
+#include "agg_renderer_base.h"
+
+namespace agg
+{
+
+ //================================================render_scanline_aa_solid
+ // Render one anti-aliased scanline with a solid color. A span with
+ // positive len carries one cover per pixel; a negative len encodes a
+ // solid run of |len| pixels sharing the single cover *span->covers.
+ // Precondition (AGG invariant): the scanline has at least one span.
+ template<class Scanline, class BaseRenderer, class ColorT>
+ void render_scanline_aa_solid(const Scanline& sl,
+ BaseRenderer& ren,
+ const ColorT& color)
+ {
+ int y = sl.y();
+ unsigned num_spans = sl.num_spans();
+ typename Scanline::const_iterator span = sl.begin();
+
+ for(;;)
+ {
+ int x = span->x;
+ if(span->len > 0)
+ {
+ ren.blend_solid_hspan(x, y, (unsigned)span->len,
+ color,
+ span->covers);
+ }
+ else
+ {
+ // blend_hline takes x2; x - len - 1 == x + |len| - 1.
+ ren.blend_hline(x, y, (unsigned)(x - span->len - 1),
+ color,
+ *(span->covers));
+ }
+ if(--num_spans == 0) break;
+ ++span;
+ }
+ }
+
+ //===============================================render_scanlines_aa_solid
+ // Sweep all scanlines from the rasterizer and render each one with a
+ // solid color (anti-aliased). The span loop is a manual inline of
+ // render_scanline_aa_solid() — see the comment below.
+ template<class Rasterizer, class Scanline,
+ class BaseRenderer, class ColorT>
+ void render_scanlines_aa_solid(Rasterizer& ras, Scanline& sl,
+ BaseRenderer& ren, const ColorT& color)
+ {
+ if(ras.rewind_scanlines())
+ {
+ // Explicitly convert "color" to the BaseRenderer color type.
+ // For example, it can be called with color type "rgba", while
+ // "rgba8" is needed. Otherwise it will be implicitly
+ // converted in the loop many times.
+ //----------------------
+ typename BaseRenderer::color_type ren_color = color;
+
+ sl.reset(ras.min_x(), ras.max_x());
+ while(ras.sweep_scanline(sl))
+ {
+ //render_scanline_aa_solid(sl, ren, ren_color);
+
+ // This code is equivalent to the above call (copy/paste).
+ // It's just a "manual" optimization for old compilers,
+ // like Microsoft Visual C++ v6.0
+ //-------------------------------
+ int y = sl.y();
+ unsigned num_spans = sl.num_spans();
+ typename Scanline::const_iterator span = sl.begin();
+
+ for(;;)
+ {
+ int x = span->x;
+ if(span->len > 0)
+ {
+ ren.blend_solid_hspan(x, y, (unsigned)span->len,
+ ren_color,
+ span->covers);
+ }
+ else
+ {
+ // Negative len: solid run, single cover value.
+ ren.blend_hline(x, y, (unsigned)(x - span->len - 1),
+ ren_color,
+ *(span->covers));
+ }
+ if(--num_spans == 0) break;
+ ++span;
+ }
+ }
+ }
+ }
+
+ //==============================================renderer_scanline_aa_solid
+ // Object wrapper around render_scanline_aa_solid(): holds a base
+ // renderer and a current color, exposing the prepare()/render()
+ // interface expected by render_scanlines().
+ template<class BaseRenderer> class renderer_scanline_aa_solid
+ {
+ public:
+ typedef BaseRenderer base_ren_type;
+ typedef typename base_ren_type::color_type color_type;
+
+ //--------------------------------------------------------------------
+ renderer_scanline_aa_solid() : m_ren(0) {}
+ explicit renderer_scanline_aa_solid(base_ren_type& ren) : m_ren(&ren) {}
+ void attach(base_ren_type& ren)
+ {
+ m_ren = &ren;
+ }
+
+ //--------------------------------------------------------------------
+ // Set/get the fill color used for subsequent render() calls.
+ void color(const color_type& c) { m_color = c; }
+ const color_type& color() const { return m_color; }
+
+ //--------------------------------------------------------------------
+ // No per-frame setup needed for solid fills.
+ void prepare() {}
+
+ //--------------------------------------------------------------------
+ template<class Scanline> void render(const Scanline& sl)
+ {
+ render_scanline_aa_solid(sl, *m_ren, m_color);
+ }
+
+ private:
+ base_ren_type* m_ren; // not owned
+ color_type m_color;
+ };
+
+
+
+
+
+
+
+
+
+
+
+
+
+ //======================================================render_scanline_aa
+ // Render one anti-aliased scanline through a span generator: for
+ // each span, colors are generated into an allocator-provided buffer
+ // and blended. A negative span->len encodes a solid run: the covers
+ // pointer is dropped and the single cover *covers is used instead.
+ template<class Scanline, class BaseRenderer,
+ class SpanAllocator, class SpanGenerator>
+ void render_scanline_aa(const Scanline& sl, BaseRenderer& ren,
+ SpanAllocator& alloc, SpanGenerator& span_gen)
+ {
+ int y = sl.y();
+
+ unsigned num_spans = sl.num_spans();
+ typename Scanline::const_iterator span = sl.begin();
+ for(;;)
+ {
+ int x = span->x;
+ int len = span->len;
+ const typename Scanline::cover_type* covers = span->covers;
+
+ if(len < 0) len = -len;
+ typename BaseRenderer::color_type* colors = alloc.allocate(len);
+ span_gen.generate(colors, x, y, len);
+ ren.blend_color_hspan(x, y, len, colors,
+ (span->len < 0) ? 0 : covers, *covers);
+
+ if(--num_spans == 0) break;
+ ++span;
+ }
+ }
+
+ //=====================================================render_scanlines_aa
+ // Sweep the rasterizer and render every produced scanline through a
+ // span generator (anti-aliased).
+ template<class Rasterizer, class Scanline, class BaseRenderer,
+ class SpanAllocator, class SpanGenerator>
+ void render_scanlines_aa(Rasterizer& ras, Scanline& sl, BaseRenderer& ren,
+ SpanAllocator& alloc, SpanGenerator& span_gen)
+ {
+ if(!ras.rewind_scanlines()) return;
+
+ sl.reset(ras.min_x(), ras.max_x());
+ span_gen.prepare();
+ while(ras.sweep_scanline(sl))
+ {
+ render_scanline_aa(sl, ren, alloc, span_gen);
+ }
+ }
+
+ //====================================================renderer_scanline_aa
+ // Object wrapper around render_scanline_aa(): bundles a base
+ // renderer, a span allocator and a span generator behind the
+ // prepare()/render() interface expected by render_scanlines().
+ template<class BaseRenderer, class SpanAllocator, class SpanGenerator>
+ class renderer_scanline_aa
+ {
+ public:
+ typedef BaseRenderer base_ren_type;
+ typedef SpanAllocator alloc_type;
+ typedef SpanGenerator span_gen_type;
+
+ //--------------------------------------------------------------------
+ renderer_scanline_aa() : m_ren(0), m_alloc(0), m_span_gen(0) {}
+ renderer_scanline_aa(base_ren_type& ren,
+ alloc_type& alloc,
+ span_gen_type& span_gen) :
+ m_ren(&ren),
+ m_alloc(&alloc),
+ m_span_gen(&span_gen)
+ {}
+ void attach(base_ren_type& ren,
+ alloc_type& alloc,
+ span_gen_type& span_gen)
+ {
+ m_ren = &ren;
+ m_alloc = &alloc;
+ m_span_gen = &span_gen;
+ }
+
+ //--------------------------------------------------------------------
+ // Forward frame setup to the span generator.
+ void prepare() { m_span_gen->prepare(); }
+
+ //--------------------------------------------------------------------
+ template<class Scanline> void render(const Scanline& sl)
+ {
+ render_scanline_aa(sl, *m_ren, *m_alloc, *m_span_gen);
+ }
+
+ private:
+ base_ren_type* m_ren; // not owned
+ alloc_type* m_alloc; // not owned
+ span_gen_type* m_span_gen; // not owned
+ };
+
+
+
+
+
+
+ //===============================================render_scanline_bin_solid
+ // Render one scanline with a solid color and no anti-aliasing:
+ // every span is emitted at full cover, ignoring the cover arrays.
+ // |span->len| is the pixel count regardless of sign.
+ template<class Scanline, class BaseRenderer, class ColorT>
+ void render_scanline_bin_solid(const Scanline& sl,
+ BaseRenderer& ren,
+ const ColorT& color)
+ {
+ unsigned num_spans = sl.num_spans();
+ typename Scanline::const_iterator span = sl.begin();
+ for(;;)
+ {
+ // blend_hline takes the inclusive x2 coordinate.
+ ren.blend_hline(span->x,
+ sl.y(),
+ span->x - 1 + ((span->len < 0) ?
+ -span->len :
+ span->len),
+ color,
+ cover_full);
+ if(--num_spans == 0) break;
+ ++span;
+ }
+ }
+
+ //==============================================render_scanlines_bin_solid
+ // Sweep all scanlines and render each one solid, without
+ // anti-aliasing. The span loop is a manual inline of
+ // render_scanline_bin_solid() — see the comment below.
+ template<class Rasterizer, class Scanline,
+ class BaseRenderer, class ColorT>
+ void render_scanlines_bin_solid(Rasterizer& ras, Scanline& sl,
+ BaseRenderer& ren, const ColorT& color)
+ {
+ if(ras.rewind_scanlines())
+ {
+ // Explicitly convert "color" to the BaseRenderer color type.
+ // For example, it can be called with color type "rgba", while
+ // "rgba8" is needed. Otherwise it will be implicitly
+ // converted in the loop many times.
+ //----------------------
+ typename BaseRenderer::color_type ren_color(color);
+
+ sl.reset(ras.min_x(), ras.max_x());
+ while(ras.sweep_scanline(sl))
+ {
+ //render_scanline_bin_solid(sl, ren, ren_color);
+
+ // This code is equivalent to the above call (copy/paste).
+ // It's just a "manual" optimization for old compilers,
+ // like Microsoft Visual C++ v6.0
+ //-------------------------------
+ unsigned num_spans = sl.num_spans();
+ typename Scanline::const_iterator span = sl.begin();
+ for(;;)
+ {
+ ren.blend_hline(span->x,
+ sl.y(),
+ span->x - 1 + ((span->len < 0) ?
+ -span->len :
+ span->len),
+ ren_color,
+ cover_full);
+ if(--num_spans == 0) break;
+ ++span;
+ }
+ }
+ }
+ }
+
+ //=============================================renderer_scanline_bin_solid
+ // Object wrapper around render_scanline_bin_solid(): holds a base
+ // renderer and a current color behind the prepare()/render()
+ // interface expected by render_scanlines().
+ template<class BaseRenderer> class renderer_scanline_bin_solid
+ {
+ public:
+ typedef BaseRenderer base_ren_type;
+ typedef typename base_ren_type::color_type color_type;
+
+ //--------------------------------------------------------------------
+ renderer_scanline_bin_solid() : m_ren(0) {}
+ explicit renderer_scanline_bin_solid(base_ren_type& ren) : m_ren(&ren) {}
+ void attach(base_ren_type& ren)
+ {
+ m_ren = &ren;
+ }
+
+ //--------------------------------------------------------------------
+ // Set/get the fill color used for subsequent render() calls.
+ void color(const color_type& c) { m_color = c; }
+ const color_type& color() const { return m_color; }
+
+ //--------------------------------------------------------------------
+ // No per-frame setup needed for solid fills.
+ void prepare() {}
+
+ //--------------------------------------------------------------------
+ template<class Scanline> void render(const Scanline& sl)
+ {
+ render_scanline_bin_solid(sl, *m_ren, m_color);
+ }
+
+ private:
+ base_ren_type* m_ren; // not owned
+ color_type m_color;
+ };
+
+
+
+
+
+
+
+
+ //======================================================render_scanline_bin
+ // Render one scanline through a span generator without
+ // anti-aliasing: every span is emitted at full cover.
+ template<class Scanline, class BaseRenderer,
+ class SpanAllocator, class SpanGenerator>
+ void render_scanline_bin(const Scanline& sl, BaseRenderer& ren,
+ SpanAllocator& alloc, SpanGenerator& span_gen)
+ {
+ const int y = sl.y();
+ typename Scanline::const_iterator span = sl.begin();
+ unsigned num_spans = sl.num_spans();
+
+ while(true)
+ {
+ const int x = span->x;
+ // A negative length denotes a solid run; use its magnitude.
+ const int len = (span->len < 0) ? -span->len : span->len;
+ typename BaseRenderer::color_type* colors = alloc.allocate(len);
+ span_gen.generate(colors, x, y, len);
+ ren.blend_color_hspan(x, y, len, colors, 0, cover_full);
+ if(--num_spans == 0) break;
+ ++span;
+ }
+ }
+
+ //=====================================================render_scanlines_bin
+ // Sweep the rasterizer and render every produced scanline through a
+ // span generator, without anti-aliasing.
+ template<class Rasterizer, class Scanline, class BaseRenderer,
+ class SpanAllocator, class SpanGenerator>
+ void render_scanlines_bin(Rasterizer& ras, Scanline& sl, BaseRenderer& ren,
+ SpanAllocator& alloc, SpanGenerator& span_gen)
+ {
+ if(!ras.rewind_scanlines()) return;
+
+ sl.reset(ras.min_x(), ras.max_x());
+ span_gen.prepare();
+ while(ras.sweep_scanline(sl))
+ {
+ render_scanline_bin(sl, ren, alloc, span_gen);
+ }
+ }
+
+ //====================================================renderer_scanline_bin
+ // Object wrapper around render_scanline_bin(): bundles a base
+ // renderer, a span allocator and a span generator behind the
+ // prepare()/render() interface expected by render_scanlines().
+ template<class BaseRenderer, class SpanAllocator, class SpanGenerator>
+ class renderer_scanline_bin
+ {
+ public:
+ typedef BaseRenderer base_ren_type;
+ typedef SpanAllocator alloc_type;
+ typedef SpanGenerator span_gen_type;
+
+ //--------------------------------------------------------------------
+ renderer_scanline_bin() : m_ren(0), m_alloc(0), m_span_gen(0) {}
+ renderer_scanline_bin(base_ren_type& ren,
+ alloc_type& alloc,
+ span_gen_type& span_gen) :
+ m_ren(&ren),
+ m_alloc(&alloc),
+ m_span_gen(&span_gen)
+ {}
+ void attach(base_ren_type& ren,
+ alloc_type& alloc,
+ span_gen_type& span_gen)
+ {
+ m_ren = &ren;
+ m_alloc = &alloc;
+ m_span_gen = &span_gen;
+ }
+
+ //--------------------------------------------------------------------
+ // Forward frame setup to the span generator.
+ void prepare() { m_span_gen->prepare(); }
+
+ //--------------------------------------------------------------------
+ template<class Scanline> void render(const Scanline& sl)
+ {
+ render_scanline_bin(sl, *m_ren, *m_alloc, *m_span_gen);
+ }
+
+ private:
+ base_ren_type* m_ren; // not owned
+ alloc_type* m_alloc; // not owned
+ span_gen_type* m_span_gen; // not owned
+ };
+
+
+
+
+
+
+
+
+
+
+ //========================================================render_scanlines
+ // Generic scanline rendering loop: rewinds the rasterizer, prepares
+ // the renderer and feeds it every scanline produced by the sweep.
+ template<class Rasterizer, class Scanline, class Renderer>
+ void render_scanlines(Rasterizer& ras, Scanline& sl, Renderer& ren)
+ {
+ if(!ras.rewind_scanlines()) return;
+
+ sl.reset(ras.min_x(), ras.max_x());
+ ren.prepare();
+ while(ras.sweep_scanline(sl))
+ {
+ ren.render(sl);
+ }
+ }
+
+ //========================================================render_all_paths
+ // Rasterize and render a sequence of paths from a vertex source,
+ // one color per path: as[i] colors the path identified by path_id[i].
+ template<class Rasterizer, class Scanline, class Renderer,
+ class VertexSource, class ColorStorage, class PathId>
+ void render_all_paths(Rasterizer& ras,
+ Scanline& sl,
+ Renderer& r,
+ VertexSource& vs,
+ const ColorStorage& as,
+ const PathId& path_id,
+ unsigned num_paths)
+ {
+ unsigned i = 0;
+ while(i < num_paths)
+ {
+ ras.reset();
+ ras.add_path(vs, path_id[i]);
+ r.color(as[i]);
+ render_scanlines(ras, sl, r);
+ ++i;
+ }
+ }
+
+
+
+
+
+
+ //=============================================render_scanlines_compound
+ // Render a compound (multi-style) rasterizer. For each scanline the
+ // rasterizer reports the set of active styles; single-style lines are
+ // rendered directly, while multi-style lines are composited into a
+ // per-scanline "mix_buffer" and emitted once. The allocator must
+ // provide 2*(width+2) colors: one half for generated spans, one half
+ // for the mix buffer.
+ // NOTE(review): memset on color_type assumes it is a plain,
+ // zero-initializable struct — true for AGG's color types.
+ template<class Rasterizer,
+ class ScanlineAA,
+ class ScanlineBin,
+ class BaseRenderer,
+ class SpanAllocator,
+ class StyleHandler>
+ void render_scanlines_compound(Rasterizer& ras,
+ ScanlineAA& sl_aa,
+ ScanlineBin& sl_bin,
+ BaseRenderer& ren,
+ SpanAllocator& alloc,
+ StyleHandler& sh)
+ {
+ if(ras.rewind_scanlines())
+ {
+ int min_x = ras.min_x();
+ int len = ras.max_x() - min_x + 2;
+ sl_aa.reset(min_x, ras.max_x());
+ sl_bin.reset(min_x, ras.max_x());
+
+ typedef typename BaseRenderer::color_type color_type;
+ color_type* color_span = alloc.allocate(len * 2);
+ color_type* mix_buffer = color_span + len;
+ unsigned num_spans;
+
+ unsigned num_styles;
+ unsigned style;
+ bool solid;
+ while((num_styles = ras.sweep_styles()) > 0)
+ {
+ typename ScanlineAA::const_iterator span_aa;
+ if(num_styles == 1)
+ {
+ // Optimization for a single style. Happens often
+ //-------------------------
+ if(ras.sweep_scanline(sl_aa, 0))
+ {
+ style = ras.style(0);
+ if(sh.is_solid(style))
+ {
+ // Just solid fill
+ //-----------------------
+ render_scanline_aa_solid(sl_aa, ren, sh.color(style));
+ }
+ else
+ {
+ // Arbitrary span generator
+ //-----------------------
+ span_aa = sl_aa.begin();
+ num_spans = sl_aa.num_spans();
+ for(;;)
+ {
+ len = span_aa->len;
+ sh.generate_span(color_span,
+ span_aa->x,
+ sl_aa.y(),
+ len,
+ style);
+
+ ren.blend_color_hspan(span_aa->x,
+ sl_aa.y(),
+ span_aa->len,
+ color_span,
+ span_aa->covers);
+ if(--num_spans == 0) break;
+ ++span_aa;
+ }
+ }
+ }
+ }
+ else
+ {
+ // Multi-style scanline: composite all styles into
+ // mix_buffer, then emit the mixed result once.
+ // Sweeping with style index -1 yields the binary union
+ // of all styles on this scanline.
+ if(ras.sweep_scanline(sl_bin, -1))
+ {
+ // Clear the spans of the mix_buffer
+ //--------------------
+ typename ScanlineBin::const_iterator span_bin = sl_bin.begin();
+ num_spans = sl_bin.num_spans();
+ for(;;)
+ {
+ memset(mix_buffer + span_bin->x - min_x,
+ 0,
+ span_bin->len * sizeof(color_type));
+
+ if(--num_spans == 0) break;
+ ++span_bin;
+ }
+
+ unsigned i;
+ for(i = 0; i < num_styles; i++)
+ {
+ style = ras.style(i);
+ solid = sh.is_solid(style);
+
+ if(ras.sweep_scanline(sl_aa, i))
+ {
+ color_type* colors;
+ color_type* cspan;
+ typename ScanlineAA::cover_type* covers;
+ span_aa = sl_aa.begin();
+ num_spans = sl_aa.num_spans();
+ if(solid)
+ {
+ // Just solid fill
+ //-----------------------
+ for(;;)
+ {
+ color_type c = sh.color(style);
+ len = span_aa->len;
+ colors = mix_buffer + span_aa->x - min_x;
+ covers = span_aa->covers;
+ do
+ {
+ // Full cover replaces; partial cover
+ // accumulates into the mix buffer.
+ if(*covers == cover_full)
+ {
+ *colors = c;
+ }
+ else
+ {
+ colors->add(c, *covers);
+ }
+ ++colors;
+ ++covers;
+ }
+ while(--len);
+ if(--num_spans == 0) break;
+ ++span_aa;
+ }
+ }
+ else
+ {
+ // Arbitrary span generator
+ //-----------------------
+ for(;;)
+ {
+ len = span_aa->len;
+ colors = mix_buffer + span_aa->x - min_x;
+ cspan = color_span;
+ sh.generate_span(cspan,
+ span_aa->x,
+ sl_aa.y(),
+ len,
+ style);
+ covers = span_aa->covers;
+ do
+ {
+ if(*covers == cover_full)
+ {
+ *colors = *cspan;
+ }
+ else
+ {
+ colors->add(*cspan, *covers);
+ }
+ ++cspan;
+ ++colors;
+ ++covers;
+ }
+ while(--len);
+ if(--num_spans == 0) break;
+ ++span_aa;
+ }
+ }
+ }
+ }
+
+ // Emit the blended result as a color hspan
+ //-------------------------
+ span_bin = sl_bin.begin();
+ num_spans = sl_bin.num_spans();
+ for(;;)
+ {
+ ren.blend_color_hspan(span_bin->x,
+ sl_bin.y(),
+ span_bin->len,
+ mix_buffer + span_bin->x - min_x,
+ 0,
+ cover_full);
+ if(--num_spans == 0) break;
+ ++span_bin;
+ }
+ } // if(ras.sweep_scanline(sl_bin, -1))
+ } // if(num_styles == 1) ... else
+ } // while((num_styles = ras.sweep_styles()) > 0)
+ } // if(ras.rewind_scanlines())
+ }
+
+ //=======================================render_scanlines_compound_layered
+ // Layered variant of render_scanlines_compound(): styles are blended
+ // in order with a per-pixel accumulated-cover buffer so that total
+ // cover never exceeds cover_full (earlier styles win the remaining
+ // coverage). Uses the rasterizer's scanline_start()/scanline_length()
+ // instead of a binary scanline to find the affected pixel range.
+ // NOTE(review): memset on color_type assumes it is a plain,
+ // zero-initializable struct — true for AGG's color types.
+ template<class Rasterizer,
+ class ScanlineAA,
+ class BaseRenderer,
+ class SpanAllocator,
+ class StyleHandler>
+ void render_scanlines_compound_layered(Rasterizer& ras,
+ ScanlineAA& sl_aa,
+ BaseRenderer& ren,
+ SpanAllocator& alloc,
+ StyleHandler& sh)
+ {
+ if(ras.rewind_scanlines())
+ {
+ int min_x = ras.min_x();
+ int len = ras.max_x() - min_x + 2;
+ sl_aa.reset(min_x, ras.max_x());
+
+ typedef typename BaseRenderer::color_type color_type;
+ color_type* color_span = alloc.allocate(len * 2);
+ color_type* mix_buffer = color_span + len;
+ cover_type* cover_buffer = ras.allocate_cover_buffer(len);
+ unsigned num_spans;
+
+ unsigned num_styles;
+ unsigned style;
+ bool solid;
+ while((num_styles = ras.sweep_styles()) > 0)
+ {
+ typename ScanlineAA::const_iterator span_aa;
+ if(num_styles == 1)
+ {
+ // Optimization for a single style. Happens often
+ //-------------------------
+ if(ras.sweep_scanline(sl_aa, 0))
+ {
+ style = ras.style(0);
+ if(sh.is_solid(style))
+ {
+ // Just solid fill
+ //-----------------------
+ render_scanline_aa_solid(sl_aa, ren, sh.color(style));
+ }
+ else
+ {
+ // Arbitrary span generator
+ //-----------------------
+ span_aa = sl_aa.begin();
+ num_spans = sl_aa.num_spans();
+ for(;;)
+ {
+ len = span_aa->len;
+ sh.generate_span(color_span,
+ span_aa->x,
+ sl_aa.y(),
+ len,
+ style);
+
+ ren.blend_color_hspan(span_aa->x,
+ sl_aa.y(),
+ span_aa->len,
+ color_span,
+ span_aa->covers);
+ if(--num_spans == 0) break;
+ ++span_aa;
+ }
+ }
+ }
+ }
+ else
+ {
+ int sl_start = ras.scanline_start();
+ unsigned sl_len = ras.scanline_length();
+
+ if(sl_len)
+ {
+ // Zero the color and accumulated-cover buffers for
+ // the affected pixel range.
+ memset(mix_buffer + sl_start - min_x,
+ 0,
+ sl_len * sizeof(color_type));
+
+ memset(cover_buffer + sl_start - min_x,
+ 0,
+ sl_len * sizeof(cover_type));
+
+ // sl_y is set by the first successful style sweep;
+ // the sentinel is only used if no sweep succeeds.
+ int sl_y = std::numeric_limits<int>::max();
+ unsigned i;
+ for(i = 0; i < num_styles; i++)
+ {
+ style = ras.style(i);
+ solid = sh.is_solid(style);
+
+ if(ras.sweep_scanline(sl_aa, i))
+ {
+ unsigned cover;
+ color_type* colors;
+ color_type* cspan;
+ cover_type* src_covers;
+ cover_type* dst_covers;
+ span_aa = sl_aa.begin();
+ num_spans = sl_aa.num_spans();
+ sl_y = sl_aa.y();
+ if(solid)
+ {
+ // Just solid fill
+ //-----------------------
+ for(;;)
+ {
+ color_type c = sh.color(style);
+ len = span_aa->len;
+ colors = mix_buffer + span_aa->x - min_x;
+ src_covers = span_aa->covers;
+ dst_covers = cover_buffer + span_aa->x - min_x;
+ do
+ {
+ // Limit this style's cover to the
+ // coverage still unclaimed at this pixel.
+ cover = *src_covers;
+ if(*dst_covers + cover > cover_full)
+ {
+ cover = cover_full - *dst_covers;
+ }
+ if(cover)
+ {
+ colors->add(c, cover);
+ *dst_covers += cover;
+ }
+ ++colors;
+ ++src_covers;
+ ++dst_covers;
+ }
+ while(--len);
+ if(--num_spans == 0) break;
+ ++span_aa;
+ }
+ }
+ else
+ {
+ // Arbitrary span generator
+ //-----------------------
+ for(;;)
+ {
+ len = span_aa->len;
+ colors = mix_buffer + span_aa->x - min_x;
+ cspan = color_span;
+ sh.generate_span(cspan,
+ span_aa->x,
+ sl_aa.y(),
+ len,
+ style);
+ src_covers = span_aa->covers;
+ dst_covers = cover_buffer + span_aa->x - min_x;
+ do
+ {
+ cover = *src_covers;
+ if(*dst_covers + cover > cover_full)
+ {
+ cover = cover_full - *dst_covers;
+ }
+ if(cover)
+ {
+ colors->add(*cspan, cover);
+ *dst_covers += cover;
+ }
+ ++cspan;
+ ++colors;
+ ++src_covers;
+ ++dst_covers;
+ }
+ while(--len);
+ if(--num_spans == 0) break;
+ ++span_aa;
+ }
+ }
+ }
+ }
+ // Emit the composited range in one call.
+ ren.blend_color_hspan(sl_start,
+ sl_y,
+ sl_len,
+ mix_buffer + sl_start - min_x,
+ 0,
+ cover_full);
+ } //if(sl_len)
+ } //if(num_styles == 1) ... else
+ } //while((num_styles = ras.sweep_styles()) > 0)
+ } //if(ras.rewind_scanlines())
+ }
+
+
+}
+
+#endif
diff --git a/src/agg/agg_rendering_buffer.h b/src/agg/agg_rendering_buffer.h
new file mode 100644
index 000000000..0eff6ff27
--- /dev/null
+++ b/src/agg/agg_rendering_buffer.h
@@ -0,0 +1,300 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// class rendering_buffer
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_RENDERING_BUFFER_INCLUDED
+#define AGG_RENDERING_BUFFER_INCLUDED
+
+#include "agg_array.h"
+
+namespace agg
+{
+
+ //===========================================================row_accessor
+ template<class T> class row_accessor
+ {
+ public:
+ typedef const_row_info<T> row_data;
+
+ //-------------------------------------------------------------------
+ row_accessor() :
+ m_buf(0),
+ m_start(0),
+ m_width(0),
+ m_height(0),
+ m_stride(0)
+ {
+ }
+
+ //--------------------------------------------------------------------
+ row_accessor(T* buf, unsigned width, unsigned height, int stride) :
+ m_buf(0),
+ m_start(0),
+ m_width(0),
+ m_height(0),
+ m_stride(0)
+ {
+ attach(buf, width, height, stride);
+ }
+
+
+ //--------------------------------------------------------------------
+ void attach(T* buf, unsigned width, unsigned height, int stride)
+ {
+ m_buf = m_start = buf;
+ m_width = width;
+ m_height = height;
+ m_stride = stride;
+ if(stride < 0)
+ {
+ m_start = m_buf - int(height - 1) * stride;
+ }
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE T* buf() { return m_buf; }
+ AGG_INLINE const T* buf() const { return m_buf; }
+ AGG_INLINE unsigned width() const { return m_width; }
+ AGG_INLINE unsigned height() const { return m_height; }
+ AGG_INLINE int stride() const { return m_stride; }
+ AGG_INLINE unsigned stride_abs() const
+ {
+ return (m_stride < 0) ? unsigned(-m_stride) : unsigned(m_stride);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE T* row_ptr(int, int y, unsigned)
+ {
+ return m_start + y * m_stride;
+ }
+ AGG_INLINE T* row_ptr(int y) { return m_start + y * m_stride; }
+ AGG_INLINE const T* row_ptr(int y) const { return m_start + y * m_stride; }
+ AGG_INLINE row_data row (int y) const
+ {
+ return row_data(0, m_width-1, row_ptr(y));
+ }
+
+ //--------------------------------------------------------------------
+ template<class RenBuf>
+ void copy_from(const RenBuf& src)
+ {
+ unsigned h = height();
+ if(src.height() < h) h = src.height();
+
+ unsigned l = stride_abs();
+ if(src.stride_abs() < l) l = src.stride_abs();
+
+ l *= sizeof(T);
+
+ unsigned y;
+ unsigned w = width();
+ for (y = 0; y < h; y++)
+ {
+ memcpy(row_ptr(0, y, w), src.row_ptr(y), l);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void clear(T value)
+ {
+ unsigned y;
+ unsigned w = width();
+ unsigned stride = stride_abs();
+ for(y = 0; y < height(); y++)
+ {
+ T* p = row_ptr(0, y, w);
+ unsigned x;
+ for(x = 0; x < stride; x++)
+ {
+ *p++ = value;
+ }
+ }
+ }
+
+ private:
+ //--------------------------------------------------------------------
+ T* m_buf; // Pointer to rendering buffer
+ T* m_start; // Pointer to first pixel depending on stride
+ unsigned m_width; // Width in pixels
+ unsigned m_height; // Height in pixels
+ int m_stride; // Number of bytes per row. Can be < 0
+ };
+
+
+
+
+ //==========================================================row_ptr_cache
+ template<class T> class row_ptr_cache
+ {
+ public:
+ typedef const_row_info<T> row_data;
+
+ //-------------------------------------------------------------------
+ row_ptr_cache() :
+ m_buf(0),
+ m_rows(),
+ m_width(0),
+ m_height(0),
+ m_stride(0)
+ {
+ }
+
+ //--------------------------------------------------------------------
+ row_ptr_cache(T* buf, unsigned width, unsigned height, int stride) :
+ m_buf(0),
+ m_rows(),
+ m_width(0),
+ m_height(0),
+ m_stride(0)
+ {
+ attach(buf, width, height, stride);
+ }
+
+ //--------------------------------------------------------------------
+ void attach(T* buf, unsigned width, unsigned height, int stride)
+ {
+ m_buf = buf;
+ m_width = width;
+ m_height = height;
+ m_stride = stride;
+ if(height > m_rows.size())
+ {
+ m_rows.resize(height);
+ }
+
+ T* row_ptr = m_buf;
+
+ if(stride < 0)
+ {
+ row_ptr = m_buf - int(height - 1) * stride;
+ }
+
+ T** rows = &m_rows[0];
+
+ while(height--)
+ {
+ *rows++ = row_ptr;
+ row_ptr += stride;
+ }
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE T* buf() { return m_buf; }
+ AGG_INLINE const T* buf() const { return m_buf; }
+ AGG_INLINE unsigned width() const { return m_width; }
+ AGG_INLINE unsigned height() const { return m_height; }
+ AGG_INLINE int stride() const { return m_stride; }
+ AGG_INLINE unsigned stride_abs() const
+ {
+ return (m_stride < 0) ? unsigned(-m_stride) : unsigned(m_stride);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE T* row_ptr(int, int y, unsigned)
+ {
+ return m_rows[y];
+ }
+ AGG_INLINE T* row_ptr(int y) { return m_rows[y]; }
+ AGG_INLINE const T* row_ptr(int y) const { return m_rows[y]; }
+ AGG_INLINE row_data row (int y) const
+ {
+ return row_data(0, m_width-1, m_rows[y]);
+ }
+
+ //--------------------------------------------------------------------
+ T const* const* rows() const { return &m_rows[0]; }
+
+ //--------------------------------------------------------------------
+ template<class RenBuf>
+ void copy_from(const RenBuf& src)
+ {
+ unsigned h = height();
+ if(src.height() < h) h = src.height();
+
+ unsigned l = stride_abs();
+ if(src.stride_abs() < l) l = src.stride_abs();
+
+ l *= sizeof(T);
+
+ unsigned y;
+ unsigned w = width();
+ for (y = 0; y < h; y++)
+ {
+ memcpy(row_ptr(0, y, w), src.row_ptr(y), l);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void clear(T value)
+ {
+ unsigned y;
+ unsigned w = width();
+ unsigned stride = stride_abs();
+ for(y = 0; y < height(); y++)
+ {
+ T* p = row_ptr(0, y, w);
+ unsigned x;
+ for(x = 0; x < stride; x++)
+ {
+ *p++ = value;
+ }
+ }
+ }
+
+ private:
+ //--------------------------------------------------------------------
+ T* m_buf; // Pointer to rendering buffer
+ pod_array<T*> m_rows; // Pointers to each row of the buffer
+ unsigned m_width; // Width in pixels
+ unsigned m_height; // Height in pixels
+ int m_stride; // Number of bytes per row. Can be < 0
+ };
+
+
+
+
+ //========================================================rendering_buffer
+ //
+ // The definition of the main type for accessing the rows in the frame
+ // buffer. It provides functionality to navigate to the rows in a
+ // rectangular matrix, from top to bottom or from bottom to top depending
+ // on stride.
+ //
+ // row_accessor is cheap to create/destroy, but performs one multiplication
+ // when calling row_ptr().
+ //
+ // row_ptr_cache creates an array of pointers to rows, so, the access
+ // via row_ptr() may be faster. But it requires memory allocation
+ // when creating. For example, on typical Intel Pentium hardware
+ // row_ptr_cache speeds span_image_filter_rgb_nn up to 10%
+ //
+ // It's used only in short hand typedefs like pixfmt_rgba32 and can be
+ // redefined in agg_config.h
+ // In real applications you can use both, depending on your needs
+ //------------------------------------------------------------------------
+#ifdef AGG_RENDERING_BUFFER
+ typedef AGG_RENDERING_BUFFER rendering_buffer;
+#else
+// typedef row_ptr_cache<int8u> rendering_buffer;
+ typedef row_accessor<int8u> rendering_buffer;
+#endif
+
+}
+
+
+#endif
diff --git a/src/agg/agg_scanline_p.h b/src/agg/agg_scanline_p.h
new file mode 100644
index 000000000..1d1cbe72f
--- /dev/null
+++ b/src/agg/agg_scanline_p.h
@@ -0,0 +1,329 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Class scanline_p - a general purpose scanline container with packed spans.
+//
+//----------------------------------------------------------------------------
+//
+// Adaptation for 32-bit screen coordinates (scanline32_p) has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_SCANLINE_P_INCLUDED
+#define AGG_SCANLINE_P_INCLUDED
+
+#include "agg_array.h"
+
+namespace agg
+{
+
+ //=============================================================scanline_p8
+ //
+ // This is a general purpose scanline container which supports the interface
+ // used in the rasterizer::render(). See description of scanline_u8
+ // for details.
+ //
+ //------------------------------------------------------------------------
+ class scanline_p8
+ {
+ public:
+ typedef scanline_p8 self_type;
+ typedef int8u cover_type;
+ typedef int16 coord_type;
+
+ //--------------------------------------------------------------------
+ struct span
+ {
+ coord_type x;
+ coord_type len; // If negative, it's a solid span, covers is valid
+ const cover_type* covers;
+ };
+
+ typedef span* iterator;
+ typedef const span* const_iterator;
+
+ scanline_p8() :
+ m_last_x(0x7FFFFFF0),
+ m_covers(),
+ m_cover_ptr(0),
+ m_spans(),
+ m_cur_span(0)
+ {
+ }
+
+ //--------------------------------------------------------------------
+ void reset(int min_x, int max_x)
+ {
+ unsigned max_len = max_x - min_x + 3;
+ if(max_len > m_spans.size())
+ {
+ m_spans.resize(max_len);
+ m_covers.resize(max_len);
+ }
+ m_last_x = 0x7FFFFFF0;
+ m_cover_ptr = &m_covers[0];
+ m_cur_span = &m_spans[0];
+ m_cur_span->len = 0;
+ }
+
+ //--------------------------------------------------------------------
+ void add_cell(int x, unsigned cover)
+ {
+ *m_cover_ptr = (cover_type)cover;
+ if(x == m_last_x+1 && m_cur_span->len > 0)
+ {
+ m_cur_span->len++;
+ }
+ else
+ {
+ m_cur_span++;
+ m_cur_span->covers = m_cover_ptr;
+ m_cur_span->x = (int16)x;
+ m_cur_span->len = 1;
+ }
+ m_last_x = x;
+ m_cover_ptr++;
+ }
+
+ //--------------------------------------------------------------------
+ void add_cells(int x, unsigned len, const cover_type* covers)
+ {
+ memcpy(m_cover_ptr, covers, len * sizeof(cover_type));
+ if(x == m_last_x+1 && m_cur_span->len > 0)
+ {
+ m_cur_span->len += (int16)len;
+ }
+ else
+ {
+ m_cur_span++;
+ m_cur_span->covers = m_cover_ptr;
+ m_cur_span->x = (int16)x;
+ m_cur_span->len = (int16)len;
+ }
+ m_cover_ptr += len;
+ m_last_x = x + len - 1;
+ }
+
+ //--------------------------------------------------------------------
+ void add_span(int x, unsigned len, unsigned cover)
+ {
+ if(x == m_last_x+1 &&
+ m_cur_span->len < 0 &&
+ cover == *m_cur_span->covers)
+ {
+ m_cur_span->len -= (int16)len;
+ }
+ else
+ {
+ *m_cover_ptr = (cover_type)cover;
+ m_cur_span++;
+ m_cur_span->covers = m_cover_ptr++;
+ m_cur_span->x = (int16)x;
+ m_cur_span->len = (int16)(-int(len));
+ }
+ m_last_x = x + len - 1;
+ }
+
+ //--------------------------------------------------------------------
+ void finalize(int y)
+ {
+ m_y = y;
+ }
+
+ //--------------------------------------------------------------------
+ void reset_spans()
+ {
+ m_last_x = 0x7FFFFFF0;
+ m_cover_ptr = &m_covers[0];
+ m_cur_span = &m_spans[0];
+ m_cur_span->len = 0;
+ }
+
+ //--------------------------------------------------------------------
+ int y() const { return m_y; }
+ unsigned num_spans() const { return unsigned(m_cur_span - &m_spans[0]); }
+ const_iterator begin() const { return &m_spans[1]; }
+
+ private:
+ scanline_p8(const self_type&);
+ const self_type& operator = (const self_type&);
+
+ int m_last_x;
+ int m_y;
+ pod_array<cover_type> m_covers;
+ cover_type* m_cover_ptr;
+ pod_array<span> m_spans;
+ span* m_cur_span;
+ };
+
+
+
+
+
+
+
+
+ //==========================================================scanline32_p8
+ class scanline32_p8
+ {
+ public:
+ typedef scanline32_p8 self_type;
+ typedef int8u cover_type;
+ typedef int32 coord_type;
+
+ struct span
+ {
+ span() {}
+ span(coord_type x_, coord_type len_, const cover_type* covers_) :
+ x(x_), len(len_), covers(covers_) {}
+
+ coord_type x;
+ coord_type len; // If negative, it's a solid span, covers is valid
+ const cover_type* covers;
+ };
+ typedef pod_bvector<span, 4> span_array_type;
+
+
+ //--------------------------------------------------------------------
+ class const_iterator
+ {
+ public:
+ const_iterator(const span_array_type& spans) :
+ m_spans(spans),
+ m_span_idx(0)
+ {}
+
+ const span& operator*() const { return m_spans[m_span_idx]; }
+ const span* operator->() const { return &m_spans[m_span_idx]; }
+
+ void operator ++ () { ++m_span_idx; }
+
+ private:
+ const span_array_type& m_spans;
+ unsigned m_span_idx;
+ };
+
+ //--------------------------------------------------------------------
+ scanline32_p8() :
+ m_max_len(0),
+ m_last_x(0x7FFFFFF0),
+ m_covers(),
+ m_cover_ptr(0)
+ {
+ }
+
+ //--------------------------------------------------------------------
+ void reset(int min_x, int max_x)
+ {
+ unsigned max_len = max_x - min_x + 3;
+ if(max_len > m_covers.size())
+ {
+ m_covers.resize(max_len);
+ }
+ m_last_x = 0x7FFFFFF0;
+ m_cover_ptr = &m_covers[0];
+ m_spans.remove_all();
+ }
+
+ //--------------------------------------------------------------------
+ void add_cell(int x, unsigned cover)
+ {
+ *m_cover_ptr = cover_type(cover);
+ if(x == m_last_x+1 && m_spans.size() && m_spans.last().len > 0)
+ {
+ m_spans.last().len++;
+ }
+ else
+ {
+ m_spans.add(span(coord_type(x), 1, m_cover_ptr));
+ }
+ m_last_x = x;
+ m_cover_ptr++;
+ }
+
+ //--------------------------------------------------------------------
+ void add_cells(int x, unsigned len, const cover_type* covers)
+ {
+ memcpy(m_cover_ptr, covers, len * sizeof(cover_type));
+ if(x == m_last_x+1 && m_spans.size() && m_spans.last().len > 0)
+ {
+ m_spans.last().len += coord_type(len);
+ }
+ else
+ {
+ m_spans.add(span(coord_type(x), coord_type(len), m_cover_ptr));
+ }
+ m_cover_ptr += len;
+ m_last_x = x + len - 1;
+ }
+
+ //--------------------------------------------------------------------
+ void add_span(int x, unsigned len, unsigned cover)
+ {
+ if(x == m_last_x+1 &&
+ m_spans.size() &&
+ m_spans.last().len < 0 &&
+ cover == *m_spans.last().covers)
+ {
+ m_spans.last().len -= coord_type(len);
+ }
+ else
+ {
+ *m_cover_ptr = cover_type(cover);
+ m_spans.add(span(coord_type(x), -coord_type(len), m_cover_ptr++));
+ }
+ m_last_x = x + len - 1;
+ }
+
+ //--------------------------------------------------------------------
+ void finalize(int y)
+ {
+ m_y = y;
+ }
+
+ //--------------------------------------------------------------------
+ void reset_spans()
+ {
+ m_last_x = 0x7FFFFFF0;
+ m_cover_ptr = &m_covers[0];
+ m_spans.remove_all();
+ }
+
+ //--------------------------------------------------------------------
+ int y() const { return m_y; }
+ unsigned num_spans() const { return m_spans.size(); }
+ const_iterator begin() const { return const_iterator(m_spans); }
+
+ private:
+ scanline32_p8(const self_type&);
+ const self_type& operator = (const self_type&);
+
+ unsigned m_max_len;
+ int m_last_x;
+ int m_y;
+ pod_array<cover_type> m_covers;
+ cover_type* m_cover_ptr;
+ span_array_type m_spans;
+ };
+
+
+}
+
+
+#endif
+
diff --git a/src/agg/agg_trans_affine.h b/src/agg/agg_trans_affine.h
new file mode 100644
index 000000000..1a6116388
--- /dev/null
+++ b/src/agg/agg_trans_affine.h
@@ -0,0 +1,518 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Affine transformation classes.
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_TRANS_AFFINE_INCLUDED
+#define AGG_TRANS_AFFINE_INCLUDED
+
+#include <math.h>
+#include "agg_basics.h"
+
+namespace agg
+{
+ const double affine_epsilon = 1e-14;
+
+ //============================================================trans_affine
+ //
+ // See Implementation agg_trans_affine.cpp
+ //
+ // Affine transformation are linear transformations in Cartesian coordinates
+ // (strictly speaking not only in Cartesian, but for the beginning we will
+ // think so). They are rotation, scaling, translation and skewing.
+ // After any affine transformation a line segment remains a line segment
+ // and it will never become a curve.
+ //
+ // There will be no math about matrix calculations, since it has been
+ // described many times. Ask yourself a very simple question:
+ // "why do we need to understand and use some matrix stuff instead of just
+ // rotating, scaling and so on". The answers are:
+ //
+ // 1. Any combination of transformations can be done by only 4 multiplications
+ // and 4 additions in floating point.
+ // 2. One matrix transformation is equivalent to the number of consecutive
+ // discrete transformations, i.e. the matrix "accumulates" all transformations
+ // in the order of their settings. Suppose we have 4 transformations:
+ // * rotate by 30 degrees,
+ // * scale X to 2.0,
+ // * scale Y to 1.5,
+ // * move to (100, 100).
+ // The result will depend on the order of these transformations,
+ // and the advantage of matrix is that the sequence of discrete calls:
+ // rotate(30), scaleX(2.0), scaleY(1.5), move(100,100)
+ // will have exactly the same result as the following matrix transformations:
+ //
+ // affine_matrix m;
+ // m *= rotate_matrix(30);
+ // m *= scaleX_matrix(2.0);
+ // m *= scaleY_matrix(1.5);
+ // m *= move_matrix(100,100);
+ //
+ // m.transform_my_point_at_last(x, y);
+ //
+ // What is the good of it? In real life we will set-up the matrix only once
+ // and then transform many points, let alone the convenience to set any
+ // combination of transformations.
+ //
+ // So, how to use it? Very easy - literally as it's shown above. Not quite,
+ // let us write a correct example:
+ //
+ // agg::trans_affine m;
+ // m *= agg::trans_affine_rotation(30.0 * 3.1415926 / 180.0);
+ // m *= agg::trans_affine_scaling(2.0, 1.5);
+ // m *= agg::trans_affine_translation(100.0, 100.0);
+ // m.transform(&x, &y);
+ //
+ // The affine matrix is all you need to perform any linear transformation,
+ // but all transformations have origin point (0,0). It means that we need to
+ // use 2 translations if we want to rotate something around (100,100):
+ //
+ // m *= agg::trans_affine_translation(-100.0, -100.0); // move to (0,0)
+ // m *= agg::trans_affine_rotation(30.0 * 3.1415926 / 180.0); // rotate
+ // m *= agg::trans_affine_translation(100.0, 100.0); // move back to (100,100)
+ //----------------------------------------------------------------------
+ struct trans_affine
+ {
+ double sx, shy, shx, sy, tx, ty;
+
+ //------------------------------------------ Construction
+ // Identity matrix
+ trans_affine() :
+ sx(1.0), shy(0.0), shx(0.0), sy(1.0), tx(0.0), ty(0.0)
+ {}
+
+ // Custom matrix. Usually used in derived classes
+ trans_affine(double v0, double v1, double v2,
+ double v3, double v4, double v5) :
+ sx(v0), shy(v1), shx(v2), sy(v3), tx(v4), ty(v5)
+ {}
+
+ // Custom matrix from m[6]
+ explicit trans_affine(const double* m) :
+ sx(m[0]), shy(m[1]), shx(m[2]), sy(m[3]), tx(m[4]), ty(m[5])
+ {}
+
+ // Rectangle to a parallelogram.
+ trans_affine(double x1, double y1, double x2, double y2,
+ const double* parl)
+ {
+ rect_to_parl(x1, y1, x2, y2, parl);
+ }
+
+ // Parallelogram to a rectangle.
+ trans_affine(const double* parl,
+ double x1, double y1, double x2, double y2)
+ {
+ parl_to_rect(parl, x1, y1, x2, y2);
+ }
+
+ // Arbitrary parallelogram transformation.
+ trans_affine(const double* src, const double* dst)
+ {
+ parl_to_parl(src, dst);
+ }
+
+ //---------------------------------- Parallelogram transformations
+ // transform a parallelogram to another one. Src and dst are
+ // pointers to arrays of three points (double[6], x1,y1,...) that
+ // identify three corners of the parallelograms assuming implicit
+ // fourth point. The arguments are arrays of double[6] mapped
+ // to x1,y1, x2,y2, x3,y3 where the coordinates are:
+ // *-----------------*
+ // / (x3,y3)/
+ // / /
+ // /(x1,y1) (x2,y2)/
+ // *-----------------*
+ const trans_affine& parl_to_parl(const double* src,
+ const double* dst);
+
+ const trans_affine& rect_to_parl(double x1, double y1,
+ double x2, double y2,
+ const double* parl);
+
+ const trans_affine& parl_to_rect(const double* parl,
+ double x1, double y1,
+ double x2, double y2);
+
+
+ //------------------------------------------ Operations
+ // Reset - load an identity matrix
+ const trans_affine& reset();
+
+ // Direct transformations operations
+ const trans_affine& translate(double x, double y);
+ const trans_affine& rotate(double a);
+ const trans_affine& scale(double s);
+ const trans_affine& scale(double x, double y);
+
+ // Multiply matrix to another one
+ const trans_affine& multiply(const trans_affine& m);
+
+ // Multiply "m" to "this" and assign the result to "this"
+ const trans_affine& premultiply(const trans_affine& m);
+
+ // Multiply matrix to inverse of another one
+ const trans_affine& multiply_inv(const trans_affine& m);
+
+ // Multiply inverse of "m" to "this" and assign the result to "this"
+ const trans_affine& premultiply_inv(const trans_affine& m);
+
+ // Invert matrix. Do not try to invert degenerate matrices,
+ // there's no check for validity. If you set scale to 0 and
+ // then try to invert matrix, expect unpredictable result.
+ const trans_affine& invert();
+
+ // Mirroring around X
+ const trans_affine& flip_x();
+
+ // Mirroring around Y
+ const trans_affine& flip_y();
+
+ //------------------------------------------- Load/Store
+ // Store matrix to an array [6] of double
+ void store_to(double* m) const
+ {
+ *m++ = sx; *m++ = shy; *m++ = shx; *m++ = sy; *m++ = tx; *m++ = ty;
+ }
+
+ // Load matrix from an array [6] of double
+ const trans_affine& load_from(const double* m)
+ {
+ sx = *m++; shy = *m++; shx = *m++; sy = *m++; tx = *m++; ty = *m++;
+ return *this;
+ }
+
+ //------------------------------------------- Operators
+
+ // Multiply the matrix by another one
+ const trans_affine& operator *= (const trans_affine& m)
+ {
+ return multiply(m);
+ }
+
+ // Multiply the matrix by inverse of another one
+ const trans_affine& operator /= (const trans_affine& m)
+ {
+ return multiply_inv(m);
+ }
+
+ // Multiply the matrix by another one and return
+ // the result in a separate matrix.
+ trans_affine operator * (const trans_affine& m) const
+ {
+ return trans_affine(*this).multiply(m);
+ }
+
+ // Multiply the matrix by inverse of another one
+ // and return the result in a separate matrix.
+ trans_affine operator / (const trans_affine& m) const
+ {
+ return trans_affine(*this).multiply_inv(m);
+ }
+
+ // Calculate and return the inverse matrix
+ trans_affine operator ~ () const
+ {
+ trans_affine ret = *this;
+ return ret.invert();
+ }
+
+ // Equal operator with default epsilon
+ bool operator == (const trans_affine& m) const
+ {
+ return is_equal(m, affine_epsilon);
+ }
+
+ // Not Equal operator with default epsilon
+ bool operator != (const trans_affine& m) const
+ {
+ return !is_equal(m, affine_epsilon);
+ }
+
+ //-------------------------------------------- Transformations
+ // Direct transformation of x and y
+ void transform(double* x, double* y) const;
+
+ // Direct transformation of x and y, 2x2 matrix only, no translation
+ void transform_2x2(double* x, double* y) const;
+
+ // Inverse transformation of x and y. It works slower than the
+ // direct transformation. For massive operations it's better to
+ // invert() the matrix and then use direct transformations.
+ void inverse_transform(double* x, double* y) const;
+
+ //-------------------------------------------- Auxiliary
+ // Calculate the determinant of matrix
+ double determinant() const
+ {
+ return sx * sy - shy * shx;
+ }
+
+ // Calculate the reciprocal of the determinant
+ double determinant_reciprocal() const
+ {
+ return 1.0 / (sx * sy - shy * shx);
+ }
+
+ // Get the average scale (by X and Y).
+ // Basically used to calculate the approximation_scale when
+ // decomposing curves into line segments.
+ double scale() const;
+
+ // Check to see if the matrix is not degenerate
+ bool is_valid(double epsilon = affine_epsilon) const;
+
+ // Check to see if it's an identity matrix
+ bool is_identity(double epsilon = affine_epsilon) const;
+
+ // Check to see if two matrices are equal
+ bool is_equal(const trans_affine& m, double epsilon = affine_epsilon) const;
+
+ // Determine the major parameters. Use with caution considering
+ // possible degenerate cases.
+ double rotation() const;
+ void translation(double* dx, double* dy) const;
+ void scaling(double* x, double* y) const;
+ void scaling_abs(double* x, double* y) const;
+ };
+
+ //------------------------------------------------------------------------
+ inline void trans_affine::transform(double* x, double* y) const
+ {
+ double tmp = *x;
+ *x = tmp * sx + *y * shx + tx;
+ *y = tmp * shy + *y * sy + ty;
+ }
+
+ //------------------------------------------------------------------------
+ inline void trans_affine::transform_2x2(double* x, double* y) const
+ {
+ double tmp = *x;
+ *x = tmp * sx + *y * shx;
+ *y = tmp * shy + *y * sy;
+ }
+
+ //------------------------------------------------------------------------
+ inline void trans_affine::inverse_transform(double* x, double* y) const
+ {
+ double d = determinant_reciprocal();
+ double a = (*x - tx) * d;
+ double b = (*y - ty) * d;
+ *x = a * sy - b * shx;
+ *y = b * sx - a * shy;
+ }
+
+ //------------------------------------------------------------------------
+ inline double trans_affine::scale() const
+ {
+ double x = 0.707106781 * sx + 0.707106781 * shx;
+ double y = 0.707106781 * shy + 0.707106781 * sy;
+ return sqrt(x*x + y*y);
+ }
+
+ //------------------------------------------------------------------------
+ inline const trans_affine& trans_affine::translate(double x, double y)
+ {
+ tx += x;
+ ty += y;
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ inline const trans_affine& trans_affine::rotate(double a)
+ {
+ double ca = cos(a);
+ double sa = sin(a);
+ double t0 = sx * ca - shy * sa;
+ double t2 = shx * ca - sy * sa;
+ double t4 = tx * ca - ty * sa;
+ shy = sx * sa + shy * ca;
+ sy = shx * sa + sy * ca;
+ ty = tx * sa + ty * ca;
+ sx = t0;
+ shx = t2;
+ tx = t4;
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ inline const trans_affine& trans_affine::scale(double x, double y)
+ {
+ double mm0 = x; // Possible hint for the optimizer
+ double mm3 = y;
+ sx *= mm0;
+ shx *= mm0;
+ tx *= mm0;
+ shy *= mm3;
+ sy *= mm3;
+ ty *= mm3;
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ inline const trans_affine& trans_affine::scale(double s)
+ {
+ double m = s; // Possible hint for the optimizer
+ sx *= m;
+ shx *= m;
+ tx *= m;
+ shy *= m;
+ sy *= m;
+ ty *= m;
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ inline const trans_affine& trans_affine::premultiply(const trans_affine& m)
+ {
+ trans_affine t = m;
+ return *this = t.multiply(*this);
+ }
+
+ //------------------------------------------------------------------------
+ inline const trans_affine& trans_affine::multiply_inv(const trans_affine& m)
+ {
+ trans_affine t = m;
+ t.invert();
+ return multiply(t);
+ }
+
+ //------------------------------------------------------------------------
+ inline const trans_affine& trans_affine::premultiply_inv(const trans_affine& m)
+ {
+ trans_affine t = m;
+ t.invert();
+ return *this = t.multiply(*this);
+ }
+
+ //------------------------------------------------------------------------
+ inline void trans_affine::scaling_abs(double* x, double* y) const
+ {
+ // Used to calculate scaling coefficients in image resampling.
+ // When there is considerable shear this method gives us much
+ // better estimation than just sx, sy.
+ *x = sqrt(sx * sx + shx * shx);
+ *y = sqrt(shy * shy + sy * sy);
+ }
+
+ //====================================================trans_affine_rotation
+ // Rotation matrix. sin() and cos() are calculated twice for the same angle.
+ // There's no harm because the performance of sin()/cos() is very good on all
+ // modern processors. Besides, this operation is not going to be invoked too
+ // often.
+ class trans_affine_rotation : public trans_affine
+ {
+ public:
+ trans_affine_rotation(double a) :
+ trans_affine(cos(a), sin(a), -sin(a), cos(a), 0.0, 0.0)
+ {}
+ };
+
+ //====================================================trans_affine_scaling
+ // Scaling matrix. x, y - scale coefficients by X and Y respectively
+ class trans_affine_scaling : public trans_affine
+ {
+ public:
+ trans_affine_scaling(double x, double y) :
+ trans_affine(x, 0.0, 0.0, y, 0.0, 0.0)
+ {}
+
+ trans_affine_scaling(double s) :
+ trans_affine(s, 0.0, 0.0, s, 0.0, 0.0)
+ {}
+ };
+
+ //================================================trans_affine_translation
+ // Translation matrix
+ class trans_affine_translation : public trans_affine
+ {
+ public:
+ trans_affine_translation(double x, double y) :
+ trans_affine(1.0, 0.0, 0.0, 1.0, x, y)
+ {}
+ };
+
+ //====================================================trans_affine_skewing
+ // Skewing (shear) matrix
+ class trans_affine_skewing : public trans_affine
+ {
+ public:
+ trans_affine_skewing(double x, double y) :
+ trans_affine(1.0, tan(y), tan(x), 1.0, 0.0, 0.0)
+ {}
+ };
+
+
+ //===============================================trans_affine_line_segment
+ // Rotate, Scale and Translate, associating 0...dist with line segment
+ // x1,y1,x2,y2
+ class trans_affine_line_segment : public trans_affine
+ {
+ public:
+ trans_affine_line_segment(double x1, double y1, double x2, double y2,
+ double dist)
+ {
+ double dx = x2 - x1;
+ double dy = y2 - y1;
+ if(dist > 0.0)
+ {
+ multiply(trans_affine_scaling(sqrt(dx * dx + dy * dy) / dist));
+ }
+ multiply(trans_affine_rotation(atan2(dy, dx)));
+ multiply(trans_affine_translation(x1, y1));
+ }
+ };
+
+
+ //============================================trans_affine_reflection_unit
+ // Reflection matrix. Reflect coordinates across the line through
+ // the origin containing the unit vector (ux, uy).
+ // Contributed by John Horigan
+ class trans_affine_reflection_unit : public trans_affine
+ {
+ public:
+ trans_affine_reflection_unit(double ux, double uy) :
+ trans_affine(2.0 * ux * ux - 1.0,
+ 2.0 * ux * uy,
+ 2.0 * ux * uy,
+ 2.0 * uy * uy - 1.0,
+ 0.0, 0.0)
+ {}
+ };
+
+
+ //=================================================trans_affine_reflection
+ // Reflection matrix. Reflect coordinates across the line through
+ // the origin at the angle a or containing the non-unit vector (x, y).
+ // Contributed by John Horigan
+ class trans_affine_reflection : public trans_affine_reflection_unit
+ {
+ public:
+ trans_affine_reflection(double a) :
+ trans_affine_reflection_unit(cos(a), sin(a))
+ {}
+
+
+ trans_affine_reflection(double x, double y) :
+ trans_affine_reflection_unit(x / sqrt(x * x + y * y), y / sqrt(x * x + y * y))
+ {}
+ };
+
+}
+
+
+#endif
+
diff --git a/src/agg/copying b/src/agg/copying
new file mode 100644
index 000000000..b6028e519
--- /dev/null
+++ b/src/agg/copying
@@ -0,0 +1,65 @@
+The Anti-Grain Geometry Project
+A high quality rendering engine for C++
+http://antigrain.com
+
+Anti-Grain Geometry has dual licensing model. The Modified BSD
+License was first added in version v2.4 just for convenience.
+It is a simple, permissive non-copyleft free software license,
+compatible with the GNU GPL. It's well proven and recognizable.
+See http://www.fsf.org/licensing/licenses/index_html#ModifiedBSD
+for details.
+
+Note that the Modified BSD license DOES NOT restrict your rights
+if you choose the Anti-Grain Geometry Public License.
+
+
+
+
+Anti-Grain Geometry Public License
+====================================================
+
+Anti-Grain Geometry - Version 2.4
+Copyright (C) 2002-2005 Maxim Shemanarev (McSeem)
+
+Permission to copy, use, modify, sell and distribute this software
+is granted provided this copyright notice appears in all copies.
+This software is provided "as is" without express or implied
+warranty, and with no claim as to its suitability for any purpose.
+
+
+
+
+
+Modified BSD License
+====================================================
+Anti-Grain Geometry - Version 2.4
+Copyright (C) 2002-2005 Maxim Shemanarev (McSeem)
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ 3. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior
+ written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+