github.com/torch/image.git

author    Soumith Chintala <soumith@gmail.com>  2015-02-17 14:59:27 +0300
committer Soumith Chintala <soumith@gmail.com>  2015-02-17 14:59:27 +0300
commit    8122c997ff4010238d125633236ffe62258f7de7
tree      c6dbe52c04b66e58faced21f5b45f616c07c0bb6
parent    22b6255fa9864a2b1450dd6dbcf33ee6c79a737b
parent    04ecd583996bade2857ada9fa13152fc7317b619

Merge pull request #48 from Moodstocks/lab

added sRGB <-> Lab conversion
 generic/image.c                | 170
 image.c                        |  20
 init.lua                       | 245
 test/test_conversion.lua (new) |  29
 4 files changed, 311 insertions, 153 deletions
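
The merge exposes the new conversions to Lua as image.rgb2lab and image.lab2rgb. A minimal usage sketch, assuming the package is built from this commit and lena.jpg (shipped with the repository) is in the working directory:

   require 'image'

   -- loadJPG returns a 3xHxW tensor with values in [0, 1]
   local img = image.loadJPG('lena.jpg')

   -- forward and inverse conversion; L lies in [0, 100], a and b are not confined to [0, 1]
   local lab = image.rgb2lab(img)
   local rgb = image.lab2rgb(lab)

   -- the round trip should reproduce the input up to floating-point error
   print((img - rgb):abs():max())
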
diff --git a/generic/image.c b/generic/image.c
index a316b2d..dfccfa2 100644
--- a/generic/image.c
+++ b/generic/image.c
@@ -993,7 +993,7 @@ static int image_(Main_translate)(lua_State *L)
if( Tdst->nDimension==3 && ( src_depth!=dst_depth) )
luaL_error(L, "image.translate: src and dst depths do not match");
-
+
for(j = 0; j < src_height; j++) {
for(i = 0; i < src_width; i++) {
long ii=i+shiftx;
@@ -1054,7 +1054,7 @@ int image_(Main_rgb2hsl)(lua_State *L) {
if (mx == r) {
h = (g - b) / d + (g < b ? 6 : 0);
} else if (mx == g) {
- h = (b - r) / d + 2;
+ h = (b - r) / d + 2;
} else {
h = (r - g) / d + 4;
}
@@ -1074,14 +1074,14 @@ int image_(Main_rgb2hsl)(lua_State *L) {
static inline real image_(hue2rgb)(real p, real q, real t) {
if (t < 0.) t += 1;
if (t > 1.) t -= 1;
- if (t < 1./6)
+ if (t < 1./6)
return p + (q - p) * 6. * t;
- else if (t < 1./2)
+ else if (t < 1./2)
return q;
- else if (t < 2./3)
+ else if (t < 2./3)
return p + (q - p) * (2./3 - t) * 6.;
else
- return p;
+ return p;
}
/*
@@ -1161,7 +1161,7 @@ int image_(Main_rgb2hsv)(lua_State *L) {
if (mx == r) {
h = (g - b) / d + (g < b ? 6 : 0);
} else if (mx == g) {
- h = (b - r) / d + 2;
+ h = (b - r) / d + 2;
} else {
h = (r - g) / d + 4;
}
@@ -1221,6 +1221,112 @@ int image_(Main_hsv2rgb)(lua_State *L) {
return 0;
}
+/*
+ * Converts an sRGB color value to LAB.
+ * Based on http://www.brucelindbloom.com/index.html?Equations.html.
+ * Assumes r, g, and b are contained in the set [0, 1].
+ * LAB output is NOT restricted to [0, 1]!
+ */
+int image_(Main_rgb2lab)(lua_State *L) {
+ THTensor *rgb = luaT_checkudata(L, 1, torch_Tensor);
+ THTensor *lab = luaT_checkudata(L, 2, torch_Tensor);
+
+ // CIE Standard
+ double epsilon = 216.0/24389.0;
+ double k = 24389.0/27.0;
+ // D65 white point
+ double xn = 0.950456;
+ double zn = 1.088754;
+
+ int y,x;
+ real r,g,b,l,a,_b;
+ for (y=0; y<rgb->size[1]; y++) {
+ for (x=0; x<rgb->size[2]; x++) {
+ // get RGB
+ r = gamma_expand_sRGB(THTensor_(get3d)(rgb, 0, y, x));
+ g = gamma_expand_sRGB(THTensor_(get3d)(rgb, 1, y, x));
+ b = gamma_expand_sRGB(THTensor_(get3d)(rgb, 2, y, x));
+
+ // sRGB to XYZ
+ double X = 0.412453 * r + 0.357580 * g + 0.180423 * b;
+ double Y = 0.212671 * r + 0.715160 * g + 0.072169 * b;
+ double Z = 0.019334 * r + 0.119193 * g + 0.950227 * b;
+
+ // normalize for D65 white point
+ X /= xn;
+ Z /= zn;
+
+ // XYZ normalized to CIE Lab
+ double fx = X > epsilon ? pow(X, 1/3.0) : (k * X + 16)/116;
+ double fy = Y > epsilon ? pow(Y, 1/3.0) : (k * Y + 16)/116;
+ double fz = Z > epsilon ? pow(Z, 1/3.0) : (k * Z + 16)/116;
+ l = 116 * fy - 16;
+ a = 500 * (fx - fy);
+ _b = 200 * (fy - fz);
+
+ // set lab
+ THTensor_(set3d)(lab, 0, y, x, l);
+ THTensor_(set3d)(lab, 1, y, x, a);
+ THTensor_(set3d)(lab, 2, y, x, _b);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Converts an LAB color value to sRGB.
+ * Based on http://www.brucelindbloom.com/index.html?Equations.html.
+ * returns r, g, and b in the set [0, 1].
+ */
+int image_(Main_lab2rgb)(lua_State *L) {
+ THTensor *lab = luaT_checkudata(L, 1, torch_Tensor);
+ THTensor *rgb = luaT_checkudata(L, 2, torch_Tensor);
+
+ int y,x;
+ real r,g,b,l,a,_b;
+
+ // CIE Standard
+ double epsilon = 216.0/24389.0;
+ double k = 24389.0/27.0;
+ // D65 white point
+ double xn = 0.950456;
+ double zn = 1.088754;
+
+ for (y=0; y<lab->size[1]; y++) {
+ for (x=0; x<lab->size[2]; x++) {
+ // get lab
+ l = THTensor_(get3d)(lab, 0, y, x);
+ a = THTensor_(get3d)(lab, 1, y, x);
+ _b = THTensor_(get3d)(lab, 2, y, x);
+
+ // LAB to XYZ
+ double fy = (l + 16) / 116;
+ double fz = fy - _b / 200;
+ double fx = (a / 500) + fy;
+ double X = pow(fx, 3);
+ if (X <= epsilon)
+ X = (116 * fx - 16) / k;
+ double Y = l > (k * epsilon) ? pow((l + 16) / 116, 3) : l/k;
+ double Z = pow(fz, 3);
+ if (Z <= epsilon)
+ Z = (116 * fz - 16) / k;
+
+ X *= xn;
+ Z *= zn;
+
+ // XYZ to sRGB
+ r = 3.2404542 * X - 1.5371385 * Y - 0.4985314 * Z;
+ g = -0.9692660 * X + 1.8760108 * Y + 0.0415560 * Z;
+ b = 0.0556434 * X - 0.2040259 * Y + 1.0572252 * Z;
+
+ // set rgb
+ THTensor_(set3d)(rgb, 0, y, x, gamma_compress_sRGB(r));
+ THTensor_(set3d)(rgb, 1, y, x, gamma_compress_sRGB(g));
+ THTensor_(set3d)(rgb, 2, y, x, gamma_compress_sRGB(b));
+ }
+ }
+ return 0;
+}
/* Vertically flip an image */
int image_(Main_vflip)(lua_State *L) {
@@ -1360,7 +1466,7 @@ int image_(Main_warp)(lua_State *L) {
// borders
int off_image = 0;
- if (iy < 0 || iy > src_height - 1 ||
+ if (iy < 0 || iy > src_height - 1 ||
ix < 0 || ix > src_width - 1) {
off_image = 1;
}
@@ -1373,7 +1479,7 @@ int image_(Main_warp)(lua_State *L) {
} else {
ix = MAX(ix,0); ix = MIN(ix,src_width-1);
iy = MAX(iy,0); iy = MIN(iy,src_height-1);
-
+
// bilinear?
switch (mode) {
case 1: // Bilinear interpolation
@@ -1396,7 +1502,7 @@ int image_(Main_warp)(lua_State *L) {
// weighted sum of neighbors:
for (k=0; k<channels; k++) {
- dst_data[ k*os[0] + y*os[1] + x*os[2] ] =
+ dst_data[ k*os[0] + y*os[1] + x*os[2] ] =
src_data[ k*is[0] + iy_nw*is[1] + ix_nw*is[2] ] * nw
+ src_data[ k*is[0] + iy_ne*is[1] + MIN(ix_ne,src_width-1)*is[2] ] * ne
+ src_data[ k*is[0] + MIN(iy_sw,src_height-1)*is[1] + ix_sw*is[2] ] * sw
@@ -1423,19 +1529,19 @@ int image_(Main_warp)(lua_State *L) {
long y_pix = floor(iy);
real dx = ix - (real)x_pix;
real dy = iy - (real)y_pix;
-
+
real C[4];
for (k=0; k<channels; k++) {
// Sweep by rows through the samples (to calculate final cubic coefs)
for (jj = 0; jj <= 3; jj++) {
v = y_pix - 1 + jj;
- // We need to clamp all uv values to image border: hopefully
+ // We need to clamp all uv values to image border: hopefully
// branch prediction and compiler reordering takes care of all
- // the conditionals (since the branch probabilities are heavily
+ // the conditionals (since the branch probabilities are heavily
// skewed). Alternatively an inline "getPixelSafe" function would
// would be clearer here, but cannot be done with lua?
v = MAX(MIN((long)(src_height-1), v), 0);
- long ofst = k * is[0] + v * is[1];
+ long ofst = k * is[0] + v * is[1];
u = x_pix;
u = MAX(MIN((long)(src_width-1), u), 0);
real a0 = src_data[ofst + u * is[2]];
@@ -1446,40 +1552,40 @@ int image_(Main_warp)(lua_State *L) {
u = MAX(MIN((long)(src_width-1), u), 0);
real d2 = src_data[ofst + u * is[2]] - a0;
u = x_pix + 2;
- u = MAX(MIN((long)(src_width-1), u), 0);
+ u = MAX(MIN((long)(src_width-1), u), 0);
real d3 = src_data[ofst + u * is[2]] - a0;
// Note: there are mostly static casts, optimizer will take care of
// of it for us (prevents compiler warnings in new gcc)
real a1 = -(real)1/(real)3*d0 + d2 -(real)1/(real)6*d3;
real a2 = (real)1/(real)2*d0 + (real)1/(real)2*d2;
- real a3 = -(real)1/(real)6*d0 - (real)1/(real)2*d2 +
+ real a3 = -(real)1/(real)6*d0 - (real)1/(real)2*d2 +
(real)1/(real)6*d3;
C[jj] = a0 + dx * (a1 + dx * (a2 + a3 * dx));
}
-
+
real d0 = C[0]-C[1];
real d2 = C[2]-C[1];
real d3 = C[3]-C[1];
real a0 = C[1];
real a1 = -(real)1/(real)3*d0 + d2 - (real)1/(real)6*d3;
real a2 = (real)1/(real)2*d0 + (real)1/(real)2*d2;
- real a3 = -(real)1/(real)6*d0 - (real)1/(real)2*d2 +
+ real a3 = -(real)1/(real)6*d0 - (real)1/(real)2*d2 +
(real)1/(real)6*d3;
real Cc = a0 + dy * (a1 + dy * (a2 + a3 * dy));
- // I assume that since the image is stored as reals we don't have
+ // I assume that since the image is stored as reals we don't have
// to worry about clamping to min and max int (to prevent over or
// underflow)
dst_data[ k*os[0] + y*os[1] + x*os[2] ] = Cc;
- }
+ }
}
break;
case 3: // Lanczos
{
- // Note: Lanczos can be made fast if the resampling period is
+ // Note: Lanczos can be made fast if the resampling period is
// constant... and therefore the Lu, Lv can be cached and reused.
- // However, unfortunately warp makes no assumptions about resampling
+ // However, unfortunately warp makes no assumptions about resampling
// and so we need to perform the O(k^2) convolution on each pixel AND
// we have to re-calculate the kernel for every pixel.
// See wikipedia for more info.
@@ -1505,8 +1611,8 @@ int image_(Main_warp)(lua_State *L) {
} else if (du > (float)rad) {
Lu[i] = 0;
} else {
- Lu[i] = ((float)rad * sin((float)M_PI * du) *
- sin((float)M_PI * du / (float)rad)) /
+ Lu[i] = ((float)rad * sin((float)M_PI * du) *
+ sin((float)M_PI * du / (float)rad)) /
((float)(M_PI * M_PI) * du * du);
}
}
@@ -1522,11 +1628,11 @@ int image_(Main_warp)(lua_State *L) {
sin((float)M_PI * dv / (float)rad)) /
((float)(M_PI * M_PI) * dv * dv);
}
- }
+ }
float sum_weights = 0;
for (u=0; u<2*rad; u++) {
for (v=0; v<2*rad; v++) {
- sum_weights += (Lu[u] * Lv[v]);
+ sum_weights += (Lu[u] * Lv[v]);
}
}
@@ -1537,16 +1643,16 @@ int image_(Main_warp)(lua_State *L) {
for (v=y_pix-rad+1, j=0; v<=y_pix+rad; v++, j++) {
long curv = MAX(MIN((long)(src_height-1), v), 0);
real Suv = src_data[k * is[0] + curv * is[1] + curu * is[2]];
-
+
real weight = (real)(Lu[i] * Lv[j]);
result += (Suv * weight);
}
}
// Normalize by the sum of the weights
result = result / (float)sum_weights;
-
- // Again, I assume that since the image is stored as reals we
- // don't have to worry about clamping to min and max int (to
+
+ // Again, I assume that since the image is stored as reals we
+ // don't have to worry about clamping to min and max int (to
// prevent over or underflow)
dst_data[ k*os[0] + y*os[1] + x*os[2] ] = result;
}
@@ -1589,7 +1695,7 @@ int image_(Main_gaussian)(lua_State *L) {
for (u = 0; u < width; u++) {
du = ((real)u + 1 - mean_u) * over_sigmau;
dv = ((real)v + 1 - mean_v) * over_sigmav;
- dst_data[ v*os[0] + u*os[1] ] = amplitude *
+ dst_data[ v*os[0] + u*os[1] ] = amplitude *
exp(-((du*du*0.5) + (dv*dv*0.5)));
}
}
@@ -1676,6 +1782,8 @@ static const struct luaL_Reg image_(Main__) [] = {
{"rgb2hsl", image_(Main_rgb2hsl)},
{"hsv2rgb", image_(Main_hsv2rgb)},
{"hsl2rgb", image_(Main_hsl2rgb)},
+ {"rgb2lab", image_(Main_rgb2lab)},
+ {"lab2rgb", image_(Main_lab2rgb)},
{"gaussian", image_(Main_gaussian)},
{"vflip", image_(Main_vflip)},
{"hflip", image_(Main_hflip)},
diff --git a/image.c b/image.c
index cf6fa28..5bf2ab4 100644
--- a/image.c
+++ b/image.c
@@ -16,6 +16,26 @@
#endif
#define min( a, b ) ( ((a) < (b)) ? (a) : (b) )
+/*
+ * Convert an sRGB color channel to a linear sRGB color channel.
+ */
+static inline float gamma_expand_sRGB(float nonlinear)
+{
+ return (nonlinear <= 0.04045)
+ ? (nonlinear / 12.92)
+ : (pow((nonlinear+0.055)/1.055, 2.4));
+}
+
+/*
+ * Convert a linear sRGB color channel to a sRGB color channel.
+ */
+static inline float gamma_compress_sRGB(float linear)
+{
+ return (linear <= 0.0031308)
+ ? (12.92 * linear)
+ : (1.055 * pow(linear, 1.0/2.4) - 0.055);
+}
+
#include "generic/image.c"
#include "THGenerateAllTypes.h"
diff --git a/init.lua b/init.lua
index 65c60b1..49e1af7 100755
--- a/init.lua
+++ b/init.lua
@@ -1,7 +1,7 @@
----------------------------------------------------------------------
--
-- Copyright (c) 2011 Ronan Collobert, Clement Farabet
---
+--
-- Permission is hereby granted, free of charge, to any person obtaining
-- a copy of this software and associated documentation files (the
-- "Software"), to deal in the Software without restriction, including
@@ -9,10 +9,10 @@
-- distribute, sublicense, and/or sell copies of the Software, and to
-- permit persons to whom the Software is furnished to do so, subject to
-- the following conditions:
---
+--
-- The above copyright notice and this permission notice shall be
-- included in all copies or substantial portions of the Software.
---
+--
-- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
@@ -20,12 +20,12 @@
-- LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-- OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-- WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
---
+--
----------------------------------------------------------------------
-- description:
-- image - an image toolBox, for Torch
--
--- history:
+-- history:
-- July 1, 2011, 7:42PM - import from Torch5 - Clement Farabet
----------------------------------------------------------------------
@@ -37,7 +37,7 @@ require 'libimage'
----------------------------------------------------------------------
-- types lookups
---
+--
local type2tensor = {
float = torch.FloatTensor(),
double = torch.DoubleTensor(),
@@ -95,7 +95,7 @@ local function savePNG(filename, tensor)
a.image.saturate(a) -- bound btwn 0 and 1
a:mul(MAXVAL) -- remap to [0..255]
a.libpng.save(filename, a)
-end
+end
rawset(image, 'savePNG', savePNG)
function image.getPNGsize(filename)
@@ -154,7 +154,7 @@ local function decompressJPG(tensor, depth, tensortype)
if torch.typename(tensor) ~= 'torch.ByteTensor' then
dok.error('Input tensor (with compressed jpeg) must be a byte tensor',
'image.decompressJPG')
- end
+ end
local load_from_file = 0
local a = template(tensortype).libjpeg.load(load_from_file, tensor)
if a == nil then
@@ -543,7 +543,7 @@ local function rotate(...)
else
dok.error('mode must be one of: simple | bilinear', 'image.rotate')
end
- return dst
+ return dst
end
rawset(image, 'rotate', rotate)
@@ -765,7 +765,7 @@ local function warp(...)
else
dok.error('Incorrect arguments (clamp_mode is not clamp | pad)!', 'image.warp')
end
-
+
local dim2 = false
if src:nDimension() == 2 then
dim2 = true
@@ -870,7 +870,7 @@ local function convolve(...)
src = args[2]
kernel = args[3]
end
- elseif select('#',...) == 2 then
+ elseif select('#',...) == 2 then
src = args[1]
kernel = args[2]
else
@@ -911,7 +911,7 @@ local function convolve(...)
end
return dst
end
-rawset(image, 'convolve', convolve)
+rawset(image, 'convolve', convolve)
----------------------------------------------------------------------
-- compresses an image between min and max
@@ -919,11 +919,11 @@ rawset(image, 'convolve', convolve)
local function minmax(args)
local tensor = args.tensor
local min = args.min
- local max = args.max
+ local max = args.max
local symm = args.symm or false
local inplace = args.inplace or false
local saturate = args.saturate or false
- local tensorOut = args.tensorOut or (inplace and tensor)
+ local tensorOut = args.tensorOut or (inplace and tensor)
or torch.Tensor(tensor:size()):copy(tensor)
-- resize
@@ -959,7 +959,7 @@ local function minmax(args)
max = max - min
end
if (max ~= 0) then tensorOut:div(max) end
-
+
-- saturate
if saturate then
tensorOut.image.saturate(tensorOut)
@@ -968,7 +968,7 @@ local function minmax(args)
-- and return
return tensorOut
end
-rawset(image, 'minmax', minmax)
+rawset(image, 'minmax', minmax)
local function toDisplayTensor(...)
-- usage
@@ -1000,7 +1000,7 @@ local function toDisplayTensor(...)
else
packed = torch.Tensor(input:size()):copy(input)
end
-
+
-- scale each
if scaleeach and (
(packed:dim() == 4 and (packed:size(2) == 3 or packed:size(2) == 1))
@@ -1011,7 +1011,7 @@ local function toDisplayTensor(...)
image.minmax{tensor=packed[i], inplace=true, min=min, max=max, symm=symm, saturate=saturate}
end
end
-
+
local grid = torch.Tensor()
if packed:dim() == 4 and (packed:size(2) == 3 or packed:size(2) == 1) then
-- arbitrary number of color images: lay them out on a grid
@@ -1052,7 +1052,7 @@ local function toDisplayTensor(...)
else
xerror('packed must be a HxW or KxHxW or Kx3xHxW tensor, or a list of tensors', 'image.toDisplayTensor')
end
-
+
if not scaleeach then
image.minmax{tensor=grid, inplace=true, min=min, max=max, symm=symm, saturate=saturate}
end
@@ -1087,7 +1087,7 @@ local function display(...)
{arg='nrow',type='number',help='number of images per row', default=6},
{arg='saturate', type='boolean', help='saturate (useful when min/max are lower than actual min/max', default=true}
)
-
+
-- dependencies
require 'qt'
require 'qttorch'
@@ -1121,8 +1121,8 @@ local function display(...)
end
hook_mouse = function(x,y,button)
--local size = closure.window.frame.size:totable()
- --size.width =
- --size.height =
+ --size.width =
+ --size.height =
if button == 'LeftButton' then
elseif button == 'RightButton' then
end
@@ -1176,13 +1176,13 @@ local function window(hook_resize, hook_mousepress, hook_mousedoublepress)
local win = qtuiloader.load(pathui)
local painter = qt.QtLuaPainter(win.frame)
if hook_resize then
- qt.connect(qt.QtLuaListener(win.frame),
- 'sigResize(int,int)',
+ qt.connect(qt.QtLuaListener(win.frame),
+ 'sigResize(int,int)',
hook_resize)
end
if hook_mousepress then
qt.connect(qt.QtLuaListener(win.frame),
- 'sigMousePress(int,int,QByteArray,QByteArray,QByteArray)',
+ 'sigMousePress(int,int,QByteArray,QByteArray,QByteArray)',
hook_mousepress)
end
if hook_mousedoublepress then
@@ -1241,83 +1241,12 @@ local function fabio()
end
rawset(image, 'fabio', fabio)
-----------------------------------------------------------------------
--- image.rgb2lab(image)
--- converts a RGB image to YUV
---
-function image.rgb2lab(...)
- -- arg check
- local output,input
- local args = {...}
- if select('#',...) == 2 then
- output = args[1]
- input = args[2]
- elseif select('#',...) == 1 then
- input = args[1]
- else
- print(dok.usage('image.rgb2lab',
- 'transforms an image from RGB to Lab', nil,
- {type='torch.Tensor', help='input image', req=true},
- '',
- {type='torch.Tensor', help='output image', req=true},
- {type='torch.Tensor', help='input image', req=true}
- ))
- dok.error('missing input', 'image.rgb2lab')
- end
-
- -- resize
- output = output or input.new()
- output:resizeAs(input)
-
- -- output chanels
- local xyz = output:clone()
- local outputX = xyz[1]
- local outputY = xyz[2]
- local outputZ = xyz[3]
- -- output chanels
- local outputL = output[1]
- local outputA = output[2]
- local outputB = output[3]
-
-
- -- Set a threshold
- local T = 0.008856;
-
- local RGB = input:new():resize(3,input:size(2)*input:size(3))
-
- -- RGB to XYZ
- local MAT = input.new({{0.412453, 0.357580, 0.180423},
- {0.212671, 0.715160, 0.072169},
- {0.019334, 0.119193, 0.950227}})
- local XYZ = MAT * RGB;
-
- -- Normalize for D65 white point
- XYZ[1]:div(0.950456);
- XYZ[3]:div(1.088754);
- local Y3 = torch.pow(XYZ[2],1/3)
-
- local thres = function(x)
- if x > T then
- return x^(1/3)
- else
- return 1/3*(29/6)^2 * x + 16/116
- end
- end
- XYZ:apply(thres)
-
- outputL:mul(XYZ[2],116):add(-16):div(100)
- outputA:copy(XYZ[1]):add(-1,XYZ[2]):mul(500):add(110):div(220)
- outputB:copy(XYZ[2]):add(-1,XYZ[3]):mul(200):add(110):div(220)
-
- -- return LAB image
- return output
-end
----------------------------------------------------------------------
-- image.rgb2yuv(image)
-- converts a RGB image to YUV
--
-function image.rgb2yuv(...)
+function image.rgb2yuv(...)
-- arg check
local output,input
local args = {...}
@@ -1340,17 +1269,17 @@ function image.rgb2yuv(...)
-- resize
output = output or input.new()
output:resizeAs(input)
-
+
-- input chanels
local inputRed = input[1]
local inputGreen = input[2]
local inputBlue = input[3]
-
+
-- output chanels
local outputY = output[1]
local outputU = output[2]
local outputV = output[3]
-
+
-- convert
outputY:zero():add(0.299, inputRed):add(0.587, inputGreen):add(0.114, inputBlue)
outputU:zero():add(-0.14713, inputRed):add(-0.28886, inputGreen):add(0.436, inputBlue)
@@ -1364,7 +1293,7 @@ end
-- image.yuv2rgb(image)
-- converts a YUV image to RGB
--
-function image.yuv2rgb(...)
+function image.yuv2rgb(...)
-- arg check
local output,input
local args = {...}
@@ -1387,22 +1316,22 @@ function image.yuv2rgb(...)
-- resize
output = output or input.new()
output:resizeAs(input)
-
+
-- input chanels
local inputY = input[1]
local inputU = input[2]
local inputV = input[3]
-
+
-- output chanels
local outputRed = output[1]
local outputGreen = output[2]
local outputBlue = output[3]
-
+
-- convert
outputRed:copy(inputY):add(1.13983, inputV)
- outputGreen:copy(inputY):add(-0.39465, inputU):add(-0.58060, inputV)
+ outputGreen:copy(inputY):add(-0.39465, inputU):add(-0.58060, inputV)
outputBlue:copy(inputY):add(2.03211, inputU)
-
+
-- return RGB image
return output
end
@@ -1434,18 +1363,18 @@ function image.rgb2y(...)
-- resize
output = output or input.new()
output:resize(1, input:size(2), input:size(3))
-
+
-- input chanels
local inputRed = input[1]
local inputGreen = input[2]
local inputBlue = input[3]
-
+
-- output chanels
local outputY = output[1]
-
+
-- convert
outputY:zero():add(0.299, inputRed):add(0.587, inputGreen):add(0.114, inputBlue)
-
+
-- return YUV image
return output
end
@@ -1454,7 +1383,7 @@ end
-- image.rgb2hsl(image)
-- converts an RGB image to HSL
--
-function image.rgb2hsl(...)
+function image.rgb2hsl(...)
-- arg check
local output,input
local args = {...}
@@ -1480,7 +1409,7 @@ function image.rgb2hsl(...)
-- compute
input.image.rgb2hsl(input,output)
-
+
-- return HSL image
return output
end
@@ -1515,7 +1444,7 @@ function image.hsl2rgb(...)
-- compute
input.image.hsl2rgb(input,output)
-
+
-- return HSL image
return output
end
@@ -1550,7 +1479,7 @@ function image.rgb2hsv(...)
-- compute
input.image.rgb2hsv(input,output)
-
+
-- return HSV image
return output
end
@@ -1585,12 +1514,84 @@ function image.hsv2rgb(...)
-- compute
input.image.hsv2rgb(input,output)
-
+
-- return HSV image
return output
end
----------------------------------------------------------------------
+-- image.rgb2lab(image)
+-- converts an RGB image to LAB
+-- assumes sRGB input in the range [0, 1]
+--
+function image.rgb2lab(...)
+ -- arg check
+ local output,input
+ local args = {...}
+ if select('#',...) == 2 then
+ output = args[1]
+ input = args[2]
+ elseif select('#',...) == 1 then
+ input = args[1]
+ else
+ print(dok.usage('image.rgb2lab',
+ 'transforms an image from sRGB to LAB', nil,
+ {type='torch.Tensor', help='input image', req=true},
+ '',
+ {type='torch.Tensor', help='output image', req=true},
+ {type='torch.Tensor', help='input image', req=true}
+ ))
+ dok.error('missing input', 'image.rgb2lab')
+ end
+
+ -- resize
+ output = output or input.new()
+ output:resizeAs(input)
+
+ -- compute
+ input.image.rgb2lab(input,output)
+
+ -- return LAB image
+ return output
+end
+
+----------------------------------------------------------------------
+-- image.lab2rgb(image)
+-- converts an LAB image to RGB (assumes sRGB)
+--
+function image.lab2rgb(...)
+ -- arg check
+ local output,input
+ local args = {...}
+ if select('#',...) == 2 then
+ output = args[1]
+ input = args[2]
+ elseif select('#',...) == 1 then
+ input = args[1]
+ else
+ print(dok.usage('image.lab2rgb',
+ 'transforms an image from LAB to RGB', nil,
+ {type='torch.Tensor', help='input image', req=true},
+ '',
+ {type='torch.Tensor', help='output image', req=true},
+ {type='torch.Tensor', help='input image', req=true}
+ ))
+ dok.error('missing input', 'image.lab2rgb')
+ end
+
+ -- resize
+ output = output or input.new()
+ output:resizeAs(input)
+
+ -- compute
+ input.image.lab2rgb(input,output)
+
+ -- return sRGB image
+ return output
+end
+
+
+----------------------------------------------------------------------
-- image.rgb2nrgb(image)
-- converts an RGB image to normalized-RGB
--
@@ -1619,14 +1620,14 @@ function image.rgb2nrgb(...)
output:resizeAs(input)
local sum = input.new()
sum:resize(input:size(2), input:size(3))
-
+
-- compute sum and normalize
sum:copy(input[1]):add(input[2]):add(input[3]):add(1e-6)
output:copy(input)
output[1]:cdiv(sum)
output[2]:cdiv(sum)
output[3]:cdiv(sum)
-
+
-- return HSV image
return output
end
@@ -1675,7 +1676,7 @@ end
--
function image.gaussian(...)
-- process args
- local _, size, sigma, amplitude, normalize,
+ local _, size, sigma, amplitude, normalize,
width, height, sigma_horz, sigma_vert, mean_horz, mean_vert = dok.unpack(
{...},
'image.gaussian',
@@ -1695,7 +1696,7 @@ function image.gaussian(...)
-- generate kernel
local gauss = torch.Tensor(height, width)
gauss.image.gaussian(gauss, amplitude, normalize, sigma_horz, sigma_vert, mean_horz, mean_vert)
-
+
return gauss
end
@@ -1715,7 +1716,7 @@ function image.gaussian1D(...)
-- local vars
local center = mean * size + 0.5
-
+
-- generate kernel
local gauss = torch.Tensor(size)
for i=1,size do
@@ -1733,7 +1734,7 @@ end
--
function image.laplacian(...)
-- process args
- local _, size, sigma, amplitude, normalize,
+ local _, size, sigma, amplitude, normalize,
width, height, sigma_horz, sigma_vert, mean_horz, mean_vert = dok.unpack(
{...},
'image.laplacian',
@@ -1753,7 +1754,7 @@ function image.laplacian(...)
-- local vars
local center_x = mean_horz * width + 0.5
local center_y = mean_vert * height + 0.5
-
+
-- generate kernel
local logauss = torch.Tensor(height,width)
for i=1,height do
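
Like the other converters in init.lua, the new wrappers accept an optional destination tensor as the first argument, in which case the result is written in place. A sketch of that form; the frames table is hypothetical, and the destination must have the same element type as the input because the C functions check both tensors against one tensor type:

   -- reuse one destination tensor across calls instead of allocating each time
   local lab = torch.DoubleTensor()
   for _, frame in ipairs(frames) do      -- frames: 3xHxW DoubleTensors in [0, 1]
      image.rgb2lab(lab, frame)           -- resizes lab and fills it (also returns it)
      -- ... consume lab here ...
   end
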
diff --git a/test/test_conversion.lua b/test/test_conversion.lua
new file mode 100644
index 0000000..dcc0d25
--- /dev/null
+++ b/test/test_conversion.lua
@@ -0,0 +1,29 @@
+require 'image'
+require 'paths'
+
+-- Create an instance of the test framework
+local mytester = torch.Tester()
+local precision = 1e-4
+local test = {}
+
+function test.TestLabConversionBackAndForth()
+ -- This test breaks if someone removes lena from the repo
+ local imfile = '../lena.jpg'
+ if not paths.filep(imfile) then
+ error(imfile .. ' is missing!')
+ end
+
+ -- Load lena directly from the filename
+ local img = image.loadJPG(imfile)
+
+ -- Convert to LAB and back to RGB
+ local lab = image.rgb2lab(img)
+ local img2 = image.lab2rgb(lab)
+ -- Compare RGB images
+ mytester:assertlt((img - img2):abs():max(), precision,
+ 'RGB <-> LAB conversion produces wrong results! ')
+end
+
+-- Now run the test above
+mytester:add(test)
+mytester:run()
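
The test loads '../lena.jpg', so it is meant to be run from the test/ directory of a checkout that still contains lena.jpg. A self-contained variant of the same round-trip check that uses a random tensor instead of a file on disk (a sketch):

   require 'image'

   -- round-trip a random sRGB-like tensor; torch.rand draws values from [0, 1)
   local img = torch.rand(3, 16, 16)
   local err = (img - image.lab2rgb(image.rgb2lab(img))):abs():max()
   assert(err < 1e-4, 'RGB <-> LAB round trip error: ' .. err)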