author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /gfx/qcms
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'gfx/qcms')
-rw-r--r--  gfx/qcms/chain.c               993
-rw-r--r--  gfx/qcms/chain.h                30
-rw-r--r--  gfx/qcms/iccread.c            1404
-rw-r--r--  gfx/qcms/matrix.c              136
-rw-r--r--  gfx/qcms/matrix.h               39
-rw-r--r--  gfx/qcms/moz.build              48
-rw-r--r--  gfx/qcms/qcms.h                179
-rw-r--r--  gfx/qcms/qcmsint.h             327
-rw-r--r--  gfx/qcms/qcmstypes.h            51
-rw-r--r--  gfx/qcms/transform-altivec.c   269
-rw-r--r--  gfx/qcms/transform-sse1.c      253
-rw-r--r--  gfx/qcms/transform-sse2.c      243
-rw-r--r--  gfx/qcms/transform.c          1410
-rw-r--r--  gfx/qcms/transform_util.c      516
-rw-r--r--  gfx/qcms/transform_util.h      104
15 files changed, 6002 insertions, 0 deletions
diff --git a/gfx/qcms/chain.c b/gfx/qcms/chain.c
new file mode 100644
index 000000000..e382fbe00
--- /dev/null
+++ b/gfx/qcms/chain.c
@@ -0,0 +1,993 @@
+/* vim: set ts=8 sw=8 noexpandtab: */
+// qcms
+// Copyright (C) 2009 Mozilla Corporation
+// Copyright (C) 1998-2007 Marti Maria
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+#include <stdlib.h>
+#include <math.h>
+#include <assert.h>
+#include <string.h> //memcpy
+#include "qcmsint.h"
+#include "transform_util.h"
+#include "matrix.h"
+
+static struct matrix build_lut_matrix(struct lutType *lut)
+{
+ struct matrix result;
+ if (lut) {
+ result.m[0][0] = s15Fixed16Number_to_float(lut->e00);
+ result.m[0][1] = s15Fixed16Number_to_float(lut->e01);
+ result.m[0][2] = s15Fixed16Number_to_float(lut->e02);
+ result.m[1][0] = s15Fixed16Number_to_float(lut->e10);
+ result.m[1][1] = s15Fixed16Number_to_float(lut->e11);
+ result.m[1][2] = s15Fixed16Number_to_float(lut->e12);
+ result.m[2][0] = s15Fixed16Number_to_float(lut->e20);
+ result.m[2][1] = s15Fixed16Number_to_float(lut->e21);
+ result.m[2][2] = s15Fixed16Number_to_float(lut->e22);
+ result.invalid = false;
+ } else {
+ memset(&result, 0, sizeof(struct matrix));
+ result.invalid = true;
+ }
+ return result;
+}
+
+static struct matrix build_mAB_matrix(struct lutmABType *lut)
+{
+ struct matrix result;
+ if (lut) {
+ result.m[0][0] = s15Fixed16Number_to_float(lut->e00);
+ result.m[0][1] = s15Fixed16Number_to_float(lut->e01);
+ result.m[0][2] = s15Fixed16Number_to_float(lut->e02);
+ result.m[1][0] = s15Fixed16Number_to_float(lut->e10);
+ result.m[1][1] = s15Fixed16Number_to_float(lut->e11);
+ result.m[1][2] = s15Fixed16Number_to_float(lut->e12);
+ result.m[2][0] = s15Fixed16Number_to_float(lut->e20);
+ result.m[2][1] = s15Fixed16Number_to_float(lut->e21);
+ result.m[2][2] = s15Fixed16Number_to_float(lut->e22);
+ result.invalid = false;
+ } else {
+ memset(&result, 0, sizeof(struct matrix));
+ result.invalid = true;
+ }
+ return result;
+}
+
+//Based on lcms cmsLab2XYZ
+#define f(t) (t <= (24.0f/116.0f)*(24.0f/116.0f)*(24.0f/116.0f)) ? ((841.0/108.0) * t + (16.0/116.0)) : pow(t,1.0/3.0)
+#define f_1(t) (t <= (24.0f/116.0f)) ? ((108.0/841.0) * (t - (16.0/116.0))) : (t * t * t)
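+// f() is the CIE L*a*b* companding function: the cube root above the
+// (24/116)^3 threshold and the linear segment (841/108)*t + 16/116 below it;
+// f_1() is its inverse.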
+static void qcms_transform_module_LAB_to_XYZ(struct qcms_modular_transform *transform, float *src, float *dest, size_t length)
+{
+ size_t i;
+ // lcms: D50 XYZ values
+ float WhitePointX = 0.9642f;
+ float WhitePointY = 1.0f;
+ float WhitePointZ = 0.8249f;
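+	// The inputs arrive PCS-encoded: L* in [0,1] maps to [0,100] and a*/b* map
+	// to [-128,127]. The outputs are divided by 1 + 32767/32768 (the top of the
+	// ICC 16-bit XYZ encoding, where 0x8000 represents 1.0) so they land in [0,1].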
+ for (i = 0; i < length; i++) {
+ float device_L = *src++ * 100.0f;
+ float device_a = *src++ * 255.0f - 128.0f;
+ float device_b = *src++ * 255.0f - 128.0f;
+ float y = (device_L + 16.0f) / 116.0f;
+
+ float X = f_1((y + 0.002f * device_a)) * WhitePointX;
+ float Y = f_1(y) * WhitePointY;
+ float Z = f_1((y - 0.005f * device_b)) * WhitePointZ;
+ *dest++ = X / (1.0 + 32767.0/32768.0);
+ *dest++ = Y / (1.0 + 32767.0/32768.0);
+ *dest++ = Z / (1.0 + 32767.0/32768.0);
+ }
+}
+
+//Based on lcms cmsXYZ2Lab
+static void qcms_transform_module_XYZ_to_LAB(struct qcms_modular_transform *transform, float *src, float *dest, size_t length)
+{
+ size_t i;
+ // lcms: D50 XYZ values
+ float WhitePointX = 0.9642f;
+ float WhitePointY = 1.0f;
+ float WhitePointZ = 0.8249f;
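+	// Inverse of the LAB_to_XYZ module above: rescale the encoded XYZ back up by
+	// 1 + 32767/32768 and normalize by the D50 white point, then pack L* into
+	// [0,1] via /100 and a*/b* via (v+128)/255.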
+ for (i = 0; i < length; i++) {
+ float device_x = *src++ * (1.0 + 32767.0/32768.0) / WhitePointX;
+ float device_y = *src++ * (1.0 + 32767.0/32768.0) / WhitePointY;
+ float device_z = *src++ * (1.0 + 32767.0/32768.0) / WhitePointZ;
+
+ float fx = f(device_x);
+ float fy = f(device_y);
+ float fz = f(device_z);
+
+ float L = 116.0f*fy - 16.0f;
+ float a = 500.0f*(fx - fy);
+ float b = 200.0f*(fy - fz);
+ *dest++ = L / 100.0f;
+ *dest++ = (a+128.0f) / 255.0f;
+ *dest++ = (b+128.0f) / 255.0f;
+ }
+
+}
+
+static void qcms_transform_module_clut_only(struct qcms_modular_transform *transform, float *src, float *dest, size_t length)
+{
+ size_t i;
+ int xy_len = 1;
+ int x_len = transform->grid_size;
+ int len = x_len * x_len;
+ float* r_table = transform->r_clut;
+ float* g_table = transform->g_clut;
+ float* b_table = transform->b_clut;
+
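+	// Trilinear interpolation over the 3D CLUT: fetch the eight grid points
+	// surrounding (linear_r, linear_g, linear_b) with the CLU() grid-lookup
+	// helper and lerp along x, then y, then z.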
+ for (i = 0; i < length; i++) {
+ assert(transform->grid_size >= 1);
+
+ float linear_r = *src++;
+ float linear_g = *src++;
+ float linear_b = *src++;
+
+ int x = floorf(linear_r * (transform->grid_size-1));
+ int y = floorf(linear_g * (transform->grid_size-1));
+ int z = floorf(linear_b * (transform->grid_size-1));
+ int x_n = ceilf(linear_r * (transform->grid_size-1));
+ int y_n = ceilf(linear_g * (transform->grid_size-1));
+ int z_n = ceilf(linear_b * (transform->grid_size-1));
+ float x_d = linear_r * (transform->grid_size-1) - x;
+ float y_d = linear_g * (transform->grid_size-1) - y;
+ float z_d = linear_b * (transform->grid_size-1) - z;
+
+ float r_x1 = lerp(CLU(r_table,x,y,z), CLU(r_table,x_n,y,z), x_d);
+ float r_x2 = lerp(CLU(r_table,x,y_n,z), CLU(r_table,x_n,y_n,z), x_d);
+ float r_y1 = lerp(r_x1, r_x2, y_d);
+ float r_x3 = lerp(CLU(r_table,x,y,z_n), CLU(r_table,x_n,y,z_n), x_d);
+ float r_x4 = lerp(CLU(r_table,x,y_n,z_n), CLU(r_table,x_n,y_n,z_n), x_d);
+ float r_y2 = lerp(r_x3, r_x4, y_d);
+ float clut_r = lerp(r_y1, r_y2, z_d);
+
+ float g_x1 = lerp(CLU(g_table,x,y,z), CLU(g_table,x_n,y,z), x_d);
+ float g_x2 = lerp(CLU(g_table,x,y_n,z), CLU(g_table,x_n,y_n,z), x_d);
+ float g_y1 = lerp(g_x1, g_x2, y_d);
+ float g_x3 = lerp(CLU(g_table,x,y,z_n), CLU(g_table,x_n,y,z_n), x_d);
+ float g_x4 = lerp(CLU(g_table,x,y_n,z_n), CLU(g_table,x_n,y_n,z_n), x_d);
+ float g_y2 = lerp(g_x3, g_x4, y_d);
+ float clut_g = lerp(g_y1, g_y2, z_d);
+
+ float b_x1 = lerp(CLU(b_table,x,y,z), CLU(b_table,x_n,y,z), x_d);
+ float b_x2 = lerp(CLU(b_table,x,y_n,z), CLU(b_table,x_n,y_n,z), x_d);
+ float b_y1 = lerp(b_x1, b_x2, y_d);
+ float b_x3 = lerp(CLU(b_table,x,y,z_n), CLU(b_table,x_n,y,z_n), x_d);
+ float b_x4 = lerp(CLU(b_table,x,y_n,z_n), CLU(b_table,x_n,y_n,z_n), x_d);
+ float b_y2 = lerp(b_x3, b_x4, y_d);
+ float clut_b = lerp(b_y1, b_y2, z_d);
+
+ *dest++ = clamp_float(clut_r);
+ *dest++ = clamp_float(clut_g);
+ *dest++ = clamp_float(clut_b);
+ }
+}
+
+static void qcms_transform_module_clut(struct qcms_modular_transform *transform, float *src, float *dest, size_t length)
+{
+ size_t i;
+ int xy_len = 1;
+ int x_len = transform->grid_size;
+ int len = x_len * x_len;
+ float* r_table = transform->r_clut;
+ float* g_table = transform->g_clut;
+ float* b_table = transform->b_clut;
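+	// Same trilinear CLUT lookup as qcms_transform_module_clut_only above, but
+	// the inputs first pass through the input shaper curves and the results
+	// through the output curves via lut_interp_linear_float().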
+ for (i = 0; i < length; i++) {
+ assert(transform->grid_size >= 1);
+
+ float device_r = *src++;
+ float device_g = *src++;
+ float device_b = *src++;
+ float linear_r = lut_interp_linear_float(device_r,
+ transform->input_clut_table_r, transform->input_clut_table_length);
+ float linear_g = lut_interp_linear_float(device_g,
+ transform->input_clut_table_g, transform->input_clut_table_length);
+ float linear_b = lut_interp_linear_float(device_b,
+ transform->input_clut_table_b, transform->input_clut_table_length);
+
+ int x = floorf(linear_r * (transform->grid_size-1));
+ int y = floorf(linear_g * (transform->grid_size-1));
+ int z = floorf(linear_b * (transform->grid_size-1));
+ int x_n = ceilf(linear_r * (transform->grid_size-1));
+ int y_n = ceilf(linear_g * (transform->grid_size-1));
+ int z_n = ceilf(linear_b * (transform->grid_size-1));
+ float x_d = linear_r * (transform->grid_size-1) - x;
+ float y_d = linear_g * (transform->grid_size-1) - y;
+ float z_d = linear_b * (transform->grid_size-1) - z;
+
+ float r_x1 = lerp(CLU(r_table,x,y,z), CLU(r_table,x_n,y,z), x_d);
+ float r_x2 = lerp(CLU(r_table,x,y_n,z), CLU(r_table,x_n,y_n,z), x_d);
+ float r_y1 = lerp(r_x1, r_x2, y_d);
+ float r_x3 = lerp(CLU(r_table,x,y,z_n), CLU(r_table,x_n,y,z_n), x_d);
+ float r_x4 = lerp(CLU(r_table,x,y_n,z_n), CLU(r_table,x_n,y_n,z_n), x_d);
+ float r_y2 = lerp(r_x3, r_x4, y_d);
+ float clut_r = lerp(r_y1, r_y2, z_d);
+
+ float g_x1 = lerp(CLU(g_table,x,y,z), CLU(g_table,x_n,y,z), x_d);
+ float g_x2 = lerp(CLU(g_table,x,y_n,z), CLU(g_table,x_n,y_n,z), x_d);
+ float g_y1 = lerp(g_x1, g_x2, y_d);
+ float g_x3 = lerp(CLU(g_table,x,y,z_n), CLU(g_table,x_n,y,z_n), x_d);
+ float g_x4 = lerp(CLU(g_table,x,y_n,z_n), CLU(g_table,x_n,y_n,z_n), x_d);
+ float g_y2 = lerp(g_x3, g_x4, y_d);
+ float clut_g = lerp(g_y1, g_y2, z_d);
+
+ float b_x1 = lerp(CLU(b_table,x,y,z), CLU(b_table,x_n,y,z), x_d);
+ float b_x2 = lerp(CLU(b_table,x,y_n,z), CLU(b_table,x_n,y_n,z), x_d);
+ float b_y1 = lerp(b_x1, b_x2, y_d);
+ float b_x3 = lerp(CLU(b_table,x,y,z_n), CLU(b_table,x_n,y,z_n), x_d);
+ float b_x4 = lerp(CLU(b_table,x,y_n,z_n), CLU(b_table,x_n,y_n,z_n), x_d);
+ float b_y2 = lerp(b_x3, b_x4, y_d);
+ float clut_b = lerp(b_y1, b_y2, z_d);
+
+ float pcs_r = lut_interp_linear_float(clut_r,
+ transform->output_clut_table_r, transform->output_clut_table_length);
+ float pcs_g = lut_interp_linear_float(clut_g,
+ transform->output_clut_table_g, transform->output_clut_table_length);
+ float pcs_b = lut_interp_linear_float(clut_b,
+ transform->output_clut_table_b, transform->output_clut_table_length);
+
+ *dest++ = clamp_float(pcs_r);
+ *dest++ = clamp_float(pcs_g);
+ *dest++ = clamp_float(pcs_b);
+ }
+}
+
+/* NOT USED
+static void qcms_transform_module_tetra_clut(struct qcms_modular_transform *transform, float *src, float *dest, size_t length)
+{
+ size_t i;
+ int xy_len = 1;
+ int x_len = transform->grid_size;
+ int len = x_len * x_len;
+ float* r_table = transform->r_clut;
+ float* g_table = transform->g_clut;
+ float* b_table = transform->b_clut;
+ float c0_r, c1_r, c2_r, c3_r;
+ float c0_g, c1_g, c2_g, c3_g;
+ float c0_b, c1_b, c2_b, c3_b;
+ float clut_r, clut_g, clut_b;
+ float pcs_r, pcs_g, pcs_b;
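+	// Tetrahedral interpolation: the ordering of rx, ry and rz selects one of
+	// the six tetrahedra that partition the grid cell, and c1..c3 are the
+	// deltas along that tetrahedron's edges.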
+ for (i = 0; i < length; i++) {
+ float device_r = *src++;
+ float device_g = *src++;
+ float device_b = *src++;
+ float linear_r = lut_interp_linear_float(device_r,
+ transform->input_clut_table_r, transform->input_clut_table_length);
+ float linear_g = lut_interp_linear_float(device_g,
+ transform->input_clut_table_g, transform->input_clut_table_length);
+ float linear_b = lut_interp_linear_float(device_b,
+ transform->input_clut_table_b, transform->input_clut_table_length);
+
+ int x = floorf(linear_r * (transform->grid_size-1));
+ int y = floorf(linear_g * (transform->grid_size-1));
+ int z = floorf(linear_b * (transform->grid_size-1));
+ int x_n = ceilf(linear_r * (transform->grid_size-1));
+ int y_n = ceilf(linear_g * (transform->grid_size-1));
+ int z_n = ceilf(linear_b * (transform->grid_size-1));
+ float rx = linear_r * (transform->grid_size-1) - x;
+ float ry = linear_g * (transform->grid_size-1) - y;
+ float rz = linear_b * (transform->grid_size-1) - z;
+
+ c0_r = CLU(r_table, x, y, z);
+ c0_g = CLU(g_table, x, y, z);
+ c0_b = CLU(b_table, x, y, z);
+ if( rx >= ry ) {
+ if (ry >= rz) { //rx >= ry && ry >= rz
+ c1_r = CLU(r_table, x_n, y, z) - c0_r;
+ c2_r = CLU(r_table, x_n, y_n, z) - CLU(r_table, x_n, y, z);
+ c3_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x_n, y_n, z);
+ c1_g = CLU(g_table, x_n, y, z) - c0_g;
+ c2_g = CLU(g_table, x_n, y_n, z) - CLU(g_table, x_n, y, z);
+ c3_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x_n, y_n, z);
+ c1_b = CLU(b_table, x_n, y, z) - c0_b;
+ c2_b = CLU(b_table, x_n, y_n, z) - CLU(b_table, x_n, y, z);
+ c3_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x_n, y_n, z);
+ } else {
+ if (rx >= rz) { //rx >= rz && rz >= ry
+ c1_r = CLU(r_table, x_n, y, z) - c0_r;
+ c2_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x_n, y, z_n);
+ c3_r = CLU(r_table, x_n, y, z_n) - CLU(r_table, x_n, y, z);
+ c1_g = CLU(g_table, x_n, y, z) - c0_g;
+ c2_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x_n, y, z_n);
+ c3_g = CLU(g_table, x_n, y, z_n) - CLU(g_table, x_n, y, z);
+ c1_b = CLU(b_table, x_n, y, z) - c0_b;
+ c2_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x_n, y, z_n);
+ c3_b = CLU(b_table, x_n, y, z_n) - CLU(b_table, x_n, y, z);
+ } else { //rz > rx && rx >= ry
+ c1_r = CLU(r_table, x_n, y, z_n) - CLU(r_table, x, y, z_n);
+ c2_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x_n, y, z_n);
+ c3_r = CLU(r_table, x, y, z_n) - c0_r;
+ c1_g = CLU(g_table, x_n, y, z_n) - CLU(g_table, x, y, z_n);
+ c2_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x_n, y, z_n);
+ c3_g = CLU(g_table, x, y, z_n) - c0_g;
+ c1_b = CLU(b_table, x_n, y, z_n) - CLU(b_table, x, y, z_n);
+ c2_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x_n, y, z_n);
+ c3_b = CLU(b_table, x, y, z_n) - c0_b;
+ }
+ }
+ } else {
+ if (rx >= rz) { //ry > rx && rx >= rz
+ c1_r = CLU(r_table, x_n, y_n, z) - CLU(r_table, x, y_n, z);
+ c2_r = CLU(r_table, x_n, y_n, z) - c0_r;
+ c3_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x_n, y_n, z);
+ c1_g = CLU(g_table, x_n, y_n, z) - CLU(g_table, x, y_n, z);
+ c2_g = CLU(g_table, x_n, y_n, z) - c0_g;
+ c3_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x_n, y_n, z);
+ c1_b = CLU(b_table, x_n, y_n, z) - CLU(b_table, x, y_n, z);
+ c2_b = CLU(b_table, x_n, y_n, z) - c0_b;
+ c3_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x_n, y_n, z);
+ } else {
+ if (ry >= rz) { //ry >= rz && rz > rx
+ c1_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x, y_n, z_n);
+ c2_r = CLU(r_table, x, y_n, z) - c0_r;
+ c3_r = CLU(r_table, x, y_n, z_n) - CLU(r_table, x, y_n, z);
+ c1_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x, y_n, z_n);
+ c2_g = CLU(g_table, x, y_n, z) - c0_g;
+ c3_g = CLU(g_table, x, y_n, z_n) - CLU(g_table, x, y_n, z);
+ c1_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x, y_n, z_n);
+ c2_b = CLU(b_table, x, y_n, z) - c0_b;
+ c3_b = CLU(b_table, x, y_n, z_n) - CLU(b_table, x, y_n, z);
+ } else { //rz > ry && ry > rx
+ c1_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x, y_n, z_n);
+ c2_r = CLU(r_table, x, y_n, z) - c0_r;
+ c3_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x_n, y_n, z);
+ c1_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x, y_n, z_n);
+ c2_g = CLU(g_table, x, y_n, z) - c0_g;
+ c3_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x_n, y_n, z);
+ c1_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x, y_n, z_n);
+ c2_b = CLU(b_table, x, y_n, z) - c0_b;
+ c3_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x_n, y_n, z);
+ }
+ }
+ }
+
+ clut_r = c0_r + c1_r*rx + c2_r*ry + c3_r*rz;
+ clut_g = c0_g + c1_g*rx + c2_g*ry + c3_g*rz;
+ clut_b = c0_b + c1_b*rx + c2_b*ry + c3_b*rz;
+
+ pcs_r = lut_interp_linear_float(clut_r,
+ transform->output_clut_table_r, transform->output_clut_table_length);
+ pcs_g = lut_interp_linear_float(clut_g,
+ transform->output_clut_table_g, transform->output_clut_table_length);
+ pcs_b = lut_interp_linear_float(clut_b,
+ transform->output_clut_table_b, transform->output_clut_table_length);
+ *dest++ = clamp_float(pcs_r);
+ *dest++ = clamp_float(pcs_g);
+ *dest++ = clamp_float(pcs_b);
+ }
+}
+*/
+
+static void qcms_transform_module_gamma_table(struct qcms_modular_transform *transform, float *src, float *dest, size_t length)
+{
+ size_t i;
+ float out_r, out_g, out_b;
+ for (i = 0; i < length; i++) {
+ float in_r = *src++;
+ float in_g = *src++;
+ float in_b = *src++;
+
+ out_r = lut_interp_linear_float(in_r, transform->input_clut_table_r, 256);
+ out_g = lut_interp_linear_float(in_g, transform->input_clut_table_g, 256);
+ out_b = lut_interp_linear_float(in_b, transform->input_clut_table_b, 256);
+
+ *dest++ = clamp_float(out_r);
+ *dest++ = clamp_float(out_g);
+ *dest++ = clamp_float(out_b);
+ }
+}
+
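+// Unlike qcms_transform_module_gamma_table above, which samples the 256-entry
+// float input curves, this module samples the output gamma LUTs that
+// build_output_lut() fills in when the output transform is created.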
+static void qcms_transform_module_gamma_lut(struct qcms_modular_transform *transform, float *src, float *dest, size_t length)
+{
+ size_t i;
+ float out_r, out_g, out_b;
+ for (i = 0; i < length; i++) {
+ float in_r = *src++;
+ float in_g = *src++;
+ float in_b = *src++;
+
+ out_r = lut_interp_linear(in_r,
+ transform->output_gamma_lut_r, transform->output_gamma_lut_r_length);
+ out_g = lut_interp_linear(in_g,
+ transform->output_gamma_lut_g, transform->output_gamma_lut_g_length);
+ out_b = lut_interp_linear(in_b,
+ transform->output_gamma_lut_b, transform->output_gamma_lut_b_length);
+
+ *dest++ = clamp_float(out_r);
+ *dest++ = clamp_float(out_g);
+ *dest++ = clamp_float(out_b);
+ }
+}
+
+static void qcms_transform_module_matrix_translate(struct qcms_modular_transform *transform, float *src, float *dest, size_t length)
+{
+ size_t i;
+ struct matrix mat;
+
+	/* store the matrix in column-major order;
+	 * this makes doing the multiplication with SSE easier */
+ mat.m[0][0] = transform->matrix.m[0][0];
+ mat.m[1][0] = transform->matrix.m[0][1];
+ mat.m[2][0] = transform->matrix.m[0][2];
+ mat.m[0][1] = transform->matrix.m[1][0];
+ mat.m[1][1] = transform->matrix.m[1][1];
+ mat.m[2][1] = transform->matrix.m[1][2];
+ mat.m[0][2] = transform->matrix.m[2][0];
+ mat.m[1][2] = transform->matrix.m[2][1];
+ mat.m[2][2] = transform->matrix.m[2][2];
+
+ for (i = 0; i < length; i++) {
+ float in_r = *src++;
+ float in_g = *src++;
+ float in_b = *src++;
+
+ float out_r = mat.m[0][0]*in_r + mat.m[1][0]*in_g + mat.m[2][0]*in_b + transform->tx;
+ float out_g = mat.m[0][1]*in_r + mat.m[1][1]*in_g + mat.m[2][1]*in_b + transform->ty;
+ float out_b = mat.m[0][2]*in_r + mat.m[1][2]*in_g + mat.m[2][2]*in_b + transform->tz;
+
+ *dest++ = clamp_float(out_r);
+ *dest++ = clamp_float(out_g);
+ *dest++ = clamp_float(out_b);
+ }
+}
+
+static void qcms_transform_module_matrix(struct qcms_modular_transform *transform, float *src, float *dest, size_t length)
+{
+ size_t i;
+ struct matrix mat;
+
+	/* store the matrix in column-major order;
+	 * this makes doing the multiplication with SSE easier */
+ mat.m[0][0] = transform->matrix.m[0][0];
+ mat.m[1][0] = transform->matrix.m[0][1];
+ mat.m[2][0] = transform->matrix.m[0][2];
+ mat.m[0][1] = transform->matrix.m[1][0];
+ mat.m[1][1] = transform->matrix.m[1][1];
+ mat.m[2][1] = transform->matrix.m[1][2];
+ mat.m[0][2] = transform->matrix.m[2][0];
+ mat.m[1][2] = transform->matrix.m[2][1];
+ mat.m[2][2] = transform->matrix.m[2][2];
+
+ for (i = 0; i < length; i++) {
+ float in_r = *src++;
+ float in_g = *src++;
+ float in_b = *src++;
+
+ float out_r = mat.m[0][0]*in_r + mat.m[1][0]*in_g + mat.m[2][0]*in_b;
+ float out_g = mat.m[0][1]*in_r + mat.m[1][1]*in_g + mat.m[2][1]*in_b;
+ float out_b = mat.m[0][2]*in_r + mat.m[1][2]*in_g + mat.m[2][2]*in_b;
+
+ *dest++ = clamp_float(out_r);
+ *dest++ = clamp_float(out_g);
+ *dest++ = clamp_float(out_b);
+ }
+}
+
+static struct qcms_modular_transform* qcms_modular_transform_alloc() {
+ return calloc(1, sizeof(struct qcms_modular_transform));
+}
+
+static void qcms_modular_transform_release(struct qcms_modular_transform *transform)
+{
+ struct qcms_modular_transform *next_transform;
+ while (transform != NULL) {
+ next_transform = transform->next_transform;
+		// The clut tables may share a single block of memory.
+		// Perhaps we should remove this to simplify the code.
+ if (transform->input_clut_table_r + transform->input_clut_table_length == transform->input_clut_table_g && transform->input_clut_table_g + transform->input_clut_table_length == transform->input_clut_table_b) {
+ if (transform->input_clut_table_r) free(transform->input_clut_table_r);
+ } else {
+ if (transform->input_clut_table_r) free(transform->input_clut_table_r);
+ if (transform->input_clut_table_g) free(transform->input_clut_table_g);
+ if (transform->input_clut_table_b) free(transform->input_clut_table_b);
+ }
+ if (transform->r_clut + 1 == transform->g_clut && transform->g_clut + 1 == transform->b_clut) {
+ if (transform->r_clut) free(transform->r_clut);
+ } else {
+ if (transform->r_clut) free(transform->r_clut);
+ if (transform->g_clut) free(transform->g_clut);
+ if (transform->b_clut) free(transform->b_clut);
+ }
+ if (transform->output_clut_table_r + transform->output_clut_table_length == transform->output_clut_table_g && transform->output_clut_table_g+ transform->output_clut_table_length == transform->output_clut_table_b) {
+ if (transform->output_clut_table_r) free(transform->output_clut_table_r);
+ } else {
+ if (transform->output_clut_table_r) free(transform->output_clut_table_r);
+ if (transform->output_clut_table_g) free(transform->output_clut_table_g);
+ if (transform->output_clut_table_b) free(transform->output_clut_table_b);
+ }
+ if (transform->output_gamma_lut_r) free(transform->output_gamma_lut_r);
+ if (transform->output_gamma_lut_g) free(transform->output_gamma_lut_g);
+ if (transform->output_gamma_lut_b) free(transform->output_gamma_lut_b);
+ free(transform);
+ transform = next_transform;
+ }
+}
+
+/* Append transform (which may itself be a list) at *next_transform and
+ * advance next_transform to the new tail's next pointer. */
+static void append_transform(struct qcms_modular_transform *transform, struct qcms_modular_transform ***next_transform)
+{
+ **next_transform = transform;
+ while (transform) {
+ *next_transform = &(transform->next_transform);
+ transform = transform->next_transform;
+ }
+}
+
+/* reverse the transformation list (used by mBA) */
+static struct qcms_modular_transform* reverse_transform(struct qcms_modular_transform *transform)
+{
+ struct qcms_modular_transform *prev_transform = NULL;
+ while (transform != NULL) {
+ struct qcms_modular_transform *next_transform = transform->next_transform;
+ transform->next_transform = prev_transform;
+ prev_transform = transform;
+ transform = next_transform;
+ }
+
+ return prev_transform;
+}
+
+#define EMPTY_TRANSFORM_LIST NULL
+static struct qcms_modular_transform* qcms_modular_transform_create_mAB(struct lutmABType *lut)
+{
+ struct qcms_modular_transform *first_transform = NULL;
+ struct qcms_modular_transform **next_transform = &first_transform;
+ struct qcms_modular_transform *transform = NULL;
+
+ if (lut->a_curves[0] != NULL) {
+ size_t clut_length;
+ float *clut;
+
+ // If the A curve is present this also implies the
+ // presence of a CLUT.
+ if (!lut->clut_table)
+ goto fail;
+
+ // Prepare A curve.
+ transform = qcms_modular_transform_alloc();
+ if (!transform)
+ goto fail;
+ append_transform(transform, &next_transform);
+ transform->input_clut_table_r = build_input_gamma_table(lut->a_curves[0]);
+ transform->input_clut_table_g = build_input_gamma_table(lut->a_curves[1]);
+ transform->input_clut_table_b = build_input_gamma_table(lut->a_curves[2]);
+ transform->transform_module_fn = qcms_transform_module_gamma_table;
+ if (lut->num_grid_points[0] != lut->num_grid_points[1] ||
+ lut->num_grid_points[1] != lut->num_grid_points[2] ) {
+			//XXX: We don't currently support CLUTs whose grid is not the same size in every dimension!
+ goto fail;
+ }
+
+ // Prepare CLUT
+ transform = qcms_modular_transform_alloc();
+ if (!transform)
+ goto fail;
+ append_transform(transform, &next_transform);
+ clut_length = sizeof(float)*pow(lut->num_grid_points[0], 3)*3;
+ clut = malloc(clut_length);
+ if (!clut)
+ goto fail;
+ memcpy(clut, lut->clut_table, clut_length);
+ transform->r_clut = clut + 0;
+ transform->g_clut = clut + 1;
+ transform->b_clut = clut + 2;
+ transform->grid_size = lut->num_grid_points[0];
+ transform->transform_module_fn = qcms_transform_module_clut_only;
+ }
+ if (lut->m_curves[0] != NULL) {
+		// An M curve implies the presence of a Matrix
+
+ // Prepare M curve
+ transform = qcms_modular_transform_alloc();
+ if (!transform)
+ goto fail;
+ append_transform(transform, &next_transform);
+ transform->input_clut_table_r = build_input_gamma_table(lut->m_curves[0]);
+ transform->input_clut_table_g = build_input_gamma_table(lut->m_curves[1]);
+ transform->input_clut_table_b = build_input_gamma_table(lut->m_curves[2]);
+ transform->transform_module_fn = qcms_transform_module_gamma_table;
+
+ // Prepare Matrix
+ transform = qcms_modular_transform_alloc();
+ if (!transform)
+ goto fail;
+ append_transform(transform, &next_transform);
+ transform->matrix = build_mAB_matrix(lut);
+ if (transform->matrix.invalid)
+ goto fail;
+ transform->tx = s15Fixed16Number_to_float(lut->e03);
+ transform->ty = s15Fixed16Number_to_float(lut->e13);
+ transform->tz = s15Fixed16Number_to_float(lut->e23);
+ transform->transform_module_fn = qcms_transform_module_matrix_translate;
+ }
+ if (lut->b_curves[0] != NULL) {
+ // Prepare B curve
+ transform = qcms_modular_transform_alloc();
+ if (!transform)
+ goto fail;
+ append_transform(transform, &next_transform);
+ transform->input_clut_table_r = build_input_gamma_table(lut->b_curves[0]);
+ transform->input_clut_table_g = build_input_gamma_table(lut->b_curves[1]);
+ transform->input_clut_table_b = build_input_gamma_table(lut->b_curves[2]);
+ transform->transform_module_fn = qcms_transform_module_gamma_table;
+ } else {
+ // B curve is mandatory
+ goto fail;
+ }
+
+ if (lut->reversed) {
+		// mBA is identical to mAB except that the order of the
+		// transformation elements is reversed
+ first_transform = reverse_transform(first_transform);
+ }
+
+ return first_transform;
+fail:
+ qcms_modular_transform_release(first_transform);
+ return NULL;
+}
+
+static struct qcms_modular_transform* qcms_modular_transform_create_lut(struct lutType *lut)
+{
+ struct qcms_modular_transform *first_transform = NULL;
+ struct qcms_modular_transform **next_transform = &first_transform;
+ struct qcms_modular_transform *transform = NULL;
+
+ size_t in_curve_len, clut_length, out_curve_len;
+ float *in_curves, *clut, *out_curves;
+
+ // Prepare Matrix
+ transform = qcms_modular_transform_alloc();
+ if (!transform)
+ goto fail;
+ append_transform(transform, &next_transform);
+ transform->matrix = build_lut_matrix(lut);
+ if (transform->matrix.invalid)
+ goto fail;
+ transform->transform_module_fn = qcms_transform_module_matrix;
+
+ // Prepare input curves
+ transform = qcms_modular_transform_alloc();
+ if (!transform)
+ goto fail;
+ append_transform(transform, &next_transform);
+ in_curve_len = sizeof(float)*lut->num_input_table_entries * 3;
+ in_curves = malloc(in_curve_len);
+ if (!in_curves)
+ goto fail;
+ memcpy(in_curves, lut->input_table, in_curve_len);
+ transform->input_clut_table_r = in_curves + lut->num_input_table_entries * 0;
+ transform->input_clut_table_g = in_curves + lut->num_input_table_entries * 1;
+ transform->input_clut_table_b = in_curves + lut->num_input_table_entries * 2;
+ transform->input_clut_table_length = lut->num_input_table_entries;
+
+ // Prepare table
+ clut_length = sizeof(float)*pow(lut->num_clut_grid_points, 3)*3;
+ clut = malloc(clut_length);
+ if (!clut)
+ goto fail;
+ memcpy(clut, lut->clut_table, clut_length);
+ transform->r_clut = clut + 0;
+ transform->g_clut = clut + 1;
+ transform->b_clut = clut + 2;
+ transform->grid_size = lut->num_clut_grid_points;
+
+ // Prepare output curves
+ out_curve_len = sizeof(float) * lut->num_output_table_entries * 3;
+ out_curves = malloc(out_curve_len);
+ if (!out_curves)
+ goto fail;
+ memcpy(out_curves, lut->output_table, out_curve_len);
+ transform->output_clut_table_r = out_curves + lut->num_output_table_entries * 0;
+ transform->output_clut_table_g = out_curves + lut->num_output_table_entries * 1;
+ transform->output_clut_table_b = out_curves + lut->num_output_table_entries * 2;
+ transform->output_clut_table_length = lut->num_output_table_entries;
+ transform->transform_module_fn = qcms_transform_module_clut;
+
+ return first_transform;
+fail:
+ qcms_modular_transform_release(first_transform);
+ return NULL;
+}
+
+struct qcms_modular_transform* qcms_modular_transform_create_input(qcms_profile *in)
+{
+ struct qcms_modular_transform *first_transform = NULL;
+ struct qcms_modular_transform **next_transform = &first_transform;
+
+ if (in->A2B0) {
+ struct qcms_modular_transform *lut_transform;
+ lut_transform = qcms_modular_transform_create_lut(in->A2B0);
+ if (!lut_transform)
+ goto fail;
+ append_transform(lut_transform, &next_transform);
+ } else if (in->mAB && in->mAB->num_in_channels == 3 && in->mAB->num_out_channels == 3) {
+ struct qcms_modular_transform *mAB_transform;
+ mAB_transform = qcms_modular_transform_create_mAB(in->mAB);
+ if (!mAB_transform)
+ goto fail;
+ append_transform(mAB_transform, &next_transform);
+
+ } else {
+ struct qcms_modular_transform *transform;
+
+ transform = qcms_modular_transform_alloc();
+ if (!transform)
+ goto fail;
+ append_transform(transform, &next_transform);
+ transform->input_clut_table_r = build_input_gamma_table(in->redTRC);
+ transform->input_clut_table_g = build_input_gamma_table(in->greenTRC);
+ transform->input_clut_table_b = build_input_gamma_table(in->blueTRC);
+ transform->transform_module_fn = qcms_transform_module_gamma_table;
+ if (!transform->input_clut_table_r || !transform->input_clut_table_g ||
+ !transform->input_clut_table_b) {
+ goto fail;
+ }
+
+ transform = qcms_modular_transform_alloc();
+ if (!transform)
+ goto fail;
+ append_transform(transform, &next_transform);
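+		// 1.999969482421875 == 65535/32768, the top of the ICC 16-bit XYZ
+		// encoding; scaling by its inverse maps PCS XYZ into the encoded
+		// [0,1] range used by the rest of the chain.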
+ transform->matrix.m[0][0] = 1/1.999969482421875f;
+ transform->matrix.m[0][1] = 0.f;
+ transform->matrix.m[0][2] = 0.f;
+ transform->matrix.m[1][0] = 0.f;
+ transform->matrix.m[1][1] = 1/1.999969482421875f;
+ transform->matrix.m[1][2] = 0.f;
+ transform->matrix.m[2][0] = 0.f;
+ transform->matrix.m[2][1] = 0.f;
+ transform->matrix.m[2][2] = 1/1.999969482421875f;
+ transform->matrix.invalid = false;
+ transform->transform_module_fn = qcms_transform_module_matrix;
+
+ transform = qcms_modular_transform_alloc();
+ if (!transform)
+ goto fail;
+ append_transform(transform, &next_transform);
+ transform->matrix = build_colorant_matrix(in);
+ transform->transform_module_fn = qcms_transform_module_matrix;
+ }
+
+ return first_transform;
+fail:
+ qcms_modular_transform_release(first_transform);
+ return EMPTY_TRANSFORM_LIST;
+}
+static struct qcms_modular_transform* qcms_modular_transform_create_output(qcms_profile *out)
+{
+ struct qcms_modular_transform *first_transform = NULL;
+ struct qcms_modular_transform **next_transform = &first_transform;
+
+ if (out->B2A0) {
+ struct qcms_modular_transform *lut_transform;
+ lut_transform = qcms_modular_transform_create_lut(out->B2A0);
+ if (!lut_transform)
+ goto fail;
+ append_transform(lut_transform, &next_transform);
+ } else if (out->mBA && out->mBA->num_in_channels == 3 && out->mBA->num_out_channels == 3) {
+ struct qcms_modular_transform *lut_transform;
+ lut_transform = qcms_modular_transform_create_mAB(out->mBA);
+ if (!lut_transform)
+ goto fail;
+ append_transform(lut_transform, &next_transform);
+ } else if (out->redTRC && out->greenTRC && out->blueTRC) {
+ struct qcms_modular_transform *transform;
+
+ transform = qcms_modular_transform_alloc();
+ if (!transform)
+ goto fail;
+ append_transform(transform, &next_transform);
+ transform->matrix = matrix_invert(build_colorant_matrix(out));
+ transform->transform_module_fn = qcms_transform_module_matrix;
+
+ transform = qcms_modular_transform_alloc();
+ if (!transform)
+ goto fail;
+ append_transform(transform, &next_transform);
+ transform->matrix.m[0][0] = 1.999969482421875f;
+ transform->matrix.m[0][1] = 0.f;
+ transform->matrix.m[0][2] = 0.f;
+ transform->matrix.m[1][0] = 0.f;
+ transform->matrix.m[1][1] = 1.999969482421875f;
+ transform->matrix.m[1][2] = 0.f;
+ transform->matrix.m[2][0] = 0.f;
+ transform->matrix.m[2][1] = 0.f;
+ transform->matrix.m[2][2] = 1.999969482421875f;
+ transform->matrix.invalid = false;
+ transform->transform_module_fn = qcms_transform_module_matrix;
+
+ transform = qcms_modular_transform_alloc();
+ if (!transform)
+ goto fail;
+ append_transform(transform, &next_transform);
+ build_output_lut(out->redTRC, &transform->output_gamma_lut_r,
+ &transform->output_gamma_lut_r_length);
+ build_output_lut(out->greenTRC, &transform->output_gamma_lut_g,
+ &transform->output_gamma_lut_g_length);
+ build_output_lut(out->blueTRC, &transform->output_gamma_lut_b,
+ &transform->output_gamma_lut_b_length);
+ transform->transform_module_fn = qcms_transform_module_gamma_lut;
+
+ if (!transform->output_gamma_lut_r || !transform->output_gamma_lut_g ||
+ !transform->output_gamma_lut_b) {
+ goto fail;
+ }
+ } else {
+ assert(0 && "Unsupported output profile workflow.");
+ return NULL;
+ }
+
+ return first_transform;
+fail:
+ qcms_modular_transform_release(first_transform);
+ return EMPTY_TRANSFORM_LIST;
+}
+
+/* Not Completed
+// Simplify the transformation chain to an equivalent transformation chain
+static struct qcms_modular_transform* qcms_modular_transform_reduce(struct qcms_modular_transform *transform)
+{
+ struct qcms_modular_transform *first_transform = NULL;
+ struct qcms_modular_transform *curr_trans = transform;
+ struct qcms_modular_transform *prev_trans = NULL;
+ while (curr_trans) {
+ struct qcms_modular_transform *next_trans = curr_trans->next_transform;
+ if (curr_trans->transform_module_fn == qcms_transform_module_matrix) {
+ if (next_trans && next_trans->transform_module_fn == qcms_transform_module_matrix) {
+ curr_trans->matrix = matrix_multiply(curr_trans->matrix, next_trans->matrix);
+ goto remove_next;
+ }
+ }
+ if (curr_trans->transform_module_fn == qcms_transform_module_gamma_table) {
+ bool isLinear = true;
+ uint16_t i;
+ for (i = 0; isLinear && i < 256; i++) {
+ isLinear &= (int)(curr_trans->input_clut_table_r[i] * 255) == i;
+ isLinear &= (int)(curr_trans->input_clut_table_g[i] * 255) == i;
+ isLinear &= (int)(curr_trans->input_clut_table_b[i] * 255) == i;
+ }
+ goto remove_current;
+ }
+
+next_transform:
+ if (!next_trans) break;
+ prev_trans = curr_trans;
+ curr_trans = next_trans;
+ continue;
+remove_current:
+ if (curr_trans == transform) {
+ //Update head
+ transform = next_trans;
+ } else {
+ prev_trans->next_transform = next_trans;
+ }
+ curr_trans->next_transform = NULL;
+ qcms_modular_transform_release(curr_trans);
+ //return transform;
+ return qcms_modular_transform_reduce(transform);
+remove_next:
+ curr_trans->next_transform = next_trans->next_transform;
+ next_trans->next_transform = NULL;
+ qcms_modular_transform_release(next_trans);
+ continue;
+ }
+ return transform;
+}
+*/
+
+static struct qcms_modular_transform* qcms_modular_transform_create(qcms_profile *in, qcms_profile *out)
+{
+ struct qcms_modular_transform *first_transform = NULL;
+ struct qcms_modular_transform **next_transform = &first_transform;
+
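+	// Build the chain: device RGB -> PCS via the input profile, an optional
+	// Lab<->XYZ conversion when the two profiles use different PCS encodings,
+	// then PCS -> device RGB via the output profile.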
+ if (in->color_space == RGB_SIGNATURE) {
+ struct qcms_modular_transform* rgb_to_pcs;
+ rgb_to_pcs = qcms_modular_transform_create_input(in);
+ if (!rgb_to_pcs)
+ goto fail;
+ append_transform(rgb_to_pcs, &next_transform);
+ } else {
+ assert(0 && "input color space not supported");
+ goto fail;
+ }
+
+ if (in->pcs == LAB_SIGNATURE && out->pcs == XYZ_SIGNATURE) {
+ struct qcms_modular_transform* lab_to_pcs;
+ lab_to_pcs = qcms_modular_transform_alloc();
+ if (!lab_to_pcs)
+ goto fail;
+ append_transform(lab_to_pcs, &next_transform);
+ lab_to_pcs->transform_module_fn = qcms_transform_module_LAB_to_XYZ;
+ }
+
+	// This does not improve accuracy in practice; something is wrong here.
+ //if (in->chromaticAdaption.invalid == false) {
+ // struct qcms_modular_transform* chromaticAdaption;
+ // chromaticAdaption = qcms_modular_transform_alloc();
+ // if (!chromaticAdaption)
+ // goto fail;
+ // append_transform(chromaticAdaption, &next_transform);
+ // chromaticAdaption->matrix = matrix_invert(in->chromaticAdaption);
+ // chromaticAdaption->transform_module_fn = qcms_transform_module_matrix;
+ //}
+
+ if (in->pcs == XYZ_SIGNATURE && out->pcs == LAB_SIGNATURE) {
+ struct qcms_modular_transform* pcs_to_lab;
+ pcs_to_lab = qcms_modular_transform_alloc();
+ if (!pcs_to_lab)
+ goto fail;
+ append_transform(pcs_to_lab, &next_transform);
+ pcs_to_lab->transform_module_fn = qcms_transform_module_XYZ_to_LAB;
+ }
+
+ if (out->color_space == RGB_SIGNATURE) {
+ struct qcms_modular_transform* pcs_to_rgb;
+ pcs_to_rgb = qcms_modular_transform_create_output(out);
+ if (!pcs_to_rgb)
+ goto fail;
+ append_transform(pcs_to_rgb, &next_transform);
+ } else {
+ assert(0 && "output color space not supported");
+ goto fail;
+ }
+ // Not Completed
+ //return qcms_modular_transform_reduce(first_transform);
+ return first_transform;
+fail:
+ qcms_modular_transform_release(first_transform);
+ return EMPTY_TRANSFORM_LIST;
+}
+
+static float* qcms_modular_transform_data(struct qcms_modular_transform *transform, float *src, float *dest, size_t len)
+{
+ while (transform != NULL) {
+		// Keep swapping src/dest when performing a transform to use less memory.
+ float *new_src = dest;
+ const transform_module_fn_t transform_fn = transform->transform_module_fn;
+ if (transform_fn != qcms_transform_module_gamma_table &&
+ transform_fn != qcms_transform_module_gamma_lut &&
+ transform_fn != qcms_transform_module_clut &&
+ transform_fn != qcms_transform_module_clut_only &&
+ transform_fn != qcms_transform_module_matrix &&
+ transform_fn != qcms_transform_module_matrix_translate &&
+ transform_fn != qcms_transform_module_LAB_to_XYZ &&
+ transform_fn != qcms_transform_module_XYZ_to_LAB) {
+ assert(0 && "Unsupported transform module");
+ return NULL;
+ }
+ transform->transform_module_fn(transform,src,dest,len);
+ dest = src;
+ src = new_src;
+ transform = transform->next_transform;
+ }
+ // The results end up in the src buffer because of the switching
+ return src;
+}
+
+float* qcms_chain_transform(qcms_profile *in, qcms_profile *out, float *src, float *dest, size_t lutSize)
+{
+ struct qcms_modular_transform *transform_list = qcms_modular_transform_create(in, out);
+ if (transform_list != NULL) {
+ float *lut = qcms_modular_transform_data(transform_list, src, dest, lutSize/3);
+ qcms_modular_transform_release(transform_list);
+ return lut;
+ }
+ return NULL;
+}
diff --git a/gfx/qcms/chain.h b/gfx/qcms/chain.h
new file mode 100644
index 000000000..bdc6c8872
--- /dev/null
+++ b/gfx/qcms/chain.h
@@ -0,0 +1,30 @@
+/* vim: set ts=8 sw=8 noexpandtab: */
+// qcms
+// Copyright (C) 2009 Mozilla Foundation
+// Copyright (C) 1998-2007 Marti Maria
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+#ifndef _QCMS_CHAIN_H
+#define _QCMS_CHAIN_H
+
+// Generates and returns a 3D LUT with lutSize^3 samples using the provided src/dest.
+float* qcms_chain_transform(qcms_profile *in, qcms_profile *out, float *src, float *dest, size_t lutSize);
+
+#endif
diff --git a/gfx/qcms/iccread.c b/gfx/qcms/iccread.c
new file mode 100644
index 000000000..c3221d3cc
--- /dev/null
+++ b/gfx/qcms/iccread.c
@@ -0,0 +1,1404 @@
+/* vim: set ts=8 sw=8 noexpandtab: */
+// qcms
+// Copyright (C) 2009 Mozilla Foundation
+// Copyright (C) 1998-2007 Marti Maria
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+#include <math.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h> //memset
+#include "qcmsint.h"
+
+/* It might be worth having a unified limit on content-controlled
+ * allocation per profile. This would remove the need for many
+ * of the arbitrary limits that we use */
+
+typedef uint32_t be32;
+typedef uint16_t be16;
+
+static be32 cpu_to_be32(uint32_t v)
+{
+#ifdef IS_LITTLE_ENDIAN
+ return ((v & 0xff) << 24) | ((v & 0xff00) << 8) | ((v & 0xff0000) >> 8) | ((v & 0xff000000) >> 24);
+#else
+ return v;
+#endif
+}
+
+static be16 cpu_to_be16(uint16_t v)
+{
+#ifdef IS_LITTLE_ENDIAN
+ return ((v & 0xff) << 8) | ((v & 0xff00) >> 8);
+#else
+ return v;
+#endif
+}
+
+static uint32_t be32_to_cpu(be32 v)
+{
+#ifdef IS_LITTLE_ENDIAN
+ return ((v & 0xff) << 24) | ((v & 0xff00) << 8) | ((v & 0xff0000) >> 8) | ((v & 0xff000000) >> 24);
+ //return __builtin_bswap32(v);
+#else
+ return v;
+#endif
+}
+
+static uint16_t be16_to_cpu(be16 v)
+{
+#ifdef IS_LITTLE_ENDIAN
+ return ((v & 0xff) << 8) | ((v & 0xff00) >> 8);
+#else
+ return v;
+#endif
+}
+
+/* a wrapper around the memory that we are going to parse
+ * into a qcms_profile */
+struct mem_source
+{
+ const unsigned char *buf;
+ size_t size;
+ qcms_bool valid;
+ const char *invalid_reason;
+};
+
+static void invalid_source(struct mem_source *mem, const char *reason)
+{
+ mem->valid = false;
+ mem->invalid_reason = reason;
+}
+
+static uint32_t read_u32(struct mem_source *mem, size_t offset)
+{
+ /* Subtract from mem->size instead of the more intuitive adding to offset.
+ * This avoids overflowing offset. The subtraction is safe because
+ * mem->size is guaranteed to be > 4 */
+ if (offset > mem->size - 4) {
+ invalid_source(mem, "Invalid offset");
+ return 0;
+ } else {
+ be32 k;
+ memcpy(&k, mem->buf + offset, sizeof(k));
+ return be32_to_cpu(k);
+ }
+}
+
+static uint16_t read_u16(struct mem_source *mem, size_t offset)
+{
+ if (offset > mem->size - 2) {
+ invalid_source(mem, "Invalid offset");
+ return 0;
+ } else {
+ be16 k;
+ memcpy(&k, mem->buf + offset, sizeof(k));
+ return be16_to_cpu(k);
+ }
+}
+
+static uint8_t read_u8(struct mem_source *mem, size_t offset)
+{
+ if (offset > mem->size - 1) {
+ invalid_source(mem, "Invalid offset");
+ return 0;
+ } else {
+ return *(uint8_t*)(mem->buf + offset);
+ }
+}
+
+static s15Fixed16Number read_s15Fixed16Number(struct mem_source *mem, size_t offset)
+{
+ return read_u32(mem, offset);
+}
+
+static uInt8Number read_uInt8Number(struct mem_source *mem, size_t offset)
+{
+ return read_u8(mem, offset);
+}
+
+static uInt16Number read_uInt16Number(struct mem_source *mem, size_t offset)
+{
+ return read_u16(mem, offset);
+}
+
+static void write_u32(void *mem, size_t offset, uint32_t value)
+{
+ *((uint32_t *)((unsigned char*)mem + offset)) = cpu_to_be32(value);
+}
+
+static void write_u16(void *mem, size_t offset, uint16_t value)
+{
+ *((uint16_t *)((unsigned char*)mem + offset)) = cpu_to_be16(value);
+}
+
+#define BAD_VALUE_PROFILE NULL
+#define INVALID_PROFILE NULL
+#define NO_MEM_PROFILE NULL
+
+/* An arbitrary 4MB limit on profile size */
+#define MAX_PROFILE_SIZE 1024*1024*4
+#define MAX_TAG_COUNT 1024
+
+static void check_CMM_type_signature(struct mem_source *src)
+{
+ //uint32_t CMM_type_signature = read_u32(src, 4);
+ //TODO: do the check?
+
+}
+
+static void check_profile_version(struct mem_source *src)
+{
+
+ /*
+ uint8_t major_revision = read_u8(src, 8 + 0);
+ uint8_t minor_revision = read_u8(src, 8 + 1);
+ */
+ uint8_t reserved1 = read_u8(src, 8 + 2);
+ uint8_t reserved2 = read_u8(src, 8 + 3);
+ /* Checking the version doesn't buy us anything
+ if (major_revision != 0x4) {
+ if (major_revision > 0x2)
+ invalid_source(src, "Unsupported major revision");
+ if (minor_revision > 0x40)
+ invalid_source(src, "Unsupported minor revision");
+ }
+ */
+ if (reserved1 != 0 || reserved2 != 0)
+ invalid_source(src, "Invalid reserved bytes");
+}
+
+#define INPUT_DEVICE_PROFILE 0x73636e72 // 'scnr'
+#define DISPLAY_DEVICE_PROFILE 0x6d6e7472 // 'mntr'
+#define OUTPUT_DEVICE_PROFILE 0x70727472 // 'prtr'
+#define DEVICE_LINK_PROFILE 0x6c696e6b // 'link'
+#define COLOR_SPACE_PROFILE 0x73706163 // 'spac'
+#define ABSTRACT_PROFILE 0x61627374 // 'abst'
+#define NAMED_COLOR_PROFILE 0x6e6d636c // 'nmcl'
+
+static void read_class_signature(qcms_profile *profile, struct mem_source *mem)
+{
+ profile->class = read_u32(mem, 12);
+ switch (profile->class) {
+ case DISPLAY_DEVICE_PROFILE:
+ case INPUT_DEVICE_PROFILE:
+ case OUTPUT_DEVICE_PROFILE:
+ case COLOR_SPACE_PROFILE:
+ break;
+ default:
+ invalid_source(mem, "Invalid Profile/Device Class signature");
+ }
+}
+
+static void read_color_space(qcms_profile *profile, struct mem_source *mem)
+{
+ profile->color_space = read_u32(mem, 16);
+ switch (profile->color_space) {
+ case RGB_SIGNATURE:
+ case GRAY_SIGNATURE:
+ break;
+ default:
+ invalid_source(mem, "Unsupported colorspace");
+ }
+}
+
+static void read_pcs(qcms_profile *profile, struct mem_source *mem)
+{
+ profile->pcs = read_u32(mem, 20);
+ switch (profile->pcs) {
+ case XYZ_SIGNATURE:
+ case LAB_SIGNATURE:
+ break;
+ default:
+ invalid_source(mem, "Unsupported pcs");
+ }
+}
+
+struct tag
+{
+ uint32_t signature;
+ uint32_t offset;
+ uint32_t size;
+};
+
+struct tag_index {
+ uint32_t count;
+ struct tag *tags;
+};
+
+static struct tag_index read_tag_table(qcms_profile *profile, struct mem_source *mem)
+{
+ struct tag_index index = {0, NULL};
+ unsigned int i;
+
+ index.count = read_u32(mem, 128);
+ if (index.count > MAX_TAG_COUNT) {
+ invalid_source(mem, "max number of tags exceeded");
+ return index;
+ }
+
+ index.tags = malloc(sizeof(struct tag)*index.count);
+ if (index.tags) {
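+		// The tag table starts after the 128-byte header with a 4-byte count;
+		// each entry is 12 bytes: signature, offset and size.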
+ for (i = 0; i < index.count; i++) {
+ index.tags[i].signature = read_u32(mem, 128 + 4 + 4*i*3);
+ index.tags[i].offset = read_u32(mem, 128 + 4 + 4*i*3 + 4);
+ index.tags[i].size = read_u32(mem, 128 + 4 + 4*i*3 + 8);
+ }
+ }
+
+ return index;
+}
+
+// Checks a profile for obvious inconsistencies and returns
+// true if the profile looks bogus and should probably be
+// ignored.
+qcms_bool qcms_profile_is_bogus(qcms_profile *profile)
+{
+ float sum[3], target[3], tolerance[3];
+ float rX, rY, rZ, gX, gY, gZ, bX, bY, bZ;
+ bool negative;
+ unsigned i;
+
+ // We currently only check the bogosity of RGB profiles
+ if (profile->color_space != RGB_SIGNATURE)
+ return false;
+
+ if (profile->A2B0 || profile->B2A0)
+ return false;
+
+ rX = s15Fixed16Number_to_float(profile->redColorant.X);
+ rY = s15Fixed16Number_to_float(profile->redColorant.Y);
+ rZ = s15Fixed16Number_to_float(profile->redColorant.Z);
+
+ gX = s15Fixed16Number_to_float(profile->greenColorant.X);
+ gY = s15Fixed16Number_to_float(profile->greenColorant.Y);
+ gZ = s15Fixed16Number_to_float(profile->greenColorant.Z);
+
+ bX = s15Fixed16Number_to_float(profile->blueColorant.X);
+ bY = s15Fixed16Number_to_float(profile->blueColorant.Y);
+ bZ = s15Fixed16Number_to_float(profile->blueColorant.Z);
+
+ // Check if any of the XYZ values are negative (see mozilla bug 498245)
+ // CIEXYZ tristimulus values cannot be negative according to the spec.
+ negative =
+ (rX < 0) || (rY < 0) || (rZ < 0) ||
+ (gX < 0) || (gY < 0) || (gZ < 0) ||
+ (bX < 0) || (bY < 0) || (bZ < 0);
+
+ if (negative)
+ return true;
+
+
+ // Sum the values; they should add up to something close to white
+ sum[0] = rX + gX + bX;
+ sum[1] = rY + gY + bY;
+ sum[2] = rZ + gZ + bZ;
+
+ // Build our target vector (see mozilla bug 460629)
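+	// These are the XYZ coordinates of the D50 PCS white point.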
+ target[0] = 0.96420f;
+ target[1] = 1.00000f;
+ target[2] = 0.82491f;
+
+ // Our tolerance vector - Recommended by Chris Murphy based on
+ // conversion from the LAB space criterion of no more than 3 in any one
+ // channel. This is similar to, but slightly more tolerant than Adobe's
+ // criterion.
+ tolerance[0] = 0.02f;
+ tolerance[1] = 0.02f;
+ tolerance[2] = 0.04f;
+
+ // Compare with our tolerance
+ for (i = 0; i < 3; ++i) {
+ if (!(((sum[i] - tolerance[i]) <= target[i]) &&
+ ((sum[i] + tolerance[i]) >= target[i])))
+ return true;
+ }
+
+ // All Good
+ return false;
+}
+
+#define TAG_bXYZ 0x6258595a
+#define TAG_gXYZ 0x6758595a
+#define TAG_rXYZ 0x7258595a
+#define TAG_rTRC 0x72545243
+#define TAG_bTRC 0x62545243
+#define TAG_gTRC 0x67545243
+#define TAG_kTRC 0x6b545243
+#define TAG_A2B0 0x41324230
+#define TAG_B2A0 0x42324130
+#define TAG_CHAD 0x63686164
+
+static struct tag *find_tag(struct tag_index index, uint32_t tag_id)
+{
+ unsigned int i;
+ struct tag *tag = NULL;
+ for (i = 0; i < index.count; i++) {
+ if (index.tags[i].signature == tag_id) {
+ return &index.tags[i];
+ }
+ }
+ return tag;
+}
+
+#define XYZ_TYPE 0x58595a20 // 'XYZ '
+#define CURVE_TYPE 0x63757276 // 'curv'
+#define PARAMETRIC_CURVE_TYPE 0x70617261 // 'para'
+#define LUT16_TYPE 0x6d667432 // 'mft2'
+#define LUT8_TYPE 0x6d667431 // 'mft1'
+#define LUT_MAB_TYPE 0x6d414220 // 'mAB '
+#define LUT_MBA_TYPE 0x6d424120 // 'mBA '
+#define CHROMATIC_TYPE 0x73663332 // 'sf32'
+
+static struct matrix read_tag_s15Fixed16ArrayType(struct mem_source *src, struct tag_index index, uint32_t tag_id)
+{
+ struct tag *tag = find_tag(index, tag_id);
+ struct matrix matrix;
+ if (tag) {
+ uint8_t i;
+ uint32_t offset = tag->offset;
+ uint32_t type = read_u32(src, offset);
+
+ // Check mandatory type signature for s16Fixed16ArrayType
+ if (type != CHROMATIC_TYPE) {
+ invalid_source(src, "unexpected type, expected 'sf32'");
+ }
+
+ for (i = 0; i < 9; i++) {
+ matrix.m[i/3][i%3] = s15Fixed16Number_to_float(read_s15Fixed16Number(src, offset+8+i*4));
+ }
+ matrix.invalid = false;
+ } else {
+ matrix.invalid = true;
+ invalid_source(src, "missing sf32tag");
+ }
+ return matrix;
+}
+
+static struct XYZNumber read_tag_XYZType(struct mem_source *src, struct tag_index index, uint32_t tag_id)
+{
+ struct XYZNumber num = {0, 0, 0};
+ struct tag *tag = find_tag(index, tag_id);
+ if (tag) {
+ uint32_t offset = tag->offset;
+
+ uint32_t type = read_u32(src, offset);
+ if (type != XYZ_TYPE)
+ invalid_source(src, "unexpected type, expected XYZ");
+ num.X = read_s15Fixed16Number(src, offset+8);
+ num.Y = read_s15Fixed16Number(src, offset+12);
+ num.Z = read_s15Fixed16Number(src, offset+16);
+ } else {
+ invalid_source(src, "missing xyztag");
+ }
+ return num;
+}
+
+// Read the tag at a given offset rather than through the tag_index.
+// This method is used when reading mAB tags, where nested curveType elements
+// are present that are not part of the tag_index.
+static struct curveType *read_curveType(struct mem_source *src, uint32_t offset, uint32_t *len)
+{
+ static const uint32_t COUNT_TO_LENGTH[5] = {1, 3, 4, 5, 7};
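+	// ICC parametricCurveType function types 0..4 carry 1, 3, 4, 5 and 7
+	// parameters respectively, hence the lookup table above.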
+ struct curveType *curve = NULL;
+ uint32_t type = read_u32(src, offset);
+ uint32_t count;
+ uint32_t i;
+
+ if (type != CURVE_TYPE && type != PARAMETRIC_CURVE_TYPE) {
+ invalid_source(src, "unexpected type, expected CURV or PARA");
+ return NULL;
+ }
+
+ if (type == CURVE_TYPE) {
+ count = read_u32(src, offset+8);
+
+#define MAX_CURVE_ENTRIES 40000 //arbitrary
+ if (count > MAX_CURVE_ENTRIES) {
+ invalid_source(src, "curve size too large");
+ return NULL;
+ }
+ curve = malloc(sizeof(struct curveType) + sizeof(uInt16Number)*count);
+ if (!curve)
+ return NULL;
+
+ curve->count = count;
+ curve->type = CURVE_TYPE;
+
+ for (i=0; i<count; i++) {
+ curve->data[i] = read_u16(src, offset + 12 + i*2);
+ }
+ *len = 12 + count * 2;
+ } else { //PARAMETRIC_CURVE_TYPE
+ count = read_u16(src, offset+8);
+
+ if (count > 4) {
+ invalid_source(src, "parametric function type not supported.");
+ return NULL;
+ }
+
+ curve = malloc(sizeof(struct curveType));
+ if (!curve)
+ return NULL;
+
+ curve->count = count;
+ curve->type = PARAMETRIC_CURVE_TYPE;
+
+ for (i=0; i < COUNT_TO_LENGTH[count]; i++) {
+ curve->parameter[i] = s15Fixed16Number_to_float(read_s15Fixed16Number(src, offset + 12 + i*4));
+ }
+ *len = 12 + COUNT_TO_LENGTH[count] * 4;
+
+ if ((count == 1 || count == 2)) {
+ /* we have a type 1 or type 2 function that has a division by 'a' */
+ float a = curve->parameter[1];
+ if (a == 0.f)
+ invalid_source(src, "parametricCurve definition causes division by zero.");
+ }
+ }
+
+ return curve;
+}
+
+static struct curveType *read_tag_curveType(struct mem_source *src, struct tag_index index, uint32_t tag_id)
+{
+ struct tag *tag = find_tag(index, tag_id);
+ struct curveType *curve = NULL;
+ if (tag) {
+ uint32_t len;
+ return read_curveType(src, tag->offset, &len);
+ } else {
+ invalid_source(src, "missing curvetag");
+ }
+
+ return curve;
+}
+
+#define MAX_CLUT_SIZE 500000 // arbitrary
+#define MAX_CHANNELS 10 // arbitrary
+static void read_nested_curveType(struct mem_source *src, struct curveType *(*curveArray)[MAX_CHANNELS], uint8_t num_channels, uint32_t curve_offset)
+{
+ uint32_t channel_offset = 0;
+ int i;
+ for (i = 0; i < num_channels; i++) {
+ uint32_t tag_len;
+
+ (*curveArray)[i] = read_curveType(src, curve_offset + channel_offset, &tag_len);
+ if (!(*curveArray)[i]) {
+ invalid_source(src, "invalid nested curveType curve");
+ }
+
+ channel_offset += tag_len;
+ // 4 byte aligned
+ if ((tag_len % 4) != 0)
+ channel_offset += 4 - (tag_len % 4);
+ }
+
+}
+
+static void mAB_release(struct lutmABType *lut)
+{
+ uint8_t i;
+
+ for (i = 0; i < lut->num_in_channels; i++){
+ free(lut->a_curves[i]);
+ }
+ for (i = 0; i < lut->num_out_channels; i++){
+ free(lut->b_curves[i]);
+ free(lut->m_curves[i]);
+ }
+ free(lut);
+}
+
+/* See section 10.10 for specs */
+static struct lutmABType *read_tag_lutmABType(struct mem_source *src, struct tag_index index, uint32_t tag_id)
+{
+ struct tag *tag = find_tag(index, tag_id);
+ uint32_t offset = tag->offset;
+ uint32_t a_curve_offset, b_curve_offset, m_curve_offset;
+ uint32_t matrix_offset;
+ uint32_t clut_offset;
+ uint32_t clut_size = 1;
+ uint8_t clut_precision;
+ uint32_t type = read_u32(src, offset);
+ uint8_t num_in_channels, num_out_channels;
+ struct lutmABType *lut;
+ uint32_t i;
+
+ if (type != LUT_MAB_TYPE && type != LUT_MBA_TYPE) {
+ return NULL;
+ }
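+	// lutAToBType elements are processed in the order A curves -> CLUT ->
+	// M curves -> matrix -> B curves; lutBToAType stores the same layout but
+	// is applied in reverse (see lut->reversed below).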
+
+ num_in_channels = read_u8(src, offset + 8);
+ num_out_channels = read_u8(src, offset + 9);
+ if (num_in_channels > MAX_CHANNELS || num_out_channels > MAX_CHANNELS)
+ return NULL;
+
+	// We require 3 in/out channels since we only support RGB->XYZ (or RGB->LAB)
+	// XXX: If we remove this restriction make sure that the number of channels
+	//      is less than or equal to the maximum number of mAB curves in qcmsint.h;
+	//      also check for clut_size overflow. Also make sure it's != 0
+ if (num_in_channels != 3 || num_out_channels != 3)
+ return NULL;
+
+ // some of this data is optional and is denoted by a zero offset
+	// we also use this to track their existence
+ a_curve_offset = read_u32(src, offset + 28);
+ clut_offset = read_u32(src, offset + 24);
+ m_curve_offset = read_u32(src, offset + 20);
+ matrix_offset = read_u32(src, offset + 16);
+ b_curve_offset = read_u32(src, offset + 12);
+
+ // Convert offsets relative to the tag to relative to the profile
+ // preserve zero for optional fields
+ if (a_curve_offset)
+ a_curve_offset += offset;
+ if (clut_offset)
+ clut_offset += offset;
+ if (m_curve_offset)
+ m_curve_offset += offset;
+ if (matrix_offset)
+ matrix_offset += offset;
+ if (b_curve_offset)
+ b_curve_offset += offset;
+
+ if (clut_offset) {
+ assert (num_in_channels == 3);
+ // clut_size can not overflow since lg(256^num_in_channels) = 24 bits.
+ for (i = 0; i < num_in_channels; i++) {
+ clut_size *= read_u8(src, clut_offset + i);
+ if (clut_size == 0) {
+ invalid_source(src, "bad clut_size");
+ }
+ }
+ } else {
+ clut_size = 0;
+ }
+
+ // 24bits * 3 won't overflow either
+ clut_size = clut_size * num_out_channels;
+
+ if (clut_size > MAX_CLUT_SIZE)
+ return NULL;
+
+ lut = malloc(sizeof(struct lutmABType) + (clut_size) * sizeof(float));
+ if (!lut)
+ return NULL;
+ // we'll fill in the rest below
+ memset(lut, 0, sizeof(struct lutmABType));
+ lut->clut_table = &lut->clut_table_data[0];
+
+ if (clut_offset) {
+ for (i = 0; i < num_in_channels; i++) {
+ lut->num_grid_points[i] = read_u8(src, clut_offset + i);
+ if (lut->num_grid_points[i] == 0) {
+ invalid_source(src, "bad grid_points");
+ }
+ }
+ }
+
+ // Reverse the processing of transformation elements for mBA type.
+ lut->reversed = (type == LUT_MBA_TYPE);
+
+ lut->num_in_channels = num_in_channels;
+ lut->num_out_channels = num_out_channels;
+
+ if (matrix_offset) {
+ // read the matrix if we have it
+ lut->e00 = read_s15Fixed16Number(src, matrix_offset+4*0);
+ lut->e01 = read_s15Fixed16Number(src, matrix_offset+4*1);
+ lut->e02 = read_s15Fixed16Number(src, matrix_offset+4*2);
+ lut->e10 = read_s15Fixed16Number(src, matrix_offset+4*3);
+ lut->e11 = read_s15Fixed16Number(src, matrix_offset+4*4);
+ lut->e12 = read_s15Fixed16Number(src, matrix_offset+4*5);
+ lut->e20 = read_s15Fixed16Number(src, matrix_offset+4*6);
+ lut->e21 = read_s15Fixed16Number(src, matrix_offset+4*7);
+ lut->e22 = read_s15Fixed16Number(src, matrix_offset+4*8);
+ lut->e03 = read_s15Fixed16Number(src, matrix_offset+4*9);
+ lut->e13 = read_s15Fixed16Number(src, matrix_offset+4*10);
+ lut->e23 = read_s15Fixed16Number(src, matrix_offset+4*11);
+ }
+
+ if (a_curve_offset) {
+ read_nested_curveType(src, &lut->a_curves, num_in_channels, a_curve_offset);
+ }
+ if (m_curve_offset) {
+ read_nested_curveType(src, &lut->m_curves, num_out_channels, m_curve_offset);
+ }
+ if (b_curve_offset) {
+ read_nested_curveType(src, &lut->b_curves, num_out_channels, b_curve_offset);
+ } else {
+ invalid_source(src, "B curves required");
+ }
+
+ if (clut_offset) {
+ clut_precision = read_u8(src, clut_offset + 16);
+ if (clut_precision == 1) {
+ for (i = 0; i < clut_size; i++) {
+ lut->clut_table[i] = uInt8Number_to_float(read_uInt8Number(src, clut_offset + 20 + i*1));
+ }
+ } else if (clut_precision == 2) {
+ for (i = 0; i < clut_size; i++) {
+ lut->clut_table[i] = uInt16Number_to_float(read_uInt16Number(src, clut_offset + 20 + i*2));
+ }
+ } else {
+ invalid_source(src, "Invalid clut precision");
+ }
+ }
+
+ if (!src->valid) {
+ mAB_release(lut);
+ return NULL;
+ }
+
+ return lut;
+}
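+/* Sizing example for the CLUT above (illustrative only): a 3-input mAB tag
+ * with 17 grid points per channel yields 17*17*17 = 4913 grid entries, or
+ * 4913 * 3 = 14739 floats once multiplied by the output channels, well under
+ * MAX_CLUT_SIZE. */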
+
+static struct lutType *read_tag_lutType(struct mem_source *src, struct tag_index index, uint32_t tag_id)
+{
+ struct tag *tag = find_tag(index, tag_id);
+ uint32_t offset = tag->offset;
+ uint32_t type = read_u32(src, offset);
+ uint16_t num_input_table_entries;
+ uint16_t num_output_table_entries;
+ uint8_t in_chan, grid_points, out_chan;
+ uint32_t clut_offset, output_offset;
+ uint32_t clut_size;
+ size_t entry_size;
+ struct lutType *lut;
+ uint32_t i;
+
+ /* I'm not sure why the spec specifies a fixed number of entries for LUT8 tables even though
+ * they have room for the num_entries fields */
+ if (type == LUT8_TYPE) {
+ num_input_table_entries = 256;
+ num_output_table_entries = 256;
+ entry_size = 1;
+ } else if (type == LUT16_TYPE) {
+ num_input_table_entries = read_u16(src, offset + 48);
+ num_output_table_entries = read_u16(src, offset + 50);
+ if (num_input_table_entries == 0 || num_output_table_entries == 0) {
+ invalid_source(src, "Bad channel count");
+ return NULL;
+ }
+ entry_size = 2;
+ } else {
+ assert(0); // the caller checks that this doesn't happen
+ invalid_source(src, "Unexpected lut type");
+ return NULL;
+ }
+
+ in_chan = read_u8(src, offset + 8);
+ out_chan = read_u8(src, offset + 9);
+ grid_points = read_u8(src, offset + 10);
+
+ clut_size = pow(grid_points, in_chan);
+ if (clut_size > MAX_CLUT_SIZE) {
+ invalid_source(src, "CLUT too large");
+ return NULL;
+ }
+
+	if (clut_size == 0) {
+ invalid_source(src, "CLUT must not be empty.");
+ return NULL;
+ }
+
+ if (in_chan != 3 || out_chan != 3) {
+ invalid_source(src, "CLUT only supports RGB");
+ return NULL;
+ }
+
+ lut = malloc(sizeof(struct lutType) + (num_input_table_entries * in_chan + clut_size*out_chan + num_output_table_entries * out_chan)*sizeof(float));
+ if (!lut) {
+ invalid_source(src, "CLUT too large");
+ return NULL;
+ }
+
+ /* compute the offsets of tables */
+ lut->input_table = &lut->table_data[0];
+ lut->clut_table = &lut->table_data[in_chan*num_input_table_entries];
+ lut->output_table = &lut->table_data[in_chan*num_input_table_entries + clut_size*out_chan];
+
+ lut->num_input_table_entries = num_input_table_entries;
+ lut->num_output_table_entries = num_output_table_entries;
+ lut->num_input_channels = in_chan;
+ lut->num_output_channels = out_chan;
+ lut->num_clut_grid_points = grid_points;
+ lut->e00 = read_s15Fixed16Number(src, offset+12);
+ lut->e01 = read_s15Fixed16Number(src, offset+16);
+ lut->e02 = read_s15Fixed16Number(src, offset+20);
+ lut->e10 = read_s15Fixed16Number(src, offset+24);
+ lut->e11 = read_s15Fixed16Number(src, offset+28);
+ lut->e12 = read_s15Fixed16Number(src, offset+32);
+ lut->e20 = read_s15Fixed16Number(src, offset+36);
+ lut->e21 = read_s15Fixed16Number(src, offset+40);
+ lut->e22 = read_s15Fixed16Number(src, offset+44);
+
+ for (i = 0; i < (uint32_t)(lut->num_input_table_entries * in_chan); i++) {
+ if (type == LUT8_TYPE) {
+ lut->input_table[i] = uInt8Number_to_float(read_uInt8Number(src, offset + 52 + i * entry_size));
+ } else {
+ lut->input_table[i] = uInt16Number_to_float(read_uInt16Number(src, offset + 52 + i * entry_size));
+ }
+ }
+
+ clut_offset = offset + 52 + lut->num_input_table_entries * in_chan * entry_size;
+ for (i = 0; i < clut_size * out_chan; i+=3) {
+ if (type == LUT8_TYPE) {
+ lut->clut_table[i+0] = uInt8Number_to_float(read_uInt8Number(src, clut_offset + i*entry_size + 0));
+ lut->clut_table[i+1] = uInt8Number_to_float(read_uInt8Number(src, clut_offset + i*entry_size + 1));
+ lut->clut_table[i+2] = uInt8Number_to_float(read_uInt8Number(src, clut_offset + i*entry_size + 2));
+ } else {
+ lut->clut_table[i+0] = uInt16Number_to_float(read_uInt16Number(src, clut_offset + i*entry_size + 0));
+ lut->clut_table[i+1] = uInt16Number_to_float(read_uInt16Number(src, clut_offset + i*entry_size + 2));
+ lut->clut_table[i+2] = uInt16Number_to_float(read_uInt16Number(src, clut_offset + i*entry_size + 4));
+ }
+ }
+
+ output_offset = clut_offset + clut_size * out_chan * entry_size;
+ for (i = 0; i < (uint32_t)(lut->num_output_table_entries * out_chan); i++) {
+ if (type == LUT8_TYPE) {
+ lut->output_table[i] = uInt8Number_to_float(read_uInt8Number(src, output_offset + i*entry_size));
+ } else {
+ lut->output_table[i] = uInt16Number_to_float(read_uInt16Number(src, output_offset + i*entry_size));
+ }
+ }
+
+ return lut;
+}
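+/* Offset example for a lut16Type tag (illustrative only): with 256 input
+ * table entries, 3 channels and 2-byte entries, the input tables occupy
+ * 256 * 3 * 2 = 1536 bytes, so clut_offset = offset + 52 + 1536. With 17 grid
+ * points the CLUT then holds 17*17*17 * 3 = 14739 entries (29478 bytes) and
+ * the output tables follow immediately after it. */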
+
+static void read_rendering_intent(qcms_profile *profile, struct mem_source *src)
+{
+ profile->rendering_intent = read_u32(src, 64);
+ switch (profile->rendering_intent) {
+ case QCMS_INTENT_PERCEPTUAL:
+ case QCMS_INTENT_SATURATION:
+ case QCMS_INTENT_RELATIVE_COLORIMETRIC:
+ case QCMS_INTENT_ABSOLUTE_COLORIMETRIC:
+ break;
+ default:
+ invalid_source(src, "unknown rendering intent");
+ }
+}
+
+qcms_profile *qcms_profile_create(void)
+{
+	return calloc(1, sizeof(qcms_profile));
+}
+
+
+
+/* build sRGB gamma table */
+/* based on cmsBuildParametricGamma() */
+static uint16_t *build_sRGB_gamma_table(int num_entries)
+{
+ int i;
+ /* taken from lcms: Build_sRGBGamma() */
+ double gamma = 2.4;
+ double a = 1./1.055;
+ double b = 0.055/1.055;
+ double c = 1./12.92;
+ double d = 0.04045;
+
+ uint16_t *table = malloc(sizeof(uint16_t) * num_entries);
+ if (!table)
+ return NULL;
+
+ for (i=0; i<num_entries; i++) {
+ double x = (double)i / (num_entries-1);
+ double y, output;
+ // IEC 61966-2.1 (sRGB)
+ // Y = (aX + b)^Gamma | X >= d
+ // Y = cX | X < d
+ if (x >= d) {
+ double e = (a*x + b);
+ if (e > 0)
+ y = pow(e, gamma);
+ else
+ y = 0;
+ } else {
+ y = c*x;
+ }
+
+ // Saturate -- this could likely move to a separate function
+ output = y * 65535. + .5;
+ if (output > 65535.)
+ output = 65535;
+ if (output < 0)
+ output = 0;
+ table[i] = (uint16_t)floor(output);
+ }
+ return table;
+}
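+/* Spot check of the curve above (illustrative only): x = 0.5 lies above d,
+ * so y = ((0.5 + 0.055)/1.055)^2.4 which is roughly 0.214; x = 0.02 lies
+ * below d and maps linearly to 0.02/12.92, roughly 0.00155. Both are then
+ * scaled to 16 bits. */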
+
+static struct curveType *curve_from_table(uint16_t *table, int num_entries)
+{
+ struct curveType *curve;
+ int i;
+ curve = malloc(sizeof(struct curveType) + sizeof(uInt16Number)*num_entries);
+ if (!curve)
+ return NULL;
+ curve->type = CURVE_TYPE;
+ curve->count = num_entries;
+ for (i = 0; i < num_entries; i++) {
+ curve->data[i] = table[i];
+ }
+ return curve;
+}
+
+static uint16_t float_to_u8Fixed8Number(float a)
+{
+ if (a > (255.f + 255.f/256))
+ return 0xffff;
+ else if (a < 0.f)
+ return 0;
+ else
+ return floorf(a*256.f + .5f);
+}
+
+static struct curveType *curve_from_gamma(float gamma)
+{
+ struct curveType *curve;
+ int num_entries = 1;
+ curve = malloc(sizeof(struct curveType) + sizeof(uInt16Number)*num_entries);
+ if (!curve)
+ return NULL;
+ curve->count = num_entries;
+ curve->data[0] = float_to_u8Fixed8Number(gamma);
+ curve->type = CURVE_TYPE;
+ return curve;
+}
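+/* Encoding example (illustrative only): curve_from_gamma(2.2f) stores
+ * floor(2.2 * 256 + 0.5) = 563 = 0x0233 in u8Fixed8 form, and a gamma of 1.0
+ * is stored as 256 = 0x0100. */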
+
+//XXX: it would be nice if we had a way of ensuring
+// everything in a profile was initialized regardless of how it was created
+
+//XXX: should this also be taking a black_point?
+/* similar to CGColorSpaceCreateCalibratedRGB */
+qcms_profile* qcms_profile_create_rgb_with_gamma(
+ qcms_CIE_xyY white_point,
+ qcms_CIE_xyYTRIPLE primaries,
+ float gamma)
+{
+ qcms_profile* profile = qcms_profile_create();
+ if (!profile)
+ return NO_MEM_PROFILE;
+
+ //XXX: should store the whitepoint
+ if (!set_rgb_colorants(profile, white_point, primaries)) {
+ qcms_profile_release(profile);
+ return INVALID_PROFILE;
+ }
+
+ profile->redTRC = curve_from_gamma(gamma);
+ profile->blueTRC = curve_from_gamma(gamma);
+ profile->greenTRC = curve_from_gamma(gamma);
+
+ if (!profile->redTRC || !profile->blueTRC || !profile->greenTRC) {
+ qcms_profile_release(profile);
+ return NO_MEM_PROFILE;
+ }
+ profile->class = DISPLAY_DEVICE_PROFILE;
+ profile->rendering_intent = QCMS_INTENT_PERCEPTUAL;
+ profile->color_space = RGB_SIGNATURE;
+ return profile;
+}
+
+qcms_profile* qcms_profile_create_rgb_with_table(
+ qcms_CIE_xyY white_point,
+ qcms_CIE_xyYTRIPLE primaries,
+ uint16_t *table, int num_entries)
+{
+ qcms_profile* profile = qcms_profile_create();
+ if (!profile)
+ return NO_MEM_PROFILE;
+
+ //XXX: should store the whitepoint
+ if (!set_rgb_colorants(profile, white_point, primaries)) {
+ qcms_profile_release(profile);
+ return INVALID_PROFILE;
+ }
+
+ profile->redTRC = curve_from_table(table, num_entries);
+ profile->blueTRC = curve_from_table(table, num_entries);
+ profile->greenTRC = curve_from_table(table, num_entries);
+
+ if (!profile->redTRC || !profile->blueTRC || !profile->greenTRC) {
+ qcms_profile_release(profile);
+ return NO_MEM_PROFILE;
+ }
+ profile->class = DISPLAY_DEVICE_PROFILE;
+ profile->rendering_intent = QCMS_INTENT_PERCEPTUAL;
+ profile->color_space = RGB_SIGNATURE;
+ return profile;
+}
+
+/* from lcms: cmsWhitePointFromTemp */
+/* tempK must be >= 4000. and <= 25000.
+ * Invalid values of tempK will return
+ * (x,y,Y) = (-1.0, -1.0, -1.0)
+ * similar to argyll: icx_DTEMP2XYZ() */
+static qcms_CIE_xyY white_point_from_temp(int temp_K)
+{
+ qcms_CIE_xyY white_point;
+ double x, y;
+ double T, T2, T3;
+ // double M1, M2;
+
+ // No optimization provided.
+ T = temp_K;
+ T2 = T*T; // Square
+ T3 = T2*T; // Cube
+
+ // For correlated color temperature (T) between 4000K and 7000K:
+ if (T >= 4000. && T <= 7000.) {
+ x = -4.6070*(1E9/T3) + 2.9678*(1E6/T2) + 0.09911*(1E3/T) + 0.244063;
+ } else {
+ // or for correlated color temperature (T) between 7000K and 25000K:
+ if (T > 7000.0 && T <= 25000.0) {
+ x = -2.0064*(1E9/T3) + 1.9018*(1E6/T2) + 0.24748*(1E3/T) + 0.237040;
+ } else {
+ // Invalid tempK
+ white_point.x = -1.0;
+ white_point.y = -1.0;
+ white_point.Y = -1.0;
+
+ assert(0 && "invalid temp");
+
+ return white_point;
+ }
+ }
+
+ // Obtain y(x)
+
+ y = -3.000*(x*x) + 2.870*x - 0.275;
+
+	// wave factors (not used, but here for future extensions)
+
+ // M1 = (-1.3515 - 1.7703*x + 5.9114 *y)/(0.0241 + 0.2562*x - 0.7341*y);
+ // M2 = (0.0300 - 31.4424*x + 30.0717*y)/(0.0241 + 0.2562*x - 0.7341*y);
+
+ // Fill white_point struct
+ white_point.x = x;
+ white_point.y = y;
+ white_point.Y = 1.0;
+
+ return white_point;
+}
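+/* Spot check (illustrative only): white_point_from_temp(6504) takes the
+ * 4000K-7000K branch and yields x of about 0.3127 and y of about 0.3291,
+ * essentially the D65 white point used by qcms_profile_sRGB() below. */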
+
+qcms_profile* qcms_profile_sRGB(void)
+{
+ qcms_profile *profile;
+ uint16_t *table;
+
+ qcms_CIE_xyYTRIPLE Rec709Primaries = {
+ {0.6400, 0.3300, 1.0},
+ {0.3000, 0.6000, 1.0},
+ {0.1500, 0.0600, 1.0}
+ };
+ qcms_CIE_xyY D65;
+
+ D65 = white_point_from_temp(6504);
+
+ table = build_sRGB_gamma_table(1024);
+
+ if (!table)
+ return NO_MEM_PROFILE;
+
+ profile = qcms_profile_create_rgb_with_table(D65, Rec709Primaries, table, 1024);
+ free(table);
+ return profile;
+}
+
+
+/* qcms_profile_from_memory does not hold a reference to the memory passed in */
+qcms_profile* qcms_profile_from_memory(const void *mem, size_t size)
+{
+ uint32_t length;
+ struct mem_source source;
+ struct mem_source *src = &source;
+ struct tag_index index;
+ qcms_profile *profile;
+
+ source.buf = mem;
+ source.size = size;
+ source.valid = true;
+
+ if (size < 4)
+ return INVALID_PROFILE;
+
+ length = read_u32(src, 0);
+ if (length <= size) {
+ // shrink the area that we can read if appropriate
+ source.size = length;
+ } else {
+ return INVALID_PROFILE;
+ }
+
+ /* ensure that the profile size is sane so it's easier to reason about */
+ if (source.size <= 64 || source.size >= MAX_PROFILE_SIZE)
+ return INVALID_PROFILE;
+
+ profile = qcms_profile_create();
+ if (!profile)
+ return NO_MEM_PROFILE;
+
+ check_CMM_type_signature(src);
+ check_profile_version(src);
+ read_class_signature(profile, src);
+ read_rendering_intent(profile, src);
+ read_color_space(profile, src);
+ read_pcs(profile, src);
+ //TODO read rest of profile stuff
+
+ if (!src->valid)
+ goto invalid_profile;
+
+ index = read_tag_table(profile, src);
+ if (!src->valid || !index.tags)
+ goto invalid_tag_table;
+
+ if (find_tag(index, TAG_CHAD)) {
+ profile->chromaticAdaption = read_tag_s15Fixed16ArrayType(src, index, TAG_CHAD);
+ } else {
+ profile->chromaticAdaption.invalid = true; //Signal the data is not present
+ }
+
+ if (profile->class == DISPLAY_DEVICE_PROFILE || profile->class == INPUT_DEVICE_PROFILE ||
+ profile->class == OUTPUT_DEVICE_PROFILE || profile->class == COLOR_SPACE_PROFILE) {
+ if (profile->color_space == RGB_SIGNATURE) {
+ if (find_tag(index, TAG_A2B0)) {
+ if (read_u32(src, find_tag(index, TAG_A2B0)->offset) == LUT8_TYPE ||
+ read_u32(src, find_tag(index, TAG_A2B0)->offset) == LUT16_TYPE) {
+ profile->A2B0 = read_tag_lutType(src, index, TAG_A2B0);
+ } else if (read_u32(src, find_tag(index, TAG_A2B0)->offset) == LUT_MAB_TYPE) {
+ profile->mAB = read_tag_lutmABType(src, index, TAG_A2B0);
+ }
+ }
+ if (find_tag(index, TAG_B2A0)) {
+ if (read_u32(src, find_tag(index, TAG_B2A0)->offset) == LUT8_TYPE ||
+ read_u32(src, find_tag(index, TAG_B2A0)->offset) == LUT16_TYPE) {
+ profile->B2A0 = read_tag_lutType(src, index, TAG_B2A0);
+ } else if (read_u32(src, find_tag(index, TAG_B2A0)->offset) == LUT_MBA_TYPE) {
+ profile->mBA = read_tag_lutmABType(src, index, TAG_B2A0);
+ }
+ }
+ if (find_tag(index, TAG_rXYZ) || !qcms_supports_iccv4) {
+ profile->redColorant = read_tag_XYZType(src, index, TAG_rXYZ);
+ profile->greenColorant = read_tag_XYZType(src, index, TAG_gXYZ);
+ profile->blueColorant = read_tag_XYZType(src, index, TAG_bXYZ);
+ }
+
+ if (!src->valid)
+ goto invalid_tag_table;
+
+ if (find_tag(index, TAG_rTRC) || !qcms_supports_iccv4) {
+ profile->redTRC = read_tag_curveType(src, index, TAG_rTRC);
+ profile->greenTRC = read_tag_curveType(src, index, TAG_gTRC);
+ profile->blueTRC = read_tag_curveType(src, index, TAG_bTRC);
+
+ if (!profile->redTRC || !profile->blueTRC || !profile->greenTRC)
+ goto invalid_tag_table;
+ }
+ } else if (profile->color_space == GRAY_SIGNATURE) {
+
+ profile->grayTRC = read_tag_curveType(src, index, TAG_kTRC);
+ if (!profile->grayTRC)
+ goto invalid_tag_table;
+
+ } else {
+ assert(0 && "read_color_space protects against entering here");
+ goto invalid_tag_table;
+ }
+ } else {
+ goto invalid_tag_table;
+ }
+
+ if (!src->valid)
+ goto invalid_tag_table;
+
+ free(index.tags);
+
+ return profile;
+
+invalid_tag_table:
+ free(index.tags);
+invalid_profile:
+ qcms_profile_release(profile);
+ return INVALID_PROFILE;
+}
+
+qcms_intent qcms_profile_get_rendering_intent(qcms_profile *profile)
+{
+ return profile->rendering_intent;
+}
+
+icColorSpaceSignature
+qcms_profile_get_color_space(qcms_profile *profile)
+{
+ return profile->color_space;
+}
+
+static void lut_release(struct lutType *lut)
+{
+ free(lut);
+}
+
+void qcms_profile_release(qcms_profile *profile)
+{
+ if (profile->output_table_r)
+ precache_release(profile->output_table_r);
+ if (profile->output_table_g)
+ precache_release(profile->output_table_g);
+ if (profile->output_table_b)
+ precache_release(profile->output_table_b);
+
+ if (profile->A2B0)
+ lut_release(profile->A2B0);
+ if (profile->B2A0)
+ lut_release(profile->B2A0);
+
+ if (profile->mAB)
+ mAB_release(profile->mAB);
+ if (profile->mBA)
+ mAB_release(profile->mBA);
+
+ free(profile->redTRC);
+ free(profile->blueTRC);
+ free(profile->greenTRC);
+ free(profile->grayTRC);
+ free(profile);
+}
+
+
+#include <stdio.h>
+static void qcms_data_from_file(FILE *file, void **mem, size_t *size)
+{
+ uint32_t length, remaining_length;
+ size_t read_length;
+ be32 length_be;
+ void *data;
+
+ *mem = NULL;
+ *size = 0;
+
+ if (fread(&length_be, 1, sizeof(length_be), file) != sizeof(length_be))
+ return;
+
+ length = be32_to_cpu(length_be);
+ if (length > MAX_PROFILE_SIZE || length < sizeof(length_be))
+ return;
+
+ /* allocate room for the entire profile */
+ data = malloc(length);
+ if (!data)
+ return;
+
+ /* copy in length to the front so that the buffer will contain the entire profile */
+ *((be32*)data) = length_be;
+ remaining_length = length - sizeof(length_be);
+
+	/* read the rest of the profile */
+ read_length = fread((unsigned char*)data + sizeof(length_be), 1, remaining_length, file);
+ if (read_length != remaining_length) {
+ free(data);
+ return;
+ }
+
+	/* successfully read the profile. */
+ *mem = data;
+ *size = length;
+}
+
+qcms_profile* qcms_profile_from_file(FILE *file)
+{
+ size_t length;
+ qcms_profile *profile;
+ void *data;
+
+ qcms_data_from_file(file, &data, &length);
+ if ((data == NULL) || (length == 0))
+ return INVALID_PROFILE;
+
+ profile = qcms_profile_from_memory(data, length);
+ free(data);
+ return profile;
+}
+
+qcms_profile* qcms_profile_from_path(const char *path)
+{
+ qcms_profile *profile = NULL;
+ FILE *file = fopen(path, "rb");
+ if (file) {
+ profile = qcms_profile_from_file(file);
+ fclose(file);
+ }
+ return profile;
+}
+
+void qcms_data_from_path(const char *path, void **mem, size_t *size)
+{
+ FILE *file = NULL;
+ *mem = NULL;
+ *size = 0;
+
+ file = fopen(path, "rb");
+ if (file) {
+ qcms_data_from_file(file, mem, size);
+ fclose(file);
+ }
+}
+
+#ifdef _WIN32
+/* Unicode path version */
+qcms_profile* qcms_profile_from_unicode_path(const wchar_t *path)
+{
+ qcms_profile *profile = NULL;
+ FILE *file = _wfopen(path, L"rb");
+ if (file) {
+ profile = qcms_profile_from_file(file);
+ fclose(file);
+ }
+ return profile;
+}
+
+void qcms_data_from_unicode_path(const wchar_t *path, void **mem, size_t *size)
+{
+ FILE *file = NULL;
+ *mem = NULL;
+ *size = 0;
+
+ file = _wfopen(path, L"rb");
+ if (file) {
+ qcms_data_from_file(file, mem, size);
+ fclose(file);
+ }
+}
+#endif
+
+/*
+* This function constructs an in-memory ICC profile from the given header and tag
+* data so that it can be read back via qcms_profile_from_memory(). That means we
+* must satisfy the profile header checks (which are not yet exhaustive) and provide
+* enough information to read the tag table and the tag data elements.
+*
+* Construction is divided into three steps:
+* (1) write the r/g/bXYZ tags
+* (2) write the r/g/bTRC tags
+* (3) write the profile header
+* This is hard-coded for the "create_rgb_with_gamma" case, which is the only
+* requirement so far; the method could be generalized in the future.
+*
+* NOTE: several of the values below are hard-coded; refer to the ICC specification.
+*/
+#define ICC_PROFILE_HEADER_LENGTH 128
+void qcms_data_create_rgb_with_gamma(qcms_CIE_xyY white_point, qcms_CIE_xyYTRIPLE primaries, float gamma, void **mem, size_t *size)
+{
+ uint32_t length, index, xyz_count, trc_count;
+ size_t tag_table_offset, tag_data_offset;
+ void *data;
+ struct matrix colorants;
+
+ uint32_t TAG_XYZ[3] = {TAG_rXYZ, TAG_gXYZ, TAG_bXYZ};
+ uint32_t TAG_TRC[3] = {TAG_rTRC, TAG_gTRC, TAG_bTRC};
+
+ if ((mem == NULL) || (size == NULL))
+ return;
+
+ *mem = NULL;
+ *size = 0;
+
+ /*
+	 * total length = ICC profile header (128) + tag count (4) +
+	 * (tag table entry (12) * total tags (6 = 3 XYZ + 3 TRC)) + r/g/bXYZ element data (3 * 20)
+	 * + r/g/bTRC element data (3 * 16; each 14-byte element is padded to a 4-byte boundary).
+ */
+ xyz_count = 3; // rXYZ, gXYZ, bXYZ
+ trc_count = 3; // rTRC, gTRC, bTRC
+ length = ICC_PROFILE_HEADER_LENGTH + 4 + (12 * (xyz_count + trc_count)) + (xyz_count * 20) + (trc_count * 16);
+
+ // reserve the total memory.
+ data = malloc(length);
+ if (!data)
+ return;
+ memset(data, 0, length);
+
+ // Part1 : write rXYZ, gXYZ and bXYZ
+ if (!get_rgb_colorants(&colorants, white_point, primaries)) {
+ free(data);
+ return;
+ }
+
+ // the position of first tag's signature in tag table
+ tag_table_offset = ICC_PROFILE_HEADER_LENGTH + 4;
+ tag_data_offset = ICC_PROFILE_HEADER_LENGTH + 4 +
+ (12 * (xyz_count + trc_count)); // the start of tag data elements.
+
+ for (index = 0; index < xyz_count; ++index) {
+ // tag table
+ write_u32(data, tag_table_offset, TAG_XYZ[index]);
+ write_u32(data, tag_table_offset+4, tag_data_offset);
+ write_u32(data, tag_table_offset+8, 20); // 20 bytes per TAG_(r/g/b)XYZ tag element
+
+ // tag data element
+ write_u32(data, tag_data_offset, XYZ_TYPE);
+ // reserved 4 bytes.
+ write_u32(data, tag_data_offset+8, double_to_s15Fixed16Number(colorants.m[0][index]));
+ write_u32(data, tag_data_offset+12, double_to_s15Fixed16Number(colorants.m[1][index]));
+ write_u32(data, tag_data_offset+16, double_to_s15Fixed16Number(colorants.m[2][index]));
+
+ tag_table_offset += 12;
+ tag_data_offset += 20;
+ }
+
+ // Part2 : write rTRC, gTRC and bTRC
+ for (index = 0; index < trc_count; ++index) {
+ // tag table
+ write_u32(data, tag_table_offset, TAG_TRC[index]);
+ write_u32(data, tag_table_offset+4, tag_data_offset);
+ write_u32(data, tag_table_offset+8, 14); // 14 bytes per TAG_(r/g/b)TRC element
+
+ // tag data element
+ write_u32(data, tag_data_offset, CURVE_TYPE);
+ // reserved 4 bytes.
+ write_u32(data, tag_data_offset+8, 1); // count
+ write_u16(data, tag_data_offset+12, float_to_u8Fixed8Number(gamma));
+
+ tag_table_offset += 12;
+ tag_data_offset += 16;
+ }
+
+ /* Part3 : write profile header
+ *
+ * Important header fields are left empty. This generates a profile for internal use only.
+	 * We should be generating: the profile version (04300000h), the profile signature (acsp),
+	 * and the PCS illuminant field. Likewise, mandatory profile tags are omitted.
+ */
+ write_u32(data, 0, length); // the total length of this memory
+ write_u32(data, 12, DISPLAY_DEVICE_PROFILE); // profile->class
+ write_u32(data, 16, RGB_SIGNATURE); // profile->color_space
+ write_u32(data, 20, XYZ_SIGNATURE); // profile->pcs
+ write_u32(data, 64, QCMS_INTENT_PERCEPTUAL); // profile->rendering_intent
+
+ write_u32(data, ICC_PROFILE_HEADER_LENGTH, 6); // total tag count
+
+ // prepare the result
+ *mem = data;
+ *size = length;
+}
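+#if 0
+/* Illustrative usage sketch only (not part of the library): build an
+ * in-memory profile with the function above and read it back through
+ * qcms_profile_from_memory(). The chromaticities below are the usual
+ * Rec.709/D65 values and are given purely as example inputs; the function
+ * name is hypothetical. */
+static qcms_profile *example_profile_from_gamma(void)
+{
+	qcms_CIE_xyY d65 = { 0.3127, 0.3290, 1.0 };
+	qcms_CIE_xyYTRIPLE rec709 = {
+		{0.6400, 0.3300, 1.0},
+		{0.3000, 0.6000, 1.0},
+		{0.1500, 0.0600, 1.0}
+	};
+	void *mem = NULL;
+	size_t size = 0;
+	qcms_profile *profile;
+
+	qcms_data_create_rgb_with_gamma(d65, rec709, 2.2f, &mem, &size);
+	if (!mem)
+		return NULL;
+	profile = qcms_profile_from_memory(mem, size);
+	free(mem);
+	return profile;
+}
+#endif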
diff --git a/gfx/qcms/matrix.c b/gfx/qcms/matrix.c
new file mode 100644
index 000000000..0ce5bd66d
--- /dev/null
+++ b/gfx/qcms/matrix.c
@@ -0,0 +1,136 @@
+/* vim: set ts=8 sw=8 noexpandtab: */
+// qcms
+// Copyright (C) 2009 Mozilla Foundation
+// Copyright (C) 1998-2007 Marti Maria
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+#include <stdlib.h>
+#include "qcmsint.h"
+#include "matrix.h"
+
+struct vector matrix_eval(struct matrix mat, struct vector v)
+{
+ struct vector result;
+ result.v[0] = mat.m[0][0]*v.v[0] + mat.m[0][1]*v.v[1] + mat.m[0][2]*v.v[2];
+ result.v[1] = mat.m[1][0]*v.v[0] + mat.m[1][1]*v.v[1] + mat.m[1][2]*v.v[2];
+ result.v[2] = mat.m[2][0]*v.v[0] + mat.m[2][1]*v.v[1] + mat.m[2][2]*v.v[2];
+ return result;
+}
+
+//XXX: should probably pass by reference and we could
+//probably reuse this computation in matrix_invert
+float matrix_det(struct matrix mat)
+{
+ float det;
+ det = mat.m[0][0]*mat.m[1][1]*mat.m[2][2] +
+ mat.m[0][1]*mat.m[1][2]*mat.m[2][0] +
+ mat.m[0][2]*mat.m[1][0]*mat.m[2][1] -
+ mat.m[0][0]*mat.m[1][2]*mat.m[2][1] -
+ mat.m[0][1]*mat.m[1][0]*mat.m[2][2] -
+ mat.m[0][2]*mat.m[1][1]*mat.m[2][0];
+ return det;
+}
+
+/* from pixman and cairo and Mathematics for Game Programmers */
+/* lcms uses gauss-jordan elimination with partial pivoting which is
+ * less efficient and not as numerically stable. See Mathematics for
+ * Game Programmers. */
+struct matrix matrix_invert(struct matrix mat)
+{
+ struct matrix dest_mat;
+ int i,j;
+ static int a[3] = { 2, 2, 1 };
+ static int b[3] = { 1, 0, 0 };
+
+ /* inv (A) = 1/det (A) * adj (A) */
+ float det = matrix_det(mat);
+
+ if (det == 0) {
+ dest_mat.invalid = true;
+ } else {
+ dest_mat.invalid = false;
+ }
+
+ det = 1/det;
+
+ for (j = 0; j < 3; j++) {
+ for (i = 0; i < 3; i++) {
+ double p;
+ int ai = a[i];
+ int aj = a[j];
+ int bi = b[i];
+ int bj = b[j];
+
+ p = mat.m[ai][aj] * mat.m[bi][bj] -
+ mat.m[ai][bj] * mat.m[bi][aj];
+ if (((i + j) & 1) != 0)
+ p = -p;
+
+ dest_mat.m[j][i] = det * p;
+ }
+ }
+ return dest_mat;
+}
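+/* Worked example (illustrative only): for the diagonal matrix diag(2, 4, 8)
+ * the determinant is 64 and the adjugate method above returns
+ * diag(0.5, 0.25, 0.125). A zero determinant only marks the result invalid;
+ * callers are expected to check the flag before using the entries. */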
+
+struct matrix matrix_identity(void)
+{
+ struct matrix i;
+ i.m[0][0] = 1;
+ i.m[0][1] = 0;
+ i.m[0][2] = 0;
+ i.m[1][0] = 0;
+ i.m[1][1] = 1;
+ i.m[1][2] = 0;
+ i.m[2][0] = 0;
+ i.m[2][1] = 0;
+ i.m[2][2] = 1;
+ i.invalid = false;
+ return i;
+}
+
+struct matrix matrix_invalid(void)
+{
+ struct matrix inv = matrix_identity();
+ inv.invalid = true;
+ return inv;
+}
+
+
+/* from pixman */
+/* MAT3per... */
+struct matrix matrix_multiply(struct matrix a, struct matrix b)
+{
+ struct matrix result;
+ int dx, dy;
+ int o;
+ for (dy = 0; dy < 3; dy++) {
+ for (dx = 0; dx < 3; dx++) {
+ double v = 0;
+ for (o = 0; o < 3; o++) {
+ v += a.m[dy][o] * b.m[o][dx];
+ }
+ result.m[dy][dx] = v;
+ }
+ }
+ result.invalid = a.invalid || b.invalid;
+ return result;
+}
+
+
diff --git a/gfx/qcms/matrix.h b/gfx/qcms/matrix.h
new file mode 100644
index 000000000..5011988a1
--- /dev/null
+++ b/gfx/qcms/matrix.h
@@ -0,0 +1,39 @@
+/* vim: set ts=8 sw=8 noexpandtab: */
+// qcms
+// Copyright (C) 2009 Mozilla Foundation
+// Copyright (C) 1998-2007 Marti Maria
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+#ifndef _QCMS_MATRIX_H
+#define _QCMS_MATRIX_H
+
+struct vector {
+ float v[3];
+};
+
+struct vector matrix_eval(struct matrix mat, struct vector v);
+float matrix_det(struct matrix mat);
+struct matrix matrix_identity(void);
+struct matrix matrix_multiply(struct matrix a, struct matrix b);
+struct matrix matrix_invert(struct matrix mat);
+
+struct matrix matrix_invalid(void);
+
+#endif
diff --git a/gfx/qcms/moz.build b/gfx/qcms/moz.build
new file mode 100644
index 000000000..83524fa14
--- /dev/null
+++ b/gfx/qcms/moz.build
@@ -0,0 +1,48 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS += [
+ 'qcms.h',
+ 'qcmstypes.h',
+]
+
+SOURCES += [
+ 'chain.c',
+ 'iccread.c',
+ 'matrix.c',
+ 'transform.c',
+ 'transform_util.c',
+]
+
+FINAL_LIBRARY = 'xul'
+
+if CONFIG['GNU_CC']:
+ CFLAGS += ['-Wno-missing-field-initializers']
+
+use_sse1 = False
+use_sse2 = False
+use_altivec = False
+if '86' in CONFIG['OS_TEST']:
+ use_sse2 = True
+ if CONFIG['_MSC_VER']:
+ if CONFIG['OS_ARCH'] != 'WINNT' or CONFIG['OS_TEST'] != 'x86_64':
+ use_sse1 = True
+ else:
+ use_sse1 = True
+elif CONFIG['HAVE_ALTIVEC']:
+ use_altivec = True
+
+if use_sse1:
+ SOURCES += ['transform-sse1.c']
+ SOURCES['transform-sse1.c'].flags += CONFIG['SSE_FLAGS']
+
+if use_sse2:
+ SOURCES += ['transform-sse2.c']
+ SOURCES['transform-sse2.c'].flags += CONFIG['SSE2_FLAGS']
+
+if use_altivec:
+ SOURCES += ['transform-altivec.c']
+ SOURCES['transform-altivec.c'].flags += ['-maltivec']
diff --git a/gfx/qcms/qcms.h b/gfx/qcms/qcms.h
new file mode 100644
index 000000000..1bcb65ae4
--- /dev/null
+++ b/gfx/qcms/qcms.h
@@ -0,0 +1,179 @@
+#ifndef QCMS_H
+#define QCMS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* if we've already got an ICC_H header we can ignore the following */
+#ifndef ICC_H
+/* icc34 defines */
+
+/*****************************************************************
+ Copyright (c) 1994-1996 SunSoft, Inc.
+
+ Rights Reserved
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without restrict-
+ion, including without limitation the rights to use, copy, modify,
+merge, publish distribute, sublicense, and/or sell copies of the
+Software, and to permit persons to whom the Software is furnished
+to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-
+INFRINGEMENT. IN NO EVENT SHALL SUNSOFT, INC. OR ITS PARENT
+COMPANY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+Except as contained in this notice, the name of SunSoft, Inc.
+shall not be used in advertising or otherwise to promote the
+sale, use or other dealings in this Software without written
+authorization from SunSoft Inc.
+******************************************************************/
+
+/*
+ * QCMS, in general, is not threadsafe. However, it should be safe to create
+ * profile and transformation objects on different threads, so long as you
+ * don't use the same objects on different threads at the same time.
+ */
+
+/*
+ * Color Space Signatures
+ * Note that only icSigXYZData and icSigLabData are valid
+ * Profile Connection Spaces (PCSs)
+ */
+typedef enum {
+ icSigXYZData = 0x58595A20L, /* 'XYZ ' */
+ icSigLabData = 0x4C616220L, /* 'Lab ' */
+ icSigLuvData = 0x4C757620L, /* 'Luv ' */
+ icSigYCbCrData = 0x59436272L, /* 'YCbr' */
+ icSigYxyData = 0x59787920L, /* 'Yxy ' */
+ icSigRgbData = 0x52474220L, /* 'RGB ' */
+ icSigGrayData = 0x47524159L, /* 'GRAY' */
+ icSigHsvData = 0x48535620L, /* 'HSV ' */
+ icSigHlsData = 0x484C5320L, /* 'HLS ' */
+ icSigCmykData = 0x434D594BL, /* 'CMYK' */
+ icSigCmyData = 0x434D5920L, /* 'CMY ' */
+ icSig2colorData = 0x32434C52L, /* '2CLR' */
+ icSig3colorData = 0x33434C52L, /* '3CLR' */
+ icSig4colorData = 0x34434C52L, /* '4CLR' */
+ icSig5colorData = 0x35434C52L, /* '5CLR' */
+ icSig6colorData = 0x36434C52L, /* '6CLR' */
+ icSig7colorData = 0x37434C52L, /* '7CLR' */
+ icSig8colorData = 0x38434C52L, /* '8CLR' */
+ icSig9colorData = 0x39434C52L, /* '9CLR' */
+ icSig10colorData = 0x41434C52L, /* 'ACLR' */
+ icSig11colorData = 0x42434C52L, /* 'BCLR' */
+ icSig12colorData = 0x43434C52L, /* 'CCLR' */
+ icSig13colorData = 0x44434C52L, /* 'DCLR' */
+ icSig14colorData = 0x45434C52L, /* 'ECLR' */
+ icSig15colorData = 0x46434C52L, /* 'FCLR' */
+ icMaxEnumData = 0xFFFFFFFFL
+} icColorSpaceSignature;
+#endif
+
+#include <stdio.h>
+
+typedef int qcms_bool;
+
+struct _qcms_transform;
+typedef struct _qcms_transform qcms_transform;
+
+struct _qcms_profile;
+typedef struct _qcms_profile qcms_profile;
+
+/* these values match the Rendering Intent values from the ICC spec */
+typedef enum {
+ QCMS_INTENT_MIN = 0,
+ QCMS_INTENT_PERCEPTUAL = 0,
+ QCMS_INTENT_RELATIVE_COLORIMETRIC = 1,
+ QCMS_INTENT_SATURATION = 2,
+ QCMS_INTENT_ABSOLUTE_COLORIMETRIC = 3,
+ QCMS_INTENT_MAX = 3,
+
+ /* Chris Murphy (CM consultant) suggests this as a default in the event that we
+ * cannot reproduce relative + Black Point Compensation. BPC brings an
+ * unacceptable performance overhead, so we go with perceptual. */
+ QCMS_INTENT_DEFAULT = QCMS_INTENT_PERCEPTUAL,
+} qcms_intent;
+
+//XXX: I don't really like the _DATA_ prefix
+typedef enum {
+ QCMS_DATA_RGB_8,
+ QCMS_DATA_RGBA_8,
+ QCMS_DATA_GRAY_8,
+ QCMS_DATA_GRAYA_8
+} qcms_data_type;
+
+/* the names for the following two types are sort of ugly */
+typedef struct
+{
+ double x;
+ double y;
+ double Y;
+} qcms_CIE_xyY;
+
+typedef struct
+{
+ qcms_CIE_xyY red;
+ qcms_CIE_xyY green;
+ qcms_CIE_xyY blue;
+} qcms_CIE_xyYTRIPLE;
+
+qcms_profile* qcms_profile_create_rgb_with_gamma(
+ qcms_CIE_xyY white_point,
+ qcms_CIE_xyYTRIPLE primaries,
+ float gamma);
+
+void qcms_data_create_rgb_with_gamma(
+ qcms_CIE_xyY white_point,
+ qcms_CIE_xyYTRIPLE primaries,
+ float gamma,
+ void **mem,
+ size_t *size);
+
+qcms_profile* qcms_profile_from_memory(const void *mem, size_t size);
+
+qcms_profile* qcms_profile_from_file(FILE *file);
+qcms_profile* qcms_profile_from_path(const char *path);
+
+void qcms_data_from_path(const char *path, void **mem, size_t *size);
+
+#ifdef _WIN32
+qcms_profile* qcms_profile_from_unicode_path(const wchar_t *path);
+void qcms_data_from_unicode_path(const wchar_t *path, void **mem, size_t *size);
+#endif
+qcms_profile* qcms_profile_sRGB(void);
+void qcms_profile_release(qcms_profile *profile);
+
+qcms_bool qcms_profile_is_bogus(qcms_profile *profile);
+qcms_intent qcms_profile_get_rendering_intent(qcms_profile *profile);
+icColorSpaceSignature qcms_profile_get_color_space(qcms_profile *profile);
+
+void qcms_profile_precache_output_transform(qcms_profile *profile);
+
+qcms_transform* qcms_transform_create(
+ qcms_profile *in, qcms_data_type in_type,
+ qcms_profile* out, qcms_data_type out_type,
+ qcms_intent intent);
+
+void qcms_transform_release(qcms_transform *);
+
+void qcms_transform_data(qcms_transform *transform, void *src, void *dest, size_t length);
+
+void qcms_enable_iccv4();
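+/* Typical call sequence (illustrative sketch only; error handling omitted):
+ *
+ *   qcms_profile *input  = qcms_profile_from_path("some-profile.icc");  // example path
+ *   qcms_profile *output = qcms_profile_sRGB();
+ *   qcms_transform *t = qcms_transform_create(input, QCMS_DATA_RGB_8,
+ *                                             output, QCMS_DATA_RGB_8,
+ *                                             QCMS_INTENT_DEFAULT);
+ *   qcms_transform_data(t, src_pixels, dst_pixels, pixel_count);
+ *   qcms_transform_release(t);
+ *   qcms_profile_release(input);
+ *   qcms_profile_release(output);
+ */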
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/gfx/qcms/qcmsint.h b/gfx/qcms/qcmsint.h
new file mode 100644
index 000000000..3604b2657
--- /dev/null
+++ b/gfx/qcms/qcmsint.h
@@ -0,0 +1,327 @@
+/* vim: set ts=8 sw=8 noexpandtab: */
+#include "qcms.h"
+#include "qcmstypes.h"
+
+/* used as a lookup table for the output transformation.
+ * we refcount them so we only need to have one around per output
+ * profile, instead of duplicating them per transform */
+struct precache_output
+{
+ int ref_count;
+ /* We previously used a count of 65536 here but that seems like more
+ * precision than we actually need. By reducing the size we can
+ * improve startup performance and reduce memory usage. ColorSync on
+ * 10.5 uses 4097 which is perhaps because they use a fixed point
+ * representation where 1. is represented by 0x1000. */
+#define PRECACHE_OUTPUT_SIZE 8192
+#define PRECACHE_OUTPUT_MAX (PRECACHE_OUTPUT_SIZE-1)
+ uint8_t data[PRECACHE_OUTPUT_SIZE];
+};
+
+#ifdef _MSC_VER
+#define ALIGN __declspec(align(16))
+#else
+#define ALIGN __attribute__(( aligned (16) ))
+#endif
+
+struct _qcms_transform {
+ float ALIGN matrix[3][4];
+ float *input_gamma_table_r;
+ float *input_gamma_table_g;
+ float *input_gamma_table_b;
+
+ float *input_clut_table_r;
+ float *input_clut_table_g;
+ float *input_clut_table_b;
+ uint16_t input_clut_table_length;
+ float *r_clut;
+ float *g_clut;
+ float *b_clut;
+ uint16_t grid_size;
+ float *output_clut_table_r;
+ float *output_clut_table_g;
+ float *output_clut_table_b;
+ uint16_t output_clut_table_length;
+
+ float *input_gamma_table_gray;
+
+ float out_gamma_r;
+ float out_gamma_g;
+ float out_gamma_b;
+
+ float out_gamma_gray;
+
+ uint16_t *output_gamma_lut_r;
+ uint16_t *output_gamma_lut_g;
+ uint16_t *output_gamma_lut_b;
+
+ uint16_t *output_gamma_lut_gray;
+
+ size_t output_gamma_lut_r_length;
+ size_t output_gamma_lut_g_length;
+ size_t output_gamma_lut_b_length;
+
+ size_t output_gamma_lut_gray_length;
+
+ struct precache_output *output_table_r;
+ struct precache_output *output_table_g;
+ struct precache_output *output_table_b;
+
+ void (*transform_fn)(struct _qcms_transform *transform, unsigned char *src, unsigned char *dest, size_t length);
+};
+
+struct matrix {
+ float m[3][3];
+ bool invalid;
+};
+
+struct qcms_modular_transform;
+
+typedef void (*transform_module_fn_t)(struct qcms_modular_transform *transform, float *src, float *dest, size_t length);
+
+struct qcms_modular_transform {
+ struct matrix matrix;
+ float tx, ty, tz;
+
+ float *input_clut_table_r;
+ float *input_clut_table_g;
+ float *input_clut_table_b;
+ uint16_t input_clut_table_length;
+ float *r_clut;
+ float *g_clut;
+ float *b_clut;
+ uint16_t grid_size;
+ float *output_clut_table_r;
+ float *output_clut_table_g;
+ float *output_clut_table_b;
+ uint16_t output_clut_table_length;
+
+ uint16_t *output_gamma_lut_r;
+ uint16_t *output_gamma_lut_g;
+ uint16_t *output_gamma_lut_b;
+
+ size_t output_gamma_lut_r_length;
+ size_t output_gamma_lut_g_length;
+ size_t output_gamma_lut_b_length;
+
+ transform_module_fn_t transform_module_fn;
+ struct qcms_modular_transform *next_transform;
+};
+
+typedef int32_t s15Fixed16Number;
+typedef uint16_t uInt16Number;
+typedef uint8_t uInt8Number;
+
+struct XYZNumber {
+ s15Fixed16Number X;
+ s15Fixed16Number Y;
+ s15Fixed16Number Z;
+};
+
+struct curveType {
+ uint32_t type;
+ uint32_t count;
+ float parameter[7];
+ uInt16Number data[];
+};
+
+struct lutmABType {
+ uint8_t num_in_channels;
+ uint8_t num_out_channels;
+	// 16 is the upper bound; the actual count is 0..num_in_channels.
+ uint8_t num_grid_points[16];
+
+ s15Fixed16Number e00;
+ s15Fixed16Number e01;
+ s15Fixed16Number e02;
+ s15Fixed16Number e03;
+ s15Fixed16Number e10;
+ s15Fixed16Number e11;
+ s15Fixed16Number e12;
+ s15Fixed16Number e13;
+ s15Fixed16Number e20;
+ s15Fixed16Number e21;
+ s15Fixed16Number e22;
+ s15Fixed16Number e23;
+
+ // reversed elements (for mBA)
+ bool reversed;
+
+ float *clut_table;
+ struct curveType *a_curves[10];
+ struct curveType *b_curves[10];
+ struct curveType *m_curves[10];
+ float clut_table_data[];
+};
+
+/* should lut8Type and lut16Type be different types? */
+struct lutType { // used by lut8Type/lut16Type (mft1/mft2) only
+ uint8_t num_input_channels;
+ uint8_t num_output_channels;
+ uint8_t num_clut_grid_points;
+
+ s15Fixed16Number e00;
+ s15Fixed16Number e01;
+ s15Fixed16Number e02;
+ s15Fixed16Number e10;
+ s15Fixed16Number e11;
+ s15Fixed16Number e12;
+ s15Fixed16Number e20;
+ s15Fixed16Number e21;
+ s15Fixed16Number e22;
+
+ uint16_t num_input_table_entries;
+ uint16_t num_output_table_entries;
+
+ float *input_table;
+ float *clut_table;
+ float *output_table;
+
+ float table_data[];
+};
+#if 0
+/* this is from an initial idea of having the struct correspond to the data in
+ * the file. I decided that it wasn't a good idea.
+ */
+struct tag_value {
+ uint32_t type;
+ union {
+ struct {
+ uint32_t reserved;
+ struct {
+ s15Fixed16Number X;
+ s15Fixed16Number Y;
+ s15Fixed16Number Z;
+ } XYZNumber;
+ } XYZType;
+ };
+}; // I guess we need to pack this?
+#endif
+
+#define RGB_SIGNATURE 0x52474220
+#define GRAY_SIGNATURE 0x47524159
+#define XYZ_SIGNATURE 0x58595A20
+#define LAB_SIGNATURE 0x4C616220
+
+struct _qcms_profile {
+ uint32_t class;
+ uint32_t color_space;
+ uint32_t pcs;
+ qcms_intent rendering_intent;
+ struct XYZNumber redColorant;
+ struct XYZNumber blueColorant;
+ struct XYZNumber greenColorant;
+ struct curveType *redTRC;
+ struct curveType *blueTRC;
+ struct curveType *greenTRC;
+ struct curveType *grayTRC;
+ struct lutType *A2B0;
+ struct lutType *B2A0;
+ struct lutmABType *mAB;
+ struct lutmABType *mBA;
+ struct matrix chromaticAdaption;
+
+ struct precache_output *output_table_r;
+ struct precache_output *output_table_g;
+ struct precache_output *output_table_b;
+};
+
+#ifdef _MSC_VER
+#define inline _inline
+#endif
+
+/* produces the nearest float to 'a' with a maximum error
+ * of 1/1024 which happens for large values like 0x40000040 */
+static inline float s15Fixed16Number_to_float(s15Fixed16Number a)
+{
+ return ((int32_t)a)/65536.f;
+}
+
+static inline s15Fixed16Number double_to_s15Fixed16Number(double v)
+{
+ return (int32_t)(v*65536);
+}
+
+static inline float uInt8Number_to_float(uInt8Number a)
+{
+ return ((int32_t)a)/255.f;
+}
+
+static inline float uInt16Number_to_float(uInt16Number a)
+{
+ return ((int32_t)a)/65535.f;
+}
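+/* Examples (illustrative only): s15Fixed16Number_to_float(0x00010000) == 1.0f,
+ * double_to_s15Fixed16Number(0.5) == 0x00008000, uInt8Number_to_float(255) == 1.0f
+ * and uInt16Number_to_float(0xffff) == 1.0f. */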
+
+
+void precache_release(struct precache_output *p);
+qcms_bool set_rgb_colorants(qcms_profile *profile, qcms_CIE_xyY white_point, qcms_CIE_xyYTRIPLE primaries);
+qcms_bool get_rgb_colorants(struct matrix *colorants, qcms_CIE_xyY white_point, qcms_CIE_xyYTRIPLE primaries);
+
+void qcms_transform_data_rgb_out_lut_sse2(qcms_transform *transform,
+ unsigned char *src,
+ unsigned char *dest,
+ size_t length);
+void qcms_transform_data_rgba_out_lut_sse2(qcms_transform *transform,
+ unsigned char *src,
+ unsigned char *dest,
+ size_t length);
+void qcms_transform_data_rgb_out_lut_sse1(qcms_transform *transform,
+ unsigned char *src,
+ unsigned char *dest,
+ size_t length);
+void qcms_transform_data_rgba_out_lut_sse1(qcms_transform *transform,
+ unsigned char *src,
+ unsigned char *dest,
+ size_t length);
+
+void qcms_transform_data_rgb_out_lut_altivec(qcms_transform *transform,
+ unsigned char *src,
+ unsigned char *dest,
+ size_t length);
+void qcms_transform_data_rgba_out_lut_altivec(qcms_transform *transform,
+ unsigned char *src,
+ unsigned char *dest,
+ size_t length);
+
+extern qcms_bool qcms_supports_iccv4;
+
+#ifdef _MSC_VER
+
+long __cdecl _InterlockedIncrement(long volatile *);
+long __cdecl _InterlockedDecrement(long volatile *);
+#pragma intrinsic(_InterlockedIncrement)
+#pragma intrinsic(_InterlockedDecrement)
+
+#define qcms_atomic_increment(x) _InterlockedIncrement((long volatile *)&x)
+#define qcms_atomic_decrement(x) _InterlockedDecrement((long volatile*)&x)
+
+#else
+
+#define qcms_atomic_increment(x) __sync_add_and_fetch(&x, 1)
+#define qcms_atomic_decrement(x) __sync_sub_and_fetch(&x, 1)
+
+#endif
+
+
+#ifdef NATIVE_OUTPUT
+# define RGB_OUTPUT_COMPONENTS 4
+# define RGBA_OUTPUT_COMPONENTS 4
+# ifdef IS_LITTLE_ENDIAN
+# define OUTPUT_A_INDEX 3
+# define OUTPUT_R_INDEX 2
+# define OUTPUT_G_INDEX 1
+# define OUTPUT_B_INDEX 0
+# else
+# define OUTPUT_A_INDEX 0
+# define OUTPUT_R_INDEX 1
+# define OUTPUT_G_INDEX 2
+# define OUTPUT_B_INDEX 3
+# endif
+#else
+# define RGB_OUTPUT_COMPONENTS 3
+# define RGBA_OUTPUT_COMPONENTS 4
+# define OUTPUT_R_INDEX 0
+# define OUTPUT_G_INDEX 1
+# define OUTPUT_B_INDEX 2
+# define OUTPUT_A_INDEX 3
+#endif
diff --git a/gfx/qcms/qcmstypes.h b/gfx/qcms/qcmstypes.h
new file mode 100644
index 000000000..d36779183
--- /dev/null
+++ b/gfx/qcms/qcmstypes.h
@@ -0,0 +1,51 @@
+#ifndef QCMS_TYPES_H
+#define QCMS_TYPES_H
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define IS_LITTLE_ENDIAN
+#elif BYTE_ORDER == BIG_ENDIAN
+#define IS_BIG_ENDIAN
+#endif
+
+/* all of the platforms that we use _MSC_VER on are little endian
+ * so this is sufficient for now */
+#ifdef _MSC_VER
+#define IS_LITTLE_ENDIAN
+#endif
+
+#ifdef __OS2__
+#define IS_LITTLE_ENDIAN
+#endif
+
+#if !defined(IS_LITTLE_ENDIAN) && !defined(IS_BIG_ENDIAN)
+#error Unknown endianness
+#endif
+
+#if defined (_SVR4) || defined (SVR4) || defined (__OpenBSD__) || defined (_sgi) || defined (__sun) || defined (sun) || defined (__digital__)
+# include <inttypes.h>
+#elif defined (_MSC_VER) && _MSC_VER < 1600
+typedef __int8 int8_t;
+typedef unsigned __int8 uint8_t;
+typedef __int16 int16_t;
+typedef unsigned __int16 uint16_t;
+typedef __int32 int32_t;
+typedef unsigned __int32 uint32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+#ifdef _WIN64
+typedef unsigned __int64 uintptr_t;
+#else
+typedef unsigned long uintptr_t;
+#endif
+
+#elif defined (_AIX)
+# include <sys/inttypes.h>
+#else
+# include <stdint.h>
+#endif
+
+typedef qcms_bool bool;
+#define true 1
+#define false 0
+
+#endif
diff --git a/gfx/qcms/transform-altivec.c b/gfx/qcms/transform-altivec.c
new file mode 100644
index 000000000..230efbba2
--- /dev/null
+++ b/gfx/qcms/transform-altivec.c
@@ -0,0 +1,269 @@
+/* vim: set ts=8 sw=8 noexpandtab: */
+// qcms
+// Copyright (C) 2009 Mozilla Corporation
+// Copyright (C) 1998-2007 Marti Maria
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+#include <altivec.h>
+
+#include "qcmsint.h"
+
+#define FLOATSCALE (float)(PRECACHE_OUTPUT_SIZE)
+#define CLAMPMAXVAL (((float) (PRECACHE_OUTPUT_SIZE - 1)) / PRECACHE_OUTPUT_SIZE)
+static const ALIGN float floatScaleX4 = FLOATSCALE;
+static const ALIGN float clampMaxValueX4 = CLAMPMAXVAL;
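+/* Sizing note (illustrative only): CLAMPMAXVAL is 8191/8192 for the default
+ * PRECACHE_OUTPUT_SIZE, so after clamping and scaling the largest index
+ * produced is 8191, the final entry of the precache table. */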
+
+inline vector float load_aligned_float(float *dataPtr)
+{
+ vector float data = vec_lde(0, dataPtr);
+ vector unsigned char moveToStart = vec_lvsl(0, dataPtr);
+ return vec_perm(data, data, moveToStart);
+}
+
+void qcms_transform_data_rgb_out_lut_altivec(qcms_transform *transform,
+ unsigned char *src,
+ unsigned char *dest,
+ size_t length)
+{
+ unsigned int i;
+ float (*mat)[4] = transform->matrix;
+ char input_back[32];
+ /* Ensure we have a buffer that's 16 byte aligned regardless of the original
+ * stack alignment. We can't use __attribute__((aligned(16))) or __declspec(align(32))
+ * because they don't work on stack variables. gcc 4.4 does do the right thing
+ * on x86 but that's too new for us right now. For more info: gcc bug #16660 */
+ float const *input = (float*)(((uintptr_t)&input_back[16]) & ~0xf);
+ /* share input and output locations to save having to keep the
+ * locations in separate registers */
+ uint32_t const *output = (uint32_t*)input;
+
+ /* deref *transform now to avoid it in loop */
+ const float *igtbl_r = transform->input_gamma_table_r;
+ const float *igtbl_g = transform->input_gamma_table_g;
+ const float *igtbl_b = transform->input_gamma_table_b;
+
+ /* deref *transform now to avoid it in loop */
+ const uint8_t *otdata_r = &transform->output_table_r->data[0];
+ const uint8_t *otdata_g = &transform->output_table_g->data[0];
+ const uint8_t *otdata_b = &transform->output_table_b->data[0];
+
+ /* input matrix values never change */
+ const vector float mat0 = vec_ldl(0, (vector float*)mat[0]);
+ const vector float mat1 = vec_ldl(0, (vector float*)mat[1]);
+ const vector float mat2 = vec_ldl(0, (vector float*)mat[2]);
+
+ /* these values don't change, either */
+ const vector float max = vec_splat(vec_lde(0, (float*)&clampMaxValueX4), 0);
+ const vector float min = (vector float)vec_splat_u32(0);
+ const vector float scale = vec_splat(vec_lde(0, (float*)&floatScaleX4), 0);
+
+ /* working variables */
+ vector float vec_r, vec_g, vec_b, result;
+
+ /* CYA */
+ if (!length)
+ return;
+
+ /* one pixel is handled outside of the loop */
+ length--;
+
+ /* setup for transforming 1st pixel */
+ vec_r = load_aligned_float((float*)&igtbl_r[src[0]]);
+	vec_g = load_aligned_float((float*)&igtbl_g[src[1]]);
+	vec_b = load_aligned_float((float*)&igtbl_b[src[2]]);
+ src += 3;
+
+ /* transform all but final pixel */
+
+ for (i=0; i<length; i++)
+ {
+ /* position values from gamma tables */
+ vec_r = vec_splat(vec_r, 0);
+ vec_g = vec_splat(vec_g, 0);
+ vec_b = vec_splat(vec_b, 0);
+
+ /* gamma * matrix */
+ vec_r = vec_madd(vec_r, mat0, min);
+ vec_g = vec_madd(vec_g, mat1, min);
+ vec_b = vec_madd(vec_b, mat2, min);
+
+ /* crunch, crunch, crunch */
+ vec_r = vec_add(vec_r, vec_add(vec_g, vec_b));
+ vec_r = vec_max(min, vec_r);
+ vec_r = vec_min(max, vec_r);
+ result = vec_madd(vec_r, scale, min);
+
+ /* store calc'd output tables indices */
+ vec_st(vec_ctu(vec_round(result), 0), 0, (vector unsigned int*)output);
+
+ /* load for next loop while store completes */
+ vec_r = load_aligned_float((float*)&igtbl_r[src[0]]);
+		vec_g = load_aligned_float((float*)&igtbl_g[src[1]]);
+		vec_b = load_aligned_float((float*)&igtbl_b[src[2]]);
+ src += 3;
+
+ /* use calc'd indices to output RGB values */
+ dest[0] = otdata_r[output[0]];
+ dest[1] = otdata_g[output[1]];
+ dest[2] = otdata_b[output[2]];
+ dest += 3;
+ }
+
+ /* handle final (maybe only) pixel */
+
+ vec_r = vec_splat(vec_r, 0);
+ vec_g = vec_splat(vec_g, 0);
+ vec_b = vec_splat(vec_b, 0);
+
+ vec_r = vec_madd(vec_r, mat0, min);
+ vec_g = vec_madd(vec_g, mat1, min);
+ vec_b = vec_madd(vec_b, mat2, min);
+
+ vec_r = vec_add(vec_r, vec_add(vec_g, vec_b));
+ vec_r = vec_max(min, vec_r);
+ vec_r = vec_min(max, vec_r);
+ result = vec_madd(vec_r, scale, min);
+
+ vec_st(vec_ctu(vec_round(result),0),0,(vector unsigned int*)output);
+
+ dest[0] = otdata_r[output[0]];
+ dest[1] = otdata_g[output[1]];
+ dest[2] = otdata_b[output[2]];
+}
+
+void qcms_transform_data_rgba_out_lut_altivec(qcms_transform *transform,
+ unsigned char *src,
+ unsigned char *dest,
+ size_t length)
+{
+ unsigned int i;
+ float (*mat)[4] = transform->matrix;
+ char input_back[32];
+ /* Ensure we have a buffer that's 16 byte aligned regardless of the original
+ * stack alignment. We can't use __attribute__((aligned(16))) or __declspec(align(32))
+ * because they don't work on stack variables. gcc 4.4 does do the right thing
+ * on x86 but that's too new for us right now. For more info: gcc bug #16660 */
+ float const *input = (float*)(((uintptr_t)&input_back[16]) & ~0xf);
+ /* share input and output locations to save having to keep the
+ * locations in separate registers */
+ uint32_t const *output = (uint32_t*)input;
+
+ /* deref *transform now to avoid it in loop */
+ const float *igtbl_r = transform->input_gamma_table_r;
+ const float *igtbl_g = transform->input_gamma_table_g;
+ const float *igtbl_b = transform->input_gamma_table_b;
+
+ /* deref *transform now to avoid it in loop */
+ const uint8_t *otdata_r = &transform->output_table_r->data[0];
+ const uint8_t *otdata_g = &transform->output_table_g->data[0];
+ const uint8_t *otdata_b = &transform->output_table_b->data[0];
+
+ /* input matrix values never change */
+ const vector float mat0 = vec_ldl(0, (vector float*)mat[0]);
+ const vector float mat1 = vec_ldl(0, (vector float*)mat[1]);
+ const vector float mat2 = vec_ldl(0, (vector float*)mat[2]);
+
+ /* these values don't change, either */
+ const vector float max = vec_splat(vec_lde(0, (float*)&clampMaxValueX4), 0);
+ const vector float min = (vector float)vec_splat_u32(0);
+ const vector float scale = vec_splat(vec_lde(0, (float*)&floatScaleX4), 0);
+
+ /* working variables */
+ vector float vec_r, vec_g, vec_b, result;
+ unsigned char alpha;
+
+ /* CYA */
+ if (!length)
+ return;
+
+ /* one pixel is handled outside of the loop */
+ length--;
+
+ /* setup for transforming 1st pixel */
+ vec_r = load_aligned_float((float*)&igtbl_r[src[0]]);
+	vec_g = load_aligned_float((float*)&igtbl_g[src[1]]);
+	vec_b = load_aligned_float((float*)&igtbl_b[src[2]]);
+ alpha = src[3];
+ src += 4;
+
+ /* transform all but final pixel */
+
+ for (i=0; i<length; i++)
+ {
+ /* position values from gamma tables */
+ vec_r = vec_splat(vec_r, 0);
+ vec_g = vec_splat(vec_g, 0);
+ vec_b = vec_splat(vec_b, 0);
+
+ /* gamma * matrix */
+ vec_r = vec_madd(vec_r, mat0, min);
+ vec_g = vec_madd(vec_g, mat1, min);
+ vec_b = vec_madd(vec_b, mat2, min);
+
+ /* store alpha for this pixel; load alpha for next */
+ dest[3] = alpha;
+ alpha = src[3];
+
+ /* crunch, crunch, crunch */
+ vec_r = vec_add(vec_r, vec_add(vec_g, vec_b));
+ vec_r = vec_max(min, vec_r);
+ vec_r = vec_min(max, vec_r);
+ result = vec_madd(vec_r, scale, min);
+
+ /* store calc'd output tables indices */
+ vec_st(vec_ctu(vec_round(result), 0), 0, (vector unsigned int*)output);
+
+ /* load gamma values for next loop while store completes */
+ vec_r = load_aligned_float((float*)&igtbl_r[src[0]]);
+		vec_g = load_aligned_float((float*)&igtbl_g[src[1]]);
+		vec_b = load_aligned_float((float*)&igtbl_b[src[2]]);
+ src += 4;
+
+ /* use calc'd indices to output RGB values */
+ dest[0] = otdata_r[output[0]];
+ dest[1] = otdata_g[output[1]];
+ dest[2] = otdata_b[output[2]];
+ dest += 4;
+ }
+
+ /* handle final (maybe only) pixel */
+
+ vec_r = vec_splat(vec_r, 0);
+ vec_g = vec_splat(vec_g, 0);
+ vec_b = vec_splat(vec_b, 0);
+
+ vec_r = vec_madd(vec_r, mat0, min);
+ vec_g = vec_madd(vec_g, mat1, min);
+ vec_b = vec_madd(vec_b, mat2, min);
+
+ dest[3] = alpha;
+
+ vec_r = vec_add(vec_r, vec_add(vec_g, vec_b));
+ vec_r = vec_max(min, vec_r);
+ vec_r = vec_min(max, vec_r);
+ result = vec_madd(vec_r, scale, min);
+
+ vec_st(vec_ctu(vec_round(result), 0), 0, (vector unsigned int*)output);
+
+ dest[0] = otdata_r[output[0]];
+ dest[1] = otdata_g[output[1]];
+ dest[2] = otdata_b[output[2]];
+}
+
diff --git a/gfx/qcms/transform-sse1.c b/gfx/qcms/transform-sse1.c
new file mode 100644
index 000000000..69d814545
--- /dev/null
+++ b/gfx/qcms/transform-sse1.c
@@ -0,0 +1,253 @@
+#include <xmmintrin.h>
+
+#include "qcmsint.h"
+
+/* pre-shuffled: just load these into XMM reg instead of load-scalar/shufps sequence */
+#define FLOATSCALE (float)(PRECACHE_OUTPUT_SIZE)
+#define CLAMPMAXVAL ( ((float) (PRECACHE_OUTPUT_SIZE - 1)) / PRECACHE_OUTPUT_SIZE )
+static const ALIGN float floatScaleX4[4] =
+ { FLOATSCALE, FLOATSCALE, FLOATSCALE, FLOATSCALE};
+static const ALIGN float clampMaxValueX4[4] =
+ { CLAMPMAXVAL, CLAMPMAXVAL, CLAMPMAXVAL, CLAMPMAXVAL};
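+/* A note on these constants (added commentary, not from the original source):
+ * results are clamped to [0, CLAMPMAXVAL] and then multiplied by FLOATSCALE,
+ * which maps them onto [0, PRECACHE_OUTPUT_SIZE - 1] so that the rounded
+ * integers can index the precached output tables directly. */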
+
+void qcms_transform_data_rgb_out_lut_sse1(qcms_transform *transform,
+ unsigned char *src,
+ unsigned char *dest,
+ size_t length)
+{
+ unsigned int i;
+ float (*mat)[4] = transform->matrix;
+ char input_back[32];
+ /* Ensure we have a buffer that's 16 byte aligned regardless of the original
+ * stack alignment. We can't use __attribute__((aligned(16))) or __declspec(align(32))
+ * because they don't work on stack variables. gcc 4.4 does do the right thing
+ * on x86 but that's too new for us right now. For more info: gcc bug #16660 */
+ float const * input = (float*)(((uintptr_t)&input_back[16]) & ~0xf);
+ /* share input and output locations to save having to keep the
+ * locations in separate registers */
+ uint32_t const * output = (uint32_t*)input;
+
+ /* deref *transform now to avoid it in loop */
+ const float *igtbl_r = transform->input_gamma_table_r;
+ const float *igtbl_g = transform->input_gamma_table_g;
+ const float *igtbl_b = transform->input_gamma_table_b;
+
+ /* deref *transform now to avoid it in loop */
+ const uint8_t *otdata_r = &transform->output_table_r->data[0];
+ const uint8_t *otdata_g = &transform->output_table_g->data[0];
+ const uint8_t *otdata_b = &transform->output_table_b->data[0];
+
+ /* input matrix values never change */
+ const __m128 mat0 = _mm_load_ps(mat[0]);
+ const __m128 mat1 = _mm_load_ps(mat[1]);
+ const __m128 mat2 = _mm_load_ps(mat[2]);
+
+ /* these values don't change, either */
+ const __m128 max = _mm_load_ps(clampMaxValueX4);
+ const __m128 min = _mm_setzero_ps();
+ const __m128 scale = _mm_load_ps(floatScaleX4);
+
+ /* working variables */
+ __m128 vec_r, vec_g, vec_b, result;
+
+ /* CYA */
+ if (!length)
+ return;
+
+ /* one pixel is handled outside of the loop */
+ length--;
+
+ /* setup for transforming 1st pixel */
+ vec_r = _mm_load_ss(&igtbl_r[src[0]]);
+ vec_g = _mm_load_ss(&igtbl_g[src[1]]);
+ vec_b = _mm_load_ss(&igtbl_b[src[2]]);
+ src += 3;
+
+ /* transform all but final pixel */
+
+ for (i=0; i<length; i++)
+ {
+ /* position values from gamma tables */
+ vec_r = _mm_shuffle_ps(vec_r, vec_r, 0);
+ vec_g = _mm_shuffle_ps(vec_g, vec_g, 0);
+ vec_b = _mm_shuffle_ps(vec_b, vec_b, 0);
+
+ /* gamma * matrix */
+ vec_r = _mm_mul_ps(vec_r, mat0);
+ vec_g = _mm_mul_ps(vec_g, mat1);
+ vec_b = _mm_mul_ps(vec_b, mat2);
+
+ /* crunch, crunch, crunch */
+ vec_r = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b));
+ vec_r = _mm_max_ps(min, vec_r);
+ vec_r = _mm_min_ps(max, vec_r);
+ result = _mm_mul_ps(vec_r, scale);
+
+ /* store calc'd output tables indices */
+ *((__m64 *)&output[0]) = _mm_cvtps_pi32(result);
+ result = _mm_movehl_ps(result, result);
+ *((__m64 *)&output[2]) = _mm_cvtps_pi32(result) ;
+
+ /* load for next loop while store completes */
+ vec_r = _mm_load_ss(&igtbl_r[src[0]]);
+ vec_g = _mm_load_ss(&igtbl_g[src[1]]);
+ vec_b = _mm_load_ss(&igtbl_b[src[2]]);
+ src += 3;
+
+ /* use calc'd indices to output RGB values */
+ dest[OUTPUT_R_INDEX] = otdata_r[output[0]];
+ dest[OUTPUT_G_INDEX] = otdata_g[output[1]];
+ dest[OUTPUT_B_INDEX] = otdata_b[output[2]];
+ dest += RGB_OUTPUT_COMPONENTS;
+ }
+
+ /* handle final (maybe only) pixel */
+
+ vec_r = _mm_shuffle_ps(vec_r, vec_r, 0);
+ vec_g = _mm_shuffle_ps(vec_g, vec_g, 0);
+ vec_b = _mm_shuffle_ps(vec_b, vec_b, 0);
+
+ vec_r = _mm_mul_ps(vec_r, mat0);
+ vec_g = _mm_mul_ps(vec_g, mat1);
+ vec_b = _mm_mul_ps(vec_b, mat2);
+
+ vec_r = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b));
+ vec_r = _mm_max_ps(min, vec_r);
+ vec_r = _mm_min_ps(max, vec_r);
+ result = _mm_mul_ps(vec_r, scale);
+
+ *((__m64 *)&output[0]) = _mm_cvtps_pi32(result);
+ result = _mm_movehl_ps(result, result);
+ *((__m64 *)&output[2]) = _mm_cvtps_pi32(result);
+
+ dest[OUTPUT_R_INDEX] = otdata_r[output[0]];
+ dest[OUTPUT_G_INDEX] = otdata_g[output[1]];
+ dest[OUTPUT_B_INDEX] = otdata_b[output[2]];
+
+ _mm_empty();
+}
+
+void qcms_transform_data_rgba_out_lut_sse1(qcms_transform *transform,
+ unsigned char *src,
+ unsigned char *dest,
+ size_t length)
+{
+ unsigned int i;
+ float (*mat)[4] = transform->matrix;
+ char input_back[32];
+ /* Ensure we have a buffer that's 16 byte aligned regardless of the original
+ * stack alignment. We can't use __attribute__((aligned(16))) or __declspec(align(32))
+ * because they don't work on stack variables. gcc 4.4 does do the right thing
+ * on x86 but that's too new for us right now. For more info: gcc bug #16660 */
+ float const * input = (float*)(((uintptr_t)&input_back[16]) & ~0xf);
+ /* share input and output locations to save having to keep the
+ * locations in separate registers */
+ uint32_t const * output = (uint32_t*)input;
+
+ /* deref *transform now to avoid it in loop */
+ const float *igtbl_r = transform->input_gamma_table_r;
+ const float *igtbl_g = transform->input_gamma_table_g;
+ const float *igtbl_b = transform->input_gamma_table_b;
+
+ /* deref *transform now to avoid it in loop */
+ const uint8_t *otdata_r = &transform->output_table_r->data[0];
+ const uint8_t *otdata_g = &transform->output_table_g->data[0];
+ const uint8_t *otdata_b = &transform->output_table_b->data[0];
+
+ /* input matrix values never change */
+ const __m128 mat0 = _mm_load_ps(mat[0]);
+ const __m128 mat1 = _mm_load_ps(mat[1]);
+ const __m128 mat2 = _mm_load_ps(mat[2]);
+
+ /* these values don't change, either */
+ const __m128 max = _mm_load_ps(clampMaxValueX4);
+ const __m128 min = _mm_setzero_ps();
+ const __m128 scale = _mm_load_ps(floatScaleX4);
+
+ /* working variables */
+ __m128 vec_r, vec_g, vec_b, result;
+ unsigned char alpha;
+
+ /* CYA */
+ if (!length)
+ return;
+
+ /* one pixel is handled outside of the loop */
+ length--;
+
+ /* setup for transforming 1st pixel */
+ vec_r = _mm_load_ss(&igtbl_r[src[0]]);
+ vec_g = _mm_load_ss(&igtbl_g[src[1]]);
+ vec_b = _mm_load_ss(&igtbl_b[src[2]]);
+ alpha = src[3];
+ src += 4;
+
+ /* transform all but final pixel */
+
+ for (i=0; i<length; i++)
+ {
+ /* position values from gamma tables */
+ vec_r = _mm_shuffle_ps(vec_r, vec_r, 0);
+ vec_g = _mm_shuffle_ps(vec_g, vec_g, 0);
+ vec_b = _mm_shuffle_ps(vec_b, vec_b, 0);
+
+ /* gamma * matrix */
+ vec_r = _mm_mul_ps(vec_r, mat0);
+ vec_g = _mm_mul_ps(vec_g, mat1);
+ vec_b = _mm_mul_ps(vec_b, mat2);
+
+ /* store alpha for this pixel; load alpha for next */
+ dest[OUTPUT_A_INDEX] = alpha;
+ alpha = src[3];
+
+ /* crunch, crunch, crunch */
+ vec_r = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b));
+ vec_r = _mm_max_ps(min, vec_r);
+ vec_r = _mm_min_ps(max, vec_r);
+ result = _mm_mul_ps(vec_r, scale);
+
+ /* store calc'd output tables indices */
+ *((__m64 *)&output[0]) = _mm_cvtps_pi32(result);
+ result = _mm_movehl_ps(result, result);
+ *((__m64 *)&output[2]) = _mm_cvtps_pi32(result);
+
+ /* load gamma values for next loop while store completes */
+ vec_r = _mm_load_ss(&igtbl_r[src[0]]);
+ vec_g = _mm_load_ss(&igtbl_g[src[1]]);
+ vec_b = _mm_load_ss(&igtbl_b[src[2]]);
+ src += 4;
+
+ /* use calc'd indices to output RGB values */
+ dest[OUTPUT_R_INDEX] = otdata_r[output[0]];
+ dest[OUTPUT_G_INDEX] = otdata_g[output[1]];
+ dest[OUTPUT_B_INDEX] = otdata_b[output[2]];
+ dest += 4;
+ }
+
+ /* handle final (maybe only) pixel */
+
+ vec_r = _mm_shuffle_ps(vec_r, vec_r, 0);
+ vec_g = _mm_shuffle_ps(vec_g, vec_g, 0);
+ vec_b = _mm_shuffle_ps(vec_b, vec_b, 0);
+
+ vec_r = _mm_mul_ps(vec_r, mat0);
+ vec_g = _mm_mul_ps(vec_g, mat1);
+ vec_b = _mm_mul_ps(vec_b, mat2);
+
+ dest[OUTPUT_A_INDEX] = alpha;
+
+ vec_r = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b));
+ vec_r = _mm_max_ps(min, vec_r);
+ vec_r = _mm_min_ps(max, vec_r);
+ result = _mm_mul_ps(vec_r, scale);
+
+ *((__m64 *)&output[0]) = _mm_cvtps_pi32(result);
+ result = _mm_movehl_ps(result, result);
+ *((__m64 *)&output[2]) = _mm_cvtps_pi32(result);
+
+ dest[OUTPUT_R_INDEX] = otdata_r[output[0]];
+ dest[OUTPUT_G_INDEX] = otdata_g[output[1]];
+ dest[OUTPUT_B_INDEX] = otdata_b[output[2]];
+
+ _mm_empty();
+}
diff --git a/gfx/qcms/transform-sse2.c b/gfx/qcms/transform-sse2.c
new file mode 100644
index 000000000..dc9f495e7
--- /dev/null
+++ b/gfx/qcms/transform-sse2.c
@@ -0,0 +1,243 @@
+#include <emmintrin.h>
+
+#include "qcmsint.h"
+
+/* pre-shuffled: just load these into XMM reg instead of load-scalar/shufps sequence */
+#define FLOATSCALE (float)(PRECACHE_OUTPUT_SIZE)
+#define CLAMPMAXVAL ( ((float) (PRECACHE_OUTPUT_SIZE - 1)) / PRECACHE_OUTPUT_SIZE )
+static const ALIGN float floatScaleX4[4] =
+ { FLOATSCALE, FLOATSCALE, FLOATSCALE, FLOATSCALE};
+static const ALIGN float clampMaxValueX4[4] =
+ { CLAMPMAXVAL, CLAMPMAXVAL, CLAMPMAXVAL, CLAMPMAXVAL};
+
+void qcms_transform_data_rgb_out_lut_sse2(qcms_transform *transform,
+ unsigned char *src,
+ unsigned char *dest,
+ size_t length)
+{
+ unsigned int i;
+ float (*mat)[4] = transform->matrix;
+ char input_back[32];
+ /* Ensure we have a buffer that's 16 byte aligned regardless of the original
+ * stack alignment. We can't use __attribute__((aligned(16))) or __declspec(align(32))
+ * because they don't work on stack variables. gcc 4.4 does do the right thing
+ * on x86 but that's too new for us right now. For more info: gcc bug #16660 */
+ float const * input = (float*)(((uintptr_t)&input_back[16]) & ~0xf);
+ /* share input and output locations to save having to keep the
+ * locations in separate registers */
+ uint32_t const * output = (uint32_t*)input;
+
+ /* deref *transform now to avoid it in loop */
+ const float *igtbl_r = transform->input_gamma_table_r;
+ const float *igtbl_g = transform->input_gamma_table_g;
+ const float *igtbl_b = transform->input_gamma_table_b;
+
+ /* deref *transform now to avoid it in loop */
+ const uint8_t *otdata_r = &transform->output_table_r->data[0];
+ const uint8_t *otdata_g = &transform->output_table_g->data[0];
+ const uint8_t *otdata_b = &transform->output_table_b->data[0];
+
+ /* input matrix values never change */
+ const __m128 mat0 = _mm_load_ps(mat[0]);
+ const __m128 mat1 = _mm_load_ps(mat[1]);
+ const __m128 mat2 = _mm_load_ps(mat[2]);
+
+ /* these values don't change, either */
+ const __m128 max = _mm_load_ps(clampMaxValueX4);
+ const __m128 min = _mm_setzero_ps();
+ const __m128 scale = _mm_load_ps(floatScaleX4);
+
+ /* working variables */
+ __m128 vec_r, vec_g, vec_b, result;
+
+ /* CYA */
+ if (!length)
+ return;
+
+ /* one pixel is handled outside of the loop */
+ length--;
+
+ /* setup for transforming 1st pixel */
+ vec_r = _mm_load_ss(&igtbl_r[src[0]]);
+ vec_g = _mm_load_ss(&igtbl_g[src[1]]);
+ vec_b = _mm_load_ss(&igtbl_b[src[2]]);
+ src += 3;
+
+ /* transform all but final pixel */
+
+ for (i=0; i<length; i++)
+ {
+ /* position values from gamma tables */
+ vec_r = _mm_shuffle_ps(vec_r, vec_r, 0);
+ vec_g = _mm_shuffle_ps(vec_g, vec_g, 0);
+ vec_b = _mm_shuffle_ps(vec_b, vec_b, 0);
+
+ /* gamma * matrix */
+ vec_r = _mm_mul_ps(vec_r, mat0);
+ vec_g = _mm_mul_ps(vec_g, mat1);
+ vec_b = _mm_mul_ps(vec_b, mat2);
+
+ /* crunch, crunch, crunch */
+ vec_r = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b));
+ vec_r = _mm_max_ps(min, vec_r);
+ vec_r = _mm_min_ps(max, vec_r);
+ result = _mm_mul_ps(vec_r, scale);
+
+ /* store calc'd output tables indices */
+ _mm_store_si128((__m128i*)output, _mm_cvtps_epi32(result));
+
+ /* load for next loop while store completes */
+ vec_r = _mm_load_ss(&igtbl_r[src[0]]);
+ vec_g = _mm_load_ss(&igtbl_g[src[1]]);
+ vec_b = _mm_load_ss(&igtbl_b[src[2]]);
+ src += 3;
+
+ /* use calc'd indices to output RGB values */
+ dest[OUTPUT_R_INDEX] = otdata_r[output[0]];
+ dest[OUTPUT_G_INDEX] = otdata_g[output[1]];
+ dest[OUTPUT_B_INDEX] = otdata_b[output[2]];
+ dest += RGB_OUTPUT_COMPONENTS;
+ }
+
+ /* handle final (maybe only) pixel */
+
+ vec_r = _mm_shuffle_ps(vec_r, vec_r, 0);
+ vec_g = _mm_shuffle_ps(vec_g, vec_g, 0);
+ vec_b = _mm_shuffle_ps(vec_b, vec_b, 0);
+
+ vec_r = _mm_mul_ps(vec_r, mat0);
+ vec_g = _mm_mul_ps(vec_g, mat1);
+ vec_b = _mm_mul_ps(vec_b, mat2);
+
+ vec_r = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b));
+ vec_r = _mm_max_ps(min, vec_r);
+ vec_r = _mm_min_ps(max, vec_r);
+ result = _mm_mul_ps(vec_r, scale);
+
+ _mm_store_si128((__m128i*)output, _mm_cvtps_epi32(result));
+
+ dest[OUTPUT_R_INDEX] = otdata_r[output[0]];
+ dest[OUTPUT_G_INDEX] = otdata_g[output[1]];
+ dest[OUTPUT_B_INDEX] = otdata_b[output[2]];
+}
+
+void qcms_transform_data_rgba_out_lut_sse2(qcms_transform *transform,
+ unsigned char *src,
+ unsigned char *dest,
+ size_t length)
+{
+ unsigned int i;
+ float (*mat)[4] = transform->matrix;
+ char input_back[32];
+ /* Ensure we have a buffer that's 16 byte aligned regardless of the original
+ * stack alignment. We can't use __attribute__((aligned(16))) or __declspec(align(32))
+ * because they don't work on stack variables. gcc 4.4 does do the right thing
+ * on x86 but that's too new for us right now. For more info: gcc bug #16660 */
+ float const * input = (float*)(((uintptr_t)&input_back[16]) & ~0xf);
+ /* share input and output locations to save having to keep the
+ * locations in separate registers */
+ uint32_t const * output = (uint32_t*)input;
+
+ /* deref *transform now to avoid it in loop */
+ const float *igtbl_r = transform->input_gamma_table_r;
+ const float *igtbl_g = transform->input_gamma_table_g;
+ const float *igtbl_b = transform->input_gamma_table_b;
+
+ /* deref *transform now to avoid it in loop */
+ const uint8_t *otdata_r = &transform->output_table_r->data[0];
+ const uint8_t *otdata_g = &transform->output_table_g->data[0];
+ const uint8_t *otdata_b = &transform->output_table_b->data[0];
+
+ /* input matrix values never change */
+ const __m128 mat0 = _mm_load_ps(mat[0]);
+ const __m128 mat1 = _mm_load_ps(mat[1]);
+ const __m128 mat2 = _mm_load_ps(mat[2]);
+
+ /* these values don't change, either */
+ const __m128 max = _mm_load_ps(clampMaxValueX4);
+ const __m128 min = _mm_setzero_ps();
+ const __m128 scale = _mm_load_ps(floatScaleX4);
+
+ /* working variables */
+ __m128 vec_r, vec_g, vec_b, result;
+ unsigned char alpha;
+
+ /* CYA */
+ if (!length)
+ return;
+
+ /* one pixel is handled outside of the loop */
+ length--;
+
+ /* setup for transforming 1st pixel */
+ vec_r = _mm_load_ss(&igtbl_r[src[0]]);
+ vec_g = _mm_load_ss(&igtbl_g[src[1]]);
+ vec_b = _mm_load_ss(&igtbl_b[src[2]]);
+ alpha = src[3];
+ src += 4;
+
+ /* transform all but final pixel */
+
+ for (i=0; i<length; i++)
+ {
+ /* position values from gamma tables */
+ vec_r = _mm_shuffle_ps(vec_r, vec_r, 0);
+ vec_g = _mm_shuffle_ps(vec_g, vec_g, 0);
+ vec_b = _mm_shuffle_ps(vec_b, vec_b, 0);
+
+ /* gamma * matrix */
+ vec_r = _mm_mul_ps(vec_r, mat0);
+ vec_g = _mm_mul_ps(vec_g, mat1);
+ vec_b = _mm_mul_ps(vec_b, mat2);
+
+ /* store alpha for this pixel; load alpha for next */
+ dest[OUTPUT_A_INDEX] = alpha;
+ alpha = src[3];
+
+ /* crunch, crunch, crunch */
+ vec_r = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b));
+ vec_r = _mm_max_ps(min, vec_r);
+ vec_r = _mm_min_ps(max, vec_r);
+ result = _mm_mul_ps(vec_r, scale);
+
+ /* store calc'd output tables indices */
+ _mm_store_si128((__m128i*)output, _mm_cvtps_epi32(result));
+
+ /* load gamma values for next loop while store completes */
+ vec_r = _mm_load_ss(&igtbl_r[src[0]]);
+ vec_g = _mm_load_ss(&igtbl_g[src[1]]);
+ vec_b = _mm_load_ss(&igtbl_b[src[2]]);
+ src += 4;
+
+ /* use calc'd indices to output RGB values */
+ dest[OUTPUT_R_INDEX] = otdata_r[output[0]];
+ dest[OUTPUT_G_INDEX] = otdata_g[output[1]];
+ dest[OUTPUT_B_INDEX] = otdata_b[output[2]];
+ dest += RGBA_OUTPUT_COMPONENTS;
+ }
+
+ /* handle final (maybe only) pixel */
+
+ vec_r = _mm_shuffle_ps(vec_r, vec_r, 0);
+ vec_g = _mm_shuffle_ps(vec_g, vec_g, 0);
+ vec_b = _mm_shuffle_ps(vec_b, vec_b, 0);
+
+ vec_r = _mm_mul_ps(vec_r, mat0);
+ vec_g = _mm_mul_ps(vec_g, mat1);
+ vec_b = _mm_mul_ps(vec_b, mat2);
+
+ dest[OUTPUT_A_INDEX] = alpha;
+
+ vec_r = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b));
+ vec_r = _mm_max_ps(min, vec_r);
+ vec_r = _mm_min_ps(max, vec_r);
+ result = _mm_mul_ps(vec_r, scale);
+
+ _mm_store_si128((__m128i*)output, _mm_cvtps_epi32(result));
+
+ dest[OUTPUT_R_INDEX] = otdata_r[output[0]];
+ dest[OUTPUT_G_INDEX] = otdata_g[output[1]];
+ dest[OUTPUT_B_INDEX] = otdata_b[output[2]];
+}
+
+
diff --git a/gfx/qcms/transform.c b/gfx/qcms/transform.c
new file mode 100644
index 000000000..139eb3738
--- /dev/null
+++ b/gfx/qcms/transform.c
@@ -0,0 +1,1410 @@
+/* vim: set ts=8 sw=8 noexpandtab: */
+// qcms
+// Copyright (C) 2009 Mozilla Corporation
+// Copyright (C) 1998-2007 Marti Maria
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+#include <stdlib.h>
+#include <math.h>
+#include <assert.h>
+#include <string.h> //memcpy
+#include "qcmsint.h"
+#include "chain.h"
+#include "matrix.h"
+#include "transform_util.h"
+
+/* for MSVC, GCC, Intel, and Sun compilers */
+#if defined(_M_IX86) || defined(__i386__) || defined(__i386) || defined(_M_AMD64) || defined(__x86_64__) || defined(__x86_64)
+#define X86
+#endif /* _M_IX86 || __i386__ || __i386 || _M_AMD64 || __x86_64__ || __x86_64 */
+
+/**
+ * AltiVec detection for PowerPC CPUs
+ * If we have a method of detecting it, do the runtime detection.
+ * Otherwise, statically choose the AltiVec path if the compiler
+ * was told to build with AltiVec support.
+ */
+#if (defined(__POWERPC__) || defined(__powerpc__))
+#if defined(__linux__)
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <elf.h>
+#include <linux/auxvec.h>
+#include <asm/cputable.h>
+#include <link.h>
+
+static inline qcms_bool have_altivec() {
+ static int available = -1;
+ int new_avail = 0;
+ ElfW(auxv_t) auxv;
+ ssize_t count;
+ int fd, i;
+
+ if (available != -1)
+ return (available != 0 ? true : false);
+
+ fd = open("/proc/self/auxv", O_RDONLY);
+ if (fd < 0)
+ goto out;
+ do {
+ count = read(fd, &auxv, sizeof(auxv));
+ if (count < 0)
+ goto out_close;
+
+ if (auxv.a_type == AT_HWCAP) {
+ new_avail = !!(auxv.a_un.a_val & PPC_FEATURE_HAS_ALTIVEC);
+ goto out_close;
+ }
+ } while (auxv.a_type != AT_NULL);
+
+out_close:
+ close(fd);
+out:
+ available = new_avail;
+ return (available != 0 ? true : false);
+}
+#elif defined(__APPLE__) && defined(__MACH__)
+#include <sys/sysctl.h>
+
+/**
+ * Adapted from ffmpeg's AltiVec detection code.
+ * This code also appears on Apple's AltiVec pages.
+ */
+static inline qcms_bool have_altivec() {
+ int sels[2] = {CTL_HW, HW_VECTORUNIT};
+ static int available = -1;
+ size_t len = sizeof(available);
+ int err;
+
+ if (available != -1)
+ return (available != 0 ? true : false);
+
+ err = sysctl(sels, 2, &available, &len, NULL, 0);
+
+ if (err == 0)
+ if (available != 0)
+ return true;
+
+ return false;
+}
+#elif defined(__ALTIVEC__) || defined(__APPLE_ALTIVEC__)
+#define have_altivec() true
+#else
+#define have_altivec() false
+#endif
+#endif // (defined(__POWERPC__) || defined(__powerpc__))
+
+// Build a white point / primary chromaticities transfer matrix from RGB to CIE XYZ.
+// This is just an approximation: I am not handling all the non-linear
+// aspects of the RGB to XYZ process, and I assume that the gamma correction
+// has a transitive property in the transformation chain.
+//
+// The algorithm:
+//
+// - First I build the absolute conversion matrix using
+//   primaries in XYZ. This matrix is next inverted.
+// - Then I evaluate the source white point across this matrix,
+//   obtaining the coefficients of the transformation.
+// - Then I apply these coefficients to the original matrix.
+static struct matrix build_RGB_to_XYZ_transfer_matrix(qcms_CIE_xyY white, qcms_CIE_xyYTRIPLE primrs)
+{
+ struct matrix primaries;
+ struct matrix primaries_invert;
+ struct matrix result;
+ struct vector white_point;
+ struct vector coefs;
+
+ double xn, yn;
+ double xr, yr;
+ double xg, yg;
+ double xb, yb;
+
+ xn = white.x;
+ yn = white.y;
+
+ if (yn == 0.0)
+ return matrix_invalid();
+
+ xr = primrs.red.x;
+ yr = primrs.red.y;
+ xg = primrs.green.x;
+ yg = primrs.green.y;
+ xb = primrs.blue.x;
+ yb = primrs.blue.y;
+
+ primaries.m[0][0] = xr;
+ primaries.m[0][1] = xg;
+ primaries.m[0][2] = xb;
+
+ primaries.m[1][0] = yr;
+ primaries.m[1][1] = yg;
+ primaries.m[1][2] = yb;
+
+ primaries.m[2][0] = 1 - xr - yr;
+ primaries.m[2][1] = 1 - xg - yg;
+ primaries.m[2][2] = 1 - xb - yb;
+ primaries.invalid = false;
+
+ white_point.v[0] = xn/yn;
+ white_point.v[1] = 1.;
+ white_point.v[2] = (1.0-xn-yn)/yn;
+
+ primaries_invert = matrix_invert(primaries);
+
+ coefs = matrix_eval(primaries_invert, white_point);
+
+ result.m[0][0] = coefs.v[0]*xr;
+ result.m[0][1] = coefs.v[1]*xg;
+ result.m[0][2] = coefs.v[2]*xb;
+
+ result.m[1][0] = coefs.v[0]*yr;
+ result.m[1][1] = coefs.v[1]*yg;
+ result.m[1][2] = coefs.v[2]*yb;
+
+ result.m[2][0] = coefs.v[0]*(1.-xr-yr);
+ result.m[2][1] = coefs.v[1]*(1.-xg-yg);
+ result.m[2][2] = coefs.v[2]*(1.-xb-yb);
+ result.invalid = primaries_invert.invalid;
+
+ return result;
+}
+
+struct CIE_XYZ {
+ double X;
+ double Y;
+ double Z;
+};
+
+/* CIE Illuminant D50 */
+static const struct CIE_XYZ D50_XYZ = {
+ 0.9642,
+ 1.0000,
+ 0.8249
+};
+
+/* from lcms: xyY2XYZ()
+ * corresponds to argyll: icmYxy2XYZ() */
+static struct CIE_XYZ xyY2XYZ(qcms_CIE_xyY source)
+{
+ struct CIE_XYZ dest;
+ dest.X = (source.x / source.y) * source.Y;
+ dest.Y = source.Y;
+ dest.Z = ((1 - source.x - source.y) / source.y) * source.Y;
+ return dest;
+}
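+/* Worked example (added commentary, approximate values): the D50 white point
+ * has chromaticity x ~ 0.3457, y ~ 0.3585 with Y = 1.0.  Running it through
+ * xyY2XYZ gives X = 0.3457/0.3585 ~ 0.9643, Y = 1.0 and
+ * Z = (1 - 0.3457 - 0.3585)/0.3585 ~ 0.8251, which matches the D50_XYZ
+ * constant above to within the rounding of the chromaticity inputs. */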
+
+/* from lcms: ComputeChromaticAdaption */
+// Compute chromatic adaption matrix using chad as cone matrix
+static struct matrix
+compute_chromatic_adaption(struct CIE_XYZ source_white_point,
+ struct CIE_XYZ dest_white_point,
+ struct matrix chad)
+{
+ struct matrix chad_inv;
+ struct vector cone_source_XYZ, cone_source_rgb;
+ struct vector cone_dest_XYZ, cone_dest_rgb;
+ struct matrix cone, tmp;
+
+ tmp = chad;
+ chad_inv = matrix_invert(tmp);
+
+ cone_source_XYZ.v[0] = source_white_point.X;
+ cone_source_XYZ.v[1] = source_white_point.Y;
+ cone_source_XYZ.v[2] = source_white_point.Z;
+
+ cone_dest_XYZ.v[0] = dest_white_point.X;
+ cone_dest_XYZ.v[1] = dest_white_point.Y;
+ cone_dest_XYZ.v[2] = dest_white_point.Z;
+
+ cone_source_rgb = matrix_eval(chad, cone_source_XYZ);
+ cone_dest_rgb = matrix_eval(chad, cone_dest_XYZ);
+
+ cone.m[0][0] = cone_dest_rgb.v[0]/cone_source_rgb.v[0];
+ cone.m[0][1] = 0;
+ cone.m[0][2] = 0;
+ cone.m[1][0] = 0;
+ cone.m[1][1] = cone_dest_rgb.v[1]/cone_source_rgb.v[1];
+ cone.m[1][2] = 0;
+ cone.m[2][0] = 0;
+ cone.m[2][1] = 0;
+ cone.m[2][2] = cone_dest_rgb.v[2]/cone_source_rgb.v[2];
+ cone.invalid = false;
+
+ // Normalize
+ return matrix_multiply(chad_inv, matrix_multiply(cone, chad));
+}
+
+/* from lcms: cmsAdaptionMatrix */
+// Returns the final chromatic adaptation from illuminant FromIll to illuminant ToIll
+// Bradford is assumed
+static struct matrix
+adaption_matrix(struct CIE_XYZ source_illumination, struct CIE_XYZ target_illumination)
+{
+ struct matrix lam_rigg = {{ // Bradford matrix
+ { 0.8951f, 0.2664f, -0.1614f },
+ { -0.7502f, 1.7135f, 0.0367f },
+ { 0.0389f, -0.0685f, 1.0296f }
+ }};
+ return compute_chromatic_adaption(source_illumination, target_illumination, lam_rigg);
+}
+
+/* from lcms: cmsAdaptMatrixToD50 */
+static struct matrix adapt_matrix_to_D50(struct matrix r, qcms_CIE_xyY source_white_pt)
+{
+ struct CIE_XYZ Dn;
+ struct matrix Bradford;
+
+ if (source_white_pt.y == 0.0)
+ return matrix_invalid();
+
+ Dn = xyY2XYZ(source_white_pt);
+
+ Bradford = adaption_matrix(Dn, D50_XYZ);
+ return matrix_multiply(Bradford, r);
+}
+
+qcms_bool set_rgb_colorants(qcms_profile *profile, qcms_CIE_xyY white_point, qcms_CIE_xyYTRIPLE primaries)
+{
+ struct matrix colorants;
+ colorants = build_RGB_to_XYZ_transfer_matrix(white_point, primaries);
+ colorants = adapt_matrix_to_D50(colorants, white_point);
+
+ if (colorants.invalid)
+ return false;
+
+ /* note: there's a transpose type of operation going on here */
+ profile->redColorant.X = double_to_s15Fixed16Number(colorants.m[0][0]);
+ profile->redColorant.Y = double_to_s15Fixed16Number(colorants.m[1][0]);
+ profile->redColorant.Z = double_to_s15Fixed16Number(colorants.m[2][0]);
+
+ profile->greenColorant.X = double_to_s15Fixed16Number(colorants.m[0][1]);
+ profile->greenColorant.Y = double_to_s15Fixed16Number(colorants.m[1][1]);
+ profile->greenColorant.Z = double_to_s15Fixed16Number(colorants.m[2][1]);
+
+ profile->blueColorant.X = double_to_s15Fixed16Number(colorants.m[0][2]);
+ profile->blueColorant.Y = double_to_s15Fixed16Number(colorants.m[1][2]);
+ profile->blueColorant.Z = double_to_s15Fixed16Number(colorants.m[2][2]);
+
+ return true;
+}
+
+qcms_bool get_rgb_colorants(struct matrix *colorants, qcms_CIE_xyY white_point, qcms_CIE_xyYTRIPLE primaries)
+{
+ *colorants = build_RGB_to_XYZ_transfer_matrix(white_point, primaries);
+ *colorants = adapt_matrix_to_D50(*colorants, white_point);
+
+ return (colorants->invalid ? true : false);
+}
+
+#if 0
+static void qcms_transform_data_rgb_out_pow(qcms_transform *transform, unsigned char *src, unsigned char *dest, size_t length)
+{
+ int i;
+ float (*mat)[4] = transform->matrix;
+ for (i=0; i<length; i++) {
+ unsigned char device_r = *src++;
+ unsigned char device_g = *src++;
+ unsigned char device_b = *src++;
+
+ float linear_r = transform->input_gamma_table_r[device_r];
+ float linear_g = transform->input_gamma_table_g[device_g];
+ float linear_b = transform->input_gamma_table_b[device_b];
+
+ float out_linear_r = mat[0][0]*linear_r + mat[1][0]*linear_g + mat[2][0]*linear_b;
+ float out_linear_g = mat[0][1]*linear_r + mat[1][1]*linear_g + mat[2][1]*linear_b;
+ float out_linear_b = mat[0][2]*linear_r + mat[1][2]*linear_g + mat[2][2]*linear_b;
+
+ float out_device_r = pow(out_linear_r, transform->out_gamma_r);
+ float out_device_g = pow(out_linear_g, transform->out_gamma_g);
+ float out_device_b = pow(out_linear_b, transform->out_gamma_b);
+
+ dest[OUTPUT_R_INDEX] = clamp_u8(255*out_device_r);
+ dest[OUTPUT_G_INDEX] = clamp_u8(255*out_device_g);
+ dest[OUTPUT_B_INDEX] = clamp_u8(255*out_device_b);
+ dest += RGB_OUTPUT_COMPONENTS;
+ }
+}
+#endif
+
+static void qcms_transform_data_gray_out_lut(qcms_transform *transform, unsigned char *src, unsigned char *dest, size_t length)
+{
+ unsigned int i;
+ for (i = 0; i < length; i++) {
+ float out_device_r, out_device_g, out_device_b;
+ unsigned char device = *src++;
+
+ float linear = transform->input_gamma_table_gray[device];
+
+ out_device_r = lut_interp_linear(linear, transform->output_gamma_lut_r, transform->output_gamma_lut_r_length);
+ out_device_g = lut_interp_linear(linear, transform->output_gamma_lut_g, transform->output_gamma_lut_g_length);
+ out_device_b = lut_interp_linear(linear, transform->output_gamma_lut_b, transform->output_gamma_lut_b_length);
+
+ dest[OUTPUT_R_INDEX] = clamp_u8(out_device_r*255);
+ dest[OUTPUT_G_INDEX] = clamp_u8(out_device_g*255);
+ dest[OUTPUT_B_INDEX] = clamp_u8(out_device_b*255);
+ dest += RGB_OUTPUT_COMPONENTS;
+ }
+}
+
+/* Alpha is not corrected.
+ A rationale for this is found in Alvy Ray's "Should Alpha Be Nonlinear If
+ RGB Is?" Tech Memo 17 (December 14, 1998).
+ See: ftp://ftp.alvyray.com/Acrobat/17_Nonln.pdf
+*/
+
+static void qcms_transform_data_graya_out_lut(qcms_transform *transform, unsigned char *src, unsigned char *dest, size_t length)
+{
+ unsigned int i;
+ for (i = 0; i < length; i++) {
+ float out_device_r, out_device_g, out_device_b;
+ unsigned char device = *src++;
+ unsigned char alpha = *src++;
+
+ float linear = transform->input_gamma_table_gray[device];
+
+ out_device_r = lut_interp_linear(linear, transform->output_gamma_lut_r, transform->output_gamma_lut_r_length);
+ out_device_g = lut_interp_linear(linear, transform->output_gamma_lut_g, transform->output_gamma_lut_g_length);
+ out_device_b = lut_interp_linear(linear, transform->output_gamma_lut_b, transform->output_gamma_lut_b_length);
+
+ dest[OUTPUT_R_INDEX] = clamp_u8(out_device_r*255);
+ dest[OUTPUT_G_INDEX] = clamp_u8(out_device_g*255);
+ dest[OUTPUT_B_INDEX] = clamp_u8(out_device_b*255);
+ dest[OUTPUT_A_INDEX] = alpha;
+ dest += RGBA_OUTPUT_COMPONENTS;
+ }
+}
+
+
+static void qcms_transform_data_gray_out_precache(qcms_transform *transform, unsigned char *src, unsigned char *dest, size_t length)
+{
+ unsigned int i;
+ for (i = 0; i < length; i++) {
+ unsigned char device = *src++;
+ uint16_t gray;
+
+ float linear = transform->input_gamma_table_gray[device];
+
+ /* we could round here... */
+ gray = linear * PRECACHE_OUTPUT_MAX;
+
+ dest[OUTPUT_R_INDEX] = transform->output_table_r->data[gray];
+ dest[OUTPUT_G_INDEX] = transform->output_table_g->data[gray];
+ dest[OUTPUT_B_INDEX] = transform->output_table_b->data[gray];
+ dest += RGB_OUTPUT_COMPONENTS;
+ }
+}
+
+static void qcms_transform_data_graya_out_precache(qcms_transform *transform, unsigned char *src, unsigned char *dest, size_t length)
+{
+ unsigned int i;
+ for (i = 0; i < length; i++) {
+ unsigned char device = *src++;
+ unsigned char alpha = *src++;
+ uint16_t gray;
+
+ float linear = transform->input_gamma_table_gray[device];
+
+ /* we could round here... */
+ gray = linear * PRECACHE_OUTPUT_MAX;
+
+ dest[OUTPUT_R_INDEX] = transform->output_table_r->data[gray];
+ dest[OUTPUT_G_INDEX] = transform->output_table_g->data[gray];
+ dest[OUTPUT_B_INDEX] = transform->output_table_b->data[gray];
+ dest[OUTPUT_A_INDEX] = alpha;
+ dest += RGBA_OUTPUT_COMPONENTS;
+ }
+}
+
+static void qcms_transform_data_rgb_out_lut_precache(qcms_transform *transform, unsigned char *src, unsigned char *dest, size_t length)
+{
+ unsigned int i;
+ float (*mat)[4] = transform->matrix;
+ for (i = 0; i < length; i++) {
+ unsigned char device_r = *src++;
+ unsigned char device_g = *src++;
+ unsigned char device_b = *src++;
+ uint16_t r, g, b;
+
+ float linear_r = transform->input_gamma_table_r[device_r];
+ float linear_g = transform->input_gamma_table_g[device_g];
+ float linear_b = transform->input_gamma_table_b[device_b];
+
+ float out_linear_r = mat[0][0]*linear_r + mat[1][0]*linear_g + mat[2][0]*linear_b;
+ float out_linear_g = mat[0][1]*linear_r + mat[1][1]*linear_g + mat[2][1]*linear_b;
+ float out_linear_b = mat[0][2]*linear_r + mat[1][2]*linear_g + mat[2][2]*linear_b;
+
+ out_linear_r = clamp_float(out_linear_r);
+ out_linear_g = clamp_float(out_linear_g);
+ out_linear_b = clamp_float(out_linear_b);
+
+ /* we could round here... */
+ r = out_linear_r * PRECACHE_OUTPUT_MAX;
+ g = out_linear_g * PRECACHE_OUTPUT_MAX;
+ b = out_linear_b * PRECACHE_OUTPUT_MAX;
+
+ dest[OUTPUT_R_INDEX] = transform->output_table_r->data[r];
+ dest[OUTPUT_G_INDEX] = transform->output_table_g->data[g];
+ dest[OUTPUT_B_INDEX] = transform->output_table_b->data[b];
+ dest += RGB_OUTPUT_COMPONENTS;
+ }
+}
+
+static void qcms_transform_data_rgba_out_lut_precache(qcms_transform *transform, unsigned char *src, unsigned char *dest, size_t length)
+{
+ unsigned int i;
+ float (*mat)[4] = transform->matrix;
+ for (i = 0; i < length; i++) {
+ unsigned char device_r = *src++;
+ unsigned char device_g = *src++;
+ unsigned char device_b = *src++;
+ unsigned char alpha = *src++;
+ uint16_t r, g, b;
+
+ float linear_r = transform->input_gamma_table_r[device_r];
+ float linear_g = transform->input_gamma_table_g[device_g];
+ float linear_b = transform->input_gamma_table_b[device_b];
+
+ float out_linear_r = mat[0][0]*linear_r + mat[1][0]*linear_g + mat[2][0]*linear_b;
+ float out_linear_g = mat[0][1]*linear_r + mat[1][1]*linear_g + mat[2][1]*linear_b;
+ float out_linear_b = mat[0][2]*linear_r + mat[1][2]*linear_g + mat[2][2]*linear_b;
+
+ out_linear_r = clamp_float(out_linear_r);
+ out_linear_g = clamp_float(out_linear_g);
+ out_linear_b = clamp_float(out_linear_b);
+
+ /* we could round here... */
+ r = out_linear_r * PRECACHE_OUTPUT_MAX;
+ g = out_linear_g * PRECACHE_OUTPUT_MAX;
+ b = out_linear_b * PRECACHE_OUTPUT_MAX;
+
+ dest[OUTPUT_R_INDEX] = transform->output_table_r->data[r];
+ dest[OUTPUT_G_INDEX] = transform->output_table_g->data[g];
+ dest[OUTPUT_B_INDEX] = transform->output_table_b->data[b];
+ dest[OUTPUT_A_INDEX] = alpha;
+ dest += RGBA_OUTPUT_COMPONENTS;
+ }
+}
+
+// Not used
+/*
+static void qcms_transform_data_clut(qcms_transform *transform, unsigned char *src, unsigned char *dest, size_t length) {
+ unsigned int i;
+ int xy_len = 1;
+ int x_len = transform->grid_size;
+ int len = x_len * x_len;
+ float* r_table = transform->r_clut;
+ float* g_table = transform->g_clut;
+ float* b_table = transform->b_clut;
+
+ for (i = 0; i < length; i++) {
+ unsigned char in_r = *src++;
+ unsigned char in_g = *src++;
+ unsigned char in_b = *src++;
+ float linear_r = in_r/255.0f, linear_g=in_g/255.0f, linear_b = in_b/255.0f;
+
+ int x = floorf(linear_r * (transform->grid_size-1));
+ int y = floorf(linear_g * (transform->grid_size-1));
+ int z = floorf(linear_b * (transform->grid_size-1));
+ int x_n = ceilf(linear_r * (transform->grid_size-1));
+ int y_n = ceilf(linear_g * (transform->grid_size-1));
+ int z_n = ceilf(linear_b * (transform->grid_size-1));
+ float x_d = linear_r * (transform->grid_size-1) - x;
+ float y_d = linear_g * (transform->grid_size-1) - y;
+ float z_d = linear_b * (transform->grid_size-1) - z;
+
+ float r_x1 = lerp(CLU(r_table,x,y,z), CLU(r_table,x_n,y,z), x_d);
+ float r_x2 = lerp(CLU(r_table,x,y_n,z), CLU(r_table,x_n,y_n,z), x_d);
+ float r_y1 = lerp(r_x1, r_x2, y_d);
+ float r_x3 = lerp(CLU(r_table,x,y,z_n), CLU(r_table,x_n,y,z_n), x_d);
+ float r_x4 = lerp(CLU(r_table,x,y_n,z_n), CLU(r_table,x_n,y_n,z_n), x_d);
+ float r_y2 = lerp(r_x3, r_x4, y_d);
+ float clut_r = lerp(r_y1, r_y2, z_d);
+
+ float g_x1 = lerp(CLU(g_table,x,y,z), CLU(g_table,x_n,y,z), x_d);
+ float g_x2 = lerp(CLU(g_table,x,y_n,z), CLU(g_table,x_n,y_n,z), x_d);
+ float g_y1 = lerp(g_x1, g_x2, y_d);
+ float g_x3 = lerp(CLU(g_table,x,y,z_n), CLU(g_table,x_n,y,z_n), x_d);
+ float g_x4 = lerp(CLU(g_table,x,y_n,z_n), CLU(g_table,x_n,y_n,z_n), x_d);
+ float g_y2 = lerp(g_x3, g_x4, y_d);
+ float clut_g = lerp(g_y1, g_y2, z_d);
+
+ float b_x1 = lerp(CLU(b_table,x,y,z), CLU(b_table,x_n,y,z), x_d);
+ float b_x2 = lerp(CLU(b_table,x,y_n,z), CLU(b_table,x_n,y_n,z), x_d);
+ float b_y1 = lerp(b_x1, b_x2, y_d);
+ float b_x3 = lerp(CLU(b_table,x,y,z_n), CLU(b_table,x_n,y,z_n), x_d);
+ float b_x4 = lerp(CLU(b_table,x,y_n,z_n), CLU(b_table,x_n,y_n,z_n), x_d);
+ float b_y2 = lerp(b_x3, b_x4, y_d);
+ float clut_b = lerp(b_y1, b_y2, z_d);
+
+ *dest++ = clamp_u8(clut_r*255.0f);
+ *dest++ = clamp_u8(clut_g*255.0f);
+ *dest++ = clamp_u8(clut_b*255.0f);
+ }
+}
+*/
+
+static int int_div_ceil(int value, int div) {
+ return ((value + div - 1) / div);
+}
+
+// Using lcms' tetra interpolation algorithm.
+static void qcms_transform_data_tetra_clut_rgba(qcms_transform *transform, unsigned char *src, unsigned char *dest, size_t length) {
+ unsigned int i;
+ int xy_len = 1;
+ int x_len = transform->grid_size;
+ int len = x_len * x_len;
+ float* r_table = transform->r_clut;
+ float* g_table = transform->g_clut;
+ float* b_table = transform->b_clut;
+ float c0_r, c1_r, c2_r, c3_r;
+ float c0_g, c1_g, c2_g, c3_g;
+ float c0_b, c1_b, c2_b, c3_b;
+ float clut_r, clut_g, clut_b;
+ for (i = 0; i < length; i++) {
+ unsigned char in_r = *src++;
+ unsigned char in_g = *src++;
+ unsigned char in_b = *src++;
+ unsigned char in_a = *src++;
+ float linear_r = in_r/255.0f, linear_g=in_g/255.0f, linear_b = in_b/255.0f;
+
+ int x = in_r * (transform->grid_size-1) / 255;
+ int y = in_g * (transform->grid_size-1) / 255;
+ int z = in_b * (transform->grid_size-1) / 255;
+ int x_n = int_div_ceil(in_r * (transform->grid_size-1), 255);
+ int y_n = int_div_ceil(in_g * (transform->grid_size-1), 255);
+ int z_n = int_div_ceil(in_b * (transform->grid_size-1), 255);
+ float rx = linear_r * (transform->grid_size-1) - x;
+ float ry = linear_g * (transform->grid_size-1) - y;
+ float rz = linear_b * (transform->grid_size-1) - z;
+
+ c0_r = CLU(r_table, x, y, z);
+ c0_g = CLU(g_table, x, y, z);
+ c0_b = CLU(b_table, x, y, z);
+
+ if( rx >= ry ) {
+ if (ry >= rz) { //rx >= ry && ry >= rz
+ c1_r = CLU(r_table, x_n, y, z) - c0_r;
+ c2_r = CLU(r_table, x_n, y_n, z) - CLU(r_table, x_n, y, z);
+ c3_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x_n, y_n, z);
+ c1_g = CLU(g_table, x_n, y, z) - c0_g;
+ c2_g = CLU(g_table, x_n, y_n, z) - CLU(g_table, x_n, y, z);
+ c3_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x_n, y_n, z);
+ c1_b = CLU(b_table, x_n, y, z) - c0_b;
+ c2_b = CLU(b_table, x_n, y_n, z) - CLU(b_table, x_n, y, z);
+ c3_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x_n, y_n, z);
+ } else {
+ if (rx >= rz) { //rx >= rz && rz >= ry
+ c1_r = CLU(r_table, x_n, y, z) - c0_r;
+ c2_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x_n, y, z_n);
+ c3_r = CLU(r_table, x_n, y, z_n) - CLU(r_table, x_n, y, z);
+ c1_g = CLU(g_table, x_n, y, z) - c0_g;
+ c2_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x_n, y, z_n);
+ c3_g = CLU(g_table, x_n, y, z_n) - CLU(g_table, x_n, y, z);
+ c1_b = CLU(b_table, x_n, y, z) - c0_b;
+ c2_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x_n, y, z_n);
+ c3_b = CLU(b_table, x_n, y, z_n) - CLU(b_table, x_n, y, z);
+ } else { //rz > rx && rx >= ry
+ c1_r = CLU(r_table, x_n, y, z_n) - CLU(r_table, x, y, z_n);
+ c2_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x_n, y, z_n);
+ c3_r = CLU(r_table, x, y, z_n) - c0_r;
+ c1_g = CLU(g_table, x_n, y, z_n) - CLU(g_table, x, y, z_n);
+ c2_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x_n, y, z_n);
+ c3_g = CLU(g_table, x, y, z_n) - c0_g;
+ c1_b = CLU(b_table, x_n, y, z_n) - CLU(b_table, x, y, z_n);
+ c2_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x_n, y, z_n);
+ c3_b = CLU(b_table, x, y, z_n) - c0_b;
+ }
+ }
+ } else {
+ if (rx >= rz) { //ry > rx && rx >= rz
+ c1_r = CLU(r_table, x_n, y_n, z) - CLU(r_table, x, y_n, z);
+ c2_r = CLU(r_table, x, y_n, z) - c0_r;
+ c3_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x_n, y_n, z);
+ c1_g = CLU(g_table, x_n, y_n, z) - CLU(g_table, x, y_n, z);
+ c2_g = CLU(g_table, x, y_n, z) - c0_g;
+ c3_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x_n, y_n, z);
+ c1_b = CLU(b_table, x_n, y_n, z) - CLU(b_table, x, y_n, z);
+ c2_b = CLU(b_table, x, y_n, z) - c0_b;
+ c3_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x_n, y_n, z);
+ } else {
+ if (ry >= rz) { //ry >= rz && rz > rx
+ c1_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x, y_n, z_n);
+ c2_r = CLU(r_table, x, y_n, z) - c0_r;
+ c3_r = CLU(r_table, x, y_n, z_n) - CLU(r_table, x, y_n, z);
+ c1_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x, y_n, z_n);
+ c2_g = CLU(g_table, x, y_n, z) - c0_g;
+ c3_g = CLU(g_table, x, y_n, z_n) - CLU(g_table, x, y_n, z);
+ c1_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x, y_n, z_n);
+ c2_b = CLU(b_table, x, y_n, z) - c0_b;
+ c3_b = CLU(b_table, x, y_n, z_n) - CLU(b_table, x, y_n, z);
+ } else { //rz > ry && ry > rx
+ c1_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x, y_n, z_n);
+ c2_r = CLU(r_table, x, y_n, z_n) - CLU(r_table, x, y, z_n);
+ c3_r = CLU(r_table, x, y, z_n) - c0_r;
+ c1_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x, y_n, z_n);
+ c2_g = CLU(g_table, x, y_n, z_n) - CLU(g_table, x, y, z_n);
+ c3_g = CLU(g_table, x, y, z_n) - c0_g;
+ c1_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x, y_n, z_n);
+ c2_b = CLU(b_table, x, y_n, z_n) - CLU(b_table, x, y, z_n);
+ c3_b = CLU(b_table, x, y, z_n) - c0_b;
+ }
+ }
+ }
+
+ clut_r = c0_r + c1_r*rx + c2_r*ry + c3_r*rz;
+ clut_g = c0_g + c1_g*rx + c2_g*ry + c3_g*rz;
+ clut_b = c0_b + c1_b*rx + c2_b*ry + c3_b*rz;
+
+ dest[OUTPUT_R_INDEX] = clamp_u8(clut_r*255.0f);
+ dest[OUTPUT_G_INDEX] = clamp_u8(clut_g*255.0f);
+ dest[OUTPUT_B_INDEX] = clamp_u8(clut_b*255.0f);
+ dest[OUTPUT_A_INDEX] = in_a;
+ dest += RGBA_OUTPUT_COMPONENTS;
+ }
+}
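+/* Note on the branch ladder above (added commentary): rx, ry and rz are the
+ * fractional positions inside the current CLUT cell, and their ordering
+ * selects one of the six tetrahedra that tile the cube.  c1..c3 are the
+ * per-axis deltas along that tetrahedron's edges, so the interpolated value
+ * is c0 + c1*rx + c2*ry + c3*rz for each channel. */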
+
+// Using lcms' tetra interpolation code.
+static void qcms_transform_data_tetra_clut(qcms_transform *transform, unsigned char *src, unsigned char *dest, size_t length) {
+ unsigned int i;
+ int xy_len = 1;
+ int x_len = transform->grid_size;
+ int len = x_len * x_len;
+ float* r_table = transform->r_clut;
+ float* g_table = transform->g_clut;
+ float* b_table = transform->b_clut;
+ float c0_r, c1_r, c2_r, c3_r;
+ float c0_g, c1_g, c2_g, c3_g;
+ float c0_b, c1_b, c2_b, c3_b;
+ float clut_r, clut_g, clut_b;
+ for (i = 0; i < length; i++) {
+ unsigned char in_r = *src++;
+ unsigned char in_g = *src++;
+ unsigned char in_b = *src++;
+ float linear_r = in_r/255.0f, linear_g=in_g/255.0f, linear_b = in_b/255.0f;
+
+ int x = in_r * (transform->grid_size-1) / 255;
+ int y = in_g * (transform->grid_size-1) / 255;
+ int z = in_b * (transform->grid_size-1) / 255;
+ int x_n = int_div_ceil(in_r * (transform->grid_size-1), 255);
+ int y_n = int_div_ceil(in_g * (transform->grid_size-1), 255);
+ int z_n = int_div_ceil(in_b * (transform->grid_size-1), 255);
+ float rx = linear_r * (transform->grid_size-1) - x;
+ float ry = linear_g * (transform->grid_size-1) - y;
+ float rz = linear_b * (transform->grid_size-1) - z;
+
+ c0_r = CLU(r_table, x, y, z);
+ c0_g = CLU(g_table, x, y, z);
+ c0_b = CLU(b_table, x, y, z);
+
+ if( rx >= ry ) {
+ if (ry >= rz) { //rx >= ry && ry >= rz
+ c1_r = CLU(r_table, x_n, y, z) - c0_r;
+ c2_r = CLU(r_table, x_n, y_n, z) - CLU(r_table, x_n, y, z);
+ c3_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x_n, y_n, z);
+ c1_g = CLU(g_table, x_n, y, z) - c0_g;
+ c2_g = CLU(g_table, x_n, y_n, z) - CLU(g_table, x_n, y, z);
+ c3_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x_n, y_n, z);
+ c1_b = CLU(b_table, x_n, y, z) - c0_b;
+ c2_b = CLU(b_table, x_n, y_n, z) - CLU(b_table, x_n, y, z);
+ c3_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x_n, y_n, z);
+ } else {
+ if (rx >= rz) { //rx >= rz && rz >= ry
+ c1_r = CLU(r_table, x_n, y, z) - c0_r;
+ c2_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x_n, y, z_n);
+ c3_r = CLU(r_table, x_n, y, z_n) - CLU(r_table, x_n, y, z);
+ c1_g = CLU(g_table, x_n, y, z) - c0_g;
+ c2_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x_n, y, z_n);
+ c3_g = CLU(g_table, x_n, y, z_n) - CLU(g_table, x_n, y, z);
+ c1_b = CLU(b_table, x_n, y, z) - c0_b;
+ c2_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x_n, y, z_n);
+ c3_b = CLU(b_table, x_n, y, z_n) - CLU(b_table, x_n, y, z);
+ } else { //rz > rx && rx >= ry
+ c1_r = CLU(r_table, x_n, y, z_n) - CLU(r_table, x, y, z_n);
+ c2_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x_n, y, z_n);
+ c3_r = CLU(r_table, x, y, z_n) - c0_r;
+ c1_g = CLU(g_table, x_n, y, z_n) - CLU(g_table, x, y, z_n);
+ c2_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x_n, y, z_n);
+ c3_g = CLU(g_table, x, y, z_n) - c0_g;
+ c1_b = CLU(b_table, x_n, y, z_n) - CLU(b_table, x, y, z_n);
+ c2_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x_n, y, z_n);
+ c3_b = CLU(b_table, x, y, z_n) - c0_b;
+ }
+ }
+ } else {
+ if (rx >= rz) { //ry > rx && rx >= rz
+ c1_r = CLU(r_table, x_n, y_n, z) - CLU(r_table, x, y_n, z);
+ c2_r = CLU(r_table, x, y_n, z) - c0_r;
+ c3_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x_n, y_n, z);
+ c1_g = CLU(g_table, x_n, y_n, z) - CLU(g_table, x, y_n, z);
+ c2_g = CLU(g_table, x, y_n, z) - c0_g;
+ c3_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x_n, y_n, z);
+ c1_b = CLU(b_table, x_n, y_n, z) - CLU(b_table, x, y_n, z);
+ c2_b = CLU(b_table, x, y_n, z) - c0_b;
+ c3_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x_n, y_n, z);
+ } else {
+ if (ry >= rz) { //ry >= rz && rz > rx
+ c1_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x, y_n, z_n);
+ c2_r = CLU(r_table, x, y_n, z) - c0_r;
+ c3_r = CLU(r_table, x, y_n, z_n) - CLU(r_table, x, y_n, z);
+ c1_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x, y_n, z_n);
+ c2_g = CLU(g_table, x, y_n, z) - c0_g;
+ c3_g = CLU(g_table, x, y_n, z_n) - CLU(g_table, x, y_n, z);
+ c1_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x, y_n, z_n);
+ c2_b = CLU(b_table, x, y_n, z) - c0_b;
+ c3_b = CLU(b_table, x, y_n, z_n) - CLU(b_table, x, y_n, z);
+ } else { //rz > ry && ry > rx
+ c1_r = CLU(r_table, x_n, y_n, z_n) - CLU(r_table, x, y_n, z_n);
+ c2_r = CLU(r_table, x, y_n, z_n) - CLU(r_table, x, y, z_n);
+ c3_r = CLU(r_table, x, y, z_n) - c0_r;
+ c1_g = CLU(g_table, x_n, y_n, z_n) - CLU(g_table, x, y_n, z_n);
+ c2_g = CLU(g_table, x, y_n, z_n) - CLU(g_table, x, y, z_n);
+ c3_g = CLU(g_table, x, y, z_n) - c0_g;
+ c1_b = CLU(b_table, x_n, y_n, z_n) - CLU(b_table, x, y_n, z_n);
+ c2_b = CLU(b_table, x, y_n, z_n) - CLU(b_table, x, y, z_n);
+ c3_b = CLU(b_table, x, y, z_n) - c0_b;
+ }
+ }
+ }
+
+ clut_r = c0_r + c1_r*rx + c2_r*ry + c3_r*rz;
+ clut_g = c0_g + c1_g*rx + c2_g*ry + c3_g*rz;
+ clut_b = c0_b + c1_b*rx + c2_b*ry + c3_b*rz;
+
+ dest[OUTPUT_R_INDEX] = clamp_u8(clut_r*255.0f);
+ dest[OUTPUT_G_INDEX] = clamp_u8(clut_g*255.0f);
+ dest[OUTPUT_B_INDEX] = clamp_u8(clut_b*255.0f);
+ dest += RGB_OUTPUT_COMPONENTS;
+ }
+}
+
+static void qcms_transform_data_rgb_out_lut(qcms_transform *transform, unsigned char *src, unsigned char *dest, size_t length)
+{
+ unsigned int i;
+ float (*mat)[4] = transform->matrix;
+ for (i = 0; i < length; i++) {
+ unsigned char device_r = *src++;
+ unsigned char device_g = *src++;
+ unsigned char device_b = *src++;
+ float out_device_r, out_device_g, out_device_b;
+
+ float linear_r = transform->input_gamma_table_r[device_r];
+ float linear_g = transform->input_gamma_table_g[device_g];
+ float linear_b = transform->input_gamma_table_b[device_b];
+
+ float out_linear_r = mat[0][0]*linear_r + mat[1][0]*linear_g + mat[2][0]*linear_b;
+ float out_linear_g = mat[0][1]*linear_r + mat[1][1]*linear_g + mat[2][1]*linear_b;
+ float out_linear_b = mat[0][2]*linear_r + mat[1][2]*linear_g + mat[2][2]*linear_b;
+
+ out_linear_r = clamp_float(out_linear_r);
+ out_linear_g = clamp_float(out_linear_g);
+ out_linear_b = clamp_float(out_linear_b);
+
+ out_device_r = lut_interp_linear(out_linear_r,
+ transform->output_gamma_lut_r, transform->output_gamma_lut_r_length);
+ out_device_g = lut_interp_linear(out_linear_g,
+ transform->output_gamma_lut_g, transform->output_gamma_lut_g_length);
+ out_device_b = lut_interp_linear(out_linear_b,
+ transform->output_gamma_lut_b, transform->output_gamma_lut_b_length);
+
+ dest[OUTPUT_R_INDEX] = clamp_u8(out_device_r*255);
+ dest[OUTPUT_G_INDEX] = clamp_u8(out_device_g*255);
+ dest[OUTPUT_B_INDEX] = clamp_u8(out_device_b*255);
+ dest += RGB_OUTPUT_COMPONENTS;
+ }
+}
+
+static void qcms_transform_data_rgba_out_lut(qcms_transform *transform, unsigned char *src, unsigned char *dest, size_t length)
+{
+ unsigned int i;
+ float (*mat)[4] = transform->matrix;
+ for (i = 0; i < length; i++) {
+ unsigned char device_r = *src++;
+ unsigned char device_g = *src++;
+ unsigned char device_b = *src++;
+ unsigned char alpha = *src++;
+ float out_device_r, out_device_g, out_device_b;
+
+ float linear_r = transform->input_gamma_table_r[device_r];
+ float linear_g = transform->input_gamma_table_g[device_g];
+ float linear_b = transform->input_gamma_table_b[device_b];
+
+ float out_linear_r = mat[0][0]*linear_r + mat[1][0]*linear_g + mat[2][0]*linear_b;
+ float out_linear_g = mat[0][1]*linear_r + mat[1][1]*linear_g + mat[2][1]*linear_b;
+ float out_linear_b = mat[0][2]*linear_r + mat[1][2]*linear_g + mat[2][2]*linear_b;
+
+ out_linear_r = clamp_float(out_linear_r);
+ out_linear_g = clamp_float(out_linear_g);
+ out_linear_b = clamp_float(out_linear_b);
+
+ out_device_r = lut_interp_linear(out_linear_r,
+ transform->output_gamma_lut_r, transform->output_gamma_lut_r_length);
+ out_device_g = lut_interp_linear(out_linear_g,
+ transform->output_gamma_lut_g, transform->output_gamma_lut_g_length);
+ out_device_b = lut_interp_linear(out_linear_b,
+ transform->output_gamma_lut_b, transform->output_gamma_lut_b_length);
+
+ dest[OUTPUT_R_INDEX] = clamp_u8(out_device_r*255);
+ dest[OUTPUT_G_INDEX] = clamp_u8(out_device_g*255);
+ dest[OUTPUT_B_INDEX] = clamp_u8(out_device_b*255);
+ dest[OUTPUT_A_INDEX] = alpha;
+ dest += RGBA_OUTPUT_COMPONENTS;
+ }
+}
+
+#if 0
+static void qcms_transform_data_rgb_out_linear(qcms_transform *transform, unsigned char *src, unsigned char *dest, size_t length)
+{
+ int i;
+ float (*mat)[4] = transform->matrix;
+ for (i = 0; i < length; i++) {
+ unsigned char device_r = *src++;
+ unsigned char device_g = *src++;
+ unsigned char device_b = *src++;
+
+ float linear_r = transform->input_gamma_table_r[device_r];
+ float linear_g = transform->input_gamma_table_g[device_g];
+ float linear_b = transform->input_gamma_table_b[device_b];
+
+ float out_linear_r = mat[0][0]*linear_r + mat[1][0]*linear_g + mat[2][0]*linear_b;
+ float out_linear_g = mat[0][1]*linear_r + mat[1][1]*linear_g + mat[2][1]*linear_b;
+ float out_linear_b = mat[0][2]*linear_r + mat[1][2]*linear_g + mat[2][2]*linear_b;
+
+ *dest++ = clamp_u8(out_linear_r*255);
+ *dest++ = clamp_u8(out_linear_g*255);
+ *dest++ = clamp_u8(out_linear_b*255);
+ }
+}
+#endif
+
+/*
+ * If users create and destroy objects on different threads, even if the same
+ * objects aren't used on different threads at the same time, we can still run
+ * into trouble with refcounts if they aren't atomic.
+ *
+ * This can lead to us prematurely deleting the precache if threads get unlucky
+ * and write the wrong value to the ref count.
+ */
+static struct precache_output *precache_reference(struct precache_output *p)
+{
+ qcms_atomic_increment(p->ref_count);
+ return p;
+}
+
+static struct precache_output *precache_create()
+{
+ struct precache_output *p = malloc(sizeof(struct precache_output));
+ if (p)
+ p->ref_count = 1;
+ return p;
+}
+
+void precache_release(struct precache_output *p)
+{
+ if (qcms_atomic_decrement(p->ref_count) == 0) {
+ free(p);
+ }
+}
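+/* Ownership sketch (added commentary, not a verbatim call sequence from this
+ * file): precache_create() hands back an object with ref_count == 1; every
+ * additional holder takes its own reference with precache_reference(), and
+ * every holder, including the creator, eventually balances it with one
+ * precache_release().  The last release is the one that frees the table. */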
+
+#ifdef HAVE_POSIX_MEMALIGN
+static qcms_transform *transform_alloc(void)
+{
+ qcms_transform *t;
+
+ void *allocated_memory;
+ if (!posix_memalign(&allocated_memory, 16, sizeof(qcms_transform))) {
+ /* Do a memset to initialise all bits to zero */
+ memset(allocated_memory, 0, sizeof(qcms_transform));
+ t = allocated_memory;
+ return t;
+ } else {
+ return NULL;
+ }
+}
+static void transform_free(qcms_transform *t)
+{
+ free(t);
+}
+#else
+static qcms_transform *transform_alloc(void)
+{
+ /* the transform needs to be aligned on a 16-byte boundary */
+ char *original_block = calloc(sizeof(qcms_transform) + sizeof(void*) + 16, 1);
+ /* make room for a pointer to the block returned by calloc */
+ void *transform_start = original_block + sizeof(void*);
+ /* align transform_start */
+ qcms_transform *transform_aligned = (qcms_transform*)(((uintptr_t)transform_start + 15) & ~0xf);
+
+ /* store a pointer to the block returned by calloc so that we can free it later */
+ void **(original_block_ptr) = (void**)transform_aligned;
+ if (!original_block)
+ return NULL;
+ original_block_ptr--;
+ *original_block_ptr = original_block;
+
+ return transform_aligned;
+}
+static void transform_free(qcms_transform *t)
+{
+ /* get at the pointer to the unaligned block returned by calloc */
+ void **p = (void**)t;
+ p--;
+ free(*p);
+}
+#endif
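+/* Illustration of the fallback allocator above (hypothetical addresses,
+ * 4-byte pointers): if calloc() returns 0x1004, transform_start is 0x1008 and
+ * rounding up to the next 16-byte boundary places the transform at 0x1010.
+ * The original 0x1004 value is stowed in the pointer slot just below the
+ * aligned block (at 0x100c), which is exactly what transform_free() reads
+ * back before calling free(). */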
+
+void qcms_transform_release(qcms_transform *t)
+{
+ /* ensure we only free the gamma tables once even if there are
+ * multiple references to the same data */
+
+ if (t->output_table_r)
+ precache_release(t->output_table_r);
+ if (t->output_table_g)
+ precache_release(t->output_table_g);
+ if (t->output_table_b)
+ precache_release(t->output_table_b);
+
+ free(t->input_gamma_table_r);
+ if (t->input_gamma_table_g != t->input_gamma_table_r)
+ free(t->input_gamma_table_g);
+ if (t->input_gamma_table_g != t->input_gamma_table_r &&
+ t->input_gamma_table_g != t->input_gamma_table_b)
+ free(t->input_gamma_table_b);
+
+ free(t->input_gamma_table_gray);
+
+ free(t->output_gamma_lut_r);
+ free(t->output_gamma_lut_g);
+ free(t->output_gamma_lut_b);
+
+ transform_free(t);
+}
+
+#ifdef X86
+// Determine if we can build with SSE2 (this was partly copied from jmorecfg.h in
+// mozilla/jpeg)
+ // -------------------------------------------------------------------------
+#if defined(_M_IX86) && defined(_MSC_VER)
+#define HAS_CPUID
+/* Get us a CPUID function. Avoid clobbering EBX because sometimes it's the PIC
+ register - I'm not sure if that ever happens on windows, but cpuid isn't
+ on the critical path so we just preserve the register to be safe and to be
+ consistent with the non-windows version. */
+static void cpuid(uint32_t fxn, uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d) {
+ uint32_t a_, b_, c_, d_;
+ __asm {
+ xchg ebx, esi
+ mov eax, fxn
+ cpuid
+ mov a_, eax
+ mov b_, ebx
+ mov c_, ecx
+ mov d_, edx
+ xchg ebx, esi
+ }
+ *a = a_;
+ *b = b_;
+ *c = c_;
+ *d = d_;
+}
+#elif (defined(__GNUC__) || defined(__SUNPRO_C)) && (defined(__i386__) || defined(__i386))
+#define HAS_CPUID
+/* Get us a CPUID function. We can't use ebx because it's the PIC register on
+ some platforms, so we use ESI instead and save ebx to avoid clobbering it. */
+static void cpuid(uint32_t fxn, uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d) {
+
+ uint32_t a_, b_, c_, d_;
+ __asm__ __volatile__ ("xchgl %%ebx, %%esi; cpuid; xchgl %%ebx, %%esi;"
+ : "=a" (a_), "=S" (b_), "=c" (c_), "=d" (d_) : "a" (fxn));
+ *a = a_;
+ *b = b_;
+ *c = c_;
+ *d = d_;
+}
+#endif
+
+// -------------------------Runtime SSEx Detection-----------------------------
+
+/* MMX is always supported per
+ * Gecko v1.9.1 minimum CPU requirements */
+#define SSE1_EDX_MASK (1UL << 25)
+#define SSE2_EDX_MASK (1UL << 26)
+#define SSE3_ECX_MASK (1UL << 0)
+
+static int sse_version_available(void)
+{
+#if defined(__x86_64__) || defined(__x86_64) || defined(_M_AMD64)
+ /* we know at build time that 64-bit CPUs always have SSE2.
+ * This tells the compiler that non-SSE2 branches will never be
+ * taken (i.e. it is OK to optimize away the SSE1 and non-SIMD code). */
+ return 2;
+#elif defined(HAS_CPUID)
+ static int sse_version = -1;
+ uint32_t a, b, c, d;
+ uint32_t function = 0x00000001;
+
+ if (sse_version == -1) {
+ sse_version = 0;
+ cpuid(function, &a, &b, &c, &d);
+ if (c & SSE3_ECX_MASK)
+ sse_version = 3;
+ else if (d & SSE2_EDX_MASK)
+ sse_version = 2;
+ else if (d & SSE1_EDX_MASK)
+ sse_version = 1;
+ }
+
+ return sse_version;
+#else
+ return 0;
+#endif
+}
+#endif
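+/* Usage sketch (added commentary): the dispatch code later in this file is
+ * expected to check this value at run time, along the lines of
+ * "if (sse_version_available() >= 2) use the _sse2 routines, else if
+ * (sse_version_available() >= 1) the _sse1 routines, else the plain C paths".
+ */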
+
+static const struct matrix bradford_matrix = {{ { 0.8951f, 0.2664f,-0.1614f},
+ {-0.7502f, 1.7135f, 0.0367f},
+ { 0.0389f,-0.0685f, 1.0296f}},
+ false};
+
+static const struct matrix bradford_matrix_inv = {{ { 0.9869929f,-0.1470543f, 0.1599627f},
+ { 0.4323053f, 0.5183603f, 0.0492912f},
+ {-0.0085287f, 0.0400428f, 0.9684867f}},
+ false};
+
+// See ICCv4 E.3
+struct matrix compute_whitepoint_adaption(float X, float Y, float Z) {
+ float p = (0.96422f*bradford_matrix.m[0][0] + 1.000f*bradford_matrix.m[1][0] + 0.82521f*bradford_matrix.m[2][0]) /
+ (X*bradford_matrix.m[0][0] + Y*bradford_matrix.m[1][0] + Z*bradford_matrix.m[2][0] );
+ float y = (0.96422f*bradford_matrix.m[0][1] + 1.000f*bradford_matrix.m[1][1] + 0.82521f*bradford_matrix.m[2][1]) /
+ (X*bradford_matrix.m[0][1] + Y*bradford_matrix.m[1][1] + Z*bradford_matrix.m[2][1] );
+ float b = (0.96422f*bradford_matrix.m[0][2] + 1.000f*bradford_matrix.m[1][2] + 0.82521f*bradford_matrix.m[2][2]) /
+ (X*bradford_matrix.m[0][2] + Y*bradford_matrix.m[1][2] + Z*bradford_matrix.m[2][2] );
+ struct matrix white_adaption = {{ {p,0,0}, {0,y,0}, {0,0,b}}, false};
+ return matrix_multiply( bradford_matrix_inv, matrix_multiply(white_adaption, bradford_matrix) );
+}
+
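+/* Illustrative caller-side usage (a sketch; qcms_profile_sRGB, the QCMS_DATA_*
+ * types and QCMS_INTENT_PERCEPTUAL are declared in qcms.h):
+ *
+ *   qcms_profile *screen = qcms_profile_sRGB();
+ *   qcms_profile_precache_output_transform(screen);
+ *   qcms_transform *t = qcms_transform_create(input_profile, QCMS_DATA_RGBA_8,
+ *                                             screen, QCMS_DATA_RGBA_8,
+ *                                             QCMS_INTENT_PERCEPTUAL);
+ */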
+void qcms_profile_precache_output_transform(qcms_profile *profile)
+{
+ /* we only support precaching on rgb profiles */
+ if (profile->color_space != RGB_SIGNATURE)
+ return;
+
+ if (qcms_supports_iccv4) {
+ /* don't precache since we will use the B2A LUT */
+ if (profile->B2A0)
+ return;
+
+ /* don't precache since we will use the mBA LUT */
+ if (profile->mBA)
+ return;
+ }
+
+ /* don't precache if we do not have the TRC curves */
+ if (!profile->redTRC || !profile->greenTRC || !profile->blueTRC)
+ return;
+
+ if (!profile->output_table_r) {
+ profile->output_table_r = precache_create();
+ if (profile->output_table_r &&
+ !compute_precache(profile->redTRC, profile->output_table_r->data)) {
+ precache_release(profile->output_table_r);
+ profile->output_table_r = NULL;
+ }
+ }
+ if (!profile->output_table_g) {
+ profile->output_table_g = precache_create();
+ if (profile->output_table_g &&
+ !compute_precache(profile->greenTRC, profile->output_table_g->data)) {
+ precache_release(profile->output_table_g);
+ profile->output_table_g = NULL;
+ }
+ }
+ if (!profile->output_table_b) {
+ profile->output_table_b = precache_create();
+ if (profile->output_table_b &&
+ !compute_precache(profile->blueTRC, profile->output_table_b->data)) {
+ precache_release(profile->output_table_b);
+ profile->output_table_b = NULL;
+ }
+ }
+}
+
+/* Replace the current transformation with a LUT transformation using a given number of sample points */
+qcms_transform* qcms_transform_precacheLUT_float(qcms_transform *transform, qcms_profile *in, qcms_profile *out,
+ int samples, qcms_data_type in_type)
+{
+	/* Grid coordinates; each pair of consecutive sample points bounds a range used for interpolation */
+ uint16_t x,y,z;
+ uint32_t l;
+ uint32_t lutSize = 3 * samples * samples * samples;
+ float* src = NULL;
+ float* dest = NULL;
+ float* lut = NULL;
+
+ src = malloc(lutSize*sizeof(float));
+ dest = malloc(lutSize*sizeof(float));
+
+ if (src && dest) {
+ /* Prepare a list of points we want to sample */
+ l = 0;
+ for (x = 0; x < samples; x++) {
+ for (y = 0; y < samples; y++) {
+ for (z = 0; z < samples; z++) {
+ src[l++] = x / (float)(samples-1);
+ src[l++] = y / (float)(samples-1);
+ src[l++] = z / (float)(samples-1);
+ }
+ }
+ }
+
+ lut = qcms_chain_transform(in, out, src, dest, lutSize);
+ if (lut) {
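+			/* The CLUT returned by qcms_chain_transform is one buffer of
+			 * interleaved (R,G,B) samples; the per-channel pointers below are
+			 * offsets into that buffer and are read with a stride of 3 by the
+			 * tetrahedral interpolation routines. */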
+ transform->r_clut = &lut[0];
+ transform->g_clut = &lut[1];
+ transform->b_clut = &lut[2];
+ transform->grid_size = samples;
+ if (in_type == QCMS_DATA_RGBA_8) {
+ transform->transform_fn = qcms_transform_data_tetra_clut_rgba;
+ } else {
+ transform->transform_fn = qcms_transform_data_tetra_clut;
+ }
+ }
+ }
+
+
+	//XXX: qcms_modular_transform_data may return either the src or dest buffer. If so, it must not be freed.
+ if (src && lut != src) {
+ free(src);
+ }
+ if (dest && lut != dest) {
+ free(dest);
+ }
+
+ if (lut == NULL) {
+ return NULL;
+ }
+ return transform;
+}
+
+#define NO_MEM_TRANSFORM NULL
+
+qcms_transform* qcms_transform_create(
+ qcms_profile *in, qcms_data_type in_type,
+ qcms_profile *out, qcms_data_type out_type,
+ qcms_intent intent)
+{
+ bool precache = false;
+
+ qcms_transform *transform = transform_alloc();
+ if (!transform) {
+ return NULL;
+ }
+ if (out_type != QCMS_DATA_RGB_8 &&
+ out_type != QCMS_DATA_RGBA_8) {
+ assert(0 && "output type");
+ qcms_transform_release(transform);
+ return NULL;
+ }
+
+ if (out->output_table_r &&
+ out->output_table_g &&
+ out->output_table_b) {
+ precache = true;
+ }
+
+ // This precache assumes RGB_SIGNATURE (fails on GRAY_SIGNATURE, for instance)
+ if (qcms_supports_iccv4 &&
+ (in_type == QCMS_DATA_RGB_8 || in_type == QCMS_DATA_RGBA_8) &&
+ (in->A2B0 || out->B2A0 || in->mAB || out->mAB))
+ {
+		// Precache the transformation to a CLUT of size 33x33x33.
+		// 33 is used by many profiles and works well in practice.
+		// This evenly divides 256 into blocks of 8x8x8.
+		// TODO: precaching should be avoided when transforming small
+		// data sets of about 200x200 pixels or less.
+ qcms_transform *result = qcms_transform_precacheLUT_float(transform, in, out, 33, in_type);
+ if (!result) {
+ assert(0 && "precacheLUT failed");
+ qcms_transform_release(transform);
+ return NULL;
+ }
+ return result;
+ }
+
+ if (precache) {
+ transform->output_table_r = precache_reference(out->output_table_r);
+ transform->output_table_g = precache_reference(out->output_table_g);
+ transform->output_table_b = precache_reference(out->output_table_b);
+ } else {
+ if (!out->redTRC || !out->greenTRC || !out->blueTRC) {
+ qcms_transform_release(transform);
+ return NO_MEM_TRANSFORM;
+ }
+ build_output_lut(out->redTRC, &transform->output_gamma_lut_r, &transform->output_gamma_lut_r_length);
+ build_output_lut(out->greenTRC, &transform->output_gamma_lut_g, &transform->output_gamma_lut_g_length);
+ build_output_lut(out->blueTRC, &transform->output_gamma_lut_b, &transform->output_gamma_lut_b_length);
+ if (!transform->output_gamma_lut_r || !transform->output_gamma_lut_g || !transform->output_gamma_lut_b) {
+ qcms_transform_release(transform);
+ return NO_MEM_TRANSFORM;
+ }
+ }
+
+ if (in->color_space == RGB_SIGNATURE) {
+ struct matrix in_matrix, out_matrix, result;
+
+ if (in_type != QCMS_DATA_RGB_8 &&
+ in_type != QCMS_DATA_RGBA_8){
+ assert(0 && "input type");
+ qcms_transform_release(transform);
+ return NULL;
+ }
+ if (precache) {
+#ifdef X86
+ if (sse_version_available() >= 2) {
+ if (in_type == QCMS_DATA_RGB_8)
+ transform->transform_fn = qcms_transform_data_rgb_out_lut_sse2;
+ else
+ transform->transform_fn = qcms_transform_data_rgba_out_lut_sse2;
+
+#if !(defined(_MSC_VER) && defined(_M_AMD64))
+			/* The Microsoft compiler for x64 doesn't support MMX.
+			 * The SSE1 code uses MMX, so we disable it on x64 */
+ } else
+ if (sse_version_available() >= 1) {
+ if (in_type == QCMS_DATA_RGB_8)
+ transform->transform_fn = qcms_transform_data_rgb_out_lut_sse1;
+ else
+ transform->transform_fn = qcms_transform_data_rgba_out_lut_sse1;
+#endif
+ } else
+#endif
+#if (defined(__POWERPC__) || defined(__powerpc__) && !defined(__NO_FPRS__))
+ if (have_altivec()) {
+ if (in_type == QCMS_DATA_RGB_8)
+ transform->transform_fn = qcms_transform_data_rgb_out_lut_altivec;
+ else
+ transform->transform_fn = qcms_transform_data_rgba_out_lut_altivec;
+ } else
+#endif
+ {
+ if (in_type == QCMS_DATA_RGB_8)
+ transform->transform_fn = qcms_transform_data_rgb_out_lut_precache;
+ else
+ transform->transform_fn = qcms_transform_data_rgba_out_lut_precache;
+ }
+ } else {
+ if (in_type == QCMS_DATA_RGB_8)
+ transform->transform_fn = qcms_transform_data_rgb_out_lut;
+ else
+ transform->transform_fn = qcms_transform_data_rgba_out_lut;
+ }
+
+ //XXX: avoid duplicating tables if we can
+ transform->input_gamma_table_r = build_input_gamma_table(in->redTRC);
+ transform->input_gamma_table_g = build_input_gamma_table(in->greenTRC);
+ transform->input_gamma_table_b = build_input_gamma_table(in->blueTRC);
+ if (!transform->input_gamma_table_r || !transform->input_gamma_table_g || !transform->input_gamma_table_b) {
+ qcms_transform_release(transform);
+ return NO_MEM_TRANSFORM;
+ }
+
+
+ /* build combined colorant matrix */
+ in_matrix = build_colorant_matrix(in);
+ out_matrix = build_colorant_matrix(out);
+ out_matrix = matrix_invert(out_matrix);
+ if (out_matrix.invalid) {
+ qcms_transform_release(transform);
+ return NULL;
+ }
+ result = matrix_multiply(out_matrix, in_matrix);
+
+ /* check for NaN values in the matrix and bail if we find any */
+ for (unsigned i = 0 ; i < 3 ; ++i) {
+ for (unsigned j = 0 ; j < 3 ; ++j) {
+ if (result.m[i][j] != result.m[i][j]) {
+ qcms_transform_release(transform);
+ return NULL;
+ }
+ }
+ }
+
+		/* store the results in column-major order;
+		 * this makes the multiplication with SSE easier */
+ transform->matrix[0][0] = result.m[0][0];
+ transform->matrix[1][0] = result.m[0][1];
+ transform->matrix[2][0] = result.m[0][2];
+ transform->matrix[0][1] = result.m[1][0];
+ transform->matrix[1][1] = result.m[1][1];
+ transform->matrix[2][1] = result.m[1][2];
+ transform->matrix[0][2] = result.m[2][0];
+ transform->matrix[1][2] = result.m[2][1];
+ transform->matrix[2][2] = result.m[2][2];
+
+ } else if (in->color_space == GRAY_SIGNATURE) {
+ if (in_type != QCMS_DATA_GRAY_8 &&
+ in_type != QCMS_DATA_GRAYA_8){
+ assert(0 && "input type");
+ qcms_transform_release(transform);
+ return NULL;
+ }
+
+ transform->input_gamma_table_gray = build_input_gamma_table(in->grayTRC);
+ if (!transform->input_gamma_table_gray) {
+ qcms_transform_release(transform);
+ return NO_MEM_TRANSFORM;
+ }
+
+ if (precache) {
+ if (in_type == QCMS_DATA_GRAY_8) {
+ transform->transform_fn = qcms_transform_data_gray_out_precache;
+ } else {
+ transform->transform_fn = qcms_transform_data_graya_out_precache;
+ }
+ } else {
+ if (in_type == QCMS_DATA_GRAY_8) {
+ transform->transform_fn = qcms_transform_data_gray_out_lut;
+ } else {
+ transform->transform_fn = qcms_transform_data_graya_out_lut;
+ }
+ }
+ } else {
+ assert(0 && "unexpected colorspace");
+ qcms_transform_release(transform);
+ return NULL;
+ }
+ return transform;
+}
+
+#if defined(__GNUC__) && defined(__i386__)
+/* we need this to avoid crashes when gcc assumes the stack is 128-bit aligned */
+__attribute__((__force_align_arg_pointer__))
+#endif
+void qcms_transform_data(qcms_transform *transform, void *src, void *dest, size_t length)
+{
+ transform->transform_fn(transform, src, dest, length);
+}
+
+qcms_bool qcms_supports_iccv4;
+void qcms_enable_iccv4()
+{
+ qcms_supports_iccv4 = true;
+}
diff --git a/gfx/qcms/transform_util.c b/gfx/qcms/transform_util.c
new file mode 100644
index 000000000..f15a3f1cf
--- /dev/null
+++ b/gfx/qcms/transform_util.c
@@ -0,0 +1,516 @@
+#include <math.h>
+#include <assert.h>
+#include <string.h> //memcpy
+#include "qcmsint.h"
+#include "transform_util.h"
+#include "matrix.h"
+
+#define PARAMETRIC_CURVE_TYPE 0x70617261 //'para'
+
+/* input_value must be between 0 and 1 */
+//XXX: is the above a good restriction to have?
+// the output range of this function is 0..1
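+// Worked example (illustrative): with table = {0, 32768, 65535} and length = 3,
+// an input_value of 0.25 scales to 0.5, so the result is
+// (32768*0.5 + 0*0.5) / 65535, i.e. roughly 0.25.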
+float lut_interp_linear(double input_value, uint16_t *table, int length)
+{
+ int upper, lower;
+ float value;
+ input_value = input_value * (length - 1); // scale to length of the array
+ upper = ceil(input_value);
+ lower = floor(input_value);
+ //XXX: can we be more performant here?
+ value = table[upper]*(1. - (upper - input_value)) + table[lower]*(upper - input_value);
+ /* scale the value */
+ return value * (1.f/65535.f);
+}
+
+/* same as above but takes and returns a uint16_t value representing a range from 0..1 */
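+/* Worked example (illustrative): with table = {0, 65535} and length = 2,
+ * input_value = 0x4000 gives value = 0x4000, upper = 1, lower = 0 and
+ * interp = 0x4000, so the result is (65535*0x4000)/65535 = 0x4000 (identity). */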
+uint16_t lut_interp_linear16(uint16_t input_value, uint16_t *table, int length)
+{
+ /* Start scaling input_value to the length of the array: 65535*(length-1).
+ * We'll divide out the 65535 next */
+ uint32_t value = (input_value * (length - 1));
+ uint32_t upper = (value + 65534) / 65535; /* equivalent to ceil(value/65535) */
+ uint32_t lower = value / 65535; /* equivalent to floor(value/65535) */
+ /* interp is the distance from upper to value scaled to 0..65535 */
+ uint32_t interp = value % 65535;
+
+ value = (table[upper]*(interp) + table[lower]*(65535 - interp))/65535; // 0..65535*65535
+
+ return value;
+}
+
+/* same as above but takes an input_value from 0..PRECACHE_OUTPUT_MAX
+ * and returns a uint8_t value representing a range from 0..1 */
+static
+uint8_t lut_interp_linear_precache_output(uint32_t input_value, uint16_t *table, int length)
+{
+ /* Start scaling input_value to the length of the array: PRECACHE_OUTPUT_MAX*(length-1).
+ * We'll divide out the PRECACHE_OUTPUT_MAX next */
+ uint32_t value = (input_value * (length - 1));
+
+ /* equivalent to ceil(value/PRECACHE_OUTPUT_MAX) */
+ uint32_t upper = (value + PRECACHE_OUTPUT_MAX-1) / PRECACHE_OUTPUT_MAX;
+ /* equivalent to floor(value/PRECACHE_OUTPUT_MAX) */
+ uint32_t lower = value / PRECACHE_OUTPUT_MAX;
+ /* interp is the distance from upper to value scaled to 0..PRECACHE_OUTPUT_MAX */
+ uint32_t interp = value % PRECACHE_OUTPUT_MAX;
+
+ /* the table values range from 0..65535 */
+ value = (table[upper]*(interp) + table[lower]*(PRECACHE_OUTPUT_MAX - interp)); // 0..(65535*PRECACHE_OUTPUT_MAX)
+
+ /* round and scale */
+ value += (PRECACHE_OUTPUT_MAX*65535/255)/2;
+ value /= (PRECACHE_OUTPUT_MAX*65535/255); // scale to 0..255
+ return value;
+}
+
+/* value must be between 0 and 1 */
+//XXX: is the above a good restriction to have?
+float lut_interp_linear_float(float value, float *table, int length)
+{
+ int upper, lower;
+ value = value * (length - 1);
+ upper = ceilf(value);
+ lower = floorf(value);
+ //XXX: can we be more performant here?
+ value = table[upper]*(1. - (upper - value)) + table[lower]*(upper - value);
+ /* scale the value */
+ return value;
+}
+
+#if 0
+/* if we use a different representation i.e. one that goes from 0 to 0x1000 we can be more efficient
+ * because we can avoid the divisions and use a shifting instead */
+/* same as above but takes and returns a uint16_t value representing a range from 0..1 */
+uint16_t lut_interp_linear16(uint16_t input_value, uint16_t *table, int length)
+{
+ uint32_t value = (input_value * (length - 1));
+ uint32_t upper = (value + 4095) / 4096; /* equivalent to ceil(value/4096) */
+ uint32_t lower = value / 4096; /* equivalent to floor(value/4096) */
+ uint32_t interp = value % 4096;
+
+ value = (table[upper]*(interp) + table[lower]*(4096 - interp))/4096; // 0..4096*4096
+
+ return value;
+}
+#endif
+
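+/* Illustrative: a gamma of 2.2 is stored as the u8Fixed8 value 0x0233
+ * (2 + 51/256, about 2.199), so for example gamma_table[128] becomes
+ * pow(128/255., 2.199), roughly 0.22. */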
+void compute_curve_gamma_table_type1(float gamma_table[256], uint16_t gamma)
+{
+ unsigned int i;
+ float gamma_float = u8Fixed8Number_to_float(gamma);
+ for (i = 0; i < 256; i++) {
+		// (i/255)^gamma_float is always between 0 and 1 for gamma_float in 0..255 + 255/256
+ gamma_table[i] = pow(i/255., gamma_float);
+ }
+}
+
+void compute_curve_gamma_table_type2(float gamma_table[256], uint16_t *table, int length)
+{
+ unsigned int i;
+ for (i = 0; i < 256; i++) {
+ gamma_table[i] = lut_interp_linear(i/255., table, length);
+ }
+}
+
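+/* The count argument selects one of the ICC parametricCurveType function types
+ * 0..4. Illustrative example (a sketch of the spec formulas, not of this exact
+ * implementation): an sRGB-like curve is type 3 (count == 3) with parameters
+ * roughly {2.4, 1/1.055, 0.055/1.055, 1/12.92, 0.04045}, i.e.
+ * Y = (a*X + b)^2.4 above the breakpoint d = 0.04045 and Y = c*X below it. */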
+void compute_curve_gamma_table_type_parametric(float gamma_table[256], float parameter[7], int count)
+{
+ size_t X;
+ float interval;
+ float a, b, c, e, f;
+ float y = parameter[0];
+ if (count == 0) {
+ a = 1;
+ b = 0;
+ c = 0;
+ e = 0;
+ f = 0;
+ interval = -1;
+ } else if(count == 1) {
+ a = parameter[1];
+ b = parameter[2];
+ c = 0;
+ e = 0;
+ f = 0;
+ interval = -1 * parameter[2] / parameter[1];
+ } else if(count == 2) {
+ a = parameter[1];
+ b = parameter[2];
+ c = 0;
+ e = parameter[3];
+ f = parameter[3];
+ interval = -1 * parameter[2] / parameter[1];
+ } else if(count == 3) {
+ a = parameter[1];
+ b = parameter[2];
+ c = parameter[3];
+ e = -c;
+ f = 0;
+ interval = parameter[4];
+ } else if(count == 4) {
+ a = parameter[1];
+ b = parameter[2];
+ c = parameter[3];
+ e = parameter[5] - c;
+ f = parameter[6];
+ interval = parameter[4];
+ } else {
+ assert(0 && "invalid parametric function type.");
+ a = 1;
+ b = 0;
+ c = 0;
+ e = 0;
+ f = 0;
+ interval = -1;
+ }
+ for (X = 0; X < 256; X++) {
+ if (X >= interval) {
+ // XXX The equations are not exactly as defined in the spec but are
+ // algebraically equivalent.
+			// TODO: should the division by 255 apply to the whole expression?
+ gamma_table[X] = clamp_float(pow(a * X / 255. + b, y) + c + e);
+ } else {
+ gamma_table[X] = clamp_float(c * X / 255. + f);
+ }
+ }
+}
+
+void compute_curve_gamma_table_type0(float gamma_table[256])
+{
+ unsigned int i;
+ for (i = 0; i < 256; i++) {
+ gamma_table[i] = i/255.;
+ }
+}
+
+float *build_input_gamma_table(struct curveType *TRC)
+{
+ float *gamma_table;
+
+ if (!TRC) return NULL;
+ gamma_table = malloc(sizeof(float)*256);
+ if (gamma_table) {
+ if (TRC->type == PARAMETRIC_CURVE_TYPE) {
+ compute_curve_gamma_table_type_parametric(gamma_table, TRC->parameter, TRC->count);
+ } else {
+ if (TRC->count == 0) {
+ compute_curve_gamma_table_type0(gamma_table);
+ } else if (TRC->count == 1) {
+ compute_curve_gamma_table_type1(gamma_table, TRC->data[0]);
+ } else {
+ compute_curve_gamma_table_type2(gamma_table, TRC->data, TRC->count);
+ }
+ }
+ }
+ return gamma_table;
+}
+
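+/* The columns of the returned matrix are the XYZ values of the red, green and
+ * blue colorants, so multiplying it by a linear (R,G,B) column vector yields
+ * the corresponding XYZ value in the profile connection space. */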
+struct matrix build_colorant_matrix(qcms_profile *p)
+{
+ struct matrix result;
+ result.m[0][0] = s15Fixed16Number_to_float(p->redColorant.X);
+ result.m[0][1] = s15Fixed16Number_to_float(p->greenColorant.X);
+ result.m[0][2] = s15Fixed16Number_to_float(p->blueColorant.X);
+ result.m[1][0] = s15Fixed16Number_to_float(p->redColorant.Y);
+ result.m[1][1] = s15Fixed16Number_to_float(p->greenColorant.Y);
+ result.m[1][2] = s15Fixed16Number_to_float(p->blueColorant.Y);
+ result.m[2][0] = s15Fixed16Number_to_float(p->redColorant.Z);
+ result.m[2][1] = s15Fixed16Number_to_float(p->greenColorant.Z);
+ result.m[2][2] = s15Fixed16Number_to_float(p->blueColorant.Z);
+ result.invalid = false;
+ return result;
+}
+
+/* The following code is copied nearly directly from lcms.
+ * I think it could be much better. For example, Argyll seems to have better code in
+ * icmTable_lookup_bwd and icmTable_setup_bwd. However, for now this is a quick way
+ * to a working solution and allows for easy comparing with lcms. */
+uint16_fract_t lut_inverse_interp16(uint16_t Value, uint16_t LutTable[], int length)
+{
+ int l = 1;
+ int r = 0x10000;
+	int x = 0, res; // 'int' gives room for negative values
+ int NumZeroes, NumPoles;
+ int cell0, cell1;
+ double val2;
+ double y0, y1, x0, x1;
+ double a, b, f;
+
+	// July/27 2001 - Expanded to handle degenerate curves with an arbitrary
+	// number of elements containing 0 at the beginning of the table (zeroes)
+	// and another arbitrary number of poles (FFFFh) at the end.
+	// First the zero and pole extents are computed, then the value is compared.
+
+ NumZeroes = 0;
+ while (LutTable[NumZeroes] == 0 && NumZeroes < length-1)
+ NumZeroes++;
+
+	// There are no zeros at the beginning and we are trying to find a zero, so
+	// return anything. It seems zero would be the least destructive choice
+ /* I'm not sure that this makes sense, but oh well... */
+ if (NumZeroes == 0 && Value == 0)
+ return 0;
+
+ NumPoles = 0;
+ while (LutTable[length-1- NumPoles] == 0xFFFF && NumPoles < length-1)
+ NumPoles++;
+
+ // Does the curve belong to this case?
+ if (NumZeroes > 1 || NumPoles > 1)
+ {
+ int a, b;
+
+		// Identify whether the value falls into the 0 or FFFF zone
+ if (Value == 0) return 0;
+ // if (Value == 0xFFFF) return 0xFFFF;
+
+ // else restrict to valid zone
+
+ if (NumZeroes > 1) {
+ a = ((NumZeroes-1) * 0xFFFF) / (length-1);
+ l = a - 1;
+ }
+ if (NumPoles > 1) {
+ b = ((length-1 - NumPoles) * 0xFFFF) / (length-1);
+ r = b + 1;
+ }
+ }
+
+ if (r <= l) {
+ // If this happens LutTable is not invertible
+ return 0;
+ }
+
+
+	// Not a degenerate case... apply binary search
+ while (r > l) {
+
+ x = (l + r) / 2;
+
+ res = (int) lut_interp_linear16((uint16_fract_t) (x-1), LutTable, length);
+
+ if (res == Value) {
+
+ // Found exact match.
+
+ return (uint16_fract_t) (x - 1);
+ }
+
+ if (res > Value) r = x - 1;
+ else l = x + 1;
+ }
+
+ // Not found, should we interpolate?
+
+ // Get surrounding nodes
+
+ assert(x >= 1);
+
+ val2 = (length-1) * ((double) (x - 1) / 65535.0);
+
+ cell0 = (int) floor(val2);
+ cell1 = (int) ceil(val2);
+
+ if (cell0 == cell1) return (uint16_fract_t) x;
+
+ y0 = LutTable[cell0] ;
+ x0 = (65535.0 * cell0) / (length-1);
+
+ y1 = LutTable[cell1] ;
+ x1 = (65535.0 * cell1) / (length-1);
+
+ a = (y1 - y0) / (x1 - x0);
+ b = y0 - a * x0;
+
+ if (fabs(a) < 0.01) return (uint16_fract_t) x;
+
+ f = ((Value - b) / a);
+
+ if (f < 0.0) return (uint16_fract_t) 0;
+ if (f >= 65535.0) return (uint16_fract_t) 0xFFFF;
+
+ return (uint16_fract_t) floor(f + 0.5);
+
+}
+
+/*
+ The number of entries needed to invert a lookup table should not
+ necessarily be the same as the original number of entries. This is
+ especially true of lookup tables that have a small number of entries.
+
+ For example:
+ Using a table like:
+ {0, 3104, 14263, 34802, 65535}
+ invert_lut will produce an inverse of:
+ {3, 34459, 47529, 56801, 65535}
+  which has a maximum error of about 9855 (pixel difference of ~38.346)
+
+ For now, we punt the decision of output size to the caller. */
+static uint16_t *invert_lut(uint16_t *table, int length, int out_length)
+{
+ int i;
+	/* for now we invert the lut by creating a lut of size out_length
+	 * and attempting to look up a value for each entry using lut_inverse_interp16 */
+ uint16_t *output = malloc(sizeof(uint16_t)*out_length);
+ if (!output)
+ return NULL;
+
+ for (i = 0; i < out_length; i++) {
+ double x = ((double) i * 65535.) / (double) (out_length - 1);
+ uint16_fract_t input = floor(x + .5);
+ output[i] = lut_inverse_interp16(input, table, length);
+ }
+ return output;
+}
+
+static void compute_precache_pow(uint8_t *output, float gamma)
+{
+ uint32_t v = 0;
+ for (v = 0; v < PRECACHE_OUTPUT_SIZE; v++) {
+ //XXX: don't do integer/float conversion... and round?
+ output[v] = 255. * pow(v/(double)PRECACHE_OUTPUT_MAX, gamma);
+ }
+}
+
+void compute_precache_lut(uint8_t *output, uint16_t *table, int length)
+{
+ uint32_t v = 0;
+ for (v = 0; v < PRECACHE_OUTPUT_SIZE; v++) {
+ output[v] = lut_interp_linear_precache_output(v, table, length);
+ }
+}
+
+void compute_precache_linear(uint8_t *output)
+{
+ uint32_t v = 0;
+ for (v = 0; v < PRECACHE_OUTPUT_SIZE; v++) {
+ //XXX: round?
+ output[v] = v / (PRECACHE_OUTPUT_SIZE/256);
+ }
+}
+
+qcms_bool compute_precache(struct curveType *trc, uint8_t *output)
+{
+
+ if (trc->type == PARAMETRIC_CURVE_TYPE) {
+ float gamma_table[256];
+ uint16_t gamma_table_uint[256];
+ uint16_t i;
+ uint16_t *inverted;
+ int inverted_size = 256;
+
+ compute_curve_gamma_table_type_parametric(gamma_table, trc->parameter, trc->count);
+ for(i = 0; i < 256; i++) {
+ gamma_table_uint[i] = (uint16_t)(gamma_table[i] * 65535);
+ }
+
+	//XXX: the choice of a minimum of 256 here is not backed by any theory,
+	// measurement or data, however it is what lcms uses.
+	// the maximum number we would need is 65535 because that's the
+	// accuracy used for computing the precache table
+ if (inverted_size < 256)
+ inverted_size = 256;
+
+ inverted = invert_lut(gamma_table_uint, 256, inverted_size);
+ if (!inverted)
+ return false;
+ compute_precache_lut(output, inverted, inverted_size);
+ free(inverted);
+ } else {
+ if (trc->count == 0) {
+ compute_precache_linear(output);
+ } else if (trc->count == 1) {
+ compute_precache_pow(output, 1./u8Fixed8Number_to_float(trc->data[0]));
+ } else {
+ uint16_t *inverted;
+ int inverted_size = trc->count;
+			//XXX: the choice of a minimum of 256 here is not backed by any theory,
+			// measurement or data, however it is what lcms uses.
+			// the maximum number we would need is 65535 because that's the
+			// accuracy used for computing the precache table
+ if (inverted_size < 256)
+ inverted_size = 256;
+
+ inverted = invert_lut(trc->data, trc->count, inverted_size);
+ if (!inverted)
+ return false;
+ compute_precache_lut(output, inverted, inverted_size);
+ free(inverted);
+ }
+ }
+ return true;
+}
+
+
+static uint16_t *build_linear_table(int length)
+{
+ int i;
+ uint16_t *output = malloc(sizeof(uint16_t)*length);
+ if (!output)
+ return NULL;
+
+ for (i = 0; i < length; i++) {
+ double x = ((double) i * 65535.) / (double) (length - 1);
+ uint16_fract_t input = floor(x + .5);
+ output[i] = input;
+ }
+ return output;
+}
+
+static uint16_t *build_pow_table(float gamma, int length)
+{
+ int i;
+ uint16_t *output = malloc(sizeof(uint16_t)*length);
+ if (!output)
+ return NULL;
+
+ for (i = 0; i < length; i++) {
+ uint16_fract_t result;
+ double x = ((double) i) / (double) (length - 1);
+ x = pow(x, gamma); //XXX turn this conversion into a function
+ result = floor(x*65535. + .5);
+ output[i] = result;
+ }
+ return output;
+}
+
+void build_output_lut(struct curveType *trc,
+ uint16_t **output_gamma_lut, size_t *output_gamma_lut_length)
+{
+ if (trc->type == PARAMETRIC_CURVE_TYPE) {
+ float gamma_table[256];
+ uint16_t i;
+ uint16_t *output = malloc(sizeof(uint16_t)*256);
+
+ if (!output) {
+ *output_gamma_lut = NULL;
+ return;
+ }
+
+ compute_curve_gamma_table_type_parametric(gamma_table, trc->parameter, trc->count);
+ *output_gamma_lut_length = 256;
+ for(i = 0; i < 256; i++) {
+ output[i] = (uint16_t)(gamma_table[i] * 65535);
+ }
+ *output_gamma_lut = output;
+ } else {
+ if (trc->count == 0) {
+ *output_gamma_lut = build_linear_table(4096);
+ *output_gamma_lut_length = 4096;
+ } else if (trc->count == 1) {
+ float gamma = 1./u8Fixed8Number_to_float(trc->data[0]);
+ *output_gamma_lut = build_pow_table(gamma, 4096);
+ *output_gamma_lut_length = 4096;
+ } else {
+ //XXX: the choice of a minimum of 256 here is not backed by any theory,
+ // measurement or data, however it is what lcms uses.
+ *output_gamma_lut_length = trc->count;
+ if (*output_gamma_lut_length < 256)
+ *output_gamma_lut_length = 256;
+
+ *output_gamma_lut = invert_lut(trc->data, trc->count, *output_gamma_lut_length);
+ }
+ }
+
+}
+
diff --git a/gfx/qcms/transform_util.h b/gfx/qcms/transform_util.h
new file mode 100644
index 000000000..9d9514e99
--- /dev/null
+++ b/gfx/qcms/transform_util.h
@@ -0,0 +1,104 @@
+/* vim: set ts=8 sw=8 noexpandtab: */
+// qcms
+// Copyright (C) 2009 Mozilla Foundation
+// Copyright (C) 1998-2007 Marti Maria
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+#ifndef _QCMS_TRANSFORM_UTIL_H
+#define _QCMS_TRANSFORM_UTIL_H
+
+#include <stdlib.h>
+#include <math.h>
+
+#define CLU(table,x,y,z) table[(x*len + y*x_len + z*xy_len)*3]
+
+//XXX: could use a better name
+typedef uint16_t uint16_fract_t;
+
+float lut_interp_linear(double value, uint16_t *table, int length);
+float lut_interp_linear_float(float value, float *table, int length);
+uint16_t lut_interp_linear16(uint16_t input_value, uint16_t *table, int length);
+
+
+static inline float lerp(float a, float b, float t)
+{
+ return a*(1.f-t) + b*t;
+}
+
+static inline unsigned char clamp_u8(float v)
+{
+ if (v > 255.)
+ return 255;
+ else if (v < 0)
+ return 0;
+ else
+ return floorf(v+.5);
+}
+
+static inline float clamp_float(float a)
+{
+ /* One would naturally write this function as the following:
+ if (a > 1.)
+ return 1.;
+ else if (a < 0)
+ return 0;
+ else
+ return a;
+
+ However, that version will let NaNs pass through which is undesirable
+ for most consumers.
+ */
+
+ if (a > 1.)
+ return 1.;
+ else if (a >= 0)
+ return a;
+ else // a < 0 or a is NaN
+ return 0;
+}
+
+static inline float u8Fixed8Number_to_float(uint16_t x)
+{
+ // 0x0000 = 0.
+ // 0x0100 = 1.
+ // 0xffff = 255 + 255/256
+ return x/256.;
+}
+
+float *build_input_gamma_table(struct curveType *TRC);
+struct matrix build_colorant_matrix(qcms_profile *p);
+void build_output_lut(struct curveType *trc,
+ uint16_t **output_gamma_lut, size_t *output_gamma_lut_length);
+
+struct matrix matrix_invert(struct matrix mat);
+qcms_bool compute_precache(struct curveType *trc, uint8_t *output);
+
+// Tested by GTest
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+uint16_fract_t lut_inverse_interp16(uint16_t Value, uint16_t LutTable[], int length);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif