I'm pretty sure this isn't a LibRaw problem, but a problem with my code! I use unpack() to extract the raw image data and then convert the data into my application's internal image format using code based on code I found in the LibRaw/dcraw source code.
The problem is that my images are somewhat too red in colour as compared to how (e.g.) Windows "Icon view" or IrfanView display them. A somewhat simplified version of the code I use is here:
//
// Configure the LibRaw processing parameters before unpack().
// (O is presumably the LibRaw imgdata.params block — TODO confirm in the
// enclosing function.)
//
// No Auto White Balance
O.use_auto_wb = false;
//
// Do we disable all White Balance processing?
//
// Helper: set all four user white-balance multipliers to the same value.
const auto setWbMult = [&](const float factor)
{
O.user_mul[0] = factor;
O.user_mul[1] = factor;
O.user_mul[2] = factor;
O.user_mul[3] = factor;
};
if (workspace.value("RawDDP/NoWB", false).toBool())
setWbMult(1.0f); // Yes, so set the user white balance multipliers to 1.0 (unit gain = no WB)
else
setWbMult(0.0f); // No, so set the user white balance multipliers to 0.0 (i.e. "not set")
// Use the camera's as-shot white balance only if the user asked for it.
O.use_camera_wb = workspace.value("RawDDP/CameraWB", false).toBool() ? 1 : 0;
// Don't stretch or rotate raw pixels (equivalent to dcraw -j)
O.use_fuji_rotate = 0;
// Don't flip the image (equivalent to dcraw -t 0)
O.user_flip = 0;
// Output color space : raw-> sRGB (default)
/*
argv[argc] = _T("-o");
argc++;
argv[argc] = _T("0");
argc++;*/
// -1 means "no user black override"; 0 forces the black point to zero.
O.user_black = workspace.value("RawDDP/BlackPointTo0", false).toBool() ? 0 : -1;
// Output is 16 bits (equivalent of dcraw flag -4):
// gamm[0] == gamm[1] == 1 requests a linear (gamma 1.0) transfer curve and
// no_auto_bright == 1 disables automatic brightening.
O.gamm[0] = O.gamm[1] = O.no_auto_bright = 1;
O.output_bps = 16;
// Remember the progress callback for use by the LibRaw progress hooks
// (presumably consumed via a global trampoline — TODO confirm).
g_Progress = pProgress;
ZTRACE_RUNTIME("Calling LibRaw::unpack()");
// Decode the raw sensor data; on failure record the error and fall through.
if ((ret = rawProcessor.unpack()) != LIBRAW_SUCCESS)
{
bResult = false;
ZTRACE_RUNTIME("Cannot unpack %s: %s", file.generic_u8string().c_str(), libraw_strerror(ret));
}
// Leave the enclosing (one-shot) loop on any failure so far.
if (!bResult)
break;
//
// Create the class that populates the bitmap
//
CopyableSmartPtr<BitmapFillerInterface> pFiller = BitmapFillerInterface::makeBitmapFiller(pBitmap, pProgress);
// Get the Colour Filter Array type and set into the bitmap filler
m_CFAType = GetCurrentCFAType();
pFiller->SetCFAType(m_CFAType);
// Address a pixel in the de-framed image buffer (row-major, S.width wide).
#define RAW(row,col) raw_image[(row) * S.width + (col)]
ZTRACE_DEVELOP("Extracting real image data (excluding the frame) from rawProcessor.imgdata.rawdata.raw_image");
//
// This is a regular RAW file so no Fuji Super-CCD stuff
//
// Just copy the "real image" portion of the data excluding
// the frame
//
// NOTE(review): S.raw_pitch appears to be a byte stride; dividing by 2
// converts it to a stride in 16-bit pixels — assumes the raw data really is
// 2 bytes per pixel. TODO confirm for all supported formats.
#pragma omp parallel for default(shared) if(numberOfProcessors > 1)
for (int row = 0; row < S.height; row++)
{
for (int col = 0; col < S.width; col++)
{
RAW(row, col) = RawData.raw_image[(row + S.top_margin) * S.raw_pitch / 2 + (col + S.left_margin)];
}
}
//
// Now process the data that raw_image points to which is either
//
// 1) The output of post processing the Fuji Super-CCD raw,
// stored in the USHORT array hung off raw_image, or
//
// 2) Normal common or garden raw Bayer matrix data that's been
// copied from RawData.raw_image to raw_image (less the frame)
//
// Either way we should now be processing a regular greyscale 16-bit
// pixel array which has an associated Bayer Matrix or is true monochrome
//
// Tell the bitmap filler the geometry of the (greyscale, 16-bit) data.
pFiller->setGrey(true);
pFiller->setWidth(S.width);
pFiller->setHeight(S.height);
pFiller->setMaxColors((1 << 16) - 1);
// Report User Black Point over-ride
if (0 == O.user_black)
ZTRACE_RUNTIME("User set Black Point to 0");
//
// Before doing dark subtraction, normalise C.black / C.cblack[]
//
ZTRACE_DEVELOP("Before adjust_bl() C.black = %d.", C.black);
ZTRACE_DEVELOP("First 10 C.cblack elements\n %d, %d, %d, %d\n %d, %d\n %d, %d, %d, %d",
C.cblack[0], C.cblack[1], C.cblack[2], C.cblack[3],
C.cblack[4], C.cblack[5],
C.cblack[6], C.cblack[7], C.cblack[8], C.cblack[9]);
// LibRaw helper that performs the normalisation described above.
rawProcessor.adjust_bl();
//
// This code is based on code from LibRaw Version 19.2, specifically method:
//
// int LibRaw::subtract_black_internal()
//
// found at line 4532 in source file libraw_cxx.cpp
//
// Do dark subtraction on the image. If a user defined black level has
// been set (it will be zero) then use that, otherwise just use the black
// level for the camera.
//
// Note that this is only done on real image data, not the frame
//
// While doing so collect the largest value in the image data.
//
ZTRACE_DEVELOP("Subtracting black level of C.black = %d from raw_image data.", C.black);
ZTRACE_DEVELOP("First 10 C.cblack elements\n %d, %d, %d, %d\n %d, %d\n %d, %d, %d, %d",
C.cblack[0], C.cblack[1], C.cblack[2], C.cblack[3],
C.cblack[4], C.cblack[5],
C.cblack[6], C.cblack[7], C.cblack[8], C.cblack[9]);
// Total number of "real image" pixels (the frame has already been stripped).
const int size = static_cast<int>(S.height) * static_cast<int>(S.width);
// Subtract black levels unless this is Phase One compressed data (handled
// specially by LibRaw) or no black level information is recorded at all.
if (!rawProcessor.is_phaseone_compressed() && (C.cblack[0] || C.cblack[1] || C.cblack[2] || C.cblack[3] || (C.cblack[4] && C.cblack[5])))
{
// Per-component black levels cblack[0..3], widened to int for the arithmetic.
const int cblk[4] = { static_cast<int>(C.cblack[0]), static_cast<int>(C.cblack[1]), static_cast<int>(C.cblack[2]), static_cast<int>(C.cblack[3]) };
int dmax = 0; // Maximum value of pixels in entire image.
int lmax = 0; // Local (or Loop) maximum value found in the 'for' loops below. For OMP.
// cblack[4] x cblack[5] describes a repeating 2-D block of extra black
// levels stored from cblack[6] onwards (as in LibRaw's subtract_black_internal).
if (C.cblack[4] && C.cblack[5])
{
#pragma omp parallel default(shared) firstprivate(lmax) if(numberOfProcessors > 1)
{
#pragma omp for
for (int i = 0; i < size; i++)
{
int val = raw_image[i];
// Pattern black for this pixel: row (i / S.width) and column (i % S.width)
// reduced modulo the pattern dimensions cblack[4] x cblack[5].
val -= C.cblack[6 + i / S.width % C.cblack[4] * C.cblack[5] + i % S.width % C.cblack[5]];
// Per-component black, indexed the same way LibRaw does (i & 3).
val -= cblk[i & 3];
raw_image[i] = static_cast<std::uint16_t>(std::clamp(val, 0, 65535));
// Note: the (unclamped) value is used for the maximum, matching LibRaw.
lmax = std::max(val, lmax);
}
// Fold each thread's private maximum into the shared maximum.
#pragma omp critical
dmax = std::max(lmax, dmax); // For non-OMP case this is equal to dmax = lmax.
}
}
else
{
// No repeating pattern: subtract only the per-component black levels.
#pragma omp parallel default(shared) firstprivate(lmax) if(numberOfProcessors > 1)
{
#pragma omp for
for (int i = 0; i < size; i++)
{
int val = raw_image[i];
val -= cblk[i & 3];
raw_image[i] = static_cast<std::uint16_t>(std::clamp(val, 0, 65535));
lmax = std::max(val, lmax);
}
#pragma omp critical
dmax = std::max(lmax, dmax); // For non-OMP case this is equal to dmax = lmax.
}
}
C.data_maximum = dmax & 0xffff;
// Data is now black-subtracted: lower the saturation level accordingly and
// zero all recorded black levels so they cannot be subtracted twice.
C.maximum -= C.black;
memset(&C.cblack, 0, sizeof(C.cblack)); // Yeah, we used cblack[6+] values too!
C.black = 0;
}
else
{
// Nothing to Do, maximum is already calculated, black level is 0, so no change
// only calculate channel maximum;
unsigned int dmax = 0; // Maximum value of pixels in entire image.
unsigned int lmax = 0; // For OpenMP.
#pragma omp parallel default(shared) firstprivate(lmax) if(numberOfProcessors > 1)
{
#pragma omp for
for (int i = 0; i < size; i++)
lmax = std::max(static_cast<unsigned int>(raw_image[i]), lmax);
#pragma omp critical
dmax = std::max(lmax, dmax); // For non-OMP case this is equal to dmax = lmax.
}
C.data_maximum = dmax;
}
//
// The image data needs to be scaled to the "white balance co-efficients"
// Currently do not handle "Auto White Balance"
//
float pre_mul[4] = { 0.0, 0.0, 0.0, 0.0 };
// user_mul[0] was set to exactly 1.0f (NoWB) or 0.0f earlier, so this
// floating-point equality test is safe in this code path.
if (1 == O.user_mul[0])
{
static_assert(sizeof(O.user_mul) >= sizeof(pre_mul));
ZTRACE_RUNTIME("No White Balance processing.");
memcpy(pre_mul, O.user_mul, sizeof(pre_mul));
}
// NOTE(review): "-1 != C.cam_mul[0]" looks intended as a "cam_mul is valid"
// guard, but LibRaw typically reports an unknown as-shot multiplier as 0
// rather than -1 — confirm this condition ever rejects anything.
else if (1 == O.use_camera_wb && -1 != C.cam_mul[0])
{
static_assert(sizeof(C.cam_mul) >= sizeof(pre_mul));
ZTRACE_RUNTIME("Using Camera White Balance (as shot).");
memcpy(pre_mul, C.cam_mul, sizeof(pre_mul));
}
else
{
// Fall back to the camera's daylight multipliers (C.pre_mul).
static_assert(sizeof(C.pre_mul) >= sizeof(pre_mul));
ZTRACE_RUNTIME("Using Daylight White Balance.");
memcpy(pre_mul, C.pre_mul, sizeof(pre_mul));
}
ZTRACE_RUNTIME("White balance co-efficients being used are %f, %f, %f, %f",
pre_mul[0], pre_mul[1], pre_mul[2], pre_mul[3]);
#if (0)
qDebug() << "Colour Correction Matrix:";
for (int c = 0; c < 3; c++)
{
qDebug() << " "
<< C.ccm[geshifilter-c][0] << " " << C.ccm[c][1] << " " << C.ccm[c][2] << " " << C.ccm[c][3]; } #endif if (0 == pre_mul[3]) pre_mul[3] = P1.colors < 4 ? pre_mul[1] : 1; // // Now apply a linear stretch to the raw data, scale to the "saturation" level // not to the value of the pixel with the greatest value (which may be higher // than the saturation level). // // const double dmax = *std::max_element(&pre_mul[0], &pre_mul[4]); const float dmin = *std::ranges::min_element(pre_mul); const float saturationScaling = 65535.0f / static_cast<float>(C.maximum); std::array<float, 8> scale_mul = { 0.0f, 0.0f, 0.0f, 0.0f, saturationScaling, saturationScaling, saturationScaling, saturationScaling }; for (int c = 0; c < 4; c++) scale_mul[c] = (pre_mul[c] /= dmin) * saturationScaling; ZTRACE_DEVELOP("Maximum value pixel has value %d", C.data_maximum); ZTRACE_DEVELOP("Saturation level is %d", C.maximum); ZTRACE_DEVELOP("Applying linear stretch to raw data. Scale values %f, %f, %f, %f %f, %f, %f, %f", scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3], scale_mul[4], scale_mul[5], scale_mul[6], scale_mul[7]); #pragma omp parallel default(shared) if(numberOfProcessors > 1) // No OPENMP: 240ms, with OPENMP: 92ms { #pragma omp master // There is no implied barrier. ZTRACE_RUNTIME("RAW file processing with %d OpenMP threads, little_endian is %s", omp_get_num_threads(), littleEndian ? "true" : "false"); #pragma omp for for (int row = 0; row < S.height; row++) { for (int col = 0; col < S.width; col++) { // What colour will this pixel become const int colour = rawProcessor.COLOR(row, col); const float val = scale_mul[colour] * static_cast<float>(RAW(row, col)); RAW(row, col) = static_cast<std::uint16_t>(std::clamp(static_cast<int>(val), 0, 65535)); } } } If anyone is able to spot what I am doing wrong, I would be most grateful. I have been puzzling over this problem for some days and so far have failed to spot the probem[/geshifilter-c]

The code got truncated — the rest of it follows below.
The code display got messed up at the end. Here's a repost which I hope is not messed up.
// No Auto White Balance O.use_auto_wb = false; // // Do we disable all White Balance processing? // const auto setWbMult = [&](const float factor) { O.user_mul[0] = factor; O.user_mul[1] = factor; O.user_mul[2] = factor; O.user_mul[3] = factor; }; if (workspace.value("RawDDP/NoWB", false).toBool()) setWbMult(1.0f); // Yes, so set the user white balance multipliers to 1.0 else setWbMult(0.0f); // No, so set the user white balance multipliers to 0.0 O.use_camera_wb = workspace.value("RawDDP/CameraWB", false).toBool() ? 1 : 0; // Don't stretch or rotate raw pixels (equivalent to dcraw -j) O.use_fuji_rotate = 0; // Don't flip the image (equivalent to dcraw -t 0) O.user_flip = 0; // Output color space : raw-> sRGB (default) /* argv[argc] = _T("-o"); argc++; argv[argc] = _T("0"); argc++;*/ O.user_black = workspace.value("RawDDP/BlackPointTo0", false).toBool() ? 0 : -1; // Output is 16 bits (equivalent of dcraw flag -4) O.gamm[0] = O.gamm[1] = O.no_auto_bright = 1; O.output_bps = 16; g_Progress = pProgress; ZTRACE_RUNTIME("Calling LibRaw::unpack()"); if ((ret = rawProcessor.unpack()) != LIBRAW_SUCCESS) { bResult = false; ZTRACE_RUNTIME("Cannot unpack %s: %s", file.generic_u8string().c_str(), libraw_strerror(ret)); } if (!bResult) break; // // Create the class that populates the bitmap // CopyableSmartPtr<BitmapFillerInterface> pFiller = BitmapFillerInterface::makeBitmapFiller(pBitmap, pProgress); // Get the Colour Filter Array type and set into the bitmap filler m_CFAType = GetCurrentCFAType(); pFiller->SetCFAType(m_CFAType); #define RAW(row,col) raw_image[(row) * S.width + (col)] ZTRACE_DEVELOP("Extracting real image data (excluding the frame) from rawProcessor.imgdata.rawdata.raw_image"); // // This is a regular RAW file so no Fuji Super-CCD stuff // // Just copy the "real image" portion of the data excluding // the frame // #pragma omp parallel for default(shared) if(numberOfProcessors > 1) for (int row = 0; row < S.height; row++) { for (int col = 0; col < S.width; 
col++) { RAW(row, col) = RawData.raw_image[(row + S.top_margin) * S.raw_pitch / 2 + (col + S.left_margin)]; } } // // Now process the data that raw_image points to which is either // // 1) The output of post processing the Fuji Super-CCD raw, // stored in the USHORT array hung off raw_image, or // // 2) Normal common or garden raw Bayer matrix data that's been // copied from RawData.raw_image to raw_image (less the frame) // // Either way we should now be processing a regular greyscale 16-bit // pixel array which has an associated Bayer Matrix or is true monochrome // pFiller->setGrey(true); pFiller->setWidth(S.width); pFiller->setHeight(S.height); pFiller->setMaxColors((1 << 16) - 1); // Report User Black Point over-ride if (0 == O.user_black) ZTRACE_RUNTIME("User set Black Point to 0"); // // Before doing dark subtraction, normalise C.black / C.cblack[] // ZTRACE_DEVELOP("Before adjust_bl() C.black = %d.", C.black); ZTRACE_DEVELOP("First 10 C.cblack elements\n %d, %d, %d, %d\n %d, %d\n %d, %d, %d, %d", C.cblack[0], C.cblack[1], C.cblack[2], C.cblack[3], C.cblack[4], C.cblack[5], C.cblack[6], C.cblack[7], C.cblack[8], C.cblack[9]); rawProcessor.adjust_bl(); // // This code is based on code from LibRaw Version 19.2, specifically method: // // int LibRaw::subtract_black_internal() // // found at line 4532 in source file libraw_cxx.cpp // // Do dark subtraction on the image. If a user defined black level has // been set (it will be zero) then use that, otherwise just use the black // level for the camera. // // Note that this is only done on real image data, not the frame // // While doing so collect the largest value in the image data. 
// ZTRACE_DEVELOP("Subtracting black level of C.black = %d from raw_image data.", C.black); ZTRACE_DEVELOP("First 10 C.cblack elements\n %d, %d, %d, %d\n %d, %d\n %d, %d, %d, %d", C.cblack[0], C.cblack[1], C.cblack[2], C.cblack[3], C.cblack[4], C.cblack[5], C.cblack[6], C.cblack[7], C.cblack[8], C.cblack[9]); const int size = static_cast<int>(S.height) * static_cast<int>(S.width); if (!rawProcessor.is_phaseone_compressed() && (C.cblack[0] || C.cblack[1] || C.cblack[2] || C.cblack[3] || (C.cblack[4] && C.cblack[5]))) { const int cblk[4] = { static_cast<int>(C.cblack[0]), static_cast<int>(C.cblack[1]), static_cast<int>(C.cblack[2]), static_cast<int>(C.cblack[3]) }; int dmax = 0; // Maximum value of pixels in entire image. int lmax = 0; // Local (or Loop) maximum value found in the 'for' loops below. For OMP. if (C.cblack[4] && C.cblack[5]) { #pragma omp parallel default(shared) firstprivate(lmax) if(numberOfProcessors > 1) { #pragma omp for for (int i = 0; i < size; i++) { int val = raw_image[i]; val -= C.cblack[6 + i / S.width % C.cblack[4] * C.cblack[5] + i % S.width % C.cblack[5]]; val -= cblk[i & 3]; raw_image[i] = static_cast<std::uint16_t>(std::clamp(val, 0, 65535)); lmax = std::max(val, lmax); } #pragma omp critical dmax = std::max(lmax, dmax); // For non-OMP case this is equal to dmax = lmax. } } else { #pragma omp parallel default(shared) firstprivate(lmax) if(numberOfProcessors > 1) { #pragma omp for for (int i = 0; i < size; i++) { int val = raw_image[i]; val -= cblk[i & 3]; raw_image[i] = static_cast<std::uint16_t>(std::clamp(val, 0, 65535)); lmax = std::max(val, lmax); } #pragma omp critical dmax = std::max(lmax, dmax); // For non-OMP case this is equal to dmax = lmax. } } C.data_maximum = dmax & 0xffff; C.maximum -= C.black; memset(&C.cblack, 0, sizeof(C.cblack)); // Yeah, we used cblack[6+] values too! 
C.black = 0; } else { // Nothing to Do, maximum is already calculated, black level is 0, so no change // only calculate channel maximum; unsigned int dmax = 0; // Maximum value of pixels in entire image. unsigned int lmax = 0; // For OpenMP. #pragma omp parallel default(shared) firstprivate(lmax) if(numberOfProcessors > 1) { #pragma omp for for (int i = 0; i < size; i++) lmax = std::max(static_cast<unsigned int>(raw_image[i]), lmax); #pragma omp critical dmax = std::max(lmax, dmax); // For non-OMP case this is equal to dmax = lmax. } C.data_maximum = dmax; } // // The image data needs to be scaled to the "white balance co-efficients" // Currently do not handle "Auto White Balance" // float pre_mul[4] = { 0.0, 0.0, 0.0, 0.0 }; if (1 == O.user_mul[0]) { static_assert(sizeof(O.user_mul) >= sizeof(pre_mul)); ZTRACE_RUNTIME("No White Balance processing."); memcpy(pre_mul, O.user_mul, sizeof(pre_mul)); } else if (1 == O.use_camera_wb && -1 != C.cam_mul[0]) { static_assert(sizeof(C.cam_mul) >= sizeof(pre_mul)); ZTRACE_RUNTIME("Using Camera White Balance (as shot)."); memcpy(pre_mul, C.cam_mul, sizeof(pre_mul)); } else { static_assert(sizeof(C.pre_mul) >= sizeof(pre_mul)); ZTRACE_RUNTIME("Using Daylight White Balance."); memcpy(pre_mul, C.pre_mul, sizeof(pre_mul)); } ZTRACE_RUNTIME("White balance co-efficients being used are %f, %f, %f, %f", pre_mul[0], pre_mul[1], pre_mul[2], pre_mul[3]); if (0 == pre_mul[3]) pre_mul[3] = P1.colors < 4 ? pre_mul[1] : 1; // // Now apply a linear stretch to the raw data, scale to the "saturation" level // not to the value of the pixel with the greatest value (which may be higher // than the saturation level). 
// // const double dmax = *std::max_element(&pre_mul[0], &pre_mul[4]); const float dmin = *std::ranges::min_element(pre_mul); const float saturationScaling = 65535.0f / static_cast<float>(C.maximum); std::array<float, 8> scale_mul = { 0.0f, 0.0f, 0.0f, 0.0f, saturationScaling, saturationScaling, saturationScaling, saturationScaling }; for (int q = 0; q < 4; q++) scale_mul[q] = (pre_mul[q] /= dmin) * saturationScaling; ZTRACE_DEVELOP("Maximum value pixel has value %d", C.data_maximum); ZTRACE_DEVELOP("Saturation level is %d", C.maximum); ZTRACE_DEVELOP("Applying linear stretch to raw data. Scale values %f, %f, %f, %f %f, %f, %f, %f", scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3], scale_mul[4], scale_mul[5], scale_mul[6], scale_mul[7]); #pragma omp parallel default(shared) if(numberOfProcessors > 1) // No OPENMP: 240ms, with OPENMP: 92ms { #pragma omp master // There is no implied barrier. ZTRACE_RUNTIME("RAW file processing with %d OpenMP threads, little_endian is %s", omp_get_num_threads(), littleEndian ? "true" : "false"); #pragma omp for for (int row = 0; row < S.height; row++) { for (int col = 0; col < S.width; col++) { // What colour will this pixel become const int colour = rawProcessor.COLOR(row, col); const float val = scale_mul[colour] * static_cast<float>(RAW(row, col)); RAW(row, col) = static_cast<std::uint16_t>(std::clamp(static_cast<int>(val), 0, 65535)); } } }David Partridge
A long piece of code, 2/3 of
A long piece of code, 2/3 of which is some kind of debugging or verification stuff or comments, and the question "what's wrong here" is clearly not a question for LibRaw support.
We do not know. Too much extra details not related to the question itself.
Try asking your question in 20 times more compact form, in this case there is at least a chance that a person (and not a compiler or debugger) will understand at first glance what is written (and what is wrong).
-- Alex Tutubalin @LibRaw LLC
Alex,
Alex,
If you can't see my code, then you can't advise me what I am doing wrong. I left the comments there deliberately, as IMHO they should assist in understanding what I am doing
I imagine that this has to do with my code that processes the white balance but I don't know for sure. If I knew exactly where in my code the problem was I would of course post just that small section, but as I don't know where I am going wrong, I felt it much better to provide more information rather than less.
If you think this isn't a question for LibRaw technical support, then should I post this in the General section? If not there then where else should I go for assistance?
Thanks, David
David Partridge
Unless there's a compact,
Unless there's a compact, self-contained example that can be compiled independently of your environment, your macros, and so on, you shouldn't expect a meaningful answer.
Especially since there's no question. "What am I doing wrong" is meaningless without the context of "what do I want to achieve?" (That's unclear after a quick review of your code, and expecting someone to spend hours on it is also pretty unreasonable.)
-- Alex Tutubalin @LibRaw LLC
I totally understand your
I totally understand your position on this. I'll ask on the General section if there's anyone who would be prepared to assist.
As to what I am trying to achieve: it is simply to decode the raw file into my internal format such that, when it is displayed, I get the same visual results that other software does — right now the images are too red.
David Partridge
'Other software' usually does
'Other software' usually does some post-processing (black subtraction, white balance, demosaicing, tone curve...)
-- Alex Tutubalin @LibRaw LLC