Code display got messed up at the end. Here's what I hope is not messed up.
// No Auto White Balance
O.use_auto_wb = false;
//
// Do we disable all White Balance processing?
//
// Helper that sets all four user white-balance multipliers (R, G, B, G2)
// to the same value.
const auto setWbMult = [&](const float factor)
{
O.user_mul[0] = factor;
O.user_mul[1] = factor;
O.user_mul[2] = factor;
O.user_mul[3] = factor;
};
// A multiplier of 1.0 means "apply no white-balance scaling at all";
// 0.0 is the "unset" sentinel (user_mul[0] == 1 is tested further down
// when choosing which set of WB coefficients to use).
if (workspace.value("RawDDP/NoWB", false).toBool())
setWbMult(1.0f); // Yes, so set the user white balance multipliers to 1.0
else
setWbMult(0.0f); // No, so set the user white balance multipliers to 0.0
// Use the camera's "as shot" white balance only if the user asked for it.
O.use_camera_wb = workspace.value("RawDDP/CameraWB", false).toBool() ? 1 : 0;
// Don't stretch or rotate raw pixels (equivalent to dcraw -j)
O.use_fuji_rotate = 0;
// Don't flip the image (equivalent to dcraw -t 0)
O.user_flip = 0;
// Output color space : raw-> sRGB (default)
/*
argv[argc] = _T("-o");
argc++;
argv[argc] = _T("0");
argc++;*/
// Black point: 0 forces the black level to zero; -1 keeps LibRaw's
// camera-derived default.
O.user_black = workspace.value("RawDDP/BlackPointTo0", false).toBool() ? 0 : -1;
// Output is 16 bits (equivalent of dcraw flag -4)
// gamm[0] == gamm[1] == 1 selects a linear gamma curve, and
// no_auto_bright disables automatic brightening, keeping the data linear.
O.gamm[0] = O.gamm[1] = O.no_auto_bright = 1;
O.output_bps = 16;
// Expose the progress callback to LibRaw's progress hook
// (g_Progress is presumably read by a file-scope callback - TODO confirm).
g_Progress = pProgress;
ZTRACE_RUNTIME("Calling LibRaw::unpack()");
// Decode the raw sensor data. On failure, log the LibRaw error text and
// bail out of the enclosing loop via bResult/break.
if ((ret = rawProcessor.unpack()) != LIBRAW_SUCCESS)
{
bResult = false;
ZTRACE_RUNTIME("Cannot unpack %s: %s", file.generic_u8string().c_str(), libraw_strerror(ret));
}
if (!bResult)
break;
//
// Create the class that populates the bitmap
//
CopyableSmartPtr<BitmapFillerInterface> pFiller = BitmapFillerInterface::makeBitmapFiller(pBitmap, pProgress);
// Get the Colour Filter Array type and set into the bitmap filler
m_CFAType = GetCurrentCFAType();
pFiller->SetCFAType(m_CFAType);
// Index into the working (frame-stripped) image buffer: row-major,
// one 16-bit value per pixel, S.width values per row.
#define RAW(row,col) raw_image[(row) * S.width + (col)]
ZTRACE_DEVELOP("Extracting real image data (excluding the frame) from rawProcessor.imgdata.rawdata.raw_image");
//
// This is a regular RAW file so no Fuji Super-CCD stuff
//
// Just copy the "real image" portion of the data excluding
// the frame
//
// S.raw_pitch is in bytes; dividing by 2 converts it to a pitch in
// 16-bit pixels. top_margin/left_margin skip the calibration frame
// surrounding the active sensor area.
#pragma omp parallel for default(shared) if(numberOfProcessors > 1)
for (int row = 0; row < S.height; row++)
{
for (int col = 0; col < S.width; col++)
{
RAW(row, col) = RawData.raw_image[(row + S.top_margin) * S.raw_pitch / 2 + (col + S.left_margin)];
}
}
//
// Now process the data that raw_image points to which is either
//
// 1) The output of post processing the Fuji Super-CCD raw,
// stored in the USHORT array hung off raw_image, or
//
// 2) Normal common or garden raw Bayer matrix data that's been
// copied from RawData.raw_image to raw_image (less the frame)
//
// Either way we should now be processing a regular greyscale 16-bit
// pixel array which has an associated Bayer Matrix or is true monochrome
//
pFiller->setGrey(true);
pFiller->setWidth(S.width);
pFiller->setHeight(S.height);
// Full 16-bit range: 65535.
pFiller->setMaxColors((1 << 16) - 1);
// Report User Black Point over-ride
if (0 == O.user_black)
ZTRACE_RUNTIME("User set Black Point to 0");
//
// Before doing dark subtraction, normalise C.black / C.cblack[]
//
// adjust_bl() is LibRaw's black-level normalisation; it redistributes the
// common part between C.black and the per-channel C.cblack[] values
// (and honours O.user_black) - see the LibRaw sources for details.
ZTRACE_DEVELOP("Before adjust_bl() C.black = %d.", C.black);
ZTRACE_DEVELOP("First 10 C.cblack elements\n %d, %d, %d, %d\n %d, %d\n %d, %d, %d, %d",
C.cblack[0], C.cblack[1], C.cblack[2], C.cblack[3],
C.cblack[4], C.cblack[5],
C.cblack[6], C.cblack[7], C.cblack[8], C.cblack[9]);
rawProcessor.adjust_bl();
//
// This code is based on code from LibRaw Version 19.2, specifically method:
//
// int LibRaw::subtract_black_internal()
//
// found at line 4532 in source file libraw_cxx.cpp
//
// Do dark subtraction on the image. If a user defined black level has
// been set (it will be zero) then use that, otherwise just use the black
// level for the camera.
//
// Note that this is only done on real image data, not the frame
//
// While doing so collect the largest value in the image data.
//
ZTRACE_DEVELOP("Subtracting black level of C.black = %d from raw_image data.", C.black);
ZTRACE_DEVELOP("First 10 C.cblack elements\n %d, %d, %d, %d\n %d, %d\n %d, %d, %d, %d",
C.cblack[0], C.cblack[1], C.cblack[2], C.cblack[3],
C.cblack[4], C.cblack[5],
C.cblack[6], C.cblack[7], C.cblack[8], C.cblack[9]);
const int size = static_cast<int>(S.height) * static_cast<int>(S.width);
// Subtract black only when there is a per-channel black level
// (cblack[0..3]) or a repeating black pattern (cblack[4] rows by
// cblack[5] columns, values stored from cblack[6] onwards).
// Phase One compressed files are excluded, as in LibRaw.
if (!rawProcessor.is_phaseone_compressed() && (C.cblack[0] || C.cblack[1] || C.cblack[2] || C.cblack[3] || (C.cblack[4] && C.cblack[5])))
{
// Per-channel black levels, widened to signed int for the arithmetic below.
const int cblk[4] = { static_cast<int>(C.cblack[0]), static_cast<int>(C.cblack[1]), static_cast<int>(C.cblack[2]), static_cast<int>(C.cblack[3]) };
int dmax = 0; // Maximum value of pixels in entire image.
int lmax = 0; // Local (or Loop) maximum value found in the 'for' loops below. For OMP.
if (C.cblack[4] && C.cblack[5])
{
#pragma omp parallel default(shared) firstprivate(lmax) if(numberOfProcessors > 1)
{
#pragma omp for
for (int i = 0; i < size; i++)
{
int val = raw_image[i];
// Subtract the tiled black pattern: (row % cblack[4], col % cblack[5])
// indexes the pattern values stored from cblack[6] onwards.
val -= C.cblack[6 + i / S.width % C.cblack[4] * C.cblack[5] + i % S.width % C.cblack[5]];
// NOTE(review): LibRaw's original indexes cblk by the 4-component image
// channel; here i is a single-component pixel index, so i & 3 is
// effectively (col % 4) on even-width rows. This is only correct while
// cblack[0..3] are all equal (commonly true after adjust_bl()) - TODO confirm.
val -= cblk[i & 3];
// Clamp the stored value, but track the maximum from the unclamped
// value, matching LibRaw's behaviour.
raw_image[i] = static_cast<std::uint16_t>(std::clamp(val, 0, 65535));
lmax = std::max(val, lmax);
}
#pragma omp critical
dmax = std::max(lmax, dmax); // For non-OMP case this is equal to dmax = lmax.
}
}
else
{
// No repeating pattern: subtract only the per-channel black levels.
#pragma omp parallel default(shared) firstprivate(lmax) if(numberOfProcessors > 1)
{
#pragma omp for
for (int i = 0; i < size; i++)
{
int val = raw_image[i];
val -= cblk[i & 3];
raw_image[i] = static_cast<std::uint16_t>(std::clamp(val, 0, 65535));
lmax = std::max(val, lmax);
}
#pragma omp critical
dmax = std::max(lmax, dmax); // For non-OMP case this is equal to dmax = lmax.
}
}
// Record the observed maximum and fold the black level away so that
// later stages see black == 0 (mirrors LibRaw's subtract_black_internal()).
C.data_maximum = dmax & 0xffff;
C.maximum -= C.black;
memset(&C.cblack, 0, sizeof(C.cblack)); // Yeah, we used cblack[6+] values too!
C.black = 0;
}
else
{
// Nothing to Do, maximum is already calculated, black level is 0, so no change
// only calculate channel maximum;
unsigned int dmax = 0; // Maximum value of pixels in entire image.
unsigned int lmax = 0; // For OpenMP.
#pragma omp parallel default(shared) firstprivate(lmax) if(numberOfProcessors > 1)
{
#pragma omp for
for (int i = 0; i < size; i++)
lmax = std::max(static_cast<unsigned int>(raw_image[i]), lmax);
#pragma omp critical
dmax = std::max(lmax, dmax); // For non-OMP case this is equal to dmax = lmax.
}
C.data_maximum = dmax;
}
//
// The image data needs to be scaled to the "white balance co-efficients"
// Currently do not handle "Auto White Balance"
//
float pre_mul[4] = { 0.0, 0.0, 0.0, 0.0 };
// user_mul[0] == 1 is the sentinel set earlier in this function when
// "RawDDP/NoWB" was selected: all four multipliers are 1.0, i.e. no
// white-balance scaling at all.
if (1 == O.user_mul[0])
{
static_assert(sizeof(O.user_mul) >= sizeof(pre_mul));
ZTRACE_RUNTIME("No White Balance processing.");
memcpy(pre_mul, O.user_mul, sizeof(pre_mul));
}
// Camera ("as shot") white balance, provided the camera recorded one
// (cam_mul[0] of -1 presumably marks "not available" - TODO confirm
// against the LibRaw documentation).
else if (1 == O.use_camera_wb && -1 != C.cam_mul[0])
{
static_assert(sizeof(C.cam_mul) >= sizeof(pre_mul));
ZTRACE_RUNTIME("Using Camera White Balance (as shot).");
memcpy(pre_mul, C.cam_mul, sizeof(pre_mul));
}
else
{
// Fall back to the daylight multipliers from the camera colour profile.
static_assert(sizeof(C.pre_mul) >= sizeof(pre_mul));
ZTRACE_RUNTIME("Using Daylight White Balance.");
memcpy(pre_mul, C.pre_mul, sizeof(pre_mul));
}
ZTRACE_RUNTIME("White balance co-efficients being used are %f, %f, %f, %f",
pre_mul[0], pre_mul[1], pre_mul[2], pre_mul[3]);
// If the fourth coefficient (second green) is unset, reuse the green
// coefficient on 3-colour sensors, otherwise leave it neutral (1).
if (0 == pre_mul[3])
pre_mul[3] = P1.colors < 4 ? pre_mul[1] : 1;
//
// Now apply a linear stretch to the raw data, scale to the "saturation" level
// not to the value of the pixel with the greatest value (which may be higher
// than the saturation level).
//
// const double dmax = *std::max_element(&pre_mul[0], &pre_mul[4]);
// Normalise by the SMALLEST coefficient (dcraw normalises by the largest),
// so every channel is scaled by a factor >= 1 before the saturation stretch.
const float dmin = *std::ranges::min_element(pre_mul);
// Maps the saturation level C.maximum onto the full 16-bit range.
const float saturationScaling = 65535.0f / static_cast<float>(C.maximum);
// Slots 4..7 hold plain saturation scaling - presumably a guard for
// COLOR() values above 3 - TODO confirm.
std::array<float, 8> scale_mul = { 0.0f, 0.0f, 0.0f, 0.0f, saturationScaling, saturationScaling, saturationScaling, saturationScaling };
for (int q = 0; q < 4; q++)
scale_mul[q] = (pre_mul[q] /= dmin) * saturationScaling;
ZTRACE_DEVELOP("Maximum value pixel has value %d", C.data_maximum);
ZTRACE_DEVELOP("Saturation level is %d", C.maximum);
ZTRACE_DEVELOP("Applying linear stretch to raw data. Scale values %f, %f, %f, %f %f, %f, %f, %f",
scale_mul[0], scale_mul[1], scale_mul[2], scale_mul[3], scale_mul[4], scale_mul[5], scale_mul[6], scale_mul[7]);
#pragma omp parallel default(shared) if(numberOfProcessors > 1) // No OPENMP: 240ms, with OPENMP: 92ms
{
#pragma omp master // There is no implied barrier.
ZTRACE_RUNTIME("RAW file processing with %d OpenMP threads, little_endian is %s", omp_get_num_threads(), littleEndian ? "true" : "false");
#pragma omp for
for (int row = 0; row < S.height; row++)
{
for (int col = 0; col < S.width; col++)
{
// What colour will this pixel become
const int colour = rawProcessor.COLOR(row, col);
// Scale in float, then clamp back into the 16-bit range.
const float val = scale_mul[colour] * static_cast<float>(RAW(row, col));
RAW(row, col) = static_cast<std::uint16_t>(std::clamp(static_cast<int>(val), 0, 65535));
}
}
}
Code display got messed up at the end. Here's what I hope is not messed up.
[Editor's note: the remainder of the original post was a line-break-mangled duplicate of the complete code listing above; it contained no additional content.]