Changeset View
Changeset View
Standalone View
Standalone View
kstars/fitsviewer/stretch.cpp
- This file was added.
1 | #include "stretch.h" | ||||
---|---|---|---|---|---|
2 | | ||||
3 | /* Stretch | ||||
4 | | ||||
5 | This application is free software; you can redistribute it and/or | ||||
6 | modify it under the terms of the GNU General Public | ||||
7 | License as published by the Free Software Foundation; either | ||||
8 | version 2 of the License, or (at your option) any later version. | ||||
9 | */ | ||||
10 | | ||||
11 | #include "stretch.h" | ||||
12 | | ||||
13 | #include <fitsio.h> | ||||
14 | #include <math.h> | ||||
15 | #include <QtConcurrent> | ||||
16 | | ||||
17 | namespace { | ||||
18 | | ||||
// Returns the median of the vector: the element that ends up at index
// size/2 after std::nth_element (the upper median for even sizes).
// The vector is partially reordered in an unspecified way.
// Returns a default-constructed T for an empty vector (previously this
// read values[0] of an empty vector -- undefined behavior).
template <typename T>
T median(std::vector<T>& values)
{
    if (values.empty())
        return T();
    const int middle = values.size() / 2;
    std::nth_element(values.begin(), values.begin() + middle, values.end());
    return values[middle];
}

// Returns the median of every sampleBy-th element of values.
// The values are not modified.
template <typename T>
T median(T *values, int size, int sampleBy)
{
    const int downsampled_size = size / sampleBy;
    std::vector<T> samples(downsampled_size);
    // BUG FIX: the original loop advanced 'index' while 'index < size',
    // performing ceil(size / sampleBy) writes into a vector holding only
    // size / sampleBy elements -- an out-of-bounds write whenever size is
    // not a multiple of sampleBy. Iterate over the sample count instead.
    for (int i = 0; i < downsampled_size; ++i)
        samples[i] = values[i * sampleBy];
    return median(samples);
}
40 | | ||||
// This stretches one channel given the input parameters.
// Based on the midtones transfer function (MTF) spec in section 8.5.6
// http://pixinsight.com/doc/docs/XISF-1.0-spec/XISF-1.0-spec.html
// Uses multiple threads (one QtConcurrent task per image row), blocks until done.
// The expansion parameters in stretch_params are not used.
template <typename T>
void stretchOneChannel(T *input_buffer, QImage *output_image,
                       const StretchParams& stretch_params,
                       int input_range, int image_height, int image_width)
{
    QVector<QFuture<void>> futures;

    // We're outputting uint8, so the max output is 255.
    constexpr int maxOutput = 255;

    // Maximum possible input value (e.g. 1024*64 - 1 for a 16 bit unsigned int).
    const float maxInput = input_range > 1 ? input_range - 1 : input_range;

    // Single-channel (grey) stretch parameters, all on a 0 -> 1.0 scale.
    const float midtones = stretch_params.grey_red.midtones;
    const float highlights = stretch_params.grey_red.highlights;
    const float shadows = stretch_params.grey_red.shadows;

    // Precomputed expressions moved out of the loop.
    // highlights - shadows, protecting for divide-by-0, in a 0->1.0 scale.
    const float hsRangeFactor = highlights == shadows ? 1.0 : 1.0 / (highlights - shadows);
    // Shadow and highlight values translated to the ADU scale.
    // Note: the conversion truncates when T is an integral type -- intended.
    const T nativeShadows = shadows * maxInput;
    const T nativeHighlights = highlights * maxInput;
    // Constants based on above needed for the stretch calculations.
    // They fold the MTF into: output = (x * k1) / (x * k2 - midtones),
    // where x = input - nativeShadows.
    const float k1 = (midtones - 1) * hsRangeFactor * maxOutput / maxInput;
    const float k2 = ((2 * midtones) - 1) * hsRangeFactor / maxInput;

    for (int j = 0; j < image_height; j++)
    {
        // One task per row. The lambda captures by value ([=]) so each task
        // keeps its own copy of j and the precomputed constants.
        futures.append(QtConcurrent::run([ = ]()
        {
            T * inputLine = input_buffer + j * image_width;
            auto * scanLine = output_image->scanLine(j);

            for (int i = 0; i < image_width; i++)
            {
                const T input = inputLine[i];
                // Clip: below the shadows point -> 0, at/above highlights -> 255.
                if (input < nativeShadows) scanLine[i] = 0;
                else if (input >= nativeHighlights) scanLine[i] = maxOutput;
                else
                {
                    // Midtones transfer function, scaled to 0..255; the
                    // assignment narrows the float result to uint8.
                    const T inputFloored = (input - nativeShadows);
                    scanLine[i] = (inputFloored * k1) / (inputFloored * k2 - midtones);
                }
            }
        }));
    }
    // Block until every row task has completed.
    for(QFuture<void> future : futures)
        future.waitForFinished();
}
96 | | ||||
// This is like the above 1-channel stretch, but extended for 3 channels.
// This could have been more modular, but the three channels are combined
// into a single qRgb value at the end, so it seems the simplest thing is to
// replicate the code. It is assumed the colors are not interleaved--the red image
// is stored fully, then the green, then the blue.
// Uses one QtConcurrent task per image row, blocks until done.
template <typename T>
void stretchThreeChannels(T *inputBuffer, QImage *outputImage,
                          const StretchParams& stretchParams,
                          int inputRange, int imageHeight, int imageWidth)
{
    QVector<QFuture<void>> futures;

    // We're outputting uint8, so the max output is 255.
    constexpr int maxOutput = 255;

    // Maximum possible input value (e.g. 1024*64 - 1 for a 16 bit unsigned int).
    const float maxInput = inputRange > 1 ? inputRange - 1 : inputRange;

    // Per-channel stretch parameters, all on a 0 -> 1.0 scale.
    const float midtonesR = stretchParams.grey_red.midtones;
    const float highlightsR = stretchParams.grey_red.highlights;
    const float shadowsR = stretchParams.grey_red.shadows;
    const float midtonesG = stretchParams.green.midtones;
    const float highlightsG = stretchParams.green.highlights;
    const float shadowsG = stretchParams.green.shadows;
    const float midtonesB = stretchParams.blue.midtones;
    const float highlightsB = stretchParams.blue.highlights;
    const float shadowsB = stretchParams.blue.shadows;

    // Precomputed expressions moved out of the loop.
    // highlights - shadows, protecting for divide-by-0, in a 0->1.0 scale.
    const float hsRangeFactorR = highlightsR == shadowsR ? 1.0 : 1.0 / (highlightsR - shadowsR);
    const float hsRangeFactorG = highlightsG == shadowsG ? 1.0 : 1.0 / (highlightsG - shadowsG);
    const float hsRangeFactorB = highlightsB == shadowsB ? 1.0 : 1.0 / (highlightsB - shadowsB);
    // Shadow and highlight values translated to the ADU scale.
    // Note: the conversion truncates when T is an integral type -- intended.
    const T nativeShadowsR = shadowsR * maxInput;
    const T nativeShadowsG = shadowsG * maxInput;
    const T nativeShadowsB = shadowsB * maxInput;
    const T nativeHighlightsR = highlightsR * maxInput;
    const T nativeHighlightsG = highlightsG * maxInput;
    const T nativeHighlightsB = highlightsB * maxInput;
    // Constants based on above needed for the stretch calculations.
    // They fold the MTF into: output = (x * k1) / (x * k2 - midtones),
    // where x = input - nativeShadows (per channel).
    const float k1R = (midtonesR - 1) * hsRangeFactorR * maxOutput / maxInput;
    const float k1G = (midtonesG - 1) * hsRangeFactorG * maxOutput / maxInput;
    const float k1B = (midtonesB - 1) * hsRangeFactorB * maxOutput / maxInput;
    const float k2R = ((2 * midtonesR) - 1) * hsRangeFactorR / maxInput;
    const float k2G = ((2 * midtonesG) - 1) * hsRangeFactorG / maxInput;
    const float k2B = ((2 * midtonesB) - 1) * hsRangeFactorB / maxInput;

    // Number of pixels in one channel plane; also the offset between planes.
    const int size = imageWidth * imageHeight;

    for (int j = 0; j < imageHeight; j++)
    {
        // One task per row. The lambda captures by value ([=]) so each task
        // keeps its own copy of j and the precomputed constants.
        futures.append(QtConcurrent::run([ = ]()
        {
            // R, G, B input images are stored one after another.
            T * inputLineR = inputBuffer + j * imageWidth;
            T * inputLineG = inputLineR + size;
            T * inputLineB = inputLineG + size;

            auto * scanLine = reinterpret_cast<QRgb*>(outputImage->scanLine(j));

            for (int i = 0; i < imageWidth; i++)
            {
                const T inputR = inputLineR[i];
                const T inputG = inputLineG[i];
                const T inputB = inputLineB[i];

                uint8_t red, green, blue;

                // Clip: below shadows -> 0, at/above highlights -> 255,
                // otherwise apply the midtones transfer function.
                if (inputR < nativeShadowsR) red = 0;
                else if (inputR >= nativeHighlightsR) red = maxOutput;
                else
                {
                    const T inputFloored = (inputR - nativeShadowsR);
                    red = (inputFloored * k1R) / (inputFloored * k2R - midtonesR);
                }

                if (inputG < nativeShadowsG) green = 0;
                else if (inputG >= nativeHighlightsG) green = maxOutput;
                else
                {
                    const T inputFloored = (inputG - nativeShadowsG);
                    green = (inputFloored * k1G) / (inputFloored * k2G - midtonesG);
                }

                if (inputB < nativeShadowsB) blue = 0;
                else if (inputB >= nativeHighlightsB) blue = maxOutput;
                else
                {
                    const T inputFloored = (inputB - nativeShadowsB);
                    blue = (inputFloored * k1B) / (inputFloored * k2B - midtonesB);
                }
                scanLine[i] = qRgb(red, green, blue);
            }
        }));
    }
    // Block until every row task has completed.
    for(QFuture<void> future : futures)
        future.waitForFinished();
}
196 | | ||||
197 | template <typename T> | ||||
198 | void stretchChannels(T *input_buffer, QImage *output_image, | ||||
199 | const StretchParams& stretch_params, | ||||
200 | int input_range, int image_height, int image_width, int num_channels) | ||||
201 | { | ||||
202 | if (num_channels == 1) | ||||
203 | stretchOneChannel(input_buffer, output_image, stretch_params, input_range, | ||||
204 | image_height, image_width); | ||||
205 | else if (num_channels == 3) | ||||
206 | stretchThreeChannels(input_buffer, output_image, stretch_params, input_range, | ||||
207 | image_height, image_width); | ||||
208 | } | ||||
209 | | ||||
210 | // See section 8.5.7 in above link http://pixinsight.com/doc/docs/XISF-1.0-spec/XISF-1.0-spec.html | ||||
211 | template <typename T> | ||||
212 | void computeParamsOneChannel(T *buffer, StretchParams1Channel *params, | ||||
213 | int inputRange, int height, int width) | ||||
214 | { | ||||
215 | // Find the median sample. | ||||
216 | constexpr int maxSamples = 500000; | ||||
217 | const int sampleBy = width * height < maxSamples ? 1 : width * height / maxSamples; | ||||
218 | const int size = width * height; | ||||
219 | T medianSample = median(buffer, width * height, sampleBy); | ||||
220 | | ||||
221 | // Find the Median deviation: 1.4826 * median of abs(sample[i] - median). | ||||
222 | const int numSamples = width * height / sampleBy; | ||||
223 | std::vector<T> deviations(numSamples); | ||||
224 | for (int index = 0, i = 0; index < size; ++i, index += sampleBy) | ||||
225 | { | ||||
226 | if (medianSample > buffer[index]) | ||||
227 | deviations[i] = medianSample - buffer[index]; | ||||
228 | else | ||||
229 | deviations[i] = buffer[index] - medianSample; | ||||
230 | } | ||||
231 | | ||||
232 | // Shift everything to 0 -> 1.0. | ||||
233 | const float medDev = median(deviations); | ||||
234 | const float normalizedMedian = medianSample / static_cast<float>(inputRange); | ||||
235 | const float MADN = 1.4826 * medDev / static_cast<float>(inputRange); | ||||
236 | | ||||
237 | const bool upperHalf = normalizedMedian > 0.5; | ||||
238 | | ||||
239 | const float shadows = (upperHalf || MADN == 0) ? 0.0 : | ||||
240 | fmin(1.0, fmax(0.0, (normalizedMedian + -2.8 * MADN))); | ||||
241 | | ||||
242 | const float highlights = (!upperHalf || MADN == 0) ? 1.0 : | ||||
243 | fmin(1.0, fmax(0.0, (normalizedMedian - -2.8 * MADN))); | ||||
244 | | ||||
245 | float X, M; | ||||
246 | constexpr float B = 0.25; | ||||
247 | if (!upperHalf) { | ||||
248 | X = normalizedMedian - shadows; | ||||
249 | M = B; | ||||
250 | } else { | ||||
251 | X = B; | ||||
252 | M = highlights - normalizedMedian; | ||||
253 | } | ||||
254 | float midtones; | ||||
255 | if (X == 0) midtones = 0; | ||||
256 | else if (X == M) midtones = 0.5; | ||||
257 | else if (X == 1) midtones = 1.0; | ||||
258 | else midtones = ((M - 1) * X) / ((2 * M - 1) * X - M); | ||||
259 | | ||||
260 | // Store the params. | ||||
261 | params->shadows = shadows; | ||||
262 | params->highlights = highlights; | ||||
263 | params->midtones = midtones; | ||||
264 | params->shadows_expansion = 0.0; | ||||
265 | params->highlights_expansion = 1.0; | ||||
266 | } | ||||
267 | | ||||
268 | // Need to know the possible range of input values. | ||||
269 | // Using the type of the sample and guessing. | ||||
270 | // Perhaps we should examine the contents for the file | ||||
271 | // (e.g. look at maximum value and extrapolate from that). | ||||
272 | int getRange(int data_type) | ||||
273 | { | ||||
274 | switch (data_type) | ||||
275 | { | ||||
276 | case TBYTE: | ||||
277 | return 256; | ||||
278 | break; | ||||
279 | case TSHORT: | ||||
280 | return 64*1024; | ||||
281 | break; | ||||
282 | case TUSHORT: | ||||
283 | return 64*1024; | ||||
284 | break; | ||||
285 | case TLONG: | ||||
286 | return 64*1024; | ||||
287 | break; | ||||
288 | case TFLOAT: | ||||
289 | return 64*1024; | ||||
290 | break; | ||||
291 | case TLONGLONG: | ||||
292 | return 64*1024; | ||||
293 | break; | ||||
294 | case TDOUBLE: | ||||
295 | return 64*1024; | ||||
296 | break; | ||||
297 | default: | ||||
298 | return 64*1024; | ||||
299 | break; | ||||
300 | } | ||||
301 | } | ||||
302 | | ||||
303 | } // namespace | ||||
304 | | ||||
305 | Stretch::Stretch(int width, int height, int channels, int data_type) | ||||
306 | { | ||||
307 | image_width = width; | ||||
308 | image_height = height; | ||||
309 | image_channels = channels; | ||||
310 | dataType = data_type; | ||||
311 | input_range = getRange(dataType); | ||||
312 | } | ||||
313 | | ||||
314 | void Stretch::run(uint8_t *input, QImage *outputImage) | ||||
315 | { | ||||
316 | switch (dataType) | ||||
317 | { | ||||
318 | case TBYTE: | ||||
319 | stretchChannels(reinterpret_cast<uint8_t*>(input), outputImage, params, | ||||
320 | input_range, image_height, image_width, image_channels); | ||||
321 | break; | ||||
322 | case TSHORT: | ||||
323 | stretchChannels(reinterpret_cast<short*>(input), outputImage, params, | ||||
324 | input_range, image_height, image_width, image_channels); | ||||
325 | break; | ||||
326 | case TUSHORT: | ||||
327 | stretchChannels(reinterpret_cast<unsigned short*>(input), outputImage, params, | ||||
328 | input_range, image_height, image_width, image_channels); | ||||
329 | break; | ||||
330 | case TLONG: | ||||
331 | stretchChannels(reinterpret_cast<long*>(input), outputImage, params, | ||||
332 | input_range, image_height, image_width, image_channels); | ||||
333 | break; | ||||
334 | case TFLOAT: | ||||
335 | stretchChannels(reinterpret_cast<float*>(input), outputImage, params, | ||||
336 | input_range, image_height, image_width, image_channels); | ||||
337 | break; | ||||
338 | case TLONGLONG: | ||||
339 | stretchChannels(reinterpret_cast<long long*>(input), outputImage, params, | ||||
340 | input_range, image_height, image_width, image_channels); | ||||
341 | break; | ||||
342 | case TDOUBLE: | ||||
343 | stretchChannels(reinterpret_cast<double*>(input), outputImage, params, | ||||
344 | input_range, image_height, image_width, image_channels); | ||||
345 | break; | ||||
346 | default: | ||||
347 | break; | ||||
348 | } | ||||
349 | } | ||||
350 | | ||||
351 | StretchParams Stretch::computeParams(uint8_t *input) | ||||
352 | { | ||||
353 | StretchParams result; | ||||
354 | for (int channel = 0; channel < image_channels; ++channel) | ||||
355 | { | ||||
356 | int offset = channel * image_width * image_height; | ||||
357 | StretchParams1Channel *params = channel == 0 ? &result.grey_red : | ||||
358 | (channel == 1 ? &result.green : &result.blue); | ||||
359 | switch (dataType) | ||||
360 | { | ||||
361 | case TBYTE: | ||||
362 | { | ||||
363 | auto buffer = reinterpret_cast<uint8_t*>(input); | ||||
364 | computeParamsOneChannel(buffer + offset, params, input_range, | ||||
365 | image_height, image_width); | ||||
366 | break; | ||||
367 | } | ||||
368 | case TSHORT: | ||||
369 | { | ||||
370 | auto buffer = reinterpret_cast<short*>(input); | ||||
371 | computeParamsOneChannel(buffer + offset, params, input_range, | ||||
372 | image_height, image_width); | ||||
373 | break; | ||||
374 | } | ||||
375 | case TUSHORT: | ||||
376 | { | ||||
377 | auto buffer = reinterpret_cast<unsigned short*>(input); | ||||
378 | computeParamsOneChannel(buffer + offset, params, input_range, | ||||
379 | image_height, image_width); | ||||
380 | break; | ||||
381 | } | ||||
382 | case TLONG: | ||||
383 | { | ||||
384 | auto buffer = reinterpret_cast<long*>(input); | ||||
385 | computeParamsOneChannel(buffer + offset, params, input_range, | ||||
386 | image_height, image_width); | ||||
387 | break; | ||||
388 | } | ||||
389 | case TFLOAT: | ||||
390 | { | ||||
391 | auto buffer = reinterpret_cast<float*>(input); | ||||
392 | computeParamsOneChannel(buffer + offset, params, input_range, | ||||
393 | image_height, image_width); | ||||
394 | break; | ||||
395 | } | ||||
396 | case TLONGLONG: | ||||
397 | { | ||||
398 | auto buffer = reinterpret_cast<long long*>(input); | ||||
399 | computeParamsOneChannel(buffer + offset, params, input_range, | ||||
400 | image_height, image_width); | ||||
401 | break; | ||||
402 | } | ||||
403 | case TDOUBLE: | ||||
404 | { | ||||
405 | auto buffer = reinterpret_cast<double*>(input); | ||||
406 | computeParamsOneChannel(buffer + offset, params, input_range, | ||||
407 | image_height, image_width); | ||||
408 | break; | ||||
409 | } | ||||
410 | default: | ||||
411 | break; | ||||
412 | } | ||||
413 | } | ||||
414 | return result; | ||||
415 | } |