Coverage for colour/appearance/ciecam02.py: 100%
269 statements
« prev ^ index » next coverage.py v7.11.0, created at 2025-11-15 19:01 +1300
1"""
2CIECAM02 Colour Appearance Model
3================================
5Define the *CIECAM02* colour appearance model for predicting perceptual colour
6attributes under varying viewing conditions.
8- :class:`colour.appearance.InductionFactors_CIECAM02`
9- :attr:`colour.VIEWING_CONDITIONS_CIECAM02`
10- :class:`colour.CAM_Specification_CIECAM02`
11- :func:`colour.XYZ_to_CIECAM02`
12- :func:`colour.CIECAM02_to_XYZ`
14References
15----------
16- :cite:`Fairchild2004c` : Fairchild, M. D. (2004). CIECAM02. In Color
17 Appearance Models (2nd ed., pp. 289-301). Wiley. ISBN:978-0-470-01216-1
18- :cite:`InternationalElectrotechnicalCommission1999a` : International
19 Electrotechnical Commission. (1999). IEC 61966-2-1:1999 - Multimedia
20 systems and equipment - Colour measurement and management - Part 2-1:
21 Colour management - Default RGB colour space - sRGB (p. 51).
22 https://webstore.iec.ch/publication/6169
23- :cite:`Luo2013` : Luo, Ming Ronnier, & Li, C. (2013). CIECAM02 and Its
24 Recent Developments. In C. Fernandez-Maloigne (Ed.), Advanced Color Image
25 Processing and Analysis (pp. 19-58). Springer New York.
26 doi:10.1007/978-1-4419-6190-7
27- :cite:`Moroneya` : Moroney, N., Fairchild, M. D., Hunt, R. W. G., Li, C.,
28 Luo, M. R., & Newman, T. (2002). The CIECAM02 color appearance model. Color
29 and Imaging Conference, 1, 23-27.
30- :cite:`Wikipedia2007a` : Fairchild, M. D. (2004). CIECAM02. In Color
31 Appearance Models (2nd ed., pp. 289-301). Wiley. ISBN:978-0-470-01216-1
32"""
34from __future__ import annotations
36import typing
37from dataclasses import astuple, dataclass, field
39import numpy as np
41from colour.adaptation import CAT_CAT02
42from colour.algebra import sdiv, sdiv_mode, spow, vecmul
43from colour.appearance.hunt import (
44 MATRIX_HPE_TO_XYZ,
45 MATRIX_XYZ_TO_HPE,
46 luminance_level_adaptation_factor,
47)
48from colour.colorimetry import CCS_ILLUMINANTS
49from colour.constants import EPSILON
51if typing.TYPE_CHECKING:
52 from colour.hints import ArrayLike, Domain100, Range100, Tuple
54from colour.hints import Annotated, NDArrayFloat, cast
55from colour.models import xy_to_XYZ
56from colour.utilities import (
57 CanonicalMapping,
58 MixinDataclassArithmetic,
59 MixinDataclassIterable,
60 as_float,
61 as_float_array,
62 as_int_array,
63 from_range_100,
64 from_range_degrees,
65 has_only_nan,
66 ones,
67 to_domain_100,
68 to_domain_degrees,
69 tsplit,
70 tstack,
71 zeros,
72)
73from colour.utilities.documentation import DocstringDict, is_documentation_building
# Module attribution metadata.
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"

# Explicit public API of this module.
__all__ = [
    "CAT_INVERSE_CAT02",
    "InductionFactors_CIECAM02",
    "VIEWING_CONDITIONS_CIECAM02",
    "HUE_DATA_FOR_HUE_QUADRATURE",
    "CAM_KWARGS_CIECAM02_sRGB",
    "CAM_Specification_CIECAM02",
    "XYZ_to_CIECAM02",
    "CIECAM02_to_XYZ",
    "chromatic_induction_factors",
    "base_exponential_non_linearity",
    "viewing_conditions_dependent_parameters",
    "degree_of_adaptation",
    "full_chromatic_adaptation_forward",
    "full_chromatic_adaptation_inverse",
    "RGB_to_rgb",
    "rgb_to_RGB",
    "post_adaptation_non_linear_response_compression_forward",
    "post_adaptation_non_linear_response_compression_inverse",
    "opponent_colour_dimensions_forward",
    "opponent_colour_dimensions_inverse",
    "hue_angle",
    "hue_quadrature",
    "eccentricity_factor",
    "achromatic_response_forward",
    "achromatic_response_inverse",
    "lightness_correlate",
    "brightness_correlate",
    "temporary_magnitude_quantity_forward",
    "temporary_magnitude_quantity_inverse",
    "chroma_correlate",
    "colourfulness_correlate",
    "saturation_correlate",
    "P",
    "matrix_post_adaptation_non_linear_response_compression",
]
# Computed once at import time so that the inverse transform does not need to
# be re-derived on every conversion.
CAT_INVERSE_CAT02: NDArrayFloat = np.linalg.inv(CAT_CAT02)
"""Inverse CAT02 chromatic adaptation transform."""
@dataclass(frozen=True)
class InductionFactors_CIECAM02(MixinDataclassIterable):
    """
    Define the *CIECAM02* colour appearance model induction factors.

    Parameters
    ----------
    F
        Maximum degree of adaptation :math:`F`.
    c
        Exponential non-linearity :math:`c`.
    N_c
        Chromatic induction factor :math:`N_c`.

    References
    ----------
    :cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,
    :cite:`Wikipedia2007a`
    """

    F: float  # Maximum degree of adaptation.
    c: float  # Exponential non-linearity.
    N_c: float  # Chromatic induction factor.
# Induction factors for the three canonical surround conditions:
# "Average", "Dim" and "Dark", given as (F, c, N_c) triplets.
VIEWING_CONDITIONS_CIECAM02: CanonicalMapping = CanonicalMapping(
    {
        "Average": InductionFactors_CIECAM02(1, 0.69, 1),
        "Dim": InductionFactors_CIECAM02(0.9, 0.59, 0.9),
        "Dark": InductionFactors_CIECAM02(0.8, 0.525, 0.8),
    }
)
VIEWING_CONDITIONS_CIECAM02.__doc__ = """
Define the reference *CIECAM02* colour appearance model viewing conditions.

References
----------
:cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,
:cite:`Wikipedia2007a`
"""
# Unique hue data used by :func:`hue_quadrature`: hue angles :math:`h_i` in
# degrees, eccentricity factors :math:`e_i` and hue quadrature values
# :math:`H_i`.
HUE_DATA_FOR_HUE_QUADRATURE: dict = {
    "h_i": np.array([20.14, 90.00, 164.25, 237.53, 380.14]),
    "e_i": np.array([0.8, 0.7, 1.0, 1.2, 0.8]),
    "H_i": np.array([0.0, 100.0, 200.0, 300.0, 400.0]),
}
# Default *sRGB* parameters: "D65" whitepoint scaled to domain-100, adapting
# field luminance of 64 / pi * 0.2 cd/m^2, 20% background luminous factor and
# "Average" surround.
CAM_KWARGS_CIECAM02_sRGB: dict = {
    "XYZ_w": xy_to_XYZ(CCS_ILLUMINANTS["CIE 1931 2 Degree Standard Observer"]["D65"])
    * 100,
    "L_A": 64 / np.pi * 0.2,
    "Y_b": 20,
    "surround": VIEWING_CONDITIONS_CIECAM02["Average"],
}
if is_documentation_building():  # pragma: no cover
    CAM_KWARGS_CIECAM02_sRGB = DocstringDict(CAM_KWARGS_CIECAM02_sRGB)
    CAM_KWARGS_CIECAM02_sRGB.__doc__ = """
Default parameter values for the *CIECAM02* colour appearance model usage in
the context of *sRGB*.

References
----------
:cite:`Fairchild2004c`, :cite:`InternationalElectrotechnicalCommission1999a`,
:cite:`Luo2013`, :cite:`Moroneya`, :cite:`Wikipedia2007a`
"""
@dataclass
class CAM_Specification_CIECAM02(MixinDataclassArithmetic):
    """
    Define the *CIECAM02* colour appearance model specification.

    Parameters
    ----------
    J
        Correlate of *lightness* :math:`J`.
    C
        Correlate of *chroma* :math:`C`.
    h
        *Hue* angle :math:`h` in degrees.
    s
        Correlate of *saturation* :math:`s`.
    Q
        Correlate of *brightness* :math:`Q`.
    M
        Correlate of *colourfulness* :math:`M`.
    H
        *Hue* :math:`h` quadrature :math:`H`.
    HC
        *Hue* :math:`h` composition :math:`H^C`.

    References
    ----------
    :cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,
    :cite:`Wikipedia2007a`
    """

    # NOTE(review): defaults are expressed with "default_factory" rather than
    # plain "None", presumably for "MixinDataclassArithmetic" compatibility —
    # confirm before simplifying.
    J: float | NDArrayFloat | None = field(default_factory=lambda: None)
    C: float | NDArrayFloat | None = field(default_factory=lambda: None)
    h: float | NDArrayFloat | None = field(default_factory=lambda: None)
    s: float | NDArrayFloat | None = field(default_factory=lambda: None)
    Q: float | NDArrayFloat | None = field(default_factory=lambda: None)
    M: float | NDArrayFloat | None = field(default_factory=lambda: None)
    H: float | NDArrayFloat | None = field(default_factory=lambda: None)
    HC: float | NDArrayFloat | None = field(default_factory=lambda: None)
def XYZ_to_CIECAM02(
    XYZ: Domain100,
    XYZ_w: Domain100,
    L_A: ArrayLike,
    Y_b: ArrayLike,
    surround: InductionFactors_CIECAM02 = VIEWING_CONDITIONS_CIECAM02["Average"],
    discount_illuminant: bool = False,
    compute_H: bool = True,
) -> Annotated[CAM_Specification_CIECAM02, (100, 100, 360, 100, 100, 100, 400)]:
    """
    Compute the *CIECAM02* colour appearance model correlates from the
    specified *CIE XYZ* tristimulus values.

    Parameters
    ----------
    XYZ
        *CIE XYZ* tristimulus values of test sample / stimulus.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often
        taken to be 20% of the luminance of a white object in the scene).
    Y_b
        Luminous factor of background :math:`Y_b` such as :math:`Y_b = 100
        \\times L_b / L_w` where :math:`L_w` is the luminance of the light
        source and :math:`L_b` is the luminance of the background. For
        viewing images, :math:`Y_b` can be the average :math:`Y` value for
        the pixels in the entire image, or frequently, a :math:`Y` value of
        20, approximating an :math:`L^*` of 50 is used.
    surround
        Surround viewing conditions induction factors.
    discount_illuminant
        Truth value indicating if the illuminant should be discounted.
    compute_H
        Whether to compute *Hue* :math:`h` quadrature :math:`H`. :math:`H`
        is rarely used, and expensive to compute.

    Returns
    -------
    :class:`colour.CAM_Specification_CIECAM02`
        *CIECAM02* colour appearance model specification.

    Notes
    -----
    +---------------------+-----------------------+---------------+
    | **Domain**          | **Scale - Reference** | **Scale - 1** |
    +=====================+=======================+===============+
    | ``XYZ``             | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``XYZ_w``           | 100                   | 1             |
    +---------------------+-----------------------+---------------+

    +---------------------+-----------------------+---------------+
    | **Range**           | **Scale - Reference** | **Scale - 1** |
    +=====================+=======================+===============+
    | ``specification.J`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.C`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.h`` | 360                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.s`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.Q`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.M`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.H`` | 400                   | 1             |
    +---------------------+-----------------------+---------------+

    References
    ----------
    :cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,
    :cite:`Wikipedia2007a`

    Examples
    --------
    >>> XYZ = np.array([19.01, 20.00, 21.78])
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> surround = VIEWING_CONDITIONS_CIECAM02["Average"]
    >>> XYZ_to_CIECAM02(XYZ, XYZ_w, L_A, Y_b, surround)  # doctest: +ELLIPSIS
    CAM_Specification_CIECAM02(J=41.7310911..., C=0.1047077..., \
h=219.0484326..., s=2.3603053..., Q=195.3713259..., M=0.1088421..., \
H=278.0607358..., HC=None)
    """

    XYZ = to_domain_100(XYZ)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)
    L_A = as_float_array(L_A)
    Y_b = as_float_array(Y_b)

    n, F_L, N_bb, N_cb, z = viewing_conditions_dependent_parameters(Y_b, Y_w, L_A)

    # Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform
    # sharpened *RGB* values.
    RGB = vecmul(CAT_CAT02, XYZ)
    RGB_w = vecmul(CAT_CAT02, XYZ_w)

    # Computing degree of adaptation :math:`D`. When the illuminant is
    # discounted, adaptation is assumed to be complete, i.e., :math:`D = 1`.
    D = (
        degree_of_adaptation(surround.F, L_A)
        if not discount_illuminant
        else ones(L_A.shape)
    )

    # Computing full chromatic adaptation.
    RGB_c = full_chromatic_adaptation_forward(RGB, RGB_w, Y_w, D)
    RGB_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    RGB_p = RGB_to_rgb(RGB_c)
    RGB_pw = RGB_to_rgb(RGB_wc)

    # Applying forward post-adaptation non-linear response compression.
    RGB_a = post_adaptation_non_linear_response_compression_forward(RGB_p, F_L)
    RGB_aw = post_adaptation_non_linear_response_compression_forward(RGB_pw, F_L)

    # Converting to preliminary cartesian coordinates.
    a, b = tsplit(opponent_colour_dimensions_forward(RGB_a))

    # Computing the *hue* angle :math:`h`.
    h = hue_angle(a, b)

    # Computing hue :math:`h` quadrature :math:`H`, or filling with *nan*
    # when the caller opted out of the expensive computation.
    H = hue_quadrature(h) if compute_H else np.full(h.shape, np.nan)
    # TODO: Compute hue composition.

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic responses for the stimulus and the whitepoint.
    A = achromatic_response_forward(RGB_a, N_bb)
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Computing the correlate of *Lightness* :math:`J`.
    J = lightness_correlate(A, A_w, surround.c, z)

    # Computing the correlate of *brightness* :math:`Q`.
    Q = brightness_correlate(surround.c, J, A_w, F_L)

    # Computing the correlate of *chroma* :math:`C`.
    C = chroma_correlate(J, n, surround.N_c, N_cb, e_t, a, b, RGB_a)

    # Computing the correlate of *colourfulness* :math:`M`.
    M = colourfulness_correlate(C, F_L)

    # Computing the correlate of *saturation* :math:`s`.
    s = saturation_correlate(M, Q)

    # :math:`h` is expressed on a 0-360 scale and :math:`H` on a 0-400 scale.
    return CAM_Specification_CIECAM02(
        J=as_float(from_range_100(J)),
        C=as_float(from_range_100(C)),
        h=as_float(from_range_degrees(h)),
        s=as_float(from_range_100(s)),
        Q=as_float(from_range_100(Q)),
        M=as_float(from_range_100(M)),
        H=as_float(from_range_degrees(H, 400)),
        HC=None,
    )
def CIECAM02_to_XYZ(
    specification: Annotated[
        CAM_Specification_CIECAM02, (100, 100, 360, 100, 100, 100, 400)
    ],
    XYZ_w: Domain100,
    L_A: ArrayLike,
    Y_b: ArrayLike,
    surround: InductionFactors_CIECAM02 = VIEWING_CONDITIONS_CIECAM02["Average"],
    discount_illuminant: bool = False,
) -> Range100:
    """
    Convert the *CIECAM02* colour appearance model specification to *CIE XYZ*
    tristimulus values.

    Parameters
    ----------
    specification
        *CIECAM02* colour appearance model specification. Correlate of
        *Lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate of
        *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees must be
        specified, e.g., :math:`JCh` or :math:`JMh`.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often taken
        to be 20% of the luminance of a white object in the scene).
    Y_b
        Luminous factor of background :math:`Y_b` such as
        :math:`Y_b = 100 \\times L_b / L_w` where :math:`L_w` is the luminance
        of the light source and :math:`L_b` is the luminance of the background.
        For viewing images, :math:`Y_b` can be the average :math:`Y` value for
        the pixels in the entire image, or frequently, a :math:`Y` value of 20,
        approximating an :math:`L^*` of 50 is used.
    surround
        Surround viewing conditions.
    discount_illuminant
        Discount the illuminant.

    Returns
    -------
    :class:`numpy.ndarray`
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither :math:`C` nor :math:`M` correlates have been defined in the
        ``specification`` argument.

    Notes
    -----
    +---------------------+-----------------------+---------------+
    | **Domain**          | **Scale - Reference** | **Scale - 1** |
    +=====================+=======================+===============+
    | ``specification.J`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.C`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.h`` | 360                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.s`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.Q`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.M`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.H`` | 400                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``XYZ_w``           | 100                   | 1             |
    +---------------------+-----------------------+---------------+

    +---------------------+-----------------------+---------------+
    | **Range**           | **Scale - Reference** | **Scale - 1** |
    +=====================+=======================+===============+
    | ``XYZ``             | 100                   | 1             |
    +---------------------+-----------------------+---------------+

    References
    ----------
    :cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,
    :cite:`Wikipedia2007a`

    Examples
    --------
    >>> specification = CAM_Specification_CIECAM02(
    ...     J=41.731091132513917, C=0.104707757171031, h=219.048432658311780
    ... )
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> CIECAM02_to_XYZ(specification, XYZ_w, L_A, Y_b)  # doctest: +ELLIPSIS
    array([ 19.01...,  20...  ,  21.78...])
    """

    J, C, h, _s, _Q, M, _H, _HC = astuple(specification)

    J = to_domain_100(J)
    C = to_domain_100(C)
    h = to_domain_degrees(h)
    M = to_domain_100(M)
    L_A = as_float_array(L_A)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    n, F_L, N_bb, N_cb, z = viewing_conditions_dependent_parameters(Y_b, Y_w, L_A)

    # Deriving the correlate of *chroma* :math:`C` from the correlate of
    # *colourfulness* :math:`M` when only the latter is given.
    if has_only_nan(C) and not has_only_nan(M):
        C = M / spow(F_L, 0.25)
    elif has_only_nan(C):
        error = (
            'Either "C" or "M" correlate must be defined in '
            'the "CAM_Specification_CIECAM02" argument!'
        )

        raise ValueError(error)

    # Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform
    # sharpened *RGB* values.
    RGB_w = vecmul(CAT_CAT02, XYZ_w)

    # Computing degree of adaptation :math:`D`. When the illuminant is
    # discounted, adaptation is assumed to be complete, i.e., :math:`D = 1`.
    D = (
        degree_of_adaptation(surround.F, L_A)
        if not discount_illuminant
        else ones(L_A.shape)
    )

    # Computing full chromatic adaptation.
    RGB_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    RGB_pw = RGB_to_rgb(RGB_wc)

    # Applying post-adaptation non-linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(RGB_pw, F_L)

    # Computing achromatic response for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Computing temporary magnitude quantity :math:`t`.
    t = temporary_magnitude_quantity_inverse(C, J, n)

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic response :math:`A` for the stimulus.
    A = achromatic_response_inverse(A_w, J, surround.c, z)

    # Computing *P_1* to *P_3*.
    P_n = P(surround.N_c, N_cb, e_t, t, A, N_bb)
    _P_1, P_2, _P_3 = tsplit(P_n)

    # Computing opponent colour dimensions :math:`a` and :math:`b`,
    # forced to zero for achromatic stimuli, i.e., when :math:`t` is zero.
    ab = opponent_colour_dimensions_inverse(P_n, h)
    a, b = tsplit(ab) * np.where(t == 0, 0, 1)

    # Applying post-adaptation non-linear response compression matrix.
    RGB_a = matrix_post_adaptation_non_linear_response_compression(P_2, a, b)

    # Applying inverse post-adaptation non-linear response compression.
    RGB_p = post_adaptation_non_linear_response_compression_inverse(RGB_a, F_L)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    RGB_c = rgb_to_RGB(RGB_p)

    # Applying inverse full chromatic adaptation.
    RGB = full_chromatic_adaptation_inverse(RGB_c, RGB_w, Y_w, D)

    # Converting *CMCCAT2000* transform sharpened *RGB* values to *CIE XYZ*
    # tristimulus values.
    XYZ = vecmul(CAT_INVERSE_CAT02, RGB)

    return from_range_100(XYZ)
def chromatic_induction_factors(n: ArrayLike) -> NDArrayFloat:
    """
    Compute the chromatic induction factors :math:`N_{bb}` and
    :math:`N_{cb}`.

    Both factors share the same value, :math:`0.725 \\cdot (1 / n)^{0.2}`.

    Parameters
    ----------
    n
        Function of the luminance factor of the background :math:`n`.

    Returns
    -------
    :class:`numpy.ndarray`
        Chromatic induction factors :math:`N_{bb}` and :math:`N_{cb}`.

    Examples
    --------
    >>> chromatic_induction_factors(0.2)  # doctest: +ELLIPSIS
    array([ 1.000304,  1.000304])
    """

    # Safe division guards against a zero background induction factor.
    with sdiv_mode():
        factor = 0.725 * spow(sdiv(1, as_float_array(n)), 0.2)

    return tstack([factor, factor])
def base_exponential_non_linearity(
    n: ArrayLike,
) -> NDArrayFloat:
    """
    Compute the base exponential non-linearity :math:`z` from the luminance
    factor of the background :math:`n`.

    Parameters
    ----------
    n
        Function of the luminance factor of the background :math:`n`.

    Returns
    -------
    :class:`numpy.ndarray`
        Base exponential non-linearity :math:`z`.

    Examples
    --------
    >>> base_exponential_non_linearity(0.2)  # doctest: +ELLIPSIS
    1.9272135...
    """

    return 1.48 + np.sqrt(as_float_array(n))
def viewing_conditions_dependent_parameters(
    Y_b: ArrayLike,
    Y_w: ArrayLike,
    L_A: ArrayLike,
) -> Tuple[
    NDArrayFloat,
    NDArrayFloat,
    NDArrayFloat,
    NDArrayFloat,
    NDArrayFloat,
]:
    """
    Compute the viewing condition dependent parameters.

    Parameters
    ----------
    Y_b
        Adapting field *Y* tristimulus value :math:`Y_b`.
    Y_w
        Whitepoint *Y* tristimulus value :math:`Y_w`.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`.

    Returns
    -------
    :class:`tuple`
        Viewing condition dependent parameters :math:`(n, F_L, N_{bb},
        N_{cb}, z)` where :math:`n` is the background induction factor,
        :math:`F_L` is the luminance level adaptation factor, :math:`N_{bb}`
        and :math:`N_{cb}` are the chromatic induction factors, and :math:`z`
        is the base exponential non-linearity.

    Examples
    --------
    >>> viewing_conditions_dependent_parameters(20.0, 100.0, 318.31)
    ... # doctest: +ELLIPSIS
    (0.2000000..., 1.1675444..., 1.0003040..., 1.0003040..., 1.9272135...)
    """

    Y_b = as_float_array(Y_b)
    Y_w = as_float_array(Y_w)

    # Safe division guards against a zero whitepoint *Y* tristimulus value.
    with sdiv_mode():
        n = sdiv(Y_b, Y_w)

    F_L = luminance_level_adaptation_factor(L_A)
    N_bb, N_cb = tsplit(chromatic_induction_factors(n))
    z = base_exponential_non_linearity(n)

    return n, F_L, N_bb, N_cb, z
def degree_of_adaptation(F: ArrayLike, L_A: ArrayLike) -> NDArrayFloat:
    """
    Compute the degree of adaptation :math:`D` from the specified surround
    maximum degree of adaptation :math:`F` and adapting field *luminance*
    :math:`L_A` in :math:`cd/m^2`.

    Parameters
    ----------
    F
        Surround maximum degree of adaptation :math:`F`.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`.

    Returns
    -------
    :class:`numpy.ndarray`
        Degree of adaptation :math:`D`.

    Examples
    --------
    >>> degree_of_adaptation(1.0, 318.31)  # doctest: +ELLIPSIS
    0.9944687...
    """

    F = as_float_array(F)
    L_A = as_float_array(L_A)

    # The exponential term decays towards zero as the adapting luminance
    # increases, driving :math:`D` towards :math:`F`.
    exponential_decay = np.exp((-L_A - 42) / 92)

    return F * (1 - (1 / 3.6) * exponential_decay)
def full_chromatic_adaptation_forward(
    RGB: ArrayLike,
    RGB_w: ArrayLike,
    Y_w: ArrayLike,
    D: ArrayLike,
) -> NDArrayFloat:
    """
    Apply full chromatic adaptation to the specified *CMCCAT2000* transform
    sharpened *RGB* array using the specified *CMCCAT2000* transform sharpened
    whitepoint *RGB_w* array.

    Parameters
    ----------
    RGB
        *CMCCAT2000* transform sharpened *RGB* array.
    RGB_w
        *CMCCAT2000* transform sharpened whitepoint *RGB_w* array.
    Y_w
        Whitepoint *Y* tristimulus value :math:`Y_w`.
    D
        Degree of adaptation :math:`D`.

    Returns
    -------
    :class:`numpy.ndarray`
        Adapted *RGB* array.

    Examples
    --------
    >>> RGB = np.array([18.985456, 20.707422, 21.747482])
    >>> RGB_w = np.array([94.930528, 103.536988, 108.717742])
    >>> Y_w = 100.0
    >>> D = 0.994468780088
    >>> full_chromatic_adaptation_forward(RGB, RGB_w, Y_w, D)
    ... # doctest: +ELLIPSIS
    array([ 19.9937078...,  20.0039363...,  20.0132638...])
    """

    RGB = as_float_array(RGB)
    RGB_w = as_float_array(RGB_w)
    # Trailing axes are added so that scalar parameters broadcast against the
    # three *RGB* channels.
    Y_w_c = as_float_array(Y_w)[..., None]
    D_c = as_float_array(D)[..., None]

    # Safe division guards against zero whitepoint channels.
    with sdiv_mode():
        adaptation_gain = Y_w_c * sdiv(D_c, RGB_w) + 1 - D_c

    return cast("NDArrayFloat", adaptation_gain * RGB)
def full_chromatic_adaptation_inverse(
    RGB: ArrayLike,
    RGB_w: ArrayLike,
    Y_w: ArrayLike,
    D: ArrayLike,
) -> NDArrayFloat:
    """
    Revert full chromatic adaptation of the specified *CMCCAT2000* transform
    sharpened *RGB* array using the specified *CMCCAT2000* transform sharpened
    whitepoint :math:`RGB_w` array.

    Parameters
    ----------
    RGB
        *CMCCAT2000* transform sharpened *RGB* array.
    RGB_w
        *CMCCAT2000* transform sharpened whitepoint :math:`RGB_w` array.
    Y_w
        Whitepoint *Y* tristimulus value :math:`Y_w`.
    D
        Degree of adaptation :math:`D`.

    Returns
    -------
    :class:`numpy.ndarray`
        Adapted *RGB* array.

    Examples
    --------
    >>> RGB = np.array([19.99370783, 20.00393634, 20.01326387])
    >>> RGB_w = np.array([94.930528, 103.536988, 108.717742])
    >>> Y_w = 100.0
    >>> D = 0.994468780088
    >>> full_chromatic_adaptation_inverse(RGB, RGB_w, Y_w, D)
    array([ 18.985456,  20.707422,  21.747482])
    """

    RGB = as_float_array(RGB)
    RGB_w = as_float_array(RGB_w)
    # Trailing axes are added so that scalar parameters broadcast against the
    # three *RGB* channels.
    Y_w_c = as_float_array(Y_w)[..., None]
    D_c = as_float_array(D)[..., None]

    # Safe division guards against zero whitepoint channels; the forward
    # adaptation gain is divided out instead of multiplied in.
    with sdiv_mode():
        adaptation_gain = Y_w_c * sdiv(D_c, RGB_w) + 1 - D_c

    return cast("NDArrayFloat", RGB / adaptation_gain)
def RGB_to_rgb(RGB: ArrayLike) -> NDArrayFloat:
    """
    Convert the specified *RGB* array to *Hunt-Pointer-Estevez*
    :math:`\\rho\\gamma\\beta` colourspace.

    Parameters
    ----------
    RGB
        *RGB* array.

    Returns
    -------
    :class:`numpy.ndarray`
        *Hunt-Pointer-Estevez* :math:`\\rho\\gamma\\beta` colourspace array.

    Examples
    --------
    >>> RGB = np.array([19.99370783, 20.00393634, 20.01326387])
    >>> RGB_to_rgb(RGB)  # doctest: +ELLIPSIS
    array([ 19.9969397...,  20.0018612...,  20.0135053...])
    """

    # Undo the CAT02 sharpening, then project into Hunt-Pointer-Estevez
    # space, combined into a single matrix product.
    matrix_cat02_to_hpe = np.matmul(MATRIX_XYZ_TO_HPE, CAT_INVERSE_CAT02)

    return vecmul(matrix_cat02_to_hpe, RGB)
def rgb_to_RGB(rgb: ArrayLike) -> NDArrayFloat:
    """
    Convert from *Hunt-Pointer-Estevez* :math:`\\rho\\gamma\\beta`
    colourspace array to adapted *RGB* array.

    Parameters
    ----------
    rgb
        *Hunt-Pointer-Estevez* :math:`\\rho\\gamma\\beta` colourspace array.

    Returns
    -------
    :class:`numpy.ndarray`
        Adapted *RGB* array.

    Examples
    --------
    >>> rgb = np.array([19.99693975, 20.00186123, 20.01350530])
    >>> rgb_to_RGB(rgb)  # doctest: +ELLIPSIS
    array([ 19.9937078...,  20.0039363...,  20.0132638...])
    """

    # Leave Hunt-Pointer-Estevez space, then re-apply the CAT02 sharpening,
    # combined into a single matrix product.
    matrix_hpe_to_cat02 = np.matmul(CAT_CAT02, MATRIX_HPE_TO_XYZ)

    return vecmul(matrix_hpe_to_cat02, rgb)
def post_adaptation_non_linear_response_compression_forward(
    RGB: ArrayLike, F_L: ArrayLike
) -> NDArrayFloat:
    """
    Apply post-adaptation non-linear response compression to the specified
    *CMCCAT2000* transform sharpened *RGB* array.

    Parameters
    ----------
    RGB
        *CMCCAT2000* transform sharpened *RGB* array.
    F_L
        *Luminance* level adaptation factor :math:`F_L`.

    Returns
    -------
    :class:`numpy.ndarray`
        Compressed *CMCCAT2000* transform sharpened *RGB* array.

    Notes
    -----
    -   This definition implements negative values handling as per
        :cite:`Luo2013`.

    Examples
    --------
    >>> RGB = np.array([19.99693975, 20.00186123, 20.01350530])
    >>> F_L = 1.16754446415
    >>> post_adaptation_non_linear_response_compression_forward(RGB, F_L)
    ... # doctest: +ELLIPSIS
    array([ 7.9463202...,  7.9471152...,  7.9489959...])
    """

    RGB = as_float_array(RGB)
    F_L = as_float_array(F_L)

    # Compression operates on magnitudes; the sign is re-applied afterwards
    # so that negative channel values are handled per :cite:`Luo2013`.
    compressed = spow(F_L[..., None] * np.absolute(RGB) / 100, 0.42)

    return (400 * np.sign(RGB) * compressed) / (27.13 + compressed) + 0.1
def post_adaptation_non_linear_response_compression_inverse(
    RGB: ArrayLike, F_L: ArrayLike
) -> NDArrayFloat:
    """
    Remove post-adaptation non-linear response compression from the specified
    *CMCCAT2000* transform sharpened *RGB* array.

    Parameters
    ----------
    RGB
        *CMCCAT2000* transform sharpened *RGB* array.
    F_L
        *Luminance* level adaptation factor :math:`F_L`.

    Returns
    -------
    :class:`numpy.ndarray`
        Uncompressed *CMCCAT2000* transform sharpened *RGB* array.

    Examples
    --------
    >>> RGB = np.array([7.94632020, 7.94711528, 7.94899595])
    >>> F_L = 1.16754446415
    >>> post_adaptation_non_linear_response_compression_inverse(RGB, F_L)
    ... # doctest: +ELLIPSIS
    array([ 19.9969397...,  20.0018612...,  20.0135052...])
    """

    RGB = as_float_array(RGB)
    F_L = as_float_array(F_L)

    # Remove the +0.1 offset applied by the forward compression, then invert
    # the compression on the magnitude, re-applying the sign afterwards.
    shifted = RGB - 0.1
    magnitude = np.absolute(shifted)

    return (
        np.sign(shifted)
        * 100
        / F_L[..., None]
        * spow(
            (27.13 * magnitude) / (400 - magnitude),
            1 / 0.42,
        )
    )
def opponent_colour_dimensions_forward(RGB: ArrayLike) -> NDArrayFloat:
    """
    Compute opponent colour dimensions from compressed *CMCCAT2000* transform
    sharpened *RGB* array for forward *CIECAM02* implementation.

    Parameters
    ----------
    RGB
        Compressed *CMCCAT2000* transform sharpened *RGB* array.

    Returns
    -------
    :class:`numpy.ndarray`
        Opponent colour dimensions :math:`a` (red-green) and :math:`b`
        (yellow-blue).

    Examples
    --------
    >>> RGB = np.array([7.94632020, 7.94711528, 7.94899595])
    >>> opponent_colour_dimensions_forward(RGB)  # doctest: +ELLIPSIS
    array([-0.0006241..., -0.0005062...])
    """

    red, green, blue = tsplit(RGB)

    return tstack(
        [
            red - 12 * green / 11 + blue / 11,
            (red + green - 2 * blue) / 9,
        ]
    )
def opponent_colour_dimensions_inverse(P_n: ArrayLike, h: ArrayLike) -> NDArrayFloat:
    """
    Compute opponent colour dimensions from the specified points :math:`P_n`
    and hue :math:`h` in degrees for the inverse *CIECAM02* implementation.

    Parameters
    ----------
    P_n
        Points :math:`P_n`.
    h
        Hue :math:`h` in degrees.

    Returns
    -------
    :class:`numpy.ndarray`
        Opponent colour dimensions.

    Examples
    --------
    >>> P_n = np.array([30162.89081534, 24.23720547, 1.05000000])
    >>> h = -140.95156734
    >>> opponent_colour_dimensions_inverse(P_n, h)  # doctest: +ELLIPSIS
    array([-0.0006241..., -0.0005062...])
    """

    P_1, P_2, P_3 = tsplit(P_n)
    hr = np.radians(h)

    sin_hr = np.sin(hr)
    cos_hr = np.cos(hr)

    # Safe division guards against hues where sine or cosine is zero.
    with sdiv_mode():
        cos_hr_sin_hr = sdiv(cos_hr, sin_hr)
        sin_hr_cos_hr = sdiv(sin_hr, cos_hr)

        P_4 = sdiv(P_1, sin_hr)
        P_5 = sdiv(P_1, cos_hr)

    n = P_2 * (2 + P_3) * (460 / 1403)

    a = zeros(hr.shape)
    b = zeros(hr.shape)

    # NOTE: Two algebraically equivalent formulations are used so that the
    # divisions always involve the larger of |sin(hr)| and |cos(hr)|,
    # avoiding numerical instability when either approaches zero.
    abs_sin_hr_gt_cos_hr = np.abs(sin_hr) >= np.abs(cos_hr)
    abs_sin_hr_lt_cos_hr = np.abs(sin_hr) < np.abs(cos_hr)

    # |sin(hr)| dominant branch: solve for b first, then derive a.
    b = np.where(
        abs_sin_hr_gt_cos_hr,
        n
        / (
            P_4
            + (2 + P_3) * (220 / 1403) * cos_hr_sin_hr
            - (27 / 1403)
            + P_3 * (6300 / 1403)
        ),
        b,
    )

    a = np.where(
        abs_sin_hr_gt_cos_hr,
        b * cos_hr_sin_hr,
        a,
    )

    # |cos(hr)| dominant branch: solve for a first, then derive b.
    a = np.where(
        abs_sin_hr_lt_cos_hr,
        n
        / (
            P_5
            + (2 + P_3) * (220 / 1403)
            - ((27 / 1403) - P_3 * (6300 / 1403)) * sin_hr_cos_hr
        ),
        a,
    )

    b = np.where(
        abs_sin_hr_lt_cos_hr,
        a * sin_hr_cos_hr,
        b,
    )

    return tstack([a, b])
def hue_angle(a: ArrayLike, b: ArrayLike) -> NDArrayFloat:
    """
    Compute the *hue* angle :math:`h` in degrees from the specified opponent
    colour dimensions.

    Parameters
    ----------
    a
        Opponent colour dimension :math:`a`.
    b
        Opponent colour dimension :math:`b`.

    Returns
    -------
    :class:`numpy.ndarray`
        *Hue* angle :math:`h` in degrees.

    Examples
    --------
    >>> a = -0.000624112068243
    >>> b = -0.000506270106773
    >>> hue_angle(a, b)  # doctest: +ELLIPSIS
    219.0484326...
    """

    # The modulo maps the arctangent output from [-180, 180] to [0, 360).
    hue = (
        np.degrees(np.arctan2(as_float_array(b), as_float_array(a))) % 360
    )

    return as_float(hue)
def hue_quadrature(h: ArrayLike) -> NDArrayFloat:
    """
    Compute hue quadrature from the specified hue :math:`h` angle in degrees.

    Parameters
    ----------
    h
        Hue :math:`h` angle in degrees.

    Returns
    -------
    :class:`numpy.ndarray`
        Hue quadrature.

    Examples
    --------
    >>> hue_quadrature(219.0484326582719)  # doctest: +ELLIPSIS
    278.0607358...
    """

    # Copy so that the in-place *NaN* masking below cannot mutate the
    # caller's array: "as_float_array" may return a view of its input.
    h = as_float_array(h).copy()

    h_i = HUE_DATA_FOR_HUE_QUADRATURE["h_i"]
    e_i = HUE_DATA_FOR_HUE_QUADRATURE["e_i"]
    H_i = HUE_DATA_FOR_HUE_QUADRATURE["H_i"]

    # *np.searchsorted* returns an erroneous index if a *nan* is used as input.
    h[np.asarray(np.isnan(h))] = 0
    i = as_int_array(np.searchsorted(h_i, h, side="left") - 1)

    # Bounding unique hue data for linear interpolation within the segment.
    h_ii = h_i[i]
    e_ii = e_i[i]
    H_ii = H_i[i]
    h_ii1 = h_i[i + 1]
    e_ii1 = e_i[i + 1]

    H = H_ii + ((100 * (h - h_ii) / e_ii) / ((h - h_ii) / e_ii + (h_ii1 - h) / e_ii1))

    # Hues below the first tabulated unique hue (20.14 degrees) wrap around
    # the hue circle and use the dedicated wrap-around interpolation.
    H = np.where(
        h < 20.14,
        385.9 + (14.1 * h / 0.856) / (h / 0.856 + (20.14 - h) / 0.8),
        H,
    )
    # Hues at or past the last tabulated unique hue (237.53 degrees)
    # interpolate toward the wrap-around at 360 degrees.
    H = np.where(
        h >= 237.53,
        H_ii + ((85.9 * (h - h_ii) / e_ii) / ((h - h_ii) / e_ii + (360 - h) / 0.856)),
        H,
    )

    return as_float(H)
def eccentricity_factor(h: ArrayLike) -> NDArrayFloat:
    """
    Return the eccentricity factor :math:`e_t` for the given hue :math:`h`
    angle in degrees, as used by the forward *CIECAM02* implementation.

    Parameters
    ----------
    h
        Hue :math:`h` angle in degrees.

    Returns
    -------
    :class:`numpy.ndarray`
        Eccentricity factor :math:`e_t`.

    Examples
    --------
    >>> eccentricity_factor(-140.951567342)  # doctest: +ELLIPSIS
    1.1740054...
    """

    h = as_float_array(h)

    # e_t = 1/4 * (cos(h * pi / 180 + 2) + 3.8), the hue being converted
    # to radians before the offset is applied.
    return (np.cos(h * np.pi / 180 + 2) + 3.8) / 4
def achromatic_response_forward(RGB: ArrayLike, N_bb: ArrayLike) -> NDArrayFloat:
    """
    Return the achromatic response :math:`A` for the given compressed
    *CMCCAT2000* transform sharpened *RGB* array and chromatic induction
    factor :math:`N_{bb}`, as used by the forward *CIECAM02* implementation.

    Parameters
    ----------
    RGB
        Compressed *CMCCAT2000* transform sharpened *RGB* array.
    N_bb
        Chromatic induction factor :math:`N_{bb}`.

    Returns
    -------
    :class:`numpy.ndarray`
        Achromatic response :math:`A`.

    Examples
    --------
    >>> RGB = np.array([7.94632020, 7.94711528, 7.94899595])
    >>> N_bb = 1.000304004559381
    >>> achromatic_response_forward(RGB, N_bb)  # doctest: +ELLIPSIS
    23.9394809...
    """

    R, G, B = tsplit(RGB)

    # Weighted channel sum, offset by 0.305, scaled by the induction factor.
    weighted_sum = 2 * R + G + (1 / 20) * B - 0.305

    return weighted_sum * N_bb
def achromatic_response_inverse(
    A_w: ArrayLike,
    J: ArrayLike,
    c: ArrayLike,
    z: ArrayLike,
) -> NDArrayFloat:
    """
    Return the achromatic response :math:`A` recovered from the whitepoint
    achromatic response :math:`A_w`, *Lightness* correlate :math:`J`,
    surround exponential non-linearity :math:`c` and base exponential
    non-linearity :math:`z`, as used by the inverse *CIECAM02*
    implementation.

    Parameters
    ----------
    A_w
        Achromatic response :math:`A_w` for the whitepoint.
    J
        *Lightness* correlate :math:`J`.
    c
        Surround exponential non-linearity :math:`c`.
    z
        Base exponential non-linearity :math:`z`.

    Returns
    -------
    :class:`numpy.ndarray`
        Achromatic response :math:`A`.

    Examples
    --------
    >>> A_w = 46.1882087914
    >>> J = 41.73109113251392
    >>> c = 0.69
    >>> z = 1.927213595499958
    >>> achromatic_response_inverse(A_w, J, c, z)  # doctest: +ELLIPSIS
    23.9394809...
    """

    A_w = as_float_array(A_w)
    J = as_float_array(J)
    c = as_float_array(c)
    z = as_float_array(z)

    # Inverting the lightness correlate: A = A_w * (J / 100) ^ (1 / (c * z)).
    exponent = 1 / (c * z)

    return A_w * spow(J / 100, exponent)
def lightness_correlate(
    A: ArrayLike,
    A_w: ArrayLike,
    c: ArrayLike,
    z: ArrayLike,
) -> NDArrayFloat:
    """
    Return the *Lightness* correlate :math:`J`.

    Parameters
    ----------
    A
        Achromatic response :math:`A` for the stimulus.
    A_w
        Achromatic response :math:`A_w` for the whitepoint.
    c
        Surround exponential non-linearity :math:`c`.
    z
        Base exponential non-linearity :math:`z`.

    Returns
    -------
    :class:`numpy.ndarray`
        *Lightness* correlate :math:`J`.

    Examples
    --------
    >>> A = 23.9394809667
    >>> A_w = 46.1882087914
    >>> c = 0.69
    >>> z = 1.9272135955
    >>> lightness_correlate(A, A_w, c, z)  # doctest: +ELLIPSIS
    41.7310911...
    """

    A = as_float_array(A)
    A_w = as_float_array(A_w)
    c = as_float_array(c)
    z = as_float_array(z)

    # J = 100 * (A / A_w) ^ (c * z), with zero-safe division.
    with sdiv_mode():
        achromatic_ratio = sdiv(A, A_w)

    return 100 * spow(achromatic_ratio, c * z)
def brightness_correlate(
    c: ArrayLike,
    J: ArrayLike,
    A_w: ArrayLike,
    F_L: ArrayLike,
) -> NDArrayFloat:
    """
    Return the *brightness* correlate :math:`Q`.

    Parameters
    ----------
    c
        Surround exponential non-linearity :math:`c`.
    J
        *Lightness* correlate :math:`J`.
    A_w
        Achromatic response :math:`A_w` for the whitepoint.
    F_L
        *Luminance* level adaptation factor :math:`F_L`.

    Returns
    -------
    :class:`numpy.ndarray`
        *Brightness* correlate :math:`Q`.

    Examples
    --------
    >>> c = 0.69
    >>> J = 41.7310911325
    >>> A_w = 46.1882087914
    >>> F_L = 1.16754446415
    >>> brightness_correlate(c, J, A_w, F_L)  # doctest: +ELLIPSIS
    195.3713259...
    """

    c = as_float_array(c)
    J = as_float_array(J)
    A_w = as_float_array(A_w)
    F_L = as_float_array(F_L)

    # Q = (4 / c) * sqrt(J / 100) * (A_w + 4) * F_L ^ 0.25.
    surround_term = 4 / c
    lightness_term = np.sqrt(J / 100)

    return surround_term * lightness_term * (A_w + 4) * spow(F_L, 0.25)
def temporary_magnitude_quantity_forward(
    N_c: ArrayLike,
    N_cb: ArrayLike,
    e_t: ArrayLike,
    a: ArrayLike,
    b: ArrayLike,
    RGB_a: ArrayLike,
) -> NDArrayFloat:
    """
    Return the temporary magnitude quantity :math:`t` used by the forward
    *CIECAM02* implementation.

    Parameters
    ----------
    N_c
        Surround chromatic induction factor :math:`N_{c}`.
    N_cb
        Chromatic induction factor :math:`N_{cb}`.
    e_t
        Eccentricity factor :math:`e_t`.
    a
        Opponent colour dimension :math:`a`.
    b
        Opponent colour dimension :math:`b`.
    RGB_a
        Compressed stimulus *CMCCAT2000* transform sharpened *RGB* array.

    Returns
    -------
    :class:`numpy.ndarray`
        Temporary magnitude quantity :math:`t`.

    Examples
    --------
    >>> N_c = 1.0
    >>> N_cb = 1.00030400456
    >>> e_t = 1.174005472851914
    >>> a = -0.000624112068243
    >>> b = -0.000506270106773
    >>> RGB_a = np.array([7.94632020, 7.94711528, 7.94899595])
    >>> temporary_magnitude_quantity_forward(N_c, N_cb, e_t, a, b, RGB_a)
    ... # doctest: +ELLIPSIS
    0.1497462...
    """

    N_c = as_float_array(N_c)
    N_cb = as_float_array(N_cb)
    e_t = as_float_array(e_t)
    a = as_float_array(a)
    b = as_float_array(b)
    Ra, Ga, Ba = tsplit(RGB_a)

    with sdiv_mode():
        # t = (50000 / 13) * N_c * N_cb * e_t * |(a, b)| / (Ra + Ga + 21/20 Ba),
        # with the division made zero-safe.
        scale = (50000 / 13) * N_c * N_cb
        chroma_magnitude = e_t * spow(a**2 + b**2, 0.5)

        return scale * sdiv(chroma_magnitude, Ra + Ga + 21 * Ba / 20)
def temporary_magnitude_quantity_inverse(
    C: ArrayLike, J: ArrayLike, n: ArrayLike
) -> NDArrayFloat:
    """
    Return the temporary magnitude quantity :math:`t` used by the inverse
    *CIECAM02* implementation.

    Parameters
    ----------
    C
        *Chroma* correlate :math:`C`.
    J
        *Lightness* correlate :math:`J`.
    n
        Function of the luminance factor of the background :math:`n`.

    Returns
    -------
    :class:`numpy.ndarray`
        Temporary magnitude quantity :math:`t`.

    Examples
    --------
    >>> C = 68.8364136888275
    >>> J = 41.749268505999
    >>> n = 0.2
    >>> temporary_magnitude_quantity_inverse(C, J, n)  # doctest: +ELLIPSIS
    202.3873619...
    """

    C = as_float_array(C)
    # Lightness is floored at machine epsilon so the division below cannot
    # blow up for a zero lightness.
    J_clipped = np.maximum(J, EPSILON)
    n = as_float_array(n)

    denominator = np.sqrt(J_clipped / 100) * spow(1.64 - 0.29**n, 0.73)

    return spow(C / denominator, 1 / 0.9)
def chroma_correlate(
    J: ArrayLike,
    n: ArrayLike,
    N_c: ArrayLike,
    N_cb: ArrayLike,
    e_t: ArrayLike,
    a: ArrayLike,
    b: ArrayLike,
    RGB_a: ArrayLike,
) -> NDArrayFloat:
    """
    Return the *chroma* correlate :math:`C`.

    Parameters
    ----------
    J
        *Lightness* correlate :math:`J`.
    n
        Function of the luminance factor of the background :math:`n`.
    N_c
        Surround chromatic induction factor :math:`N_{c}`.
    N_cb
        Chromatic induction factor :math:`N_{cb}`.
    e_t
        Eccentricity factor :math:`e_t`.
    a
        Opponent colour dimension :math:`a`.
    b
        Opponent colour dimension :math:`b`.
    RGB_a
        Compressed stimulus *CMCCAT2000* transform sharpened *RGB*
        array.

    Returns
    -------
    :class:`numpy.ndarray`
        *Chroma* correlate :math:`C`.

    Examples
    --------
    >>> J = 41.7310911325
    >>> n = 0.2
    >>> N_c = 1.0
    >>> N_cb = 1.00030400456
    >>> e_t = 1.17400547285
    >>> a = -0.000624112068243
    >>> b = -0.000506270106773
    >>> RGB_a = np.array([7.94632020, 7.94711528, 7.94899595])
    >>> chroma_correlate(J, n, N_c, N_cb, e_t, a, b, RGB_a)
    ... # doctest: +ELLIPSIS
    0.1047077...
    """

    J = as_float_array(J)
    n = as_float_array(n)

    # C = t ^ 0.9 * sqrt(J / 100) * (1.64 - 0.29 ^ n) ^ 0.73.
    t = temporary_magnitude_quantity_forward(N_c, N_cb, e_t, a, b, RGB_a)

    magnitude_term = spow(t, 0.9)
    lightness_term = spow(J / 100, 0.5)
    background_term = spow(1.64 - 0.29**n, 0.73)

    return magnitude_term * lightness_term * background_term
def colourfulness_correlate(C: ArrayLike, F_L: ArrayLike) -> NDArrayFloat:
    """
    Return the *colourfulness* correlate :math:`M`.

    Parameters
    ----------
    C
        *Chroma* correlate :math:`C`.
    F_L
        *Luminance* level adaptation factor :math:`F_L`.

    Returns
    -------
    :class:`numpy.ndarray`
        *Colourfulness* correlate :math:`M`.

    Examples
    --------
    >>> C = 0.104707757171
    >>> F_L = 1.16754446415
    >>> colourfulness_correlate(C, F_L)  # doctest: +ELLIPSIS
    0.1088421...
    """

    C = as_float_array(C)
    F_L = as_float_array(F_L)

    # M = C * F_L ^ 0.25: chroma scaled by the adapting luminance.
    adaptation_term = spow(F_L, 0.25)

    return C * adaptation_term
def saturation_correlate(M: ArrayLike, Q: ArrayLike) -> NDArrayFloat:
    """
    Return the *saturation* correlate :math:`s`.

    Parameters
    ----------
    M
        *Colourfulness* correlate :math:`M`.
    Q
        *Brightness* correlate :math:`Q`.

    Returns
    -------
    :class:`numpy.ndarray`
        *Saturation* correlate :math:`s`.

    Examples
    --------
    >>> M = 0.108842175669
    >>> Q = 195.371325966
    >>> saturation_correlate(M, Q)  # doctest: +ELLIPSIS
    2.3603053...
    """

    M = as_float_array(M)
    Q = as_float_array(Q)

    # s = 100 * sqrt(M / Q), with zero-safe division.
    with sdiv_mode():
        colourfulness_ratio = sdiv(M, Q)

    return 100 * spow(colourfulness_ratio, 0.5)
def P(
    N_c: ArrayLike,
    N_cb: ArrayLike,
    e_t: ArrayLike,
    t: ArrayLike,
    A: ArrayLike,
    N_bb: ArrayLike,
) -> NDArrayFloat:
    """
    Return the points :math:`P_1`, :math:`P_2` and :math:`P_3`.

    Parameters
    ----------
    N_c
        Surround chromatic induction factor :math:`N_{c}`.
    N_cb
        Chromatic induction factor :math:`N_{cb}`.
    e_t
        Eccentricity factor :math:`e_t`.
    t
        Temporary magnitude quantity :math:`t`.
    A
        Achromatic response :math:`A` for the stimulus.
    N_bb
        Chromatic induction factor :math:`N_{bb}`.

    Returns
    -------
    :class:`numpy.ndarray`
        Points :math:`P`.

    Examples
    --------
    >>> N_c = 1.0
    >>> N_cb = 1.00030400456
    >>> e_t = 1.174005472851914
    >>> t = 0.149746202921
    >>> A = 23.9394809667
    >>> N_bb = 1.00030400456
    >>> P(N_c, N_cb, e_t, t, A, N_bb)  # doctest: +ELLIPSIS
    array([ 3.0162890...e+04, 2.4237205...e+01, 1.0500000...e+00])
    """

    N_c = as_float_array(N_c)
    N_cb = as_float_array(N_cb)
    e_t = as_float_array(e_t)
    t = as_float_array(t)
    A = as_float_array(A)
    N_bb = as_float_array(N_bb)

    # P_1 inverts the temporary magnitude quantity, zero-safely.
    with sdiv_mode():
        P_1 = sdiv((50000 / 13) * N_c * N_cb * e_t, t)

    # P_2 recovers the achromatic signal, P_3 is the constant 21 / 20.
    P_2 = A / N_bb + 0.305
    P_3 = ones(P_1.shape) * (21 / 20)

    return tstack([P_1, P_2, P_3])
def matrix_post_adaptation_non_linear_response_compression(
    P_2: ArrayLike, a: ArrayLike, b: ArrayLike
) -> NDArrayFloat:
    """
    Apply the post-adaptation non-linear response compression matrix to the
    given opponent colour components.

    Parameters
    ----------
    P_2
        Point :math:`P_2` representing the post-adaptation response value.
    a
        Opponent colour dimension :math:`a` component.
    b
        Opponent colour dimension :math:`b` component.

    Returns
    -------
    :class:`numpy.ndarray`
        Array of compressed points :math:`P` containing three values
        after non-linear response compression.

    Examples
    --------
    >>> P_2 = 24.2372054671
    >>> a = -0.000624112068243
    >>> b = -0.000506270106773
    >>> matrix_post_adaptation_non_linear_response_compression(P_2, a, b)
    ... # doctest: +ELLIPSIS
    array([ 7.9463202..., 7.9471152..., 7.9489959...])
    """

    P_2 = as_float_array(P_2)
    a = as_float_array(a)
    b = as_float_array(b)

    # Fixed compression matrix applied to (P_2, a, b), then divided by 1403.
    M = [
        [460, 451, 288],
        [460, -891, -261],
        [460, -220, -6300],
    ]

    return vecmul(M, tstack([P_2, a, b])) / 1403