conspire/math/special/mod.rs
#[cfg(test)]
mod test;

use super::{Scalar, Tensor};
use crate::{ABS_TOL, REL_TOL};
use std::f64::consts::E;

const NINE_FIFTHS: Scalar = 9.0 / 5.0;

/// Returns the inverse Langevin function.
///
/// ```math
/// x = \mathcal{L}^{-1}(y)
/// ```
/// The first few terms of the Maclaurin series are used when $`|y|\leq 3e^{-3}`$.
/// ```math
/// x \sim 3y + \frac{9}{5}y^3 + \mathrm{ord}(y^5)
/// ```
/// Otherwise, two iterations of Newton's method are used to improve upon an initial guess given by [inverse_langevin_approximate()].
/// The resulting maximum relative error is below $`1e^{-12}`$.
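///
/// A minimal usage sketch (assuming `Scalar` aliases `f64` and that this module is publicly reachable as `conspire::math::special`):
/// ```
/// use conspire::math::special::{inverse_langevin, langevin};
///
/// // Round-trip check: applying the Langevin function to the result
/// // should recover the input to within the documented accuracy.
/// let y = 0.6;
/// let x = inverse_langevin(y);
/// assert!((langevin(x) - y).abs() < 1e-9);
/// ```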
pub fn inverse_langevin(y: Scalar) -> Scalar {
    let y_abs = y.abs();
    if y_abs >= 1.0 {
        // The inverse Langevin function is undefined for |y| >= 1.
        panic!()
    } else if y_abs <= 3e-3 {
        // Maclaurin series for small arguments.
        3.0 * y + NINE_FIFTHS * y.powi(3)
    } else {
        // Refine the approximate inverse with two Newton iterations.
        let mut x = inverse_langevin_approximate(y_abs);
        for _ in 0..2 {
            x += (y_abs - langevin(x)) / langevin_derivative(x);
        }
        if y < 0.0 { -x } else { x }
    }
}

/// Returns an approximation of the inverse Langevin function.[^cite]
///
/// [^cite]: R. Jedynak, [Math. Mech. Solids **24**, 1992 (2019)](https://doi.org/10.1177/1081286518811395).
///
/// ```math
/// \mathcal{L}^{-1}(y) \approx \frac{2.14234 y^3 - 4.22785 y^2 + 3y}{(1 - y)(0.71716 y^3 - 0.41103 y^2 - 0.39165 y + 1)}
/// ```
/// This approximation has a maximum relative error of $`8.2e^{-4}`$.
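///
/// A minimal usage sketch (the `conspire::math::special` path and `Scalar = f64` are assumptions):
/// ```
/// use conspire::math::special::{inverse_langevin, inverse_langevin_approximate};
///
/// // The approximation agrees with the Newton-refined result to within
/// // the documented maximum relative error of roughly 8.2e-4.
/// let y = 0.5;
/// let approximate = inverse_langevin_approximate(y);
/// let refined = inverse_langevin(y);
/// assert!(((approximate - refined) / refined).abs() < 1e-3);
/// ```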
pub fn inverse_langevin_approximate(y: Scalar) -> Scalar {
    (2.14234 * y.powi(3) - 4.22785 * y.powi(2) + 3.0 * y)
        / (1.0 - y)
        / (0.71716 * y.powi(3) - 0.41103 * y.powi(2) - 0.39165 * y + 1.0)
}

/// Returns the Lambert W function (principal branch), defined for $`x \geq -1/e`$.
///
/// ```math
/// y = W_0(x)
/// ```
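///
/// A minimal usage sketch (the `conspire::math::special` path and `Scalar = f64` are assumptions):
/// ```
/// use conspire::math::special::lambert_w;
///
/// // The defining property of the Lambert W function is w * exp(w) = x;
/// // the tolerance here is loose since the crate-level tolerances are not restated.
/// let x = 1.0;
/// let w = lambert_w(x);
/// assert!((w * w.exp() - x).abs() < 1e-6);
/// ```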
pub fn lambert_w(x: Scalar) -> Scalar {
    if x == -1.0 / E {
        -1.0
    } else if x == 0.0 {
        0.0
    } else if x == E {
        1.0
    } else if x < -1.0 / E {
        // The principal branch is undefined for x < -1/e.
        panic!()
    } else {
        // Branch-dependent initial guess.
        let mut w = if x < 0.0 {
            (E * x * (1.0 + (1.0 + E * x).sqrt()).ln()) / (1.0 + E * x + (1.0 + E * x).sqrt())
        } else if x < E {
            x / E
        } else {
            x.ln() - x.ln().ln()
        };
        // Iterate until the residual w * exp(w) - x meets either tolerance,
        // using Newton's method applied to ln(w) + w = ln(x).
        let mut error = w * w.exp() - x;
        while error.abs() >= ABS_TOL && (error / x).abs() >= REL_TOL {
            w *= (1.0 + (x / w).ln()) / (1.0 + w);
            error = w * w.exp() - x;
        }
        w
    }
}

/// Returns the Langevin function.
///
/// ```math
/// \mathcal{L}(x) = \coth(x) - x^{-1}
/// ```
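///
/// A minimal usage sketch (the `conspire::math::special` path and `Scalar = f64` are assumptions):
/// ```
/// use conspire::math::special::langevin;
///
/// // The singularity at zero is removable: L(0) = 0 and L(x) ~ x/3 for small x.
/// assert_eq!(langevin(0.0), 0.0);
/// let x = 1e-3;
/// assert!((langevin(x) - x / 3.0).abs() < 1e-9);
/// ```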
pub fn langevin(x: Scalar) -> Scalar {
    if x == 0.0 {
        // Handle the removable singularity at zero.
        0.0
    } else {
        1.0 / x.tanh() - 1.0 / x
    }
}

/// Returns the derivative of the Langevin function.
///
/// ```math
/// \mathcal{L}'(x) = x^{-2} - \sinh^{-2}(x)
/// ```
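///
/// A minimal sketch checking the result against a central finite difference (the `conspire::math::special` path and `Scalar = f64` are assumptions):
/// ```
/// use conspire::math::special::{langevin, langevin_derivative};
///
/// let x = 0.5;
/// let h = 1e-5;
/// let finite_difference = (langevin(x + h) - langevin(x - h)) / (2.0 * h);
/// assert!((langevin_derivative(x) - finite_difference).abs() < 1e-8);
/// ```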
pub fn langevin_derivative(x: Scalar) -> Scalar {
    1.0 / x.powi(2) - 1.0 / x.sinh().powi(2)
}

/// Returns the Rosenbrock function.
///
/// ```math
/// f(\mathbf{x}) = \sum_{i=1}^{N-1} \left[\left(a - x_i\right)^2 + b\left(x_{i+1} - x_i^2\right)^2\right]
/// ```
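///
/// For example, with $`a = 1`$ and $`b > 0`$, every term vanishes at $`\mathbf{x} = (1,\dots,1)`$, which is therefore the global minimum:
/// ```math
/// f(1,\dots,1) = \sum_{i=1}^{N-1} \left[(1 - 1)^2 + b\left(1 - 1^2\right)^2\right] = 0
/// ```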
pub fn rosenbrock<T>(x: &T, a: Scalar, b: Scalar) -> Scalar
where
    T: Tensor<Item = Scalar>,
{
    x.iter()
        .zip(x.iter().skip(1))
        .map(|(x_i, x_ip1)| (a - x_i).powi(2) + b * (x_ip1 - x_i.powi(2)).powi(2))
        .sum()
}

/// Returns the derivative of the Rosenbrock function.
///
/// ```math
/// \frac{\partial f}{\partial x_i} = \begin{cases}
/// -2(a - x_i) - 4bx_i(x_{i+1} - x_i^2), & i = 1 \\
/// 2b(x_i - x_{i-1}^2) - 2(a - x_i) - 4bx_i(x_{i+1} - x_i^2), & 1 < i < N \\
/// 2b(x_i - x_{i-1}^2), & i = N
/// \end{cases}
/// ```
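///
/// As a consistency check, all three cases vanish at the minimum of the $`a = 1`$ case, since both $`a - x_i`$ and $`x_{i+1} - x_i^2`$ are zero there:
/// ```math
/// \left.\frac{\partial f}{\partial x_i}\right|_{\mathbf{x} = (1,\dots,1)} = 0
/// ```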
pub fn rosenbrock_derivative<T>(x: &T, a: Scalar, b: Scalar) -> T
where
    T: FromIterator<Scalar> + Tensor<Item = Scalar>,
{
    let n = x.iter().count();
    // The first entry (i = 1), the interior entries (1 < i < N), and the last
    // entry (i = N) are built from the corresponding cases and chained together.
    x.iter()
        .take(1)
        .zip(x.iter().skip(1).take(1))
        .map(|(x_i, x_ip1)| -2.0 * (a - x_i) - 4.0 * b * x_i * (x_ip1 - x_i.powi(2)))
        .chain(
            x.iter()
                .zip(x.iter().skip(1).zip(x.iter().skip(2)))
                .map(|(x_im1, (x_i, x_ip1))| {
                    2.0 * b * (x_i - x_im1.powi(2))
                        - 2.0 * (a - x_i)
                        - 4.0 * b * x_i * (x_ip1 - x_i.powi(2))
                })
                .chain(
                    x.iter()
                        .skip(n - 2)
                        .zip(x.iter().skip(n - 1))
                        .map(|(x_im1, x_i)| 2.0 * b * (x_i - x_im1.powi(2))),
                ),
        )
        .collect()
}

// /// Returns the second derivative of the Rosenbrock function.
// ///
// /// ```math
// /// \frac{\partial^2 f}{\partial x_i \partial x_j} = ?
// /// ```
// pub fn rosenbrock_second_derivative<T, U>(x: &T, a: Scalar, b: Scalar) -> U
// where
//     T: FromIterator<Scalar> + Tensor<Item = Scalar>,
//     U: Tensor<Item = T>,
// {
//     todo!()
// }