conspire/math/matrix/square/mod.rs

1#[cfg(test)]
2mod test;
3
4#[cfg(test)]
5use crate::math::test::ErrorTensor;
6
7use crate::{
8    ABS_TOL,
9    math::{
10        Hessian, Rank2, Scalar, Tensor, TensorRank2Vec2D, TensorVec, Vector, write_tensor_rank_0,
11    },
12};
13use std::{
14    collections::VecDeque,
15    fmt::{self, Display, Formatter},
16    iter::Sum,
17    ops::{Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Sub, SubAssign},
18    vec::IntoIter,
19};
20
/// Rearrangement structure for a banded matrix.
#[derive(Clone)] // temporary for dae integrators
pub struct Banded {
    // Bandwidth of the rearranged matrix: the largest offset `j - i` (in the
    // new ordering) at which a nonzero entry appears above the diagonal.
    bandwidth: usize,
    // Maps an old (original) index to its new (rearranged) position.
    inverse: Vec<usize>,
    // Maps a new (rearranged) index back to its old (original) position.
    mapping: Vec<usize>,
}
28
impl Banded {
    /// Returns the new (rearranged) index corresponding to an old index.
    pub fn map(&self, old: usize) -> usize {
        self.inverse[old]
    }
    /// Returns the old (original) index corresponding to a new index.
    pub fn old(&self, new: usize) -> usize {
        self.mapping[new]
    }
    /// Returns the bandwidth of the rearranged matrix.
    pub fn width(&self) -> usize {
        self.bandwidth
    }
}
40
impl From<Vec<Vec<bool>>> for Banded {
    /// Builds a bandwidth-reducing rearrangement from a boolean sparsity pattern.
    ///
    /// The ordering is a reverse-Cuthill-McKee-style breadth-first traversal:
    /// start at a minimum-degree vertex, enqueue unvisited neighbors in order
    /// of increasing degree, and reverse the finished ordering.
    fn from(structure: Vec<Vec<bool>>) -> Self {
        let num = structure.len();
        // The pattern must be square and symmetric.
        structure.iter().enumerate().for_each(|(i, row_i)| {
            assert_eq!(row_i.len(), num);
            row_i
                .iter()
                .zip(structure.iter())
                .for_each(|(entry_ij, row_j)| assert_eq!(&row_j[i], entry_ij))
        });
        // Adjacency list of the off-diagonal nonzero pattern.
        let mut adj_list = vec![Vec::new(); num];
        (0..num).for_each(|i| {
            (0..num)
                .filter(|&j| structure[i][j] && i != j)
                .for_each(|j| adj_list[i].push(j))
        });
        // Start the traversal from a vertex of minimum degree.
        let start_vertex = (0..num)
            .min_by_key(|&i| adj_list[i].len())
            .expect("Matrix must have at least one entry.");
        let mut visited = vec![false; num];
        let mut mapping = Vec::new();
        let mut queue = VecDeque::new();
        queue.push_back(start_vertex);
        visited[start_vertex] = true;
        // Breadth-first traversal, visiting lower-degree neighbors first.
        while let Some(vertex) = queue.pop_front() {
            mapping.push(vertex);
            let mut neighbors: Vec<usize> = adj_list[vertex]
                .iter()
                .filter(|&&neighbor| !visited[neighbor])
                .copied()
                .collect();
            neighbors.sort_by_key(|&neighbor| adj_list[neighbor].len());
            for neighbor in neighbors {
                visited[neighbor] = true;
                queue.push_back(neighbor);
            }
        }
        // Reversing the BFS order tends to further reduce the bandwidth.
        mapping.reverse();
        // NOTE(review): vertices unreachable from `start_vertex` are never
        // visited, so `mapping` would be shorter than `num` for a disconnected
        // pattern — presumably callers only pass connected patterns; confirm.
        let mut inverse = vec![0; num];
        mapping
            .iter()
            .enumerate()
            .for_each(|(new, &old)| inverse[old] = new);
        // Bandwidth of the rearranged pattern: the largest offset from the
        // diagonal that still contains a nonzero entry.
        let mut bandwidth = 0;
        (0..num).for_each(|i| {
            (i + 1..num)
                .filter(|&j| structure[mapping[i]][mapping[j]])
                .for_each(|j| bandwidth = bandwidth.max(j - i))
        });
        Self {
            bandwidth,
            mapping,
            inverse,
        }
    }
}
97
/// Possible errors for square matrices.
#[derive(Debug, PartialEq)]
pub enum SquareMatrixError {
    /// The matrix is singular: a pivot magnitude fell below `ABS_TOL`.
    Singular,
}
103
/// A square matrix.
///
/// Stored as a `Vec` of rows, each row being a [`Vector`].
#[derive(Clone, Debug, PartialEq)]
pub struct SquareMatrix(Vec<Vector>);
107
impl Default for SquareMatrix {
    /// Returns an empty (zero-by-zero) matrix.
    fn default() -> Self {
        Self::new()
    }
}
113
114impl SquareMatrix {
115    // /// Solve a system of linear equations using the LDL decomposition.
116    // pub fn solve_ldl(&mut self, b: &Vector) -> Result<Vector, SquareMatrixError> {
117    //     let n = self.len();
118    //     let mut p: Vec<usize> = (0..n).collect();
119    //     let mut d: Vec<Scalar> = vec![0.0; n];
120    //     let mut l: Vec<Vec<Scalar>> = vec![vec![0.0; n]; n];
121    //     // for i in 0..n {
122    //     //     for j in 0..n {
123    //     //         assert!((self[i][j] - self[j][i]).abs() < ABS_TOL || (self[i][j] / self[j][i] - 1.0).abs() < ABS_TOL)
124    //     //     }
125    //     // }
126    //     for i in 0..n {
127    //         let mut max_row = i;
128    //         let mut max_val = self[max_row][i].abs();
129    //         for k in i + 1..n {
130    //             if self[k][i].abs() > max_val {
131    //                 max_row = k;
132    //                 max_val = self[max_row][i].abs();
133    //             }
134    //         }
135    //         if max_row != i {
136    //             self.0.swap(i, max_row);
137    //             p.swap(i, max_row);
138    //         }
139    //         let mut sum = 0.0;
140    //         for k in 0..i {
141    //             sum += l[i][k] * d[k] * l[i][k];
142    //         }
143    //         let pivot = self[i][i] - sum;
144    //         if pivot.abs() < ABS_TOL {
145    //             return Err(SquareMatrixError::Singular);
146    //         }
147    //         d[i] = pivot;
148    //         l[i][i] = 1.0;
149    //         for j in i + 1..n {
150    //             sum = 0.0;
151    //             for k in 0..i {
152    //                 sum += l[j][k] * d[k] * l[i][k];
153    //             }
154    //             l[j][i] = (self[j][i] - sum) / d[i];
155    //         }
156    //     }
157    //     let mut y = Vector::zero(n);
158    //     for i in 0..n {
159    //         y[i] = b[p[i]];
160    //         for j in 0..i {
161    //             y[i] -= l[i][j] * y[j];
162    //         }
163    //     }
164    //     let mut x = Vector::zero(n);
165    //     for i in 0..n {
166    //         x[i] = y[i] / d[i];
167    //     }
168    //     for i in (0..n).rev() {
169    //         for j in i + 1..n {
170    //             x[i] -= l[j][i] * x[j];
171    //         }
172    //     }
173    //     // Ok(x)
174    //     let mut xs = Vector::zero(n);
175    //     for i in 0..n {
176    //         xs[p[i]] = x[i]
177    //     }
178    //     Ok(xs)
179    //     // let mut p_reverse = vec![0; n];
180    //     // for (i, &pi) in p.iter().enumerate() {
181    //     //     p_reverse[pi] = i;
182    //     // }
183    //     // let mut xs = Vector::zero(n);
184    //     // for i in 0..n {
185    //     //     // xs[i] = x[p_reverse[i]]
186    //     //     xs[p_reverse[i]] = x[i]
187    //     // }
188    //     // Ok(xs)
189    // }
190    /// Solve a system of linear equations using the LU decomposition.
191    pub fn solve_lu(&self, b: &Vector) -> Result<Vector, SquareMatrixError> {
192        let n = self.len();
193        let mut p: Vec<usize> = (0..n).collect();
194        let mut factor;
195        let mut lu = self.clone();
196        let mut max_row;
197        let mut max_val;
198        let mut pivot;
199        for i in 0..n {
200            max_row = i;
201            max_val = lu[max_row][i].abs();
202            for k in i + 1..n {
203                if lu[k][i].abs() > max_val {
204                    max_row = k;
205                    max_val = lu[max_row][i].abs();
206                }
207            }
208            if max_row != i {
209                lu.0.swap(i, max_row);
210                p.swap(i, max_row);
211            }
212            pivot = lu[i][i];
213            if pivot.abs() < ABS_TOL {
214                return Err(SquareMatrixError::Singular);
215            }
216            for j in i + 1..n {
217                if lu[j][i] != 0.0 {
218                    lu[j][i] /= pivot;
219                    factor = lu[j][i];
220                    for k in i + 1..n {
221                        lu[j][k] -= factor * lu[i][k];
222                    }
223                }
224            }
225        }
226        let mut x: Vector = p.into_iter().map(|p_i| b[p_i]).collect();
227        forward_substitution(&mut x, &lu);
228        backward_substitution(&mut x, &lu);
229        Ok(x)
230    }
231    /// Solve a system of linear equations rearranged in a banded structure using the LU decomposition.
232    pub fn solve_lu_banded(
233        &self,
234        b: &Vector,
235        banded: &Banded,
236    ) -> Result<Vector, SquareMatrixError> {
237        let bandwidth = banded.width();
238        let mut bandwidth_updated;
239        let n = self.len();
240        let mut p: Vec<usize> = (0..n).collect();
241        let mut end;
242        let mut factor;
243        let mut max_row;
244        let mut max_val;
245        let mut pivot;
246        let mut rearr: Self = (0..n)
247            .map(|i| (0..n).map(|j| self[banded.old(i)][banded.old(j)]).collect())
248            .collect();
249        for i in 0..n {
250            end = n.min(i + 1 + bandwidth);
251            pivot = rearr[i][i];
252            if pivot.abs() < ABS_TOL {
253                max_row = i;
254                max_val = rearr[max_row][i].abs();
255                for k in i + 1..end {
256                    if rearr[k][i].abs() > max_val {
257                        max_row = k;
258                        max_val = rearr[max_row][i].abs();
259                    }
260                }
261                if max_row != i {
262                    rearr.0.swap(i, max_row);
263                    p.swap(i, max_row);
264                    pivot = rearr[i][i];
265                    if pivot.abs() < ABS_TOL {
266                        return Err(SquareMatrixError::Singular);
267                    }
268                }
269            }
270            bandwidth_updated = bandwidth;
271            (i + 1 + bandwidth..n).for_each(|j| {
272                if rearr[i][j] != 0.0 {
273                    bandwidth_updated = bandwidth_updated.max(j - i)
274                }
275            });
276            end = n.min(i + 1 + bandwidth_updated);
277            for j in i + 1..end {
278                if rearr[j][i] != 0.0 {
279                    rearr[j][i] /= pivot;
280                    factor = rearr[j][i];
281                    for k in i + 1..end {
282                        rearr[j][k] -= factor * rearr[i][k];
283                    }
284                }
285            }
286        }
287        let mut x: Vector = p.into_iter().map(|p_i| b[banded.old(p_i)]).collect();
288        forward_substitution(&mut x, &rearr);
289        backward_substitution(&mut x, &rearr);
290        Ok((0..n).map(|i| x[banded.map(i)]).collect())
291    }
292    pub fn zero(len: usize) -> Self {
293        (0..len).map(|_| Vector::zero(len)).collect()
294    }
295}
296
297fn forward_substitution(x: &mut Vector, a: &SquareMatrix) {
298    a.iter().enumerate().for_each(|(i, a_i)| {
299        x[i] -= a_i
300            .iter()
301            .take(i)
302            .zip(x.iter().take(i))
303            .map(|(a_ij, x_j)| a_ij * x_j)
304            .sum::<Scalar>()
305    })
306}
307
308fn backward_substitution(x: &mut Vector, a: &SquareMatrix) {
309    a.0.iter().enumerate().rev().for_each(|(i, a_i)| {
310        x[i] -= a_i
311            .iter()
312            .skip(i + 1)
313            .zip(x.iter().skip(i + 1))
314            .map(|(a_ij, x_j)| a_ij * x_j)
315            .sum::<Scalar>();
316        x[i] /= a_i[i];
317    })
318}
319
#[cfg(test)]
impl ErrorTensor for SquareMatrix {
    /// Counts entries whose relative difference from `comparator` is at least
    /// `epsilon`, ignoring pairs where both magnitudes are below `epsilon`.
    /// Returns `None` when no entry differs.
    fn error_fd(&self, comparator: &Self, epsilon: Scalar) -> Option<(bool, usize)> {
        let mut error_count = 0;
        for (self_i, comparator_i) in self.iter().zip(comparator.iter()) {
            for (&self_ij, &comparator_ij) in self_i.iter().zip(comparator_i.iter()) {
                let differs = (self_ij / comparator_ij - 1.0).abs() >= epsilon;
                let significant = self_ij.abs() >= epsilon || comparator_ij.abs() >= epsilon;
                if differs && significant {
                    error_count += 1;
                }
            }
        }
        (error_count > 0).then_some((true, error_count))
    }
}
344
impl Display for SquareMatrix {
    /// Formats the matrix as nested `[[...], [...]]` rows, using ANSI cursor
    /// escape sequences to align each row under the opening bracket.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        // ESC[s saves the cursor position so later rows can align against it.
        write!(f, "\x1B[s")?;
        write!(f, "[[")?;
        self.iter().enumerate().try_for_each(|(i, row)| {
            row.iter()
                .try_for_each(|entry| write_tensor_rank_0(f, entry))?;
            if i + 1 < self.len() {
                // ESC[2D backs up two columns to close the row, then ESC[u
                // restores the saved position and ESC[{n}B moves down to
                // start the next row.
                writeln!(f, "\x1B[2D],")?;
                write!(f, "\x1B[u")?;
                write!(f, "\x1B[{}B [", i + 1)?;
            }
            Ok(())
        })?;
        write!(f, "\x1B[2D]]")
    }
}
362
363impl<const N: usize> From<[[Scalar; N]; N]> for SquareMatrix {
364    fn from(array: [[Scalar; N]; N]) -> Self {
365        array.into_iter().map(Vector::from).collect()
366    }
367}
368
369impl<const D: usize, const I: usize, const J: usize> From<TensorRank2Vec2D<D, I, J>>
370    for SquareMatrix
371{
372    fn from(tensor_rank_2_vec_2d: TensorRank2Vec2D<D, I, J>) -> Self {
373        let mut square_matrix = Self::zero(tensor_rank_2_vec_2d.len() * D);
374        tensor_rank_2_vec_2d
375            .iter()
376            .enumerate()
377            .for_each(|(a, entry_a)| {
378                entry_a.iter().enumerate().for_each(|(b, entry_ab)| {
379                    entry_ab.iter().enumerate().for_each(|(i, entry_ab_i)| {
380                        entry_ab_i.iter().enumerate().for_each(|(j, entry_ab_ij)| {
381                            square_matrix[D * a + i][D * b + j] = *entry_ab_ij
382                        })
383                    })
384                })
385            });
386        square_matrix
387    }
388}
389
impl FromIterator<Vector> for SquareMatrix {
    /// Collects rows into a matrix; row lengths are not checked to be square.
    fn from_iter<Ii: IntoIterator<Item = Vector>>(into_iterator: Ii) -> Self {
        Self(Vec::from_iter(into_iterator))
    }
}
395
impl Index<usize> for SquareMatrix {
    type Output = Vector;
    /// Returns a reference to the row at `index`.
    fn index(&self, index: usize) -> &Self::Output {
        &self.0[index]
    }
}
402
impl IndexMut<usize> for SquareMatrix {
    /// Returns a mutable reference to the row at `index`.
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.0[index]
    }
}
408
409impl Hessian for SquareMatrix {
410    fn fill_into(self, square_matrix: &mut SquareMatrix) {
411        self.into_iter()
412            .zip(square_matrix.iter_mut())
413            .for_each(|(self_i, square_matrix_i)| {
414                self_i
415                    .into_iter()
416                    .zip(square_matrix_i.iter_mut())
417                    .for_each(|(self_ij, square_matrix_ij)| *square_matrix_ij = self_ij)
418            });
419    }
420}
421
422impl Rank2 for SquareMatrix {
423    type Transpose = Self;
424    fn deviatoric(&self) -> Self {
425        let len = self.len();
426        let scale = -self.trace() / len as Scalar;
427        (0..len)
428            .map(|i| {
429                (0..len)
430                    .map(|j| ((i == j) as u8) as Scalar * scale)
431                    .collect()
432            })
433            .collect::<Self>()
434            + self
435    }
436    fn deviatoric_and_trace(&self) -> (Self, Scalar) {
437        let len = self.len();
438        let trace = self.trace();
439        let scale = -trace / len as Scalar;
440        (
441            (0..len)
442                .map(|i| {
443                    (0..len)
444                        .map(|j| ((i == j) as u8) as Scalar * scale)
445                        .collect()
446                })
447                .collect::<Self>()
448                + self,
449            trace,
450        )
451    }
452    fn is_diagonal(&self) -> bool {
453        self.iter()
454            .enumerate()
455            .map(|(i, self_i)| {
456                self_i
457                    .iter()
458                    .enumerate()
459                    .map(|(j, self_ij)| (self_ij == &0.0) as u8 * (i != j) as u8)
460                    .sum::<u8>()
461            })
462            .sum::<u8>()
463            == (self.len().pow(2) - self.len()) as u8
464    }
465    fn is_identity(&self) -> bool {
466        self.iter().enumerate().all(|(i, self_i)| {
467            self_i
468                .iter()
469                .enumerate()
470                .all(|(j, self_ij)| self_ij == &((i == j) as u8 as Scalar))
471        })
472    }
473    fn is_symmetric(&self) -> bool {
474        self.iter().enumerate().all(|(i, self_i)| {
475            self_i
476                .iter()
477                .zip(self.iter())
478                .all(|(self_ij, self_j)| self_ij == &self_j[i])
479        })
480    }
481    fn squared_trace(&self) -> Scalar {
482        self.iter()
483            .enumerate()
484            .map(|(i, self_i)| {
485                self_i
486                    .iter()
487                    .zip(self.iter())
488                    .map(|(self_ij, self_j)| self_ij * self_j[i])
489                    .sum::<Scalar>()
490            })
491            .sum()
492    }
493    fn trace(&self) -> Scalar {
494        self.iter().enumerate().map(|(i, self_i)| self_i[i]).sum()
495    }
496    fn transpose(&self) -> Self::Transpose {
497        (0..self.len())
498            .map(|i| (0..self.len()).map(|j| self[j][i]).collect())
499            .collect()
500    }
501}
502
impl Tensor for SquareMatrix {
    type Item = Vector;
    /// Iterates over the rows.
    fn iter(&self) -> impl Iterator<Item = &Self::Item> {
        self.0.iter()
    }
    /// Mutably iterates over the rows.
    fn iter_mut(&mut self) -> impl Iterator<Item = &mut Self::Item> {
        self.0.iter_mut()
    }
    /// Returns the number of rows.
    fn len(&self) -> usize {
        self.0.len()
    }
    /// Total entry count is deliberately unsupported: the inner row vectors
    /// are not guaranteed to share a common length.
    fn size(&self) -> usize {
        unimplemented!("Do not like that inner Vecs could be different sizes")
    }
}
518
impl IntoIterator for SquareMatrix {
    type Item = Vector;
    type IntoIter = IntoIter<Self::Item>;
    /// Consumes the matrix, yielding its rows.
    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}
526
impl TensorVec for SquareMatrix {
    type Item = Vector;
    /// Moves all rows of `other` onto the end of `self`, leaving `other` empty.
    fn append(&mut self, other: &mut Self) {
        self.0.append(&mut other.0)
    }
    /// Returns the allocated row capacity.
    fn capacity(&self) -> usize {
        self.0.capacity()
    }
    /// Returns whether the matrix has no rows.
    fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
    /// Returns an empty matrix.
    fn new() -> Self {
        Self(Vec::new())
    }
    /// Appends a row.
    fn push(&mut self, item: Self::Item) {
        self.0.push(item)
    }
    /// Removes and returns the row at `index`, shifting later rows up.
    fn remove(&mut self, index: usize) -> Self::Item {
        self.0.remove(index)
    }
    /// Keeps only the rows for which `f` returns `true`.
    fn retain<F>(&mut self, f: F)
    where
        F: FnMut(&Self::Item) -> bool,
    {
        self.0.retain(f)
    }
    /// Removes and returns the row at `index`, replacing it with the last row.
    fn swap_remove(&mut self, index: usize) -> Self::Item {
        self.0.swap_remove(index)
    }
}
557
558impl Sum for SquareMatrix {
559    fn sum<Ii>(iter: Ii) -> Self
560    where
561        Ii: Iterator<Item = Self>,
562    {
563        iter.reduce(|mut acc, item| {
564            acc += item;
565            acc
566        })
567        .unwrap_or_else(Self::default)
568    }
569}
570
571impl Div<Scalar> for SquareMatrix {
572    type Output = Self;
573    fn div(mut self, tensor_rank_0: Scalar) -> Self::Output {
574        self /= &tensor_rank_0;
575        self
576    }
577}
578
579impl Div<&Scalar> for SquareMatrix {
580    type Output = Self;
581    fn div(mut self, tensor_rank_0: &Scalar) -> Self::Output {
582        self /= tensor_rank_0;
583        self
584    }
585}
586
587impl DivAssign<Scalar> for SquareMatrix {
588    fn div_assign(&mut self, tensor_rank_0: Scalar) {
589        self.iter_mut().for_each(|entry| *entry /= &tensor_rank_0);
590    }
591}
592
593impl DivAssign<&Scalar> for SquareMatrix {
594    fn div_assign(&mut self, tensor_rank_0: &Scalar) {
595        self.iter_mut().for_each(|entry| *entry /= tensor_rank_0);
596    }
597}
598
599impl Mul<Scalar> for SquareMatrix {
600    type Output = Self;
601    fn mul(mut self, tensor_rank_0: Scalar) -> Self::Output {
602        self *= &tensor_rank_0;
603        self
604    }
605}
606
607impl Mul<&Scalar> for SquareMatrix {
608    type Output = Self;
609    fn mul(mut self, tensor_rank_0: &Scalar) -> Self::Output {
610        self *= tensor_rank_0;
611        self
612    }
613}
614
615impl Mul<&Scalar> for &SquareMatrix {
616    type Output = SquareMatrix;
617    fn mul(self, tensor_rank_0: &Scalar) -> Self::Output {
618        self.iter().map(|self_i| self_i * tensor_rank_0).collect()
619    }
620}
621
622impl MulAssign<Scalar> for SquareMatrix {
623    fn mul_assign(&mut self, tensor_rank_0: Scalar) {
624        self.iter_mut().for_each(|entry| *entry *= &tensor_rank_0);
625    }
626}
627
628impl MulAssign<&Scalar> for SquareMatrix {
629    fn mul_assign(&mut self, tensor_rank_0: &Scalar) {
630        self.iter_mut().for_each(|entry| *entry *= tensor_rank_0);
631    }
632}
633
634impl Mul<Vector> for SquareMatrix {
635    type Output = Vector;
636    fn mul(self, vector: Vector) -> Self::Output {
637        self.iter().map(|self_i| self_i * &vector).collect()
638    }
639}
640
641impl Mul<&Vector> for SquareMatrix {
642    type Output = Vector;
643    fn mul(self, vector: &Vector) -> Self::Output {
644        self.iter().map(|self_i| self_i * vector).collect()
645    }
646}
647
648impl Add for SquareMatrix {
649    type Output = Self;
650    fn add(mut self, vector: Self) -> Self::Output {
651        self += vector;
652        self
653    }
654}
655
656impl Add<&Self> for SquareMatrix {
657    type Output = Self;
658    fn add(mut self, vector: &Self) -> Self::Output {
659        self += vector;
660        self
661    }
662}
663
664impl AddAssign for SquareMatrix {
665    fn add_assign(&mut self, vector: Self) {
666        self.iter_mut()
667            .zip(vector.iter())
668            .for_each(|(self_entry, tensor_rank_1)| *self_entry += tensor_rank_1);
669    }
670}
671
672impl AddAssign<&Self> for SquareMatrix {
673    fn add_assign(&mut self, vector: &Self) {
674        self.iter_mut()
675            .zip(vector.iter())
676            .for_each(|(self_entry, tensor_rank_1)| *self_entry += tensor_rank_1);
677    }
678}
679
680impl Mul for SquareMatrix {
681    type Output = Self;
682    fn mul(self, matrix: Self) -> Self::Output {
683        let mut output = Self::zero(matrix.len());
684        self.iter()
685            .zip(output.iter_mut())
686            .for_each(|(self_i, output_i)| {
687                self_i
688                    .iter()
689                    .zip(matrix.iter())
690                    .for_each(|(self_ij, matrix_j)| *output_i += matrix_j * self_ij)
691            });
692        output
693    }
694}
695
696impl Sub for SquareMatrix {
697    type Output = Self;
698    fn sub(mut self, square_matrix: Self) -> Self::Output {
699        self -= square_matrix;
700        self
701    }
702}
703
704impl Sub<&Self> for SquareMatrix {
705    type Output = Self;
706    fn sub(mut self, square_matrix: &Self) -> Self::Output {
707        self -= square_matrix;
708        self
709    }
710}
711
712impl Sub for &SquareMatrix {
713    type Output = SquareMatrix;
714    fn sub(self, square_matrix: Self) -> Self::Output {
715        square_matrix
716            .iter()
717            .zip(self.iter())
718            .map(|(square_matrix_i, self_i)| self_i - square_matrix_i)
719            .collect()
720    }
721}
722
723impl SubAssign for SquareMatrix {
724    fn sub_assign(&mut self, square_matrix: Self) {
725        self.iter_mut()
726            .zip(square_matrix.iter())
727            .for_each(|(self_entry, tensor_rank_1)| *self_entry -= tensor_rank_1);
728    }
729}
730
731impl SubAssign<&Self> for SquareMatrix {
732    fn sub_assign(&mut self, square_matrix: &Self) {
733        self.iter_mut()
734            .zip(square_matrix.iter())
735            .for_each(|(self_entry, tensor_rank_1)| *self_entry -= tensor_rank_1);
736    }
737}