conspire/math/matrix/square/
mod.rs

#[cfg(test)]
mod test;

#[cfg(test)]
use crate::math::test::ErrorTensor;

use crate::{
    ABS_TOL,
    math::{
        Hessian, Rank2, Tensor, TensorRank0, TensorRank2Vec2D, TensorVec, Vector,
        tensor::TensorError, write_tensor_rank_0,
    },
};
use std::{
    collections::VecDeque,
    fmt::{self, Display, Formatter},
    ops::{Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Sub, SubAssign},
    vec::IntoIter,
};

/// Rearrangement structure for a banded matrix.
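///
/// Built from a symmetric boolean sparsity pattern by a breadth-first,
/// minimum-degree-first traversal (a reverse Cuthill-McKee style reordering),
/// which tends to reduce the bandwidth of the rearranged matrix.
///
/// A minimal usage sketch; the fence is `ignore` since the public re-export
/// path of `Banded` is an assumption here:
///
/// ```ignore
/// // A symmetric, tridiagonal 3x3 sparsity pattern.
/// let structure = vec![
///     vec![true, true, false],
///     vec![true, true, true],
///     vec![false, true, true],
/// ];
/// let banded = Banded::from(structure);
/// assert_eq!(banded.width(), 1);
/// // `map` takes old (original) indices to new (rearranged) ones;
/// // `old` is its inverse.
/// (0..3).for_each(|i| assert_eq!(banded.old(banded.map(i)), i));
/// ```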
pub struct Banded {
    bandwidth: usize,
    inverse: Vec<usize>,
    mapping: Vec<usize>,
}

impl Banded {
    /// Maps an old (original) index to its new (rearranged) index.
    pub fn map(&self, old: usize) -> usize {
        self.inverse[old]
    }
    /// Maps a new (rearranged) index back to its old (original) index.
    pub fn old(&self, new: usize) -> usize {
        self.mapping[new]
    }
    /// The bandwidth of the rearranged matrix.
    pub fn width(&self) -> usize {
        self.bandwidth
    }
}

impl From<Vec<Vec<bool>>> for Banded {
    fn from(structure: Vec<Vec<bool>>) -> Self {
        let num = structure.len();
        // The sparsity pattern must be square and symmetric.
        structure.iter().enumerate().for_each(|(i, row_i)| {
            assert_eq!(row_i.len(), num);
            row_i
                .iter()
                .zip(structure.iter())
                .for_each(|(entry_ij, row_j)| assert_eq!(&row_j[i], entry_ij))
        });
        // Adjacency list of the graph underlying the pattern.
        let mut adj_list = vec![Vec::new(); num];
        for i in 0..num {
            for j in 0..num {
                if structure[i][j] && i != j {
                    adj_list[i].push(j);
                }
            }
        }
        // Breadth-first traversal from a minimum-degree vertex, visiting
        // unvisited neighbors in order of increasing degree (Cuthill-McKee);
        // assumes the pattern's graph is connected.
        let start_vertex = (0..num)
            .min_by_key(|&i| adj_list[i].len())
            .expect("Matrix must have at least one entry.");
        let mut visited = vec![false; num];
        let mut mapping = Vec::new();
        let mut queue = VecDeque::new();
        queue.push_back(start_vertex);
        visited[start_vertex] = true;
        while let Some(vertex) = queue.pop_front() {
            mapping.push(vertex);
            let mut neighbors: Vec<usize> = adj_list[vertex]
                .iter()
                .filter(|&&neighbor| !visited[neighbor])
                .copied()
                .collect();
            neighbors.sort_by_key(|&neighbor| adj_list[neighbor].len());
            for neighbor in neighbors {
                visited[neighbor] = true;
                queue.push_back(neighbor);
            }
        }
        // Reversing the traversal order gives the reverse Cuthill-McKee
        // permutation.
        mapping.reverse();
        let mut inverse = vec![0; num];
        mapping
            .iter()
            .enumerate()
            .for_each(|(new, &old)| inverse[old] = new);
        // Bandwidth of the rearranged pattern.
        let mut bandwidth = 0;
        (0..num).for_each(|i| {
            (i + 1..num).for_each(|j| {
                if structure[mapping[i]][mapping[j]] {
                    bandwidth = bandwidth.max(j - i)
                }
            })
        });
        Self {
            bandwidth,
            mapping,
            inverse,
        }
    }
}

/// Possible errors for square matrices.
#[derive(Debug, PartialEq)]
pub enum SquareMatrixError {
    Singular,
}

/// A square matrix.
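///
/// Stored as a `Vec` of row [`Vector`]s, so `self[i][j]` is the entry in row
/// `i` and column `j`. A minimal construction sketch; the fence is `ignore`
/// since the public re-export path is an assumption here:
///
/// ```ignore
/// let rows: [&[f64]; 2] = [&[4.0, 2.0], &[2.0, 3.0]];
/// let a = SquareMatrix::new(&rows);
/// assert_eq!(a.len(), 2);
/// assert_eq!(a[0][1], 2.0);
/// ```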
#[derive(Clone, Debug, PartialEq)]
pub struct SquareMatrix(Vec<Vector>);

impl SquareMatrix {
    // /// Solve a system of linear equations using the LDL decomposition.
    // pub fn solve_ldl(&mut self, b: &Vector) -> Result<Vector, SquareMatrixError> {
    //     let n = self.len();
    //     let mut p: Vec<usize> = (0..n).collect();
    //     let mut d: Vec<TensorRank0> = vec![0.0; n];
    //     let mut l: Vec<Vec<TensorRank0>> = vec![vec![0.0; n]; n];
    //     for i in 0..n {
    //         let mut max_row = i;
    //         let mut max_val = self[max_row][i].abs();
    //         for k in i + 1..n {
    //             if self[k][i].abs() > max_val {
    //                 max_row = k;
    //                 max_val = self[max_row][i].abs();
    //             }
    //         }
    //         if max_row != i {
    //             self.0.swap(i, max_row);
    //             p.swap(i, max_row);
    //         }
    //         let mut sum = 0.0;
    //         for k in 0..i {
    //             sum += l[i][k] * d[k] * l[i][k];
    //         }
    //         let pivot = self[i][i] - sum;
    //         if pivot.abs() < ABS_TOL {
    //             return Err(SquareMatrixError::Singular);
    //         }
    //         d[i] = pivot;
    //         l[i][i] = 1.0;
    //         for j in i + 1..n {
    //             sum = 0.0;
    //             for k in 0..i {
    //                 sum += l[j][k] * d[k] * l[i][k];
    //             }
    //             l[j][i] = (self[j][i] - sum) / d[i];
    //         }
    //     }
    //     let mut y = Vector::zero(n);
    //     for i in 0..n {
    //         y[i] = b[p[i]];
    //         for j in 0..i {
    //             y[i] -= l[i][j] * y[j];
    //         }
    //     }
    //     let mut x = Vector::zero(n);
    //     for i in 0..n {
    //         x[i] = y[i] / d[i];
    //     }
    //     for i in (0..n).rev() {
    //         for j in i + 1..n {
    //             x[i] -= l[j][i] * x[j];
    //         }
    //     }
    //     let mut xs = Vector::zero(n);
    //     for i in 0..n {
    //         xs[p[i]] = x[i]
    //     }
    //     Ok(xs)
    // }
    /// Solve a system of linear equations using the LU decomposition.
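    ///
    /// Uses partial (row) pivoting; returns [`SquareMatrixError::Singular`]
    /// if a pivot falls below `ABS_TOL`. A minimal sketch; the fence is
    /// `ignore` since the public re-export paths are assumptions here:
    ///
    /// ```ignore
    /// let rows: [&[f64]; 2] = [&[2.0, 1.0], &[1.0, 3.0]];
    /// let a = SquareMatrix::new(&rows);
    /// let b = Vector::new(&[3.0, 4.0]);
    /// let x = a.solve_lu(&b).unwrap();
    /// // [[2, 1], [1, 3]] x = [3, 4] has the solution x = [1, 1].
    /// ```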
    pub fn solve_lu(&self, b: &Vector) -> Result<Vector, SquareMatrixError> {
        let n = self.len();
        let mut p: Vec<usize> = (0..n).collect();
        let mut factor;
        let mut lu = self.clone();
        let mut max_row;
        let mut max_val;
        let mut pivot;
        for i in 0..n {
            // Partial pivoting: swap in the row with the largest entry in
            // column i.
            max_row = i;
            max_val = lu[max_row][i].abs();
            for k in i + 1..n {
                if lu[k][i].abs() > max_val {
                    max_row = k;
                    max_val = lu[max_row][i].abs();
                }
            }
            if max_row != i {
                lu.0.swap(i, max_row);
                p.swap(i, max_row);
            }
            pivot = lu[i][i];
            if pivot.abs() < ABS_TOL {
                return Err(SquareMatrixError::Singular);
            }
            // Eliminate below the pivot, storing the multipliers in place
            // (packed LU: the unit lower factor below the diagonal, the
            // upper factor on and above it).
            for j in i + 1..n {
                if lu[j][i] != 0.0 {
                    lu[j][i] /= pivot;
                    factor = lu[j][i];
                    for k in i + 1..n {
                        lu[j][k] -= factor * lu[i][k];
                    }
                }
            }
        }
        // Apply the row permutation to b, then solve the two triangular
        // systems.
        let mut x: Vector = p.into_iter().map(|p_i| b[p_i]).collect();
        forward_substitution(&mut x, &lu);
        backward_substitution(&mut x, &lu);
        Ok(x)
    }
    /// Solve a system of linear equations using the LU decomposition, after
    /// rearranging it into the banded structure described by `banded`.
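    ///
    /// A minimal sketch on a tridiagonal system; the fence is `ignore` since
    /// the public re-export paths are assumptions here:
    ///
    /// ```ignore
    /// let structure = vec![
    ///     vec![true, true, false],
    ///     vec![true, true, true],
    ///     vec![false, true, true],
    /// ];
    /// let banded = Banded::from(structure);
    /// let rows: [&[f64]; 3] = [&[2.0, 1.0, 0.0], &[1.0, 2.0, 1.0], &[0.0, 1.0, 2.0]];
    /// let a = SquareMatrix::new(&rows);
    /// let b = Vector::new(&[1.0, 2.0, 1.0]);
    /// // The solution comes back in the original (un-rearranged) ordering;
    /// // here it is approximately [0.0, 1.0, 0.0].
    /// let x = a.solve_lu_banded(&b, &banded).unwrap();
    /// ```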
    pub fn solve_lu_banded(
        &self,
        b: &Vector,
        banded: &Banded,
    ) -> Result<Vector, SquareMatrixError> {
        let bandwidth = banded.width();
        let mut bandwidth_updated;
        let n = self.len();
        let mut p: Vec<usize> = (0..n).collect();
        let mut end;
        let mut factor;
        let mut max_row;
        let mut max_val;
        let mut pivot;
        // Rearrange the matrix using the banded mapping.
        let mut rearr: Self = (0..n)
            .map(|i| (0..n).map(|j| self[banded.old(i)][banded.old(j)]).collect())
            .collect();
        for i in 0..n {
            end = n.min(i + 1 + bandwidth);
            pivot = rearr[i][i];
            if pivot.abs() < ABS_TOL {
                // Pivot within the band only, to limit fill-in; fail as
                // singular if no acceptable pivot is found there.
                max_row = i;
                max_val = rearr[max_row][i].abs();
                for k in i + 1..end {
                    if rearr[k][i].abs() > max_val {
                        max_row = k;
                        max_val = rearr[max_row][i].abs();
                    }
                }
                if max_row != i {
                    rearr.0.swap(i, max_row);
                    p.swap(i, max_row);
                    pivot = rearr[i][i];
                }
                if pivot.abs() < ABS_TOL {
                    return Err(SquareMatrixError::Singular);
                }
            }
            // Row swaps can push entries beyond the nominal bandwidth, so
            // widen the elimination window if needed.
            bandwidth_updated = bandwidth;
            (i + 1 + bandwidth..n).for_each(|j| {
                if rearr[i][j] != 0.0 {
                    bandwidth_updated = bandwidth_updated.max(j - i)
                }
            });
            end = n.min(i + 1 + bandwidth_updated);
            for j in i + 1..end {
                if rearr[j][i] != 0.0 {
                    rearr[j][i] /= pivot;
                    factor = rearr[j][i];
                    for k in i + 1..end {
                        rearr[j][k] -= factor * rearr[i][k];
                    }
                }
            }
        }
        // Permute b into the rearranged ordering, solve, and map the
        // solution back to the original ordering.
        let mut x: Vector = p.into_iter().map(|p_i| b[banded.old(p_i)]).collect();
        forward_substitution(&mut x, &rearr);
        backward_substitution(&mut x, &rearr);
        Ok((0..n).map(|i| x[banded.map(i)]).collect())
    }
}

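// In-place triangular solves against the packed LU factors produced above:
// forward substitution uses the lower factor (the stored multipliers, with
// an implicit unit diagonal), and backward substitution the upper factor,
// i.e.
//
//     y_i = b_i - sum_{j < i} L_ij y_j
//     x_i = (y_i - sum_{j > i} U_ij x_j) / U_ii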
fn forward_substitution(x: &mut Vector, a: &SquareMatrix) {
    a.iter().enumerate().for_each(|(i, a_i)| {
        x[i] -= a_i
            .iter()
            .take(i)
            .zip(x.iter().take(i))
            .map(|(a_ij, x_j)| a_ij * x_j)
            .sum::<TensorRank0>()
    })
}

fn backward_substitution(x: &mut Vector, a: &SquareMatrix) {
    a.0.iter().enumerate().rev().for_each(|(i, a_i)| {
        x[i] -= a_i
            .iter()
            .skip(i + 1)
            .zip(x.iter().skip(i + 1))
            .map(|(a_ij, x_j)| a_ij * x_j)
            .sum::<TensorRank0>();
        x[i] /= a_i[i];
    })
}

#[cfg(test)]
impl ErrorTensor for SquareMatrix {
    fn error(
        &self,
        comparator: &Self,
        tol_abs: &TensorRank0,
        tol_rel: &TensorRank0,
    ) -> Option<usize> {
        let error_count = self
            .iter()
            .zip(comparator.iter())
            .map(|(self_i, comparator_i)| {
                self_i
                    .iter()
                    .zip(comparator_i.iter())
                    .filter(|&(&self_ij, &comparator_ij)| {
                        &(self_ij - comparator_ij).abs() >= tol_abs
                            && &(self_ij / comparator_ij - 1.0).abs() >= tol_rel
                    })
                    .count()
            })
            .sum();
        if error_count > 0 {
            Some(error_count)
        } else {
            None
        }
    }
    fn error_fd(&self, comparator: &Self, epsilon: &TensorRank0) -> Option<(bool, usize)> {
        let error_count = self
            .iter()
            .zip(comparator.iter())
            .map(|(self_i, comparator_i)| {
                self_i
                    .iter()
                    .zip(comparator_i.iter())
                    .filter(|&(&self_ij, &comparator_ij)| {
                        &(self_ij / comparator_ij - 1.0).abs() >= epsilon
                            && (&self_ij.abs() >= epsilon || &comparator_ij.abs() >= epsilon)
                    })
                    .count()
            })
            .sum();
        if error_count > 0 {
            Some((true, error_count))
        } else {
            None
        }
    }
}

impl Display for SquareMatrix {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        // Lays out rows using ANSI escape codes: save/restore the cursor
        // position and move it to write each row on its own line.
        write!(f, "\x1B[s")?;
        write!(f, "[[")?;
        self.iter().enumerate().try_for_each(|(i, row)| {
            row.iter()
                .try_for_each(|entry| write_tensor_rank_0(f, entry))?;
            if i + 1 < self.len() {
                writeln!(f, "\x1B[2D],")?;
                write!(f, "\x1B[u")?;
                write!(f, "\x1B[{}B [", i + 1)?;
            }
            Ok(())
        })?;
        write!(f, "\x1B[2D]]")
    }
}

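// Flattens a 2D grid of D-by-D blocks into a single (len * D)-square matrix:
// block (a, b), component (i, j), lands at row D * a + i, column D * b + j.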
impl<const D: usize, const I: usize, const J: usize> From<TensorRank2Vec2D<D, I, J>>
    for SquareMatrix
{
    fn from(tensor_rank_2_vec_2d: TensorRank2Vec2D<D, I, J>) -> Self {
        let mut square_matrix = Self::zero(tensor_rank_2_vec_2d.len() * D);
        tensor_rank_2_vec_2d
            .iter()
            .enumerate()
            .for_each(|(a, entry_a)| {
                entry_a.iter().enumerate().for_each(|(b, entry_ab)| {
                    entry_ab.iter().enumerate().for_each(|(i, entry_ab_i)| {
                        entry_ab_i.iter().enumerate().for_each(|(j, entry_ab_ij)| {
                            square_matrix[D * a + i][D * b + j] = *entry_ab_ij
                        })
                    })
                })
            });
        square_matrix
    }
}

impl FromIterator<Vector> for SquareMatrix {
    fn from_iter<Ii: IntoIterator<Item = Vector>>(into_iterator: Ii) -> Self {
        Self(Vec::from_iter(into_iterator))
    }
}

impl Index<usize> for SquareMatrix {
    type Output = Vector;
    fn index(&self, index: usize) -> &Self::Output {
        &self.0[index]
    }
}

impl IndexMut<usize> for SquareMatrix {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.0[index]
    }
}

impl Hessian for SquareMatrix {
    fn fill_into(self, square_matrix: &mut SquareMatrix) {
        self.into_iter()
            .zip(square_matrix.iter_mut())
            .for_each(|(self_i, square_matrix_i)| {
                self_i
                    .into_iter()
                    .zip(square_matrix_i.iter_mut())
                    .for_each(|(self_ij, square_matrix_ij)| *square_matrix_ij = self_ij)
            });
    }
    fn is_positive_definite(&self) -> bool {
        // Positive-definite if and only if the Cholesky decomposition succeeds.
        self.cholesky_decomposition().is_ok()
    }
}

impl Rank2 for SquareMatrix {
    type Transpose = Self;
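    /// The Cholesky decomposition of a symmetric positive-definite matrix:
    /// the lower-triangular `L` with `A = L * L^T`, computed row by row.
    /// A minimal sketch; the fence is `ignore` since the public re-export
    /// path is an assumption here:
    ///
    /// ```ignore
    /// let rows: [&[f64]; 2] = [&[4.0, 2.0], &[2.0, 3.0]];
    /// let a = SquareMatrix::new(&rows);
    /// let l = a.cholesky_decomposition().unwrap();
    /// // L = [[2, 0], [1, sqrt(2)]], so that A = L * L^T.
    /// ```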
    fn cholesky_decomposition(&self) -> Result<SquareMatrix, TensorError> {
        let mut check = 0.0;
        let mut tensor_l = SquareMatrix::zero(self.len());
        self.iter().enumerate().try_for_each(|(j, self_j)| {
            // Diagonal entry: the argument of the square root must be
            // nonnegative for a positive-definite matrix.
            check = self_j[j]
                - tensor_l[j]
                    .iter()
                    .take(j)
                    .map(|tensor_l_jk| tensor_l_jk.powi(2))
                    .sum::<TensorRank0>();
            if check < 0.0 {
                Err(TensorError::NotPositiveDefinite)
            } else {
                tensor_l[j][j] = check.sqrt();
                // Entries below the diagonal in column j.
                self.iter().enumerate().skip(j + 1).for_each(|(i, self_i)| {
                    check = tensor_l[i]
                        .iter()
                        .zip(tensor_l[j].iter())
                        .take(j)
                        .map(|(tensor_l_ik, tensor_l_jk)| tensor_l_ik * tensor_l_jk)
                        .sum();
                    tensor_l[i][j] = (self_i[j] - check) / tensor_l[j][j];
                });
                Ok(())
            }
        })?;
        Ok(tensor_l)
    }
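    /// The deviatoric (traceless) part: dev(A) = A - (tr(A) / n) I.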
    fn deviatoric(&self) -> Self {
        let len = self.len();
        let scale = -self.trace() / len as TensorRank0;
        (0..len)
            .map(|i| {
                (0..len)
                    .map(|j| ((i == j) as u8) as TensorRank0 * scale)
                    .collect()
            })
            .collect::<Self>()
            + self
    }
    fn deviatoric_and_trace(&self) -> (Self, TensorRank0) {
        let len = self.len();
        let trace = self.trace();
        let scale = -trace / len as TensorRank0;
        (
            (0..len)
                .map(|i| {
                    (0..len)
                        .map(|j| ((i == j) as u8) as TensorRank0 * scale)
                        .collect()
                })
                .collect::<Self>()
                + self,
            trace,
        )
    }
    fn is_diagonal(&self) -> bool {
        // Count the zero off-diagonal entries, accumulating in usize to
        // avoid overflowing a narrow integer for larger matrices.
        self.iter()
            .enumerate()
            .map(|(i, self_i)| {
                self_i
                    .iter()
                    .enumerate()
                    .map(|(j, self_ij)| ((self_ij == &0.0) as usize) * ((i != j) as usize))
                    .sum::<usize>()
            })
            .sum::<usize>()
            == self.len().pow(2) - self.len()
    }
    fn is_identity(&self) -> bool {
        self.iter()
            .enumerate()
            .map(|(i, self_i)| {
                self_i
                    .iter()
                    .enumerate()
                    .map(|(j, self_ij)| (self_ij == &((i == j) as u8 as f64)) as usize)
                    .sum::<usize>()
            })
            .sum::<usize>()
            == self.len().pow(2)
    }
    fn squared_trace(&self) -> TensorRank0 {
        self.iter()
            .enumerate()
            .map(|(i, self_i)| {
                self_i
                    .iter()
                    .zip(self.iter())
                    .map(|(self_ij, self_j)| self_ij * self_j[i])
                    .sum::<TensorRank0>()
            })
            .sum()
    }
    fn trace(&self) -> TensorRank0 {
        self.iter().enumerate().map(|(i, self_i)| self_i[i]).sum()
    }
    fn transpose(&self) -> Self::Transpose {
        (0..self.len())
            .map(|i| (0..self.len()).map(|j| self[j][i]).collect())
            .collect()
    }
}

impl Tensor for SquareMatrix {
    type Item = Vector;
    fn iter(&self) -> impl Iterator<Item = &Self::Item> {
        self.0.iter()
    }
    fn iter_mut(&mut self) -> impl Iterator<Item = &mut Self::Item> {
        self.0.iter_mut()
    }
}

impl IntoIterator for SquareMatrix {
    type Item = Vector;
    type IntoIter = IntoIter<Self::Item>;
    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}

impl TensorVec for SquareMatrix {
    type Item = Vector;
    type Slice<'a> = &'a [&'a [TensorRank0]];
    fn append(&mut self, other: &mut Self) {
        self.0.append(&mut other.0)
    }
    fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
    fn len(&self) -> usize {
        self.0.len()
    }
    fn new(slice: Self::Slice<'_>) -> Self {
        slice
            .iter()
            .map(|slice_entry| Self::Item::new(slice_entry))
            .collect()
    }
    fn push(&mut self, item: Self::Item) {
        self.0.push(item)
    }
    fn zero(len: usize) -> Self {
        (0..len).map(|_| Self::Item::zero(len)).collect()
    }
}

impl Div<TensorRank0> for SquareMatrix {
    type Output = Self;
    fn div(mut self, tensor_rank_0: TensorRank0) -> Self::Output {
        self /= &tensor_rank_0;
        self
    }
}

impl Div<&TensorRank0> for SquareMatrix {
    type Output = Self;
    fn div(mut self, tensor_rank_0: &TensorRank0) -> Self::Output {
        self /= tensor_rank_0;
        self
    }
}

impl DivAssign<TensorRank0> for SquareMatrix {
    fn div_assign(&mut self, tensor_rank_0: TensorRank0) {
        self.iter_mut().for_each(|entry| *entry /= &tensor_rank_0);
    }
}

impl DivAssign<&TensorRank0> for SquareMatrix {
    fn div_assign(&mut self, tensor_rank_0: &TensorRank0) {
        self.iter_mut().for_each(|entry| *entry /= tensor_rank_0);
    }
}

impl Mul<TensorRank0> for SquareMatrix {
    type Output = Self;
    fn mul(mut self, tensor_rank_0: TensorRank0) -> Self::Output {
        self *= &tensor_rank_0;
        self
    }
}

impl Mul<&TensorRank0> for SquareMatrix {
    type Output = Self;
    fn mul(mut self, tensor_rank_0: &TensorRank0) -> Self::Output {
        self *= tensor_rank_0;
        self
    }
}

impl Mul<&TensorRank0> for &SquareMatrix {
    type Output = SquareMatrix;
    fn mul(self, tensor_rank_0: &TensorRank0) -> Self::Output {
        self.iter().map(|self_i| self_i * tensor_rank_0).collect()
    }
}

impl MulAssign<TensorRank0> for SquareMatrix {
    fn mul_assign(&mut self, tensor_rank_0: TensorRank0) {
        self.iter_mut().for_each(|entry| *entry *= &tensor_rank_0);
    }
}

impl MulAssign<&TensorRank0> for SquareMatrix {
    fn mul_assign(&mut self, tensor_rank_0: &TensorRank0) {
        self.iter_mut().for_each(|entry| *entry *= tensor_rank_0);
    }
}

impl Mul<Vector> for SquareMatrix {
    type Output = Vector;
    fn mul(self, vector: Vector) -> Self::Output {
        self.iter().map(|self_i| self_i * &vector).collect()
    }
}

impl Mul<&Vector> for SquareMatrix {
    type Output = Vector;
    fn mul(self, vector: &Vector) -> Self::Output {
        self.iter().map(|self_i| self_i * vector).collect()
    }
}

impl Add for SquareMatrix {
    type Output = Self;
    fn add(mut self, matrix: Self) -> Self::Output {
        self += matrix;
        self
    }
}

impl Add<&Self> for SquareMatrix {
    type Output = Self;
    fn add(mut self, matrix: &Self) -> Self::Output {
        self += matrix;
        self
    }
}

impl AddAssign for SquareMatrix {
    fn add_assign(&mut self, matrix: Self) {
        self.iter_mut()
            .zip(matrix.iter())
            .for_each(|(self_i, matrix_i)| *self_i += matrix_i);
    }
}

impl AddAssign<&Self> for SquareMatrix {
    fn add_assign(&mut self, matrix: &Self) {
        self.iter_mut()
            .zip(matrix.iter())
            .for_each(|(self_i, matrix_i)| *self_i += matrix_i);
    }
}

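// Matrix-matrix multiplication, accumulated row-wise: each output row is a
// linear combination of the rows of `matrix`, weighted by the entries of the
// corresponding row of `self`, i.e. row_i(A * B) = sum_j A_ij * row_j(B).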
impl Mul for SquareMatrix {
    type Output = Self;
    fn mul(self, matrix: Self) -> Self::Output {
        let mut output = Self::zero(matrix.len());
        self.iter()
            .zip(output.iter_mut())
            .for_each(|(self_i, output_i)| {
                self_i
                    .iter()
                    .zip(matrix.iter())
                    .for_each(|(self_ij, matrix_j)| *output_i += matrix_j * self_ij)
            });
        output
    }
}

impl Sub for SquareMatrix {
    type Output = Self;
    fn sub(mut self, matrix: Self) -> Self::Output {
        self -= matrix;
        self
    }
}

impl Sub<&Self> for SquareMatrix {
    type Output = Self;
    fn sub(mut self, matrix: &Self) -> Self::Output {
        self -= matrix;
        self
    }
}

impl SubAssign for SquareMatrix {
    fn sub_assign(&mut self, matrix: Self) {
        self.iter_mut()
            .zip(matrix.iter())
            .for_each(|(self_i, matrix_i)| *self_i -= matrix_i);
    }
}

impl SubAssign<&Self> for SquareMatrix {
    fn sub_assign(&mut self, matrix: &Self) {
        self.iter_mut()
            .zip(matrix.iter())
            .for_each(|(self_i, matrix_i)| *self_i -= matrix_i);
    }
}