conspire/math/tensor/rank_1/vec/mod.rs

#[cfg(test)]
mod test;

use crate::math::{
    Jacobian, Solution, Tensor, TensorRank0, TensorRank1, TensorRank2Vec2D, Vector,
    tensor::vec::TensorVector,
};
use std::{
    mem::{forget, transmute},
    ops::{Div, Sub},
};

#[cfg(test)]
use crate::math::tensor::test::ErrorTensor;

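/// A vector of rank-1 tensors: a variable-length list of `TensorRank1<D, I>` entries.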
pub type TensorRank1Vec<const D: usize, const I: usize> = TensorVector<TensorRank1<D, I>>;

impl<const D: usize, const I: usize> TensorRank1Vec<D, I> {
    /// Returns a vector of `len` zero rank-1 tensors.
    pub fn zero(len: usize) -> Self {
        (0..len).map(|_| super::zero()).collect()
    }
}

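// Conversions between tensor vectors and plain arrays or nested vectors of scalars.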
impl<const D: usize, const I: usize, const N: usize> From<[[TensorRank0; D]; N]>
    for TensorRank1Vec<D, I>
{
    fn from(array: [[TensorRank0; D]; N]) -> Self {
        array.into_iter().map(TensorRank1::from).collect()
    }
}

impl<const D: usize, const I: usize> From<Vec<[TensorRank0; D]>> for TensorRank1Vec<D, I> {
    fn from(vec: Vec<[TensorRank0; D]>) -> Self {
        // SAFETY: relies on `TensorRank1<D, I>` and `TensorVector` being layout-compatible
        // with `[TensorRank0; D]` and `Vec`, respectively.
        unsafe { transmute(vec) }
    }
}

impl<const D: usize, const I: usize> From<TensorRank1Vec<D, I>> for Vec<[TensorRank0; D]> {
    fn from(tensor_rank_1_vec: TensorRank1Vec<D, I>) -> Self {
        // SAFETY: the inverse of the conversion above, with the same layout requirements.
        unsafe { transmute(tensor_rank_1_vec) }
    }
}

impl<const D: usize, const I: usize> From<Vec<Vec<TensorRank0>>> for TensorRank1Vec<D, I> {
    fn from(vec: Vec<Vec<TensorRank0>>) -> Self {
        vec.into_iter()
            .map(|tensor_rank_1| tensor_rank_1.into())
            .collect()
    }
}

impl<const D: usize, const I: usize> From<TensorRank1Vec<D, I>> for Vec<Vec<TensorRank0>> {
    fn from(tensor_rank_1_vec: TensorRank1Vec<D, I>) -> Self {
        tensor_rank_1_vec
            .into_iter()
            .map(|tensor_rank_1| tensor_rank_1.into())
            .collect()
    }
}

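// Conversions between three-dimensional tensor vectors with different `I` parameters,
// reusing the allocation where the conversion consumes its input.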
impl From<TensorRank1Vec<3, 0>> for TensorRank1Vec<3, 1> {
    fn from(tensor_rank_1_vec: TensorRank1Vec<3, 0>) -> Self {
        // SAFETY: the parameter `I` is a marker that does not affect the layout of
        // `TensorRank1Vec`.
        unsafe { transmute(tensor_rank_1_vec) }
    }
}

impl From<&TensorRank1Vec<3, 0>> for TensorRank1Vec<3, 1> {
    fn from(tensor_rank_1_vec: &TensorRank1Vec<3, 0>) -> Self {
        tensor_rank_1_vec
            .iter()
            .map(|tensor_rank_1| tensor_rank_1.into())
            .collect()
    }
}

impl From<TensorRank1Vec<3, 1>> for TensorRank1Vec<3, 0> {
    fn from(tensor_rank_1_vec: TensorRank1Vec<3, 1>) -> Self {
        // SAFETY: as above, `I` does not affect layout.
        unsafe { transmute(tensor_rank_1_vec) }
    }
}

impl From<&TensorRank1Vec<3, 1>> for TensorRank1Vec<3, 0> {
    fn from(tensor_rank_1_vec: &TensorRank1Vec<3, 1>) -> Self {
        tensor_rank_1_vec
            .iter()
            .map(|tensor_rank_1| tensor_rank_1.into())
            .collect()
    }
}

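// Reinterprets a flat `Vector` of length `D * len` as `len` rank-1 tensors without copying.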
impl<const D: usize, const I: usize> From<Vector> for TensorRank1Vec<D, I> {
    fn from(vector: Vector) -> Self {
        let n = vector.len();
        if n.is_multiple_of(D) {
            let length = n / D;
            let pointer = vector.as_ptr() as *mut TensorRank1<D, I>;
            forget(vector);
            // SAFETY: the allocation is reinterpreted in place; `forget` prevents a double
            // free, and the length check above ensures it divides evenly into `D`-tuples.
            unsafe { Self::from(Vec::from_raw_parts(pointer, length, length)) }
        } else {
            panic!("Vector length mismatch.")
        }
    }
}

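// The `Jacobian` implementation flattens entries into a flat `Vector`, with variants for
// chaining extra entries, retaining a subset, and zeroing individual components.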
impl<const D: usize, const I: usize> Jacobian for TensorRank1Vec<D, I> {
    fn fill_into(self, vector: &mut Vector) {
        self.into_iter()
            .flatten()
            .zip(vector.iter_mut())
            .for_each(|(self_i, vector_i)| *vector_i = self_i)
    }
    fn fill_into_chained(self, other: Vector, vector: &mut Vector) {
        self.into_iter()
            .flatten()
            .chain(other)
            .zip(vector.iter_mut())
            .for_each(|(self_i, vector_i)| *vector_i = self_i)
    }
    fn retain_from(self, retained: &[bool]) -> Vector {
        self.into_iter()
            .flatten()
            .zip(retained.iter())
            .filter(|(_, retained)| **retained)
            .map(|(entry, _)| entry)
            .collect()
    }
    fn zero_out(&mut self, indices: &[usize]) {
        indices
            .iter()
            .for_each(|index| self[index / D][index % D] = 0.0)
    }
}

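// The `Solution` implementation decrements entries in place by values taken from a flat
// `Vector`, mirroring the flattened layout used by `Jacobian`.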
impl<const D: usize, const I: usize> Solution for TensorRank1Vec<D, I> {
    fn decrement_from(&mut self, other: &Vector) {
        self.iter_mut()
            .flat_map(|x| x.iter_mut())
            .zip(other.iter())
            .for_each(|(self_i, vector_i)| *self_i -= vector_i)
    }
    fn decrement_from_chained(&mut self, other: &mut Vector, vector: Vector) {
        self.iter_mut()
            .flat_map(|x| x.iter_mut())
            .chain(other.iter_mut())
            .zip(vector)
            .for_each(|(entry_i, vector_i)| *entry_i -= vector_i)
    }
    fn decrement_from_retained(&mut self, retained: &[bool], other: &Vector) {
        self.iter_mut()
            .flat_map(|x| x.iter_mut())
            .zip(retained.iter())
            .filter(|(_, retained_i)| **retained_i)
            .zip(other.iter())
            .for_each(|((self_i, _), vector_i)| *self_i -= vector_i)
    }
}

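// Elementwise subtraction of a flat `Vector`, where component `i` of entry `a` pairs with
// the flat index `D * a + i`.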
impl<const D: usize, const I: usize> Sub<Vector> for TensorRank1Vec<D, I> {
    type Output = Self;
    fn sub(mut self, vector: Vector) -> Self::Output {
        self.iter_mut().enumerate().for_each(|(a, self_a)| {
            self_a
                .iter_mut()
                .enumerate()
                .for_each(|(i, self_a_i)| *self_a_i -= vector[D * a + i])
        });
        self
    }
}

impl<const D: usize, const I: usize> Sub<&Vector> for TensorRank1Vec<D, I> {
    type Output = Self;
    fn sub(mut self, vector: &Vector) -> Self::Output {
        self.iter_mut().enumerate().for_each(|(a, self_a)| {
            self_a
                .iter_mut()
                .enumerate()
                .for_each(|(i, self_a_i)| *self_a_i -= vector[D * a + i])
        });
        self
    }
}

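// Division by a two-dimensional vector of rank-2 tensors is not yet implemented.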
impl<const D: usize, const I: usize, const J: usize> Div<TensorRank2Vec2D<D, I, J>>
    for &TensorRank1Vec<D, I>
{
    type Output = TensorRank1Vec<D, J>;
    fn div(self, _tensor_rank_2_vec_2d: TensorRank2Vec2D<D, I, J>) -> Self::Output {
        todo!()
    }
}

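// Test-only comparison: counts components whose relative difference from the comparator
// exceeds `epsilon`, ignoring pairs where both magnitudes fall below `epsilon`; the
// boolean flags whether any of those also differ absolutely by at least `epsilon`.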
#[cfg(test)]
impl<const D: usize, const I: usize> ErrorTensor for TensorRank1Vec<D, I> {
    fn error_fd(&self, comparator: &Self, epsilon: TensorRank0) -> Option<(bool, usize)> {
        let error_count = self
            .iter()
            .zip(comparator.iter())
            .map(|(entry, comparator_entry)| {
                entry
                    .iter()
                    .zip(comparator_entry.iter())
                    .filter(|&(&entry_i, &comparator_entry_i)| {
                        (entry_i / comparator_entry_i - 1.0).abs() >= epsilon
                            && (entry_i.abs() >= epsilon || comparator_entry_i.abs() >= epsilon)
                    })
                    .count()
            })
            .sum();
        if error_count > 0 {
            let auxiliary = self
                .iter()
                .zip(comparator.iter())
                .map(|(entry, comparator_entry)| {
                    entry
                        .iter()
                        .zip(comparator_entry.iter())
                        .filter(|&(&entry_i, &comparator_entry_i)| {
                            (entry_i / comparator_entry_i - 1.0).abs() >= epsilon
                                && (entry_i - comparator_entry_i).abs() >= epsilon
                                && (entry_i.abs() >= epsilon || comparator_entry_i.abs() >= epsilon)
                        })
                        .count()
                })
                .sum::<usize>()
                > 0;
            Some((auxiliary, error_count))
        } else {
            None
        }
    }
}