diff --git a/linfa-kernel/src/lib.rs b/linfa-kernel/src/lib.rs
index 8010bda54..339c6366e 100644
--- a/linfa-kernel/src/lib.rs
+++ b/linfa-kernel/src/lib.rs
@@ -15,6 +15,8 @@ use linfa::{dataset::DatasetBase, dataset::Records, dataset::Targets, traits::Transformer};
 #[derive(Clone)]
 pub enum KernelType {
     Dense,
+    /// A sparse kernel requires the definition of a number of neighbours
+    /// between 1 and the total number of input samples minus one.
    Sparse(usize),
 }
 
@@ -59,7 +61,6 @@ where
     )]
     pub method: KernelMethod<R::Elem>,
     pub dataset: R,
-    pub linear: bool,
 }
 
 impl<'a, F: Float> Kernel<ArrayView2<'a, F>> {
@@ -67,7 +68,6 @@ impl<'a, F: Float> Kernel<ArrayView2<'a, F>> {
         dataset: ArrayView2<'a, F>,
         method: KernelMethod<F>,
         kind: KernelType,
-        linear: bool,
     ) -> Kernel<ArrayView2<'a, F>> {
         let inner = match kind {
             KernelType::Dense => KernelInner::Dense(dense_from_fn(&dataset, &method)),
@@ -78,17 +78,36 @@ impl<'a, F: Float> Kernel<ArrayView2<'a, F>> {
             inner,
             method,
             dataset,
-            linear,
         }
     }
 
+    /// Performs the matrix product between the kernel matrix
+    /// and the input
+    ///
+    /// ## Parameters
+    ///
+    /// - `rhs`: The matrix on the right-hand side of the multiplication
+    ///
+    /// ## Returns
+    ///
+    /// A new matrix containing the matrix product between the kernel
+    /// and `rhs`
+    ///
+    /// ## Panics
+    ///
+    /// If the shapes of the kernel and `rhs` are not compatible for multiplication
     pub fn dot(&self, rhs: &ArrayView2<F>) -> Array2<F> {
         match &self.inner {
-            KernelInner::Dense(mat) => mat.mul(rhs),
+            KernelInner::Dense(mat) => mat.dot(rhs),
             KernelInner::Sparse(mat) => mat.mul(rhs),
         }
     }
 
+    /// Sums all elements in the same row of the kernel matrix
+    ///
+    /// ## Returns
+    ///
+    /// A new array with the sum of all the elements in each row
     pub fn sum(&self) -> Array1<F> {
         match &self.inner {
             KernelInner::Dense(mat) => mat.sum_axis(Axis(1)),
@@ -104,6 +123,7 @@ impl<'a, F: Float> Kernel<ArrayView2<'a, F>> {
         }
     }
 
+    /// Gives the size of the side of the square kernel matrix
     pub fn size(&self) -> usize {
         match &self.inner {
             KernelInner::Dense(mat) => mat.ncols(),
@@ -111,6 +131,13 @@ impl<'a, F: Float> Kernel<ArrayView2<'a, F>> {
         }
     }
 
+    /// Getter for the data in the upper triangle of the kernel
+    /// matrix
+    ///
+    /// ## Returns
+    ///
+    /// A copy of all elements in the upper triangle of the kernel
+    /// matrix, stored in a `Vec`
     pub fn to_upper_triangle(&self) -> Vec<F> {
         match &self.inner {
             KernelInner::Dense(mat) => mat
@@ -128,6 +155,12 @@ impl<'a, F: Float> Kernel<ArrayView2<'a, F>> {
         }
     }
 
+    /// Getter for the elements in the diagonal of the kernel matrix
+    ///
+    /// ## Returns
+    ///
+    /// A new array containing a copy of all elements in the diagonal of
+    /// the kernel matrix
     pub fn diagonal(&self) -> Array1<F> {
         match &self.inner {
             KernelInner::Dense(mat) => mat.diag().to_owned(),
@@ -139,6 +172,19 @@ impl<'a, F: Float> Kernel<ArrayView2<'a, F>> {
         }
     }
 
+    /// Getter for a column of the kernel matrix
+    ///
+    /// ## Params
+    ///
+    /// - `i`: the index of the column
+    ///
+    /// ## Returns
+    ///
+    /// The i-th column of the kernel matrix, stored as a `Vec`
+    ///
+    /// ## Panics
+    ///
+    /// If `i` is out of bounds
     pub fn column(&self, i: usize) -> Vec<F> {
         match &self.inner {
             KernelInner::Dense(mat) => mat.column(i).to_vec(),
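To make the operations documented in the hunks above concrete, here is a minimal usage sketch; the 3x2 `data` matrix and the asserted shapes are illustrative assumptions, not part of the patch:

```rust
use linfa::traits::Transformer;
use linfa_kernel::{Kernel, KernelMethod, KernelType};
use ndarray::array;

fn main() {
    // Illustrative data: three samples with two features each
    let data = array![[1., 2.], [3., 4.], [5., 6.]];
    let kernel = Kernel::params()
        .kind(KernelType::Dense)
        .method(KernelMethod::Linear)
        .transform(&data);

    // The kernel matrix is square, with one row/column per sample
    assert_eq!(kernel.size(), 3);
    // Row sums, diagonal and (strict) upper triangle of the kernel matrix
    let row_sums = kernel.sum();
    let diagonal = kernel.diagonal();
    assert_eq!(kernel.to_upper_triangle().len(), 3);
    // Matrix product with a compatible right-hand side
    let rhs = array![[1., 0.], [0., 1.], [1., 1.]];
    let product = kernel.dot(&rhs.view());
    assert_eq!(product.dim(), (3, 2));
    let _ = (row_sums, diagonal);
}
```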
@@ -148,6 +194,23 @@ impl<'a, F: Float> Kernel<ArrayView2<'a, F>> {
         }
     }
 
+    /// Sums the inner product of `sample` and every one of the samples
+    /// used to generate the kernel
+    ///
+    /// ## Parameters
+    ///
+    /// * `weights`: the weight of each inner product
+    /// * `sample`: the input sample
+    ///
+    /// ## Returns
+    ///
+    /// The weighted sum of all inner products of `sample` and every one of the samples
+    /// used to generate the kernel
+    ///
+    /// ## Panics
+    ///
+    /// If the shapes of `weights` or `sample` are not compatible with the
+    /// shape of the kernel matrix
     pub fn weighted_sum(&self, weights: &[F], sample: ArrayView1<F>) -> F {
         self.dataset
             .outer_iter()
@@ -156,10 +219,34 @@ impl<'a, F: Float> Kernel<ArrayView2<'a, F>> {
             .sum()
     }
 
+    /// Whether the kernel is a linear kernel
+    ///
+    /// ## Returns
+    ///
+    /// - `true`: if the kernel is linear
+    /// - `false`: otherwise
     pub fn is_linear(&self) -> bool {
-        self.linear
+        self.method.is_linear()
     }
 
+    /// Generates the default set of parameters for building a kernel.
+    /// Use this to initialize a set of parameters to be customized using `KernelParams`'s methods
+    ///
+    /// ## Example
+    ///
+    /// ```rust
+    /// use linfa_kernel::Kernel;
+    /// use linfa::traits::Transformer;
+    /// use ndarray::Array2;
+    ///
+    /// let data = Array2::from_shape_vec((3, 2), vec![1., 2., 3., 4., 5., 6.]).unwrap();
+    ///
+    /// // Build a kernel from `data` with the default parameters
+    /// let params = Kernel::params();
+    /// let kernel = params.transform(&data);
+    /// ```
     pub fn params() -> KernelParams<F> {
         KernelParams {
             kind: KernelType::Dense,
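A short sketch of the behaviour changed in the `is_linear` hunk above: linearity is now derived from the configured method instead of being stored in a separate flag. The data values are illustrative:

```rust
use linfa::traits::Transformer;
use linfa_kernel::{Kernel, KernelMethod};
use ndarray::array;

fn main() {
    let data = array![[1., 2.], [3., 4.], [5., 6.]];

    // A kernel built with the linear method reports itself as linear...
    let linear = Kernel::params()
        .method(KernelMethod::Linear)
        .transform(&data);
    assert!(linear.is_linear());

    // ...while a Gaussian kernel does not
    let gaussian = Kernel::params()
        .method(KernelMethod::Gaussian(0.5))
        .transform(&data);
    assert!(!gaussian.is_linear());
}
```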
@@ -176,6 +263,13 @@ impl<'a, F: Float> Records for Kernel<ArrayView2<'a, F>> {
     }
 }
 
+/// The inner product definition used by a kernel.
+///
+/// There are three methods available:
+///
+/// - Gaussian(eps): `d(x, x') = exp(-||x - x'||^2 / eps)`
+/// - Linear: `d(x, x') = <x, x'>`
+/// - Polynomial(constant, degree): `d(x, x') = (<x, x'> + constant)^degree`
 #[cfg_attr(
     feature = "serde",
     derive(Serialize, Deserialize),
@@ -183,8 +277,11 @@ impl<'a, F: Float> Records for Kernel<ArrayView2<'a, F>> {
 )]
 #[derive(Debug, Clone)]
 pub enum KernelMethod<F> {
+    /// Gaussian(eps) inner product
     Gaussian(F),
+    /// Euclidean inner product
     Linear,
+    /// Polynomial(constant, degree) inner product
     Polynomial(F, F),
 }
 
@@ -210,38 +307,118 @@ impl<F: Float> KernelMethod<F> {
     }
 }
 
+/// Defines the set of parameters needed to build a kernel
 pub struct KernelParams<F> {
+    /// Whether to construct a dense or sparse kernel
     kind: KernelType,
+    /// The inner product used by the kernel
     method: KernelMethod<F>,
 }
 
 impl<F: Float> KernelParams<F> {
+    /// Setter for `method`. Can be chained with `kind` and `transform`.
+    ///
+    /// ## Arguments
+    ///
+    /// - `method`: The inner product that will be used by the kernel
+    ///
+    /// ## Returns
+    ///
+    /// The modified set of params
+    ///
+    /// ## Example
+    ///
+    /// ```rust
+    /// use linfa_kernel::{Kernel, KernelMethod};
+    /// use linfa::traits::Transformer;
+    /// use ndarray::Array2;
+    ///
+    /// let data = Array2::from_shape_vec((3, 2), vec![1., 2., 3., 4., 5., 6.]).unwrap();
+    ///
+    /// // Build a kernel from `data` with the default parameters
+    /// // and then set the preferred method
+    /// let params = Kernel::params().method(KernelMethod::Linear);
+    /// let kernel = params.transform(&data);
+    /// ```
     pub fn method(mut self, method: KernelMethod<F>) -> KernelParams<F> {
         self.method = method;
         self
     }
 
+    /// Setter for `kind`. Can be chained with `method` and `transform`.
+    ///
+    /// ## Arguments
+    ///
+    /// - `kind`: The kind of kernel to build, either dense or sparse
+    ///
+    /// ## Returns
+    ///
+    /// The modified set of params
+    ///
+    /// ## Example
+    ///
+    /// ```rust
+    /// use linfa_kernel::{Kernel, KernelType};
+    /// use linfa::traits::Transformer;
+    /// use ndarray::Array2;
+    ///
+    /// let data = Array2::from_shape_vec((3, 2), vec![1., 2., 3., 4., 5., 6.]).unwrap();
+    ///
+    /// // Build a kernel from `data` with the default parameters
+    /// // and then set the preferred kind
+    /// let params = Kernel::params().kind(KernelType::Dense);
+    /// let kernel = params.transform(&data);
+    /// ```
     pub fn kind(mut self, kind: KernelType) -> KernelParams<F> {
         self.kind = kind;
-
         self
     }
 }
 
 impl<'a, F: Float> Transformer<&'a Array2<F>, Kernel<ArrayView2<'a, F>>> for KernelParams<F> {
+    /// Builds a kernel from the input data without copying it.
+    ///
+    /// A reference to the input data will be kept by the kernel
+    /// through an `ArrayView`
+    ///
+    /// ## Parameters
+    ///
+    /// - `x`: matrix of records (##records, ##features) in input
+    ///
+    /// ## Returns
+    ///
+    /// A kernel built from `x` according to the parameters on which
+    /// this method is called
+    ///
+    /// ## Panics
+    ///
+    /// If the kernel type is `Sparse` and the number of neighbors specified is
+    /// not between 1 and ##records-1
     fn transform(&self, x: &'a Array2<F>) -> Kernel<ArrayView2<'a, F>> {
-        let is_linear = self.method.is_linear();
-
-        Kernel::new(x.view(), self.method.clone(), self.kind.clone(), is_linear)
+        Kernel::new(x.view(), self.method.clone(), self.kind.clone())
     }
 }
 
 impl<'a, F: Float> Transformer<ArrayView2<'a, F>, Kernel<ArrayView2<'a, F>>> for KernelParams<F> {
+    /// Builds a kernel from a view of the input data.
+    ///
+    /// A reference to the input data will be kept by the kernel
+    /// through an `ArrayView`
+    ///
+    /// ## Parameters
+    ///
+    /// - `x`: view of a matrix of records (##records, ##features)
+    ///
+    /// ## Returns
+    ///
+    /// A kernel built from `x` according to the parameters on which
+    /// this method is called
+    ///
+    /// ## Panics
+    ///
+    /// If the kernel type is `Sparse` and the number of neighbors specified is
+    /// not between 1 and ##records-1
     fn transform(&self, x: ArrayView2<'a, F>) -> Kernel<ArrayView2<'a, F>> {
-        let is_linear = self.method.is_linear();
-
-        Kernel::new(x, self.method.clone(), self.kind.clone(), is_linear)
+        Kernel::new(x, self.method.clone(), self.kind.clone())
     }
 }
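Both `Transformer` impls above avoid copying the records: the kernel borrows them through an `ArrayView2`. A minimal sketch of the two entry points, with illustrative values:

```rust
use linfa::traits::Transformer;
use linfa_kernel::{Kernel, KernelMethod, KernelType};
use ndarray::array;

fn main() {
    let data = array![[1., 2.], [3., 4.], [5., 6.]];
    let params = Kernel::params()
        .kind(KernelType::Sparse(2))
        .method(KernelMethod::Gaussian(1.0));

    // From an owned matrix: the kernel keeps a view into `data`
    let k1 = params.transform(&data);
    // From a view: same result, again without copying the records
    let k2 = params.transform(data.view());
    assert_eq!(k1.size(), k2.size());
}
```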
@@ -249,18 +426,31 @@ impl<'a, F: Float, T: Targets> Transformer<&'a DatasetBase<Array2<F>, T>, DatasetBase<Kernel<ArrayView2<'a, F>>, &'a T>>
     for KernelParams<F>
 {
+    /// Builds a new Dataset with the kernel as the records and the same targets as the input one.
+    ///
+    /// A reference to the input records will be kept by the kernel
+    /// through an `ArrayView`
+    ///
+    /// ## Parameters
+    ///
+    /// - `x`: A dataset with a matrix of records (##records, ##features) and any targets
+    ///
+    /// ## Returns
+    ///
+    /// A new dataset with:
+    /// - records: a kernel built from `x.records()` according to the parameters on which
+    ///   this method is called
+    /// - targets: same as `x.targets()`
+    ///
+    /// ## Panics
+    ///
+    /// If the kernel type is `Sparse` and the number of neighbors specified is
+    /// not between 1 and ##records-1
     fn transform(
         &self,
         x: &'a DatasetBase<Array2<F>, T>,
     ) -> DatasetBase<Kernel<ArrayView2<'a, F>>, &'a T> {
-        let is_linear = self.method.is_linear();
-
-        let kernel = Kernel::new(
-            x.records.view(),
-            self.method.clone(),
-            self.kind.clone(),
-            is_linear,
-        );
+        let kernel = Kernel::new(x.records.view(), self.method.clone(), self.kind.clone());
 
         DatasetBase::new(kernel, &x.targets)
     }
@@ -272,13 +462,31 @@ impl<'a, F: Float, T: Targets>
     Transformer<
         &'a DatasetBase<ArrayView2<'a, F>, T>,
         DatasetBase<Kernel<ArrayView2<'a, F>>, &'a [T::Elem]>,
     > for KernelParams<F>
 {
+    /// Builds a new Dataset with the kernel as the records and the same targets as the input one.
+    ///
+    /// A reference to the input records will be kept by the kernel
+    /// through an `ArrayView`
+    ///
+    /// ## Parameters
+    ///
+    /// - `x`: A dataset with a matrix of records (##records, ##features) and any targets
+    ///
+    /// ## Returns
+    ///
+    /// A new dataset with:
+    /// - records: a kernel built from `x.records()` according to the parameters on which
+    ///   this method is called
+    /// - targets: a slice of `x.targets()`
+    ///
+    /// ## Panics
+    ///
+    /// If the kernel type is `Sparse` and the number of neighbors specified is
+    /// not between 1 and ##records-1
     fn transform(
         &self,
         x: &'a DatasetBase<ArrayView2<'a, F>, T>,
     ) -> DatasetBase<Kernel<ArrayView2<'a, F>>, &'a [T::Elem]> {
-        let is_linear = self.method.is_linear();
-
-        let kernel = Kernel::new(x.records, self.method.clone(), self.kind.clone(), is_linear);
+        let kernel = Kernel::new(x.records, self.method.clone(), self.kind.clone());
 
         DatasetBase::new(kernel, x.targets.as_slice())
     }
@@ -308,9 +516,17 @@ fn sparse_from_fn<F: Float, D: Data<Elem = F>>(
     k: usize,
     method: &KernelMethod<F>,
 ) -> CsMat<F> {
+    // compute the adjacency matrix between the points in the input dataset:
+    // one point for each row
     let mut data = sparse::adjacency_matrix(dataset, k);
 
+    // iterate through each row of the adjacency matrix, where each
+    // row is represented by a vec containing a pair (col_index, value)
+    // for each non-zero element in the row
     for (i, mut vec) in data.outer_iterator_mut().enumerate() {
+        // If there is a non-zero element in row i at index j,
+        // then points i and j in the input matrix are
+        // k-neighbours and their distance is stored in position (i, j)
         for (j, val) in vec.iter_mut() {
             let a = dataset.row(i);
             let b = dataset.row(j);
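To make the loop above concrete: with `k = 1` and two well-separated clusters, only the pairs marked by the adjacency matrix get an inner-product entry, and everything else stays a structural zero. This sketch assumes that the sparse `column` getter reads missing entries back as `0.`; the data values are illustrative:

```rust
use linfa::traits::Transformer;
use linfa_kernel::{Kernel, KernelMethod, KernelType};
use ndarray::array;

fn main() {
    // Two tight clusters: with k = 1 each point is adjacent only to
    // itself and its nearest neighbour
    let data = array![[0., 0.], [0.1, 0.1], [3., 3.], [3.1, 3.1]];
    let kernel = Kernel::params()
        .kind(KernelType::Sparse(1))
        .method(KernelMethod::Linear)
        .transform(&data);

    let col = kernel.column(1);
    // Point 1 is adjacent to itself, so its inner product is stored...
    assert!(col[1] > 0.);
    // ...while point 3 sits in the other cluster: structural zero
    assert!(col[3] == 0.);
}
```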
@@ -318,6 +534,432 @@ fn sparse_from_fn<F: Float, D: Data<Elem = F>>(
             *val = method.distance(a, b);
         }
     }
-
     data
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use ndarray::{Array1, Array2};
+    use std::f64::consts;
+
+    #[test]
+    fn sparse_from_fn_test() {
+        //  pts 0 & 1    pts 2 & 3    pts 4 & 5    pts 6 & 7
+        // |0.| |0.1| _ |1.| |1.1| _ |2.| |2.1| _ |3.| |3.1|
+        // |0.| |0.1|   |1.| |1.1|   |2.| |2.1|   |3.| |3.1|
+        let input_mat = vec![
+            0., 0., 0.1, 0.1, 1., 1., 1.1, 1.1, 2., 2., 2.1, 2.1, 3., 3., 3.1, 3.1,
+        ];
+        let input_arr = Array2::from_shape_vec((8, 2), input_mat).unwrap();
+        let adj_mat = sparse_from_fn(&input_arr, 1, &KernelMethod::Linear);
+        assert_eq!(adj_mat.nnz(), 16);
+
+        // 2 * 0^2
+        assert_eq!(*adj_mat.get(0, 0).unwrap() as usize, 0);
+        // 2 * 0.1^2
+        assert_eq!((*adj_mat.get(1, 1).unwrap() * 100.) as usize, 2);
+        // 2 * 1^2
+        assert_eq!(*adj_mat.get(2, 2).unwrap() as usize, 2);
+        // 2 * 1.1^2
+        assert_eq!((*adj_mat.get(3, 3).unwrap() * 100.) as usize, 242);
+        // 2 * 2^2
+        assert_eq!(*adj_mat.get(4, 4).unwrap() as usize, 8);
+        // 2 * 2.1^2
+        assert_eq!((*adj_mat.get(5, 5).unwrap() * 100.) as usize, 882);
+        // 2 * 3^2
+        assert_eq!(*adj_mat.get(6, 6).unwrap() as usize, 18);
+        // 2 * 3.1^2
+        assert_eq!((*adj_mat.get(7, 7).unwrap() * 100.) as usize, 1922);
+
+        // 2 * (0 * 0.1)
+        assert_eq!(*adj_mat.get(0, 1).unwrap() as usize, 0);
+        assert_eq!(*adj_mat.get(1, 0).unwrap() as usize, 0);
+
+        // 2 * (1 * 1.1)
+        assert_eq!((*adj_mat.get(2, 3).unwrap() * 10.) as usize, 22);
+        assert_eq!((*adj_mat.get(3, 2).unwrap() * 10.) as usize, 22);
+
+        // 2 * (2 * 2.1)
+        assert_eq!((*adj_mat.get(4, 5).unwrap() * 10.) as usize, 84);
+        assert_eq!((*adj_mat.get(5, 4).unwrap() * 10.) as usize, 84);
+
+        // 2 * (3 * 3.1)
+        assert_eq!((*adj_mat.get(6, 7).unwrap() * 10.) as usize, 186);
+        assert_eq!((*adj_mat.get(7, 6).unwrap() * 10.) as usize, 186);
+    }
+
+    #[test]
+    fn dense_from_fn_test() {
+        //  pts 0 & 1    pts 2 & 3    pts 4 & 5    pts 6 & 7
+        // |0.| |0.1| _ |1.| |1.1| _ |2.| |2.1| _ |3.| |3.1|
+        // |0.| |0.1|   |1.| |1.1|   |2.| |2.1|   |3.| |3.1|
+        let input_mat = vec![
+            0., 0., 0.1, 0.1, 1., 1., 1.1, 1.1, 2., 2., 2.1, 2.1, 3., 3., 3.1, 3.1,
+        ];
+        let input_arr = Array2::from_shape_vec((8, 2), input_mat).unwrap();
+        let method: KernelMethod<f64> = KernelMethod::Linear;
+
+        let similarity_matrix = dense_from_fn(&input_arr, &method);
+
+        for i in 0..8 {
+            for j in 0..8 {
+                assert!(
+                    (similarity_matrix.row(i)[j]
+                        - method.distance(input_arr.row(i), input_arr.row(j)))
+                    .abs()
+                        <= f64::EPSILON
+                );
+            }
+        }
+    }
+
+    #[test]
+    fn gaussian_test() {
+        let gauss_1 = KernelMethod::Gaussian(1.);
+
+        let p1 = Array1::from_shape_vec(2, vec![0., 0.]).unwrap();
+        let p2 = Array1::from_shape_vec(2, vec![0., 0.]).unwrap();
+        let distance = gauss_1.distance(p1.view(), p2.view());
+        let expected = 1.;
+
+        assert!(((distance - expected) as f64).abs() <= f64::EPSILON);
+
+        let p1 = Array1::from_shape_vec(2, vec![1., 1.]).unwrap();
+        let p2 = Array1::from_shape_vec(2, vec![5., 5.]).unwrap();
+        let distance = gauss_1.distance(p1.view(), p2.view());
+        let expected = (consts::E).powf(-32.);
+        // this fails with e^-31 or e^-33, so f64::EPSILON still holds
+        assert!(((distance - expected) as f64).abs() <= f64::EPSILON);
+
+        let gauss_01 = KernelMethod::Gaussian(0.1);
+
+        let p1 = Array1::from_shape_vec(2, vec![0., 0.]).unwrap();
+        let p2 = Array1::from_shape_vec(2, vec![0., 0.]).unwrap();
+        let distance = gauss_01.distance(p1.view(), p2.view());
+        let expected = 1.;
+
+        assert!(((distance - expected) as f64).abs() <= f64::EPSILON);
+
+        let p1 = Array1::from_shape_vec(2, vec![1., 1.]).unwrap();
+        let p2 = Array1::from_shape_vec(2, vec![2., 2.]).unwrap();
+        let distance = gauss_01.distance(p1.view(), p2.view());
+        let expected = (consts::E).powf(-20.);
+
+        assert!(((distance - expected) as f64).abs() <= f64::EPSILON);
+    }
+
+    #[test]
+    fn poly2_test() {
+        let pol_0 = KernelMethod::Polynomial(0., 2.);
+
+        let p1 = Array1::from_shape_vec(2, vec![0., 0.]).unwrap();
+        let p2 = Array1::from_shape_vec(2, vec![0., 0.]).unwrap();
+        let distance = pol_0.distance(p1.view(), p2.view());
+        let expected = 0.;
+
+        assert!(((distance - expected) as f64).abs() <= f64::EPSILON);
+
+        let p1 = Array1::from_shape_vec(2, vec![1., 1.]).unwrap();
+        let p2 = Array1::from_shape_vec(2, vec![5., 5.]).unwrap();
+        let distance = pol_0.distance(p1.view(), p2.view());
+        let expected = 100.;
+        assert!(((distance - expected) as f64).abs() <= f64::EPSILON);
+
+        let pol_2 = KernelMethod::Polynomial(2., 2.);
+
+        let p1 = Array1::from_shape_vec(2, vec![0., 0.]).unwrap();
+        let p2 = Array1::from_shape_vec(2, vec![0., 0.]).unwrap();
+        let distance = pol_2.distance(p1.view(), p2.view());
+        let expected = 4.;
+
+        assert!(((distance - expected) as f64).abs() <= f64::EPSILON);
+
+        let p1 = Array1::from_shape_vec(2, vec![1., 1.]).unwrap();
+        let p2 = Array1::from_shape_vec(2, vec![2., 2.]).unwrap();
+        let distance = pol_2.distance(p1.view(), p2.view());
+        let expected = 36.;
+
+        assert!(((distance - expected) as f64).abs() <= f64::EPSILON);
+    }
+
+    #[test]
+    fn test_kernel_dot() {
+        let input_vec: Vec<f64> = (0..100).map(|v| v as f64 * 0.1).collect();
+        let vec_to_multiply: Vec<f64> = (0..100).map(|v| v as f64 * 0.3).collect();
+        let input_arr = Array2::from_shape_vec((10, 10), input_vec).unwrap();
+        let to_multiply = Array2::from_shape_vec((10, 10), vec_to_multiply).unwrap();
+
+        // dense kernel dot
+        let mul_mat = dense_from_fn(&input_arr, &KernelMethod::Linear).dot(&to_multiply);
+        let kernel = Kernel::params()
+            .kind(KernelType::Dense)
+            .method(KernelMethod::Linear)
+            .transform(&input_arr);
+        let mul_ker = kernel.dot(&to_multiply.view());
+        assert!(kernels_almost_equal(mul_mat.view(), mul_ker.view()));
+
+        // sparse kernel dot
+        let mul_mat = sparse_from_fn(&input_arr, 3, &KernelMethod::Linear).mul(&to_multiply.view());
+        let kernel = Kernel::params()
+            .kind(KernelType::Sparse(3))
+            .method(KernelMethod::Linear)
+            .transform(&input_arr);
+        let mul_ker = kernel.dot(&to_multiply.view());
+        assert!(kernels_almost_equal(mul_mat.view(), mul_ker.view()));
+    }
+
+    #[test]
+    fn test_kernel_upper_triangle() {
+        // symmetric input, so the kernel matrix is a "cross" of ones
+        let input_vec: Vec<f64> = (0..50).map(|v| v as f64 * 0.1).collect();
+        let input_arr_1 = Array2::from_shape_vec((5, 10), input_vec.clone()).unwrap();
+        let mut input_arr_2 = Array2::from_shape_vec((5, 10), input_vec).unwrap();
+        input_arr_2.invert_axis(Axis(0));
+        let input_arr = ndarray::stack(Axis(0), &[input_arr_1.view(), input_arr_2.view()]).unwrap();
+
+        for kind in vec![KernelType::Dense, KernelType::Sparse(1)] {
+            let kernel = Kernel::params()
+                .kind(kind)
+                // Such a small value for eps makes the inner product of any
+                // two distinct points effectively zero
+                .method(KernelMethod::Gaussian(1e-5))
+                .transform(&input_arr);
+            let mut kernel_upper_triang = kernel.to_upper_triangle();
+            assert_eq!(kernel_upper_triang.len(), 45);
+            // reverse so that `pop()` yields the elements in row order
+            kernel_upper_triang.reverse();
+            for i in 0..9 {
+                for j in (i + 1)..10 {
+                    if j == (9 - i) {
+                        assert_eq!(kernel_upper_triang.pop().unwrap() as usize, 1);
+                    } else {
+                        assert_eq!(kernel_upper_triang.pop().unwrap() as usize, 0);
+                    }
+                }
+            }
+            assert!(kernel_upper_triang.is_empty());
+        }
+    }
+
+    #[test]
+    fn test_kernel_weighted_sum() {
+        let input_vec: Vec<f64> = (0..100).map(|v| v as f64 * 0.1).collect();
+        let input_arr = Array2::from_shape_vec((10, 10), input_vec).unwrap();
+        let weights = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.];
+        for kind in vec![KernelType::Dense, KernelType::Sparse(1)] {
+            let kernel = Kernel::params()
+                .kind(kind)
+                // Such a small value for eps makes the inner product of any
+                // two distinct points effectively zero
+                .method(KernelMethod::Gaussian(1e-5))
+                .transform(&input_arr);
+            for (sample, w) in input_arr.outer_iter().zip(&weights) {
+                // With this kernel, only the input samples have a non-zero
+                // inner product with the samples
+                // used to generate the matrix; in particular, each sample has
+                // an inner product equal to one only in the column
+                // corresponding to itself
+                let w_sum = kernel.weighted_sum(&weights, sample);
+                assert!(values_almost_equal(&w_sum, w));
+            }
+        }
+    }
+
+    #[test]
+    fn test_kernel_sum() {
+        let input_vec: Vec<f64> = (0..100).map(|v| v as f64 * 0.1).collect();
+        let input_arr = Array2::from_shape_vec((10, 10), input_vec).unwrap();
+
+        let method = KernelMethod::Linear;
+
+        // dense kernel sum
+        let cols_sum = dense_from_fn(&input_arr, &method).sum_axis(Axis(1));
+        let kernel = Kernel::params()
+            .kind(KernelType::Dense)
+            .method(method.clone())
+            .transform(&input_arr);
+        let kers_sum = kernel.sum();
+        assert!(arrays_almost_equal(cols_sum.view(), kers_sum.view()));
+
+        // sparse kernel sum
+        let cols_sum = sparse_from_fn(&input_arr, 3, &method)
+            .to_dense()
+            .sum_axis(Axis(1));
+        let kernel = Kernel::params()
+            .kind(KernelType::Sparse(3))
+            .method(method)
+            .transform(&input_arr);
+        let kers_sum = kernel.sum();
+        assert!(arrays_almost_equal(cols_sum.view(), kers_sum.view()));
+    }
+
+    #[test]
+    fn test_kernel_diag() {
+        let input_vec: Vec<f64> = (0..100).map(|v| v as f64 * 0.1).collect();
+        let input_arr = Array2::from_shape_vec((10, 10), input_vec).unwrap();
+
+        let method = KernelMethod::Linear;
+
+        // dense kernel diagonal
+        let input_diagonal = dense_from_fn(&input_arr, &method).diag().into_owned();
+        let kernel = Kernel::params()
+            .kind(KernelType::Dense)
+            .method(method.clone())
+            .transform(&input_arr);
+        let kers_diagonal = kernel.diagonal();
+        assert!(arrays_almost_equal(
+            input_diagonal.view(),
+            kers_diagonal.view()
+        ));
+
+        // sparse kernel diagonal
+        let input_diagonal: Vec<_> = sparse_from_fn(&input_arr, 3, &method)
+            .outer_iterator()
+            .enumerate()
+            .map(|(i, row)| *row.get(i).unwrap())
+            .collect();
+        let input_diagonal = Array1::from_shape_vec(10, input_diagonal).unwrap();
+        let kernel = Kernel::params()
+            .kind(KernelType::Sparse(3))
+            .method(method)
+            .transform(&input_arr);
+        let kers_diagonal = kernel.diagonal();
+        assert!(arrays_almost_equal(
+            input_diagonal.view(),
+            kers_diagonal.view()
+        ));
+    }
+
+    // inspired by scikit-learn's tests
+    #[test]
+    fn test_kernel_transform_from_array2() {
+        let input_vec: Vec<f64> = (0..100).map(|v| v as f64 * 0.1).collect();
+        let input = Array2::from_shape_vec((50, 2), input_vec).unwrap();
+        // checks that the transform for `Array2` builds the right kernel
+        // according to its input params
+        check_kernel_from_array2_type(&input, KernelType::Dense);
+        check_kernel_from_array2_type(&input, KernelType::Sparse(3));
+        // checks that the transform for `ArrayView2` builds the right kernel
+        // according to its input params
+        check_kernel_from_array_view_2_type(input.view(), KernelType::Dense);
+        check_kernel_from_array_view_2_type(input.view(), KernelType::Sparse(3));
+    }
+
+    // inspired by scikit-learn's tests
+    #[test]
+    fn test_kernel_transform_from_dataset() {
+        let input_vec: Vec<f64> = (0..100).map(|v| v as f64 * 0.1).collect();
+        let input_arr = Array2::from_shape_vec((50, 2), input_vec).unwrap();
+        let input = DatasetBase::new(input_arr, ());
+        // checks that the transform for a dataset builds the right kernel
+        // according to its input params
+        check_kernel_from_dataset_type(&input, KernelType::Dense);
+        check_kernel_from_dataset_type(&input, KernelType::Sparse(3));
+
+        // checks that the transform for a dataset view builds the right kernel
+        // according to its input params
+        check_kernel_from_dataset_view_type(&input.view(), KernelType::Dense);
+        check_kernel_from_dataset_view_type(&input.view(), KernelType::Sparse(3));
+    }
+
+    fn check_kernel_from_dataset_type<T: Targets>(
+        input: &DatasetBase<Array2<f64>, T>,
+        k_type: KernelType,
+    ) {
+        let methods = vec![
+            KernelMethod::Linear,
+            KernelMethod::Gaussian(0.1),
+            KernelMethod::Polynomial(1., 2.),
+        ];
+        for method in methods {
+            let kernel_ref = Kernel::new(input.records().view(), method.clone(), k_type.clone());
+            let kernel_tr = Kernel::params()
+                .kind(k_type.clone())
+                .method(method.clone())
+                .transform(input);
+            assert!(kernels_almost_equal(
+                kernel_ref.dataset,
+                kernel_tr.records.dataset
+            ));
+        }
+    }
+
+    fn check_kernel_from_dataset_view_type<'a, T: Targets>(
+        input: &'a DatasetBase<ArrayView2<'a, f64>, T>,
+        k_type: KernelType,
+    ) {
+        let methods = vec![
+            KernelMethod::Linear,
+            KernelMethod::Gaussian(0.1),
+            KernelMethod::Polynomial(1., 2.),
+        ];
+        for method in methods {
+            let kernel_ref = Kernel::new(*input.records(), method.clone(), k_type.clone());
+            let kernel_tr = Kernel::params()
+                .kind(k_type.clone())
+                .method(method.clone())
+                .transform(input);
+            assert!(kernels_almost_equal(
+                kernel_ref.dataset,
+                kernel_tr.records.dataset
+            ));
+        }
+    }
+
+    fn check_kernel_from_array2_type(input: &Array2<f64>, k_type: KernelType) {
+        let methods = vec![
+            KernelMethod::Linear,
+            KernelMethod::Gaussian(0.1),
+            KernelMethod::Polynomial(1., 2.),
+        ];
+        for method in methods {
+            let kernel_ref = Kernel::new(input.view(), method.clone(), k_type.clone());
+            let kernel_tr = Kernel::params()
+                .kind(k_type.clone())
+                .method(method.clone())
+                .transform(input);
+            assert!(kernels_almost_equal(kernel_ref.dataset, kernel_tr.dataset));
+        }
+    }
+
+    fn check_kernel_from_array_view_2_type(input: ArrayView2<f64>, k_type: KernelType) {
+        let methods = vec![
+            KernelMethod::Linear,
+            KernelMethod::Gaussian(0.1),
+            KernelMethod::Polynomial(1., 2.),
+        ];
+        for method in methods {
+            let kernel_ref = Kernel::new(input, method.clone(), k_type.clone());
+            let kernel_tr = Kernel::params()
+                .kind(k_type.clone())
+                .method(method.clone())
+                .transform(input);
+            assert!(kernels_almost_equal(kernel_ref.dataset, kernel_tr.dataset));
+        }
+    }
+
+    fn kernels_almost_equal(reference: ArrayView2<f64>, transformed: ArrayView2<f64>) -> bool {
+        for (ref_row, tr_row) in reference
+            .axis_iter(Axis(0))
+            .zip(transformed.axis_iter(Axis(0)))
+        {
+            if !arrays_almost_equal(ref_row, tr_row) {
+                return false;
+            }
+        }
+        true
+    }
+
+    fn arrays_almost_equal(reference: ArrayView1<f64>, transformed: ArrayView1<f64>) -> bool {
+        for (ref_item, tr_item) in reference.iter().zip(transformed.iter()) {
+            if !values_almost_equal(ref_item, tr_item) {
+                return false;
+            }
+        }
+        true
+    }
+
+    fn values_almost_equal(v1: &f64, v2: &f64) -> bool {
+        (v1 - v2).abs() <= f64::EPSILON
+    }
+}
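A sketch of the dataset-level `Transformer` exercised by the tests above: the kernel replaces the records while the targets are carried over. The empty `()` targets keep the example small and mirror the test setup:

```rust
use linfa::dataset::DatasetBase;
use linfa::traits::Transformer;
use linfa_kernel::{Kernel, KernelMethod};
use ndarray::array;

fn main() {
    let dataset = DatasetBase::new(array![[1., 2.], [3., 4.], [5., 6.]], ());

    // The transformed dataset holds a kernel as its records and borrows
    // the targets of the input dataset
    let kernel_dataset = Kernel::params()
        .method(KernelMethod::Linear)
        .transform(&dataset);
    assert_eq!(kernel_dataset.records().size(), 3);
}
```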
diff --git a/linfa-kernel/src/sparse.rs b/linfa-kernel/src/sparse.rs
index fa4f2a3db..1c265cb90 100644
--- a/linfa-kernel/src/sparse.rs
+++ b/linfa-kernel/src/sparse.rs
@@ -16,7 +16,6 @@ impl<F: Float> MetricPoint for Euclidean<'_, F> {
             .map(|(&a, &b)| (a - b) * (a - b))
             .sum::<F>()
             .sqrt();
-
         space::f32_metric(val.to_f32().unwrap())
     }
 }
@@ -92,3 +91,163 @@ pub fn adjacency_matrix<F: Float, D: Data<Elem = F>>(
 
     mat
 }
+
+#[cfg(test)]
+mod tests {
+
+    use super::*;
+    use ndarray::{Array2, ArrayView1};
+
+    #[test]
+    fn euclidean_distance_test() {
+        let p1 = Euclidean {
+            0: ArrayView1::from_shape(2, &[0., 0.]).unwrap(),
+        };
+        let p2 = Euclidean {
+            0: ArrayView1::from_shape(2, &[1., 1.]).unwrap(),
+        };
+
+        assert_eq!(p1.distance(&p2), 2_f32.sqrt().to_bits());
+
+        let p2 = Euclidean {
+            0: ArrayView1::from_shape(2, &[4., 3.]).unwrap(),
+        };
+
+        assert_eq!(p1.distance(&p2), 5_f32.to_bits());
+
+        let p2 = Euclidean {
+            0: ArrayView1::from_shape(2, &[0., 0.]).unwrap(),
+        };
+
+        assert_eq!(p1.distance(&p2), 0);
+    }
+
+    #[test]
+    #[allow(clippy::if_same_then_else)]
+    fn adjacency_matrix_test() {
+        //  pts 0 & 1    pts 2 & 3    pts 4 & 5    pts 6 & 7
+        // |0.| |0.1| _ |1.| |1.1| _ |2.| |2.1| _ |3.| |3.1|
+        // |0.| |0.1|   |1.| |1.1|   |2.| |2.1|   |3.| |3.1|
+        let input_mat = vec![
+            0., 0., 0.1, 0.1, 1., 1., 1.1, 1.1, 2., 2., 2.1, 2.1, 3., 3., 3.1, 3.1,
+        ];
+        let input_arr = Array2::from_shape_vec((8, 2), input_mat).unwrap();
+        // Elements in the input come in pairs of two nearby elements with consecutive indices.
+        // I expect a matrix with 16 non-zero elements, placed on the diagonal and connecting
+        // consecutive elements in pairs of two
+        let adj_mat = adjacency_matrix(&input_arr, 1);
+        assert_eq!(adj_mat.nnz(), 16);
+
+        for i in 0..8 {
+            for j in 0..8 {
+                // 8 diagonal elements
+                if i == j {
+                    assert_eq!(*adj_mat.get(i, j).unwrap() as usize, 1);
+                // (0,1), (2,3), (4,5), (6,7) -> 4 elements
+                } else if i % 2 == 0 && j == i + 1 {
+                    assert_eq!(*adj_mat.get(i, j).unwrap() as usize, 1);
+                // (1,0), (3,2), (5,4), (7,6) -> 4 elements
+                } else if j % 2 == 0 && i == j + 1 {
+                    assert_eq!(*adj_mat.get(i, j).unwrap() as usize, 1);
+                // all other 48 elements
+                } else {
+                    // Since this is the first test, we check that all these elements
+                    // are `None`, even though it follows from `adj_mat.nnz() == 16`
+                    assert_eq!(adj_mat.get(i, j), None);
+                }
+            }
+        }
+
+        // Elements in the input come in triples of three nearby elements with consecutive indices.
+        // I expect a matrix with 26 non-zero elements, placed on the diagonal and connecting
+        // consecutive elements in triples
+        let adj_mat = adjacency_matrix(&input_arr, 2);
+        assert_eq!(adj_mat.nnz(), 26);
+
+        // diagonal -> 8 non-zeros
+        for i in 0..8 {
+            assert_eq!(*adj_mat.get(i, i).unwrap() as usize, 1);
+        }
+
+        // central input elements have neighbours in the previous and next input elements
+        // -> 12 non-zeros
+        for i in 1..7 {
+            assert_eq!(*adj_mat.get(i, i + 1).unwrap() as usize, 1);
+            assert_eq!(*adj_mat.get(i, i - 1).unwrap() as usize, 1);
+        }
+
+        // the first and last elements have neighbours respectively in
+        // the next and previous two elements
+        // -> 4 non-zeros
+        assert_eq!(*adj_mat.get(0, 1).unwrap() as usize, 1);
+        assert_eq!(*adj_mat.get(0, 2).unwrap() as usize, 1);
+        assert_eq!(*adj_mat.get(7, 6).unwrap() as usize, 1);
+        assert_eq!(*adj_mat.get(7, 5).unwrap() as usize, 1);
+
+        // it follows that the third and third-to-last elements also have
+        // neighbours respectively in the first and last elements
+        // -> 2 non-zeros -> 26 total
+        assert_eq!(*adj_mat.get(2, 0).unwrap() as usize, 1);
+        assert_eq!(*adj_mat.get(5, 7).unwrap() as usize, 1);
+
+        // it follows that all other elements are `None`
+    }
+
+    #[test]
+    fn adjacency_matrix_test_2() {
+        //  pts 0 & 1    pts 2 & 3    pts 4 & 5    pts 6 & 7
+        // |0.| |3.1| _ |1.| |2.1| _ |2.| |1.1| _ |3.| |0.1|
+        // |0.| |3.1|   |1.| |2.1|   |2.| |1.1|   |3.| |0.1|
+        let input_mat = vec![
+            0., 0., 3.1, 3.1, 1., 1., 2.1, 2.1, 2., 2., 1.1, 1.1, 3., 3., 0.1, 0.1,
+        ];
+
+        let input_arr = Array2::from_shape_vec((8, 2), input_mat).unwrap();
+        let adj_mat = adjacency_matrix(&input_arr, 1);
+        assert_eq!(adj_mat.nnz(), 16);
+
+        // I expect non-zeros on the diagonal, and then:
+        // - point 0 to be neighbour of point 7 & vice versa
+        // - point 1 to be neighbour of point 6 & vice versa
+        // - point 2 to be neighbour of point 5 & vice versa
+        // - point 3 to be neighbour of point 4 & vice versa
+        for i in 0..8 {
+            assert_eq!(*adj_mat.get(i, i).unwrap() as usize, 1);
+            if i <= 3 {
+                assert_eq!(*adj_mat.get(i, 7 - i).unwrap() as usize, 1);
+                assert_eq!(*adj_mat.get(7 - i, i).unwrap() as usize, 1);
+            }
+        }
+    }
+
+    #[test]
+    #[should_panic]
+    fn sparse_panics_on_0_neighbours() {
+        let input_mat = [
+            [[0., 0.], [0.1, 0.1]],
+            [[1., 1.], [1.1, 1.1]],
+            [[2., 2.], [2.1, 2.1]],
+            [[3., 3.], [3.1, 3.1]],
+        ]
+        .concat()
+        .concat();
+        let input_arr = Array2::from_shape_vec((8, 2), input_mat).unwrap();
+        let _ = adjacency_matrix(&input_arr, 0);
+    }
+
+    #[test]
+    #[should_panic]
+    fn sparse_panics_on_n_neighbours() {
+        let input_mat = [
+            [[0., 0.], [0.1, 0.1]],
+            [[1., 1.], [1.1, 1.1]],
+            [[2., 2.], [2.1, 2.1]],
+            [[3., 3.], [3.1, 3.1]],
+        ]
+        .concat()
+        .concat();
+        let input_arr = Array2::from_shape_vec((8, 2), input_mat).unwrap();
+        let _ = adjacency_matrix(&input_arr, 8);
+    }
+}
diff --git a/linfa-svm/src/permutable_kernel.rs b/linfa-svm/src/permutable_kernel.rs
index 615c3cab7..22ae04ce3 100644
--- a/linfa-svm/src/permutable_kernel.rs
+++ b/linfa-svm/src/permutable_kernel.rs
@@ -213,7 +213,6 @@ mod tests {
             inner: KernelInner::Dense(dist.clone()),
             method: KernelMethod::Linear,
             dataset: dist.view(),
-            linear: false,
         };
 
         let mut kernel = PermutableKernel::new(&dist, targets);
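Finally, for downstream users such as the `linfa-svm` test just above, a sketch of constructing a `Kernel` by hand after this change; the matrix values are illustrative, and the point is simply that the `linear` field is gone and linearity is derived from `method`:

```rust
use linfa_kernel::{Kernel, KernelInner, KernelMethod};
use ndarray::array;

fn main() {
    let dist = array![[1., 0.], [0., 1.]];
    let kernel = Kernel {
        inner: KernelInner::Dense(dist.clone()),
        method: KernelMethod::Linear,
        dataset: dist.view(),
        // note: no `linear: bool` field any more
    };
    assert!(kernel.is_linear());
}
```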