diff --git a/NeuralNetwork.NET/APIs/DatasetLoader.cs b/NeuralNetwork.NET/APIs/DatasetLoader.cs
index e20d004..2425478 100644
--- a/NeuralNetwork.NET/APIs/DatasetLoader.cs
+++ b/NeuralNetwork.NET/APIs/DatasetLoader.cs
@@ -4,9 +4,12 @@
using JetBrains.Annotations;
using NeuralNetworkNET.APIs.Interfaces.Data;
using NeuralNetworkNET.Extensions;
+using NeuralNetworkNET.Helpers;
using NeuralNetworkNET.SupervisedLearning.Data;
using NeuralNetworkNET.SupervisedLearning.Optimization.Parameters;
using NeuralNetworkNET.SupervisedLearning.Optimization.Progress;
+using SixLabors.ImageSharp;
+using SixLabors.ImageSharp.PixelFormats;
namespace NeuralNetworkNET.APIs
{
@@ -30,12 +33,12 @@ public static class DatasetLoader
/// <summary>
/// Creates a new <see cref="ITrainingDataset"/> instance to train a network from the input collection, with the specified batch size
/// </summary>
- /// <param name="data">The source collection to use to build the training dataset</param>
+ /// <param name="data">The source collection to use to build the training dataset, where the samples will be extracted from the input <see cref="Func{T}"/> instances in parallel</param>
/// <param name="size">The desired dataset batch size</param>
[PublicAPI]
[Pure, NotNull]
[CollectionAccess(CollectionAccessType.Read)]
- public static ITrainingDataset Training([NotNull] IEnumerable<Func<(float[] X, float[] Y)>> data, int size) => BatchesCollection.From(data, size);
+ public static ITrainingDataset Training([NotNull, ItemNotNull] IEnumerable<Func<(float[] X, float[] Y)>> data, int size) => BatchesCollection.From(data, size);
/// <summary>
/// Creates a new <see cref="ITrainingDataset"/> instance to train a network from the input matrices, with the specified batch size
@@ -47,6 +50,34 @@ public static class DatasetLoader
[CollectionAccess(CollectionAccessType.Read)]
public static ITrainingDataset Training((float[,] X, float[,] Y) data, int size) => BatchesCollection.From(data, size);
+ /// <summary>
+ /// Creates a new <see cref="ITrainingDataset"/> instance to train a network from the input data, where each input sample is an image in a specified format
+ /// </summary>
+ /// <typeparam name="TPixel">The type of image pixels. It must be either <see cref="Alpha8"/>, <see cref="Rgb24"/> or <see cref="Argb32"/></typeparam>
+ /// <param name="data">A list of items, where the first element is the image path and the second is a vector with the expected outputs</param>
+ /// <param name="size">The desired dataset batch size</param>
+ /// <param name="modify">An optional <see cref="Action{T}"/> to modify each sample image when loading the dataset</param>
+ [PublicAPI]
+ [Pure, NotNull]
+ [CollectionAccess(CollectionAccessType.Read)]
+ public static ITrainingDataset Training<TPixel>([NotNull] IEnumerable<(String X, float[] Y)> data, int size, [CanBeNull] Action<IImageProcessingContext<TPixel>> modify = null)
+ where TPixel : struct, IPixel<TPixel>
+ => BatchesCollection.From(data.Select<(String X, float[] Y), Func<(float[], float[])>>(xy => () => (ImageLoader.Load<TPixel>(xy.X, modify), xy.Y)), size);
+
+ /// <summary>
+ /// Creates a new <see cref="ITrainingDataset"/> instance to train a network from the input data, where each input sample is an image in a specified format
+ /// </summary>
+ /// <typeparam name="TPixel">The type of image pixels. It must be either <see cref="Alpha8"/>, <see cref="Rgb24"/> or <see cref="Argb32"/></typeparam>
+ /// <param name="data">A list of items, where the first element is the image path and the second is a <see cref="Func{TResult}"/> returning a vector with the expected outputs</param>
+ /// <param name="size">The desired dataset batch size</param>
+ /// <param name="modify">An optional <see cref="Action{T}"/> to modify each sample image when loading the dataset</param>
+ [PublicAPI]
+ [Pure, NotNull]
+ [CollectionAccess(CollectionAccessType.Read)]
+ public static ITrainingDataset Training<TPixel>([NotNull] IEnumerable<(String X, Func<float[]> Y)> data, int size, [CanBeNull] Action<IImageProcessingContext<TPixel>> modify = null)
+ where TPixel : struct, IPixel<TPixel>
+ => BatchesCollection.From(data.Select<(String X, Func<float[]> Y), Func<(float[], float[])>>(xy => () => (ImageLoader.Load<TPixel>(xy.X, modify), xy.Y())), size);
+
#endregion
#region Validation
@@ -66,13 +97,13 @@ public static IValidationDataset Validation([NotNull] IEnumerable<(float[] X, fl
/// <summary>
/// Creates a new <see cref="IValidationDataset"/> instance to validate a network accuracy from the input collection
/// </summary>
- /// <param name="data">The source collection to use to build the validation dataset</param>
+ /// <param name="data">The source collection to use to build the validation dataset, where the samples will be extracted from the input <see cref="Func{T}"/> instances in parallel</param>
/// <param name="tolerance">The desired tolerance to test the network for convergence</param>
/// <param name="epochs">The epochs interval to consider when testing the network for convergence</param>
[PublicAPI]
[Pure, NotNull]
[CollectionAccess(CollectionAccessType.Read)]
- public static IValidationDataset Validation([NotNull] IEnumerable<Func<(float[] X, float[] Y)>> data, float tolerance = 1e-2f, int epochs = 5)
+ public static IValidationDataset Validation([NotNull, ItemNotNull] IEnumerable<Func<(float[] X, float[] Y)>> data, float tolerance = 1e-2f, int epochs = 5)
=> Validation(data.AsParallel().Select(f => f()), tolerance, epochs);
/// <summary>
@@ -86,6 +117,36 @@ public static IValidationDataset Validation([NotNull] IEnumerable<(float[] X, float[] Y)> data, float tolerance = 1e-2f, int epochs = 5)
=> new ValidationDataset(data, tolerance, epochs);
+ /// <summary>
+ /// Creates a new <see cref="IValidationDataset"/> instance to validate a network accuracy from the input collection
+ /// </summary>
+ /// <typeparam name="TPixel">The type of image pixels. It must be either <see cref="Alpha8"/>, <see cref="Rgb24"/> or <see cref="Argb32"/></typeparam>
+ /// <param name="data">A list of items, where the first element is the image path and the second is a vector with the expected outputs</param>
+ /// <param name="tolerance">The desired tolerance to test the network for convergence</param>
+ /// <param name="epochs">The epochs interval to consider when testing the network for convergence</param>
+ /// <param name="modify">An optional <see cref="Action{T}"/> to modify each sample image when loading the dataset</param>
+ [PublicAPI]
+ [Pure, NotNull]
+ [CollectionAccess(CollectionAccessType.Read)]
+ public static IValidationDataset Validation<TPixel>([NotNull] IEnumerable<(String X, float[] Y)> data, float tolerance = 1e-2f, int epochs = 5, [CanBeNull] Action<IImageProcessingContext<TPixel>> modify = null)
+ where TPixel : struct, IPixel<TPixel>
+ => Validation(data.Select<(String X, float[] Y), Func<(float[], float[])>>(xy => () => (ImageLoader.Load<TPixel>(xy.X, modify), xy.Y)).AsParallel(), tolerance, epochs);
+
+ /// <summary>
+ /// Creates a new <see cref="IValidationDataset"/> instance to validate a network accuracy from the input collection
+ /// </summary>
+ /// <typeparam name="TPixel">The type of image pixels. It must be either <see cref="Alpha8"/>, <see cref="Rgb24"/> or <see cref="Argb32"/></typeparam>
+ /// <param name="data">A list of items, where the first element is the image path and the second is a <see cref="Func{TResult}"/> returning a vector with the expected outputs</param>
+ /// <param name="tolerance">The desired tolerance to test the network for convergence</param>
+ /// <param name="epochs">The epochs interval to consider when testing the network for convergence</param>
+ /// <param name="modify">An optional <see cref="Action{T}"/> to modify each sample image when loading the dataset</param>
+ [PublicAPI]
+ [Pure, NotNull]
+ [CollectionAccess(CollectionAccessType.Read)]
+ public static IValidationDataset Validation<TPixel>([NotNull] IEnumerable<(String X, Func<float[]> Y)> data, float tolerance = 1e-2f, int epochs = 5, [CanBeNull] Action<IImageProcessingContext<TPixel>> modify = null)
+ where TPixel : struct, IPixel<TPixel>
+ => Validation(data.Select<(String X, Func<float[]> Y), Func<(float[], float[])>>(xy => () => (ImageLoader.Load<TPixel>(xy.X, modify), xy.Y())).AsParallel(), tolerance, epochs);
+
#endregion
#region Test
@@ -104,12 +165,12 @@ public static ITestDataset Test([NotNull] IEnumerable<(float[] X, float[] Y)> da
/// <summary>
/// Creates a new <see cref="ITestDataset"/> instance to test a network from the input collection
/// </summary>
- /// <param name="data">The source collection to use to build the test dataset</param>
+ /// <param name="data">The source collection to use to build the test dataset, where the samples will be extracted from the input <see cref="Func{T}"/> instances in parallel</param>
/// <param name="progress">The optional progress callback to use</param>
[PublicAPI]
[Pure, NotNull]
[CollectionAccess(CollectionAccessType.Read)]
- public static ITestDataset Test([NotNull] IEnumerable<Func<(float[] X, float[] Y)>> data, [CanBeNull] IProgress<TrainingProgressEventArgs> progress = null)
+ public static ITestDataset Test([NotNull, ItemNotNull] IEnumerable<Func<(float[] X, float[] Y)>> data, [CanBeNull] IProgress<TrainingProgressEventArgs> progress = null)
=> Test(data.AsParallel().Select(f => f()), progress);
/// <summary>
@@ -122,6 +183,34 @@ public static ITestDataset Test([NotNull] IEnumerable<(float[] X, float[] Y)> data, [CanBeNull] IProgress<TrainingProgressEventArgs> progress = null)
=> new TestDataset(data, progress);
+ /// <summary>
+ /// Creates a new <see cref="ITestDataset"/> instance to test a network from the input collection
+ /// </summary>
+ /// <typeparam name="TPixel">The type of image pixels. It must be either <see cref="Alpha8"/>, <see cref="Rgb24"/> or <see cref="Argb32"/></typeparam>
+ /// <param name="data">A list of items, where the first element is the image path and the second is a vector with the expected outputs</param>
+ /// <param name="progress">The optional progress callback to use</param>
+ /// <param name="modify">An optional <see cref="Action{T}"/> to modify each sample image when loading the dataset</param>
+ [PublicAPI]
+ [Pure, NotNull]
+ [CollectionAccess(CollectionAccessType.Read)]
+ public static ITestDataset Test<TPixel>([NotNull] IEnumerable<(String X, float[] Y)> data, [CanBeNull] IProgress<TrainingProgressEventArgs> progress = null, [CanBeNull] Action<IImageProcessingContext<TPixel>> modify = null)
+ where TPixel : struct, IPixel<TPixel>
+ => Test(data.Select<(String X, float[] Y), Func<(float[], float[])>>(xy => () => (ImageLoader.Load<TPixel>(xy.X, modify), xy.Y)).AsParallel(), progress);
+
+ /// <summary>
+ /// Creates a new <see cref="ITestDataset"/> instance to test a network from the input collection
+ /// </summary>
+ /// <typeparam name="TPixel">The type of image pixels. It must be either <see cref="Alpha8"/>, <see cref="Rgb24"/> or <see cref="Argb32"/></typeparam>
+ /// <param name="data">A list of items, where the first element is the image path and the second is a <see cref="Func{TResult}"/> returning a vector with the expected outputs</param>
+ /// <param name="progress">The optional progress callback to use</param>
+ /// <param name="modify">An optional <see cref="Action{T}"/> to modify each sample image when loading the dataset</param>
+ [PublicAPI]
+ [Pure, NotNull]
+ [CollectionAccess(CollectionAccessType.Read)]
+ public static ITestDataset Test<TPixel>([NotNull] IEnumerable<(String X, Func<float[]> Y)> data, [CanBeNull] IProgress<TrainingProgressEventArgs> progress = null, [CanBeNull] Action<IImageProcessingContext<TPixel>> modify = null)
+ where TPixel : struct, IPixel<TPixel>
+ => Test(data.Select<(String X, Func<float[]> Y), Func<(float[], float[])>>(xy => () => (ImageLoader.Load<TPixel>(xy.X, modify), xy.Y())).AsParallel(), progress);
+
#endregion
}
}
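Reviewer note: a minimal usage sketch of the image-based dataset overloads added above, assuming the `Training<TPixel>` signature shown in this file. The folder layout, batch size, resize call and one-hot helper below are illustrative only, and the resize call assumes the ImageSharp processing extensions are referenced.

```C#
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using NeuralNetworkNET.APIs;
using NeuralNetworkNET.APIs.Interfaces.Data;
using SixLabors.ImageSharp.PixelFormats;
using SixLabors.ImageSharp.Processing;

public static class ImageDatasetExample
{
    // Illustrative one-hot encoder for the expected outputs
    private static float[] OneHot(int label, int classes)
    {
        float[] y = new float[classes];
        y[label] = 1;
        return y;
    }

    public static ITrainingDataset LoadFromFolders(String root)
    {
        // Hypothetical layout: root/cats/*.jpg and root/dogs/*.jpg
        IEnumerable<(String X, float[] Y)> samples =
            Directory.EnumerateFiles(Path.Combine(root, "cats"), "*.jpg").Select(p => (p, OneHot(0, 2)))
            .Concat(Directory.EnumerateFiles(Path.Combine(root, "dogs"), "*.jpg").Select(p => (p, OneHot(1, 2))));

        // Each image is decoded as Rgb24 (3 channels) and resized while the dataset is being built
        return DatasetLoader.Training<Rgb24>(samples, 128, x => x.Resize(32, 32));
    }
}
```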
diff --git a/NeuralNetwork.NET/APIs/Structs/TensorInfo.cs b/NeuralNetwork.NET/APIs/Structs/TensorInfo.cs
index 4e109cc..160cd92 100644
--- a/NeuralNetwork.NET/APIs/Structs/TensorInfo.cs
+++ b/NeuralNetwork.NET/APIs/Structs/TensorInfo.cs
@@ -3,6 +3,7 @@
using System;
using System.Diagnostics;
using System.Runtime.CompilerServices;
+using SixLabors.ImageSharp.PixelFormats;
namespace NeuralNetworkNET.APIs.Structs
{
@@ -67,30 +68,38 @@ internal TensorInfo(int height, int width, int channels)
}
/// <summary>
- /// Creates a new <see cref="TensorInfo"/> instance for an RGB image
+ /// Creates a new <see cref="TensorInfo"/> instance for a linear network layer, without keeping track of spatial info
/// </summary>
- /// <param name="height">The height of the input image</param>
- /// <param name="width">The width of the input image</param>
+ /// <param name="size">The input size</param>
[PublicAPI]
[Pure]
- public static TensorInfo CreateForRgbImage(int height, int width) => new TensorInfo(height, width, 3);
+ public static TensorInfo Linear(int size) => new TensorInfo(1, 1, size);
/// <summary>
- /// Creates a new <see cref="TensorInfo"/> instance for a grayscale image
+ /// Creates a new <see cref="TensorInfo"/> instance for an image with a user-defined pixel type
/// </summary>
+ /// <typeparam name="TPixel">The type of image pixels. It must be either <see cref="Alpha8"/>, <see cref="Rgb24"/> or <see cref="Argb32"/></typeparam>
/// <param name="height">The height of the input image</param>
/// <param name="width">The width of the input image</param>
[PublicAPI]
[Pure]
- public static TensorInfo CreateForGrayscaleImage(int height, int width) => new TensorInfo(height, width, 1);
+ public static TensorInfo Image<TPixel>(int height, int width) where TPixel : struct, IPixel<TPixel>
+ {
+ if (typeof(TPixel) == typeof(Alpha8)) return new TensorInfo(height, width, 1);
+ if (typeof(TPixel) == typeof(Rgb24)) return new TensorInfo(height, width, 3);
+ if (typeof(TPixel) == typeof(Argb32)) return new TensorInfo(height, width, 4);
+ throw new InvalidOperationException($"The {typeof(TPixel).Name} pixel format isn't currently supported");
+ }
/// <summary>
- /// Creates a new <see cref="TensorInfo"/> instance for a linear network layer, without keeping track of spatial info
+ /// Creates a new <see cref="TensorInfo"/> instance for a volume with a custom 3D shape
/// </summary>
- /// <param name="size">The input size</param>
+ /// <param name="height">The input volume height</param>
+ /// <param name="width">The input volume width</param>
+ /// <param name="channels">The number of channels in the input volume</param>
[PublicAPI]
[Pure]
- public static TensorInfo CreateLinear(int size) => new TensorInfo(1, 1, size);
+ public static TensorInfo Volume(int height, int width, int channels) => new TensorInfo(height, width, channels);
#endregion
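Reviewer note: a quick sketch of the renamed `TensorInfo` factories for reference; the sizes below are arbitrary.

```C#
using NeuralNetworkNET.APIs.Structs;
using SixLabors.ImageSharp.PixelFormats;

public static class TensorInfoExamples
{
    public static void Shapes()
    {
        TensorInfo gray = TensorInfo.Image<Alpha8>(28, 28);  // 28 x 28 x 1, replaces CreateForGrayscaleImage(28, 28)
        TensorInfo rgb = TensorInfo.Image<Rgb24>(32, 32);    // 32 x 32 x 3, replaces CreateForRgbImage(32, 32)
        TensorInfo argb = TensorInfo.Image<Argb32>(32, 32);  // 32 x 32 x 4, newly supported
        TensorInfo linear = TensorInfo.Linear(250);          // 1 x 1 x 250, replaces CreateLinear(250)
        TensorInfo volume = TensorInfo.Volume(58, 58, 3);    // arbitrary 3D input shape
    }
}
```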
diff --git a/NeuralNetwork.NET/Extensions/MiscExtensions.cs b/NeuralNetwork.NET/Extensions/MiscExtensions.cs
index d3b2c76..ac93765 100644
--- a/NeuralNetwork.NET/Extensions/MiscExtensions.cs
+++ b/NeuralNetwork.NET/Extensions/MiscExtensions.cs
@@ -2,6 +2,7 @@
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
+using System.Text;
using System.Threading.Tasks;
using JetBrains.Annotations;
@@ -20,8 +21,10 @@ public static class MiscExtensions
/// <param name="item">The item to cast</param>
[Pure, NotNull]
[MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static TOut To<TIn, TOut>([NotNull] this TIn item) where TOut : class, TIn => item as TOut
- ?? throw new InvalidOperationException($"The item of type {typeof(TIn)} is a {item.GetType()} instance and can't be cast to {typeof(TOut)}");
+ public static TOut To<TIn, TOut>([NotNull] this TIn item)
+ where TIn : class
+ where TOut : TIn
+ => (TOut)item;
/// <summary>
/// Returns the maximum value between two numbers
@@ -138,5 +141,20 @@ public static void AssertCompleted(in this ParallelLoopResult result)
{
if (!result.IsCompleted) throw new InvalidOperationException("Error while performing the parallel loop");
}
+
+ /// <summary>
+ /// Removes the left spaces from the input verbatim string
+ /// </summary>
+ /// <param name="text">The string to trim</param>
+ [Pure, NotNull]
+ public static String TrimVerbatim([NotNull] this String text)
+ {
+ String[] lines = text.Split(new[] { Environment.NewLine }, StringSplitOptions.None);
+ return lines.Aggregate(new StringBuilder(), (b, s) =>
+ {
+ b.AppendLine(s.Trim());
+ return b;
+ }).ToString();
+ }
}
}
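Reviewer note: a small sketch of how the two helpers touched above behave. `To<TIn, TOut>` is now a plain cast, so an invalid conversion surfaces as an `InvalidCastException` rather than the previous `InvalidOperationException`; `TrimVerbatim` strips the indentation that verbatim string literals pick up from the source file. The strings below are illustrative.

```C#
using System;
using NeuralNetworkNET.Extensions;

public static class MiscExtensionsExamples
{
    public static void Demo()
    {
        // Downcast through the generic helper
        object boxed = "hello";
        String text = boxed.To<object, String>();

        // Remove the leading spaces copied into a verbatim literal by the source indentation
        String script = @"line one
                          line two";
        Console.WriteLine(script.TrimVerbatim()); // "line one" + NewLine + "line two" + NewLine
    }
}
```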
diff --git a/NeuralNetwork.NET/Helpers/ImageLoader.cs b/NeuralNetwork.NET/Helpers/ImageLoader.cs
new file mode 100644
index 0000000..4881a87
--- /dev/null
+++ b/NeuralNetwork.NET/Helpers/ImageLoader.cs
@@ -0,0 +1,90 @@
+using System;
+using JetBrains.Annotations;
+using SixLabors.ImageSharp;
+using SixLabors.ImageSharp.Advanced;
+using SixLabors.ImageSharp.PixelFormats;
+
+namespace NeuralNetworkNET.Helpers
+{
+ /// <summary>
+ /// A static class with some helper methods to quickly load a sample from a target image file
+ /// </summary>
+ internal static class ImageLoader
+ {
+ /// <summary>
+ /// Loads the target image and applies the requested changes, then converts it to a dataset sample
+ /// </summary>
+ /// <param name="path">The path of the image to load</param>
+ /// <param name="modify">The optional changes to apply to the image</param>
+ [Pure, NotNull]
+ public static float[] Load<TPixel>([NotNull] String path, [CanBeNull] Action<IImageProcessingContext<TPixel>> modify) where TPixel : struct, IPixel<TPixel>
+ {
+ using (Image<TPixel> image = Image.Load<TPixel>(path))
+ {
+ if (modify != null) image.Mutate(modify);
+ if (typeof(TPixel) == typeof(Alpha8)) return Load(image as Image<Alpha8>);
+ if (typeof(TPixel) == typeof(Rgb24)) return Load(image as Image<Rgb24>);
+ if (typeof(TPixel) == typeof(Argb32)) return Load(image as Image<Argb32>);
+ throw new InvalidOperationException($"The {typeof(TPixel).Name} pixel format isn't currently supported");
+ }
+ }
+
+ #region Loaders
+
+ // Loads an ARGB32 image
+ [Pure, NotNull]
+ private static unsafe float[] Load(Image<Argb32> image)
+ {
+ int resolution = image.Height * image.Width;
+ float[] sample = new float[resolution * 4];
+ fixed (Argb32* p0 = &image.DangerousGetPinnableReferenceToPixelBuffer())
+ fixed (float* psample = sample)
+ {
+ for (int i = 0; i < resolution; i++)
+ {
+ Argb32* pxy = p0 + i;
+ psample[i] = pxy->A / 255f;
+ psample[i + resolution] = pxy->R / 255f;
+ psample[i + 2 * resolution] = pxy->G / 255f;
+ psample[i + 3 * resolution] = pxy->B / 255f;
+ }
+ }
+ return sample;
+ }
+
+ // Loads an RGB24 image
+ [Pure, NotNull]
+ private static unsafe float[] Load(Image<Rgb24> image)
+ {
+ int resolution = image.Height * image.Width;
+ float[] sample = new float[resolution * 3];
+ fixed (Rgb24* p0 = &image.DangerousGetPinnableReferenceToPixelBuffer())
+ fixed (float* psample = sample)
+ {
+ for (int i = 0; i < resolution; i++)
+ {
+ Rgb24* pxy = p0 + i;
+ psample[i] = pxy->R / 255f;
+ psample[i + resolution] = pxy->G / 255f;
+ psample[i + 2 * resolution] = pxy->B / 255f;
+ }
+ }
+ return sample;
+ }
+
+ // Loads an ALPHA8 image
+ [Pure, NotNull]
+ private static unsafe float[] Load(Image<Alpha8> image)
+ {
+ int resolution = image.Height * image.Width;
+ float[] sample = new float[resolution];
+ fixed (Alpha8* p0 = &image.DangerousGetPinnableReferenceToPixelBuffer())
+ fixed (float* psample = sample)
+ for (int i = 0; i < resolution; i++)
+ psample[i] = p0[i].PackedValue / 255f;
+ return sample;
+ }
+
+ #endregion
+ }
+}
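Reviewer note: the loaders above write samples in planar channel order (for `Rgb24`, all R values first, then all G, then all B), which matches the channel counts returned by `TensorInfo.Image<TPixel>`. A short sketch of how a single value can be located; the helper is illustrative and not part of this PR.

```C#
using System;

public static class ImageSampleLayout
{
    // Index of the value for pixel (x, y) in the given channel, for a sample
    // produced by ImageLoader.Load from a height*width image
    public static int IndexOf(int x, int y, int channel, int height, int width)
        => channel * height * width + y * width + x;

    public static void Demo()
    {
        const int height = 32, width = 32;
        float[] sample = new float[height * width * 3]; // e.g. a loaded Rgb24 image
        int g = IndexOf(x: 5, y: 10, channel: 1, height: height, width: width);
        Console.WriteLine($"The G value of pixel (5, 10) is sample[{g}] = {sample[g]}");
    }
}
```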
diff --git a/NeuralNetwork.NET/Helpers/Imaging/ImageLoader.cs b/NeuralNetwork.NET/Helpers/Imaging/ImageLoader.cs
deleted file mode 100644
index 4db732d..0000000
--- a/NeuralNetwork.NET/Helpers/Imaging/ImageLoader.cs
+++ /dev/null
@@ -1,190 +0,0 @@
-using System;
-using System.IO;
-using System.Threading.Tasks;
-using JetBrains.Annotations;
-using NeuralNetworkNET.APIs.Structs;
-using NeuralNetworkNET.Extensions;
-using SixLabors.ImageSharp;
-using SixLabors.ImageSharp.Advanced;
-using SixLabors.ImageSharp.PixelFormats;
-using SixLabors.ImageSharp.Processing;
-using SixLabors.Primitives;
-
-namespace NeuralNetworkNET.Helpers.Imaging
-{
- ///
- /// A static class with some helper methods to quickly load a sample from a target image file
- ///
- public static class ImageLoader
- {
- ///
- /// Loads the target image and applies the requested changes, then converts it to a dataset sample
- ///
- /// The path of the image to load
- /// The optional changes to apply to the image
- [PublicAPI]
- [Pure, NotNull]
- public static unsafe float[] LoadRgb32ImageSample([NotNull] String path, [CanBeNull] Action> modify = null)
- {
- using (Image image = Image.Load(path))
- {
- if (modify != null) image.Mutate(modify);
- int resolution = image.Height * image.Width;
- float[] sample = new float[resolution * 3];
- fixed (Rgb24* p0 = &image.DangerousGetPinnableReferenceToPixelBuffer())
- fixed (float* psample = sample)
- for (int i = 0; i < resolution; i++)
- {
- Rgb24* pxy = p0 + i;
- psample[i] = pxy->R / 255f;
- psample[i + resolution] = pxy->G / 255f;
- psample[i + 2 * resolution] = pxy->B / 255f;
- }
- return sample;
- }
- }
-
- ///
- /// Loads the target image, converts it to grayscale and applies the requested changes, then converts it to a dataset sample
- ///
- /// The path of the image to load
- /// The optional changes to apply to the image
- [PublicAPI]
- [Pure, NotNull]
- public static unsafe float[] LoadGrayscaleImageSample([NotNull] String path, [CanBeNull] Action> modify = null)
- {
- using (Image image = Image.Load(path))
- {
- image.Mutate(x => x.Grayscale());
- if (modify != null) image.Mutate(modify);
- int resolution = image.Height * image.Width;
- float[] sample = new float[resolution];
- fixed (Rgb24* p0 = &image.DangerousGetPinnableReferenceToPixelBuffer())
- fixed (float* psample = sample)
- for (int i = 0; i < resolution; i++)
- psample[i] = p0[i].R / 255f;
- return sample;
- }
- }
-
- ///
- /// Saves an image in the target path representing the input weights and biases
- ///
- /// The target path for the image
- /// The input weights
- /// The input biases
- /// The desired image scaling to use
- [PublicAPI]
- public static unsafe void ExportFullyConnectedWeights([NotNull] String path, [NotNull] float[,] weights, [NotNull] float[] biases, ImageScaling scaling)
- {
- int
- h = weights.GetLength(0),
- w = weights.GetLength(1);
- if (biases.Length != w) throw new ArgumentException("The biases length must match the width of the weights matrix");
- using (Image image = new Image(w, h + 1))
- {
- fixed (Rgb24* p0 = &image.DangerousGetPinnableReferenceToPixelBuffer())
- {
- // Weights
- fixed (float* pw = weights)
- {
- (float min, float max) = new Span(pw, h * w).MinMax(float.MinValue, float.MaxValue);
- for (int i = 0; i < h; i++)
- {
- int offset = i * w;
- for (int j = 0; j < w; j++)
- {
- float
- value = pw[offset + j],
- normalized = (value - min) * 255 / (max - min);
- byte hex = (byte)normalized;
- p0[j * h + i] = new Rgb24(hex, hex, hex);
- }
- }
- }
-
- // Biases
- fixed (float* pb = biases)
- {
- (float min, float max) = new Span(pb, w).MinMax(float.MinValue, float.MaxValue);
- for (int i = 0; i < w; i++)
- {
- float
- value = pb[i],
- normalized = (value - min) * 255 / (max - min);
- byte hex = (byte)normalized;
- p0[h * w + i] = new Rgb24(hex, hex, hex);
- }
- }
- }
- image.UpdateScaling(scaling);
- using (FileStream stream = File.OpenWrite(path.EndsWith(".png") ? path : $"{path}.png"))
- image.Save(stream, ImageFormats.Png);
- }
- }
-
- ///
- /// Saves aan image for each kernel to the target directory
- ///
- /// The directory to use to store the images
- /// The input kernels
- /// The size info for the input kernels
- /// The desired image scaling to use
- [PublicAPI]
- public static unsafe void ExportGrayscaleKernels([NotNull] String directory, [NotNull] float[,] kernels, TensorInfo kernelsInfo, ImageScaling scaling)
- {
- // Setup
- Directory.CreateDirectory(directory);
- int
- h = kernels.GetLength(0),
- w = kernels.GetLength(1);
- if (kernelsInfo.Channels != 1) throw new ArgumentException("Only a 2D kernel can be exported as an image with this method");
-
- // Export a single kernel matrix (one per weights row)
- void Kernel(int k)
- {
- using (Image image = new Image(kernelsInfo.Width, kernelsInfo.Height))
- {
- fixed (Rgb24* p0 = &image.DangerousGetPinnableReferenceToPixelBuffer())
- fixed (float* pw = kernels)
- {
- float* pwoffset = pw + k * w;
- (float min, float max) = new Span(pwoffset, kernelsInfo.SliceSize).MinMax(float.MinValue, float.MaxValue);
- for (int i = 0; i < kernelsInfo.Height; i++)
- {
- int offset = i * kernelsInfo.Width;
- for (int j = 0; j < kernelsInfo.Width; j++)
- {
- float
- value = pwoffset[offset + j],
- normalized = (value - min) * 255 / (max - min);
- byte hex = (byte)normalized;
- p0[j * kernelsInfo.Height + i] = new Rgb24(hex, hex, hex);
- }
- }
- }
- image.UpdateScaling(scaling);
- using (FileStream stream = File.OpenWrite(Path.Combine(directory, $"{k}.png")))
- image.Save(stream, ImageFormats.Png);
- }
- }
-
- // Save all the kernels in parallel
- Parallel.For(0, h, Kernel).AssertCompleted();
- }
-
- // Resizes an image with the NN samples and the desired scaling mode
- private static void UpdateScaling([NotNull] this Image image, ImageScaling scaling) where TPixel : struct, IPixel
- {
- if (scaling == ImageScaling.Native) return;
- const int threshold = 2000;
- Size size = new Size(image.Width, image.Height);
- if (size.Height > threshold || size.Width > threshold) return; // Skip if the final size is already large enough
- int
- max = size.Height.Max(size.Width),
- scale = threshold / max;
- if (scale == 1) return;
- image.Mutate(x => x.Resize(new ResizeOptions { Size = new Size(size.Width * scale, size.Height * scale), Sampler = new NearestNeighborResampler() }));
- }
- }
-}
diff --git a/NeuralNetwork.NET/Helpers/Imaging/ImageScaling.cs b/NeuralNetwork.NET/Helpers/Imaging/ImageScaling.cs
deleted file mode 100644
index 1cd60f6..0000000
--- a/NeuralNetwork.NET/Helpers/Imaging/ImageScaling.cs
+++ /dev/null
@@ -1,18 +0,0 @@
-namespace NeuralNetworkNET.Helpers.Imaging
-{
- ///
- /// Indicates the quality used to export the weights of a neural network as an image
- ///
- public enum ImageScaling
- {
- ///
- /// The weights are exported using a 1:1 pixel size ratio
- ///
- Native,
-
- ///
- /// The weights are upscaled in size when exported as images
- ///
- HighQuality
- }
-}
\ No newline at end of file
diff --git a/NeuralNetwork.NET/Helpers/TensorMap.cs b/NeuralNetwork.NET/Helpers/TensorMap.cs
deleted file mode 100644
index 7f8b097..0000000
--- a/NeuralNetwork.NET/Helpers/TensorMap.cs
+++ /dev/null
@@ -1,104 +0,0 @@
-using JetBrains.Annotations;
-using NeuralNetworkNET.APIs.Interfaces;
-using NeuralNetworkNET.APIs.Structs;
-using System;
-using System.Collections.Generic;
-using System.Linq;
-
-namespace NeuralNetworkNET.Helpers
-{
- ///
- /// A simple map that stores references to instances while training or using a network
- ///
- internal sealed class TensorMap : IDisposable
- {
- #region IDisposable
-
- ~TensorMap() => Dispose();
-
- ///
- void IDisposable.Dispose()
- {
- GC.SuppressFinalize(this);
- Dispose();
- }
-
- // Frees the allocated tensors
- private void Dispose()
- {
- foreach (Tensor tensor in new[] { ActivityMap, ActivationMap, DeltaMap }.SelectMany(d => d.Values))
- tensor.Free();
- ActivityMap.Clear();
- ActivationMap.Clear();
- DeltaMap.Clear();
- }
-
- #endregion
-
- // The Z tensors
- [NotNull]
- private readonly IDictionary ActivityMap = new Dictionary();
-
- // The A tensors
- [NotNull]
- private readonly IDictionary ActivationMap = new Dictionary();
-
- // The dy tensors
- [NotNull]
- private readonly IDictionary DeltaMap = new Dictionary();
-
- ///
- /// Gets or sets a for the given network layer and data type
- ///
- /// The source instance for the target
- /// The value for the target
- public Tensor this[INetworkLayer layer, TensorType type]
- {
- [Pure]
- get
- {
- switch (type)
- {
- case TensorType.Activity: return ActivityMap[layer];
- case TensorType.Activation: return ActivationMap[layer];
- case TensorType.Delta: return DeltaMap[layer];
- default: throw new ArgumentOutOfRangeException(nameof(type), "Invalid data type requested");
- }
- }
- set
- {
- IDictionary target;
- switch (type)
- {
- case TensorType.Activity: target = ActivityMap; break;
- case TensorType.Activation: target = ActivationMap; break;
- case TensorType.Delta: target = DeltaMap; break;
- default: throw new ArgumentOutOfRangeException(nameof(type), "Invalid data type requested");
- }
- if (target.TryGetValue(layer, out Tensor old)) old.Free();
- target[layer] = value;
- }
- }
- }
-
- ///
- /// Indicates the type of any given
- ///
- internal enum TensorType
- {
- ///
- /// The activity of a network layer, the output before the activation function
- ///
- Activity,
-
- ///
- /// The activation of a network layer, the output with the activation function applied to it
- ///
- Activation,
-
- ///
- /// The error delta for the outputs of a given network layer
- ///
- Delta
- }
-}
diff --git a/NeuralNetwork.NET/Helpers/TrainingProgressExportHelpers.cs b/NeuralNetwork.NET/Helpers/TrainingProgressExportHelpers.cs
index 8e058a2..9a46bfe 100644
--- a/NeuralNetwork.NET/Helpers/TrainingProgressExportHelpers.cs
+++ b/NeuralNetwork.NET/Helpers/TrainingProgressExportHelpers.cs
@@ -5,6 +5,7 @@
using JetBrains.Annotations;
using NeuralNetworkNET.APIs.Enums;
using NeuralNetworkNET.APIs.Results;
+using NeuralNetworkNET.Extensions;
namespace NeuralNetworkNET.Helpers
{
@@ -20,7 +21,7 @@ public static class TrainingProgressExportHelpers
plt.ylabel(""$YLABEL$"")
plt.xlabel(""Epoch"")
plt.plot(x)
- plt.show()".Split(new[] { Environment.NewLine }, StringSplitOptions.None).Aggregate(String.Empty, (s, l) => $"{s}{l.Trim()}{Environment.NewLine}");
+ plt.show()".TrimVerbatim();
// The custom 4-spaces indentation for the data points (the \t character is not consistent across different editors)
private const String Tab = " ";
diff --git a/NeuralNetwork.NET/Networks/Layers/Cpu/FullyConnectedLayer.cs b/NeuralNetwork.NET/Networks/Layers/Cpu/FullyConnectedLayer.cs
index cf0f201..ff969d2 100644
--- a/NeuralNetwork.NET/Networks/Layers/Cpu/FullyConnectedLayer.cs
+++ b/NeuralNetwork.NET/Networks/Layers/Cpu/FullyConnectedLayer.cs
@@ -22,12 +22,12 @@ internal class FullyConnectedLayer : WeightedLayerBase
public override LayerType LayerType { get; } = LayerType.FullyConnected;
public FullyConnectedLayer(in TensorInfo input, int neurons, ActivationFunctionType activation, WeightsInitializationMode weightsMode, BiasInitializationMode biasMode)
- : base(input, TensorInfo.CreateLinear(neurons),
+ : base(input, TensorInfo.Linear(neurons),
WeightsProvider.NewFullyConnectedWeights(input, neurons, weightsMode),
WeightsProvider.NewBiases(neurons, biasMode), activation) { }
public FullyConnectedLayer(in TensorInfo input, int neurons, [NotNull] float[] weights, [NotNull] float[] biases, ActivationFunctionType activation)
- : base(input, TensorInfo.CreateLinear(neurons), weights, biases, activation)
+ : base(input, TensorInfo.Linear(neurons), weights, biases, activation)
{
if (neurons != biases.Length)
throw new ArgumentException("The biases vector must have the same size as the number of output neurons");
diff --git a/NeuralNetwork.NET/NeuralNetwork.NET.csproj b/NeuralNetwork.NET/NeuralNetwork.NET.csproj
index 0dd8f28..9faeb74 100644
--- a/NeuralNetwork.NET/NeuralNetwork.NET.csproj
+++ b/NeuralNetwork.NET/NeuralNetwork.NET.csproj
@@ -3,7 +3,7 @@
netstandard2.0
NeuralNetworkNET
- 1.3.0
+ 1.4.0
Sergio Pedri
Sergio Pedri
An easy to use CNN library, built from scratch in C# 7.2 for .NET Standard 2.0, with native memory management for better performance and additional GPU support with cuDNN
@@ -14,7 +14,7 @@
cnn, neuralnetwork, deeplearning, ai, cuda, csharp, gpu, net, netstandard
true
true
- Custom accuracy testing APIs added, code refactoring and improvements
+ New APIs to load a dataset from images, minor bug fixes and improvements
@@ -33,7 +33,7 @@
PackageReference
true
true
- 1.3.0.0
+ 1.4.0.0
diff --git a/README.md b/README.md
index 537a053..d826b43 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,7 @@ The **NeuralNetwork.NET** library exposes easy to use classes and methods to cre
The first step is to create a custom network structure. Here is an example with a sequential network (a stack of layers):
```C#
-INeuralNetwork network = NetworkManager.NewSequential(TensorInfo.CreateForGrayscaleImage(28, 28),
+INeuralNetwork network = NetworkManager.NewSequential(TensorInfo.Image<Alpha8>(28, 28),
NetworkLayers.Convolutional((5, 5), 20, ActivationFunctionType.Identity),
NetworkLayers.Pooling(ActivationFunctionType.LeakyReLU),
NetworkLayers.Convolutional((3, 3), 40, ActivationFunctionType.Identity),
diff --git a/Samples/DigitsCudaTest/Program.cs b/Samples/DigitsCudaTest/Program.cs
index e332d0d..a17f7f9 100644
--- a/Samples/DigitsCudaTest/Program.cs
+++ b/Samples/DigitsCudaTest/Program.cs
@@ -7,50 +7,47 @@
using NeuralNetworkNET.APIs;
using NeuralNetworkNET.APIs.Enums;
using NeuralNetworkNET.APIs.Interfaces;
+using NeuralNetworkNET.APIs.Interfaces.Data;
using NeuralNetworkNET.APIs.Results;
using NeuralNetworkNET.APIs.Structs;
using NeuralNetworkNET.Helpers;
using NeuralNetworkNET.Networks.Activations;
using NeuralNetworkNET.SupervisedLearning.Optimization.Progress;
+using SixLabors.ImageSharp.PixelFormats;
namespace DigitsCudaTest
{
- class Program
+ public class Program
{
- static async Task Main()
+ public static async Task Main()
{
- // Parse the dataset and create the network
- (var training, var test) = DataParser.LoadDatasets();
- INeuralNetwork network = NetworkManager.NewSequential(TensorInfo.CreateForGrayscaleImage(28, 28),
- CuDnnNetworkLayers.Convolutional(ConvolutionInfo.Default, (5, 5), 20, ActivationFunctionType.LeakyReLU),
+ // Create the network
+ INeuralNetwork network = NetworkManager.NewSequential(TensorInfo.Image(28, 28),
CuDnnNetworkLayers.Convolutional(ConvolutionInfo.Default, (5, 5), 20, ActivationFunctionType.Identity),
CuDnnNetworkLayers.Pooling(PoolingInfo.Default, ActivationFunctionType.LeakyReLU),
- CuDnnNetworkLayers.Convolutional(ConvolutionInfo.Default, (3, 3), 40, ActivationFunctionType.LeakyReLU),
CuDnnNetworkLayers.Convolutional(ConvolutionInfo.Default, (3, 3), 40, ActivationFunctionType.Identity),
CuDnnNetworkLayers.Pooling(PoolingInfo.Default, ActivationFunctionType.LeakyReLU),
CuDnnNetworkLayers.FullyConnected(125, ActivationFunctionType.LeCunTanh),
CuDnnNetworkLayers.FullyConnected(64, ActivationFunctionType.LeCunTanh),
CuDnnNetworkLayers.Softmax(10));
- // Setup and start the training
+ // Prepare the dataset
+ (var training, var test) = DataParser.LoadDatasets();
+ ITrainingDataset trainingData = DatasetLoader.Training(training, 400); // Batches of 400 samples
+ ITestDataset testData = DatasetLoader.Test(test, new Progress<TrainingProgressEventArgs>(p =>
+ {
+ Printf($"Epoch {p.Iteration}, cost: {p.Result.Cost}, accuracy: {p.Result.Accuracy}");
+ }));
+
+ // Setup and network training
CancellationTokenSource cts = new CancellationTokenSource();
Console.CancelKeyPress += (s, e) => cts.Cancel();
TrainingSessionResult result = await NetworkManager.TrainNetworkAsync(network,
- DatasetLoader.Training(training, 400),
+ trainingData,
TrainingAlgorithms.Adadelta(),
20, 0.5f,
- new Progress<BatchProgress>(p =>
- {
- Console.SetCursorPosition(0, Console.CursorTop);
- int n = (int)(p.Percentage * 32 / 100);
- char[] c = new char[32];
- for (int i = 0; i < 32; i++) c[i] = i <= n ? '=' : ' ';
- Console.Write($"[{new String(c)}] ");
- }),
- testDataset: DatasetLoader.Test(test, new Progress<TrainingProgressEventArgs>(p =>
- {
- Printf($"Epoch {p.Iteration}, cost: {p.Result.Cost}, accuracy: {p.Result.Accuracy}");
- })), token: cts.Token);
+ new Progress<BatchProgress>(TrackBatchProgress),
+ testDataset: testData, token: cts.Token);
// Save the training reports
String
@@ -75,5 +72,15 @@ private static void Printf(String text)
Console.ForegroundColor = ConsoleColor.White;
Console.Write($"{text}\n");
}
+
+ // Training monitor
+ private static void TrackBatchProgress(BatchProgress progress)
+ {
+ Console.SetCursorPosition(0, Console.CursorTop);
+ int n = (int)(progress.Percentage * 32 / 100); // 32 is the number of progress '=' characters to display
+ char[] c = new char[32];
+ for (int i = 0; i < 32; i++) c[i] = i <= n ? '=' : ' ';
+ Console.Write($"[{new String(c)}] ");
+ }
}
}
diff --git a/Samples/DigitsTest/Program.cs b/Samples/DigitsTest/Program.cs
index 8f7bede..a22480d 100644
--- a/Samples/DigitsTest/Program.cs
+++ b/Samples/DigitsTest/Program.cs
@@ -3,39 +3,41 @@
using MnistDatasetToolkit;
using NeuralNetworkNET.APIs;
using NeuralNetworkNET.APIs.Interfaces;
+using NeuralNetworkNET.APIs.Interfaces.Data;
using NeuralNetworkNET.APIs.Results;
using NeuralNetworkNET.APIs.Structs;
using NeuralNetworkNET.Networks.Activations;
using NeuralNetworkNET.SupervisedLearning.Optimization.Progress;
+using SixLabors.ImageSharp.PixelFormats;
namespace DigitsTest
{
- class Program
+ public class Program
{
- static async Task Main()
+ public static async Task Main()
{
- (var training, var test) = DataParser.LoadDatasets();
- INeuralNetwork network = NetworkManager.NewSequential(TensorInfo.CreateForGrayscaleImage(28, 28),
+ // Create the network
+ INeuralNetwork network = NetworkManager.NewSequential(TensorInfo.Image<Alpha8>(28, 28),
NetworkLayers.Convolutional((5, 5), 20, ActivationFunctionType.Identity),
NetworkLayers.Pooling(ActivationFunctionType.LeakyReLU),
NetworkLayers.FullyConnected(100, ActivationFunctionType.LeCunTanh),
NetworkLayers.Softmax(10));
+
+ // Prepare the dataset
+ (var training, var test) = DataParser.LoadDatasets();
+ ITrainingDataset trainingData = DatasetLoader.Training(training, 100); // Batches of 100 samples
+ ITestDataset testData = DatasetLoader.Test(test, new Progress<TrainingProgressEventArgs>(p =>
+ {
+ Printf($"Epoch {p.Iteration}, cost: {p.Result.Cost}, accuracy: {p.Result.Accuracy}");
+ }));
+
+ // Train the network
TrainingSessionResult result = await NetworkManager.TrainNetworkAsync(network,
- DatasetLoader.Training(training, 100),
- TrainingAlgorithms.Adadelta(),
+ trainingData,
+ TrainingAlgorithms.Adadelta(),
60, 0.5f,
- new Progress<BatchProgress>(p =>
- {
- Console.SetCursorPosition(0, Console.CursorTop);
- int n = (int)(p.Percentage * 32 / 100);
- char[] c = new char[32];
- for (int i = 0; i < 32; i++) c[i] = i <= n ? '=' : ' ';
- Console.Write($"[{new String(c)}] ");
- }),
- testDataset: DatasetLoader.Test(test, new Progress<TrainingProgressEventArgs>(p =>
- {
- Printf($"Epoch {p.Iteration}, cost: {p.Result.Cost}, accuracy: {p.Result.Accuracy}");
- })));
+ new Progress<BatchProgress>(TrackBatchProgress),
+ testDataset: testData);
Printf($"Stop reason: {result.StopReason}, elapsed time: {result.TrainingTime}");
Console.ReadKey();
}
@@ -48,5 +50,15 @@ private static void Printf(String text)
Console.ForegroundColor = ConsoleColor.White;
Console.Write($"{text}\n");
}
+
+ // Training monitor
+ private static void TrackBatchProgress(BatchProgress progress)
+ {
+ Console.SetCursorPosition(0, Console.CursorTop);
+ int n = (int)(progress.Percentage * 32 / 100); // 32 is the number of progress '=' characters to display
+ char[] c = new char[32];
+ for (int i = 0; i < 32; i++) c[i] = i <= n ? '=' : ' ';
+ Console.Write($"[{new String(c)}] ");
+ }
}
}
diff --git a/Unit/NeuralNetwork.NET.Cuda.Unit/CuDnnInceptionLayerTest.cs b/Unit/NeuralNetwork.NET.Cuda.Unit/CuDnnInceptionLayerTest.cs
index 164e2f5..ad39da2 100644
--- a/Unit/NeuralNetwork.NET.Cuda.Unit/CuDnnInceptionLayerTest.cs
+++ b/Unit/NeuralNetwork.NET.Cuda.Unit/CuDnnInceptionLayerTest.cs
@@ -7,6 +7,7 @@
using System.Runtime.CompilerServices;
using NeuralNetworkNET.Networks.Layers.Cuda;
using NeuralNetworkNET.Networks.Layers.Initialization;
+using SixLabors.ImageSharp.PixelFormats;
namespace NeuralNetworkNET.Cuda.Unit
{
@@ -20,8 +21,8 @@ public class CuDnnInceptionLayerTest
[TestMethod]
public unsafe void Inception1x1()
{
- float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(10), 32 * 32 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(10, 32 * 32 * 3);
- CuDnnConvolutionalLayer conv = new CuDnnConvolutionalLayer(TensorInfo.CreateForRgbImage(32, 32), ConvolutionInfo.New(ConvolutionMode.CrossCorrelation), (1, 1), 10, ActivationFunctionType.ReLU, BiasInitializationMode.Gaussian);
+ float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(10), 32 * 32 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(10, 32 * 32 * 3);
+ CuDnnConvolutionalLayer conv = new CuDnnConvolutionalLayer(TensorInfo.Image<Rgb24>(32, 32), ConvolutionInfo.New(ConvolutionMode.CrossCorrelation), (1, 1), 10, ActivationFunctionType.ReLU, BiasInitializationMode.Gaussian);
CuDnnInceptionLayer inception = new CuDnnInceptionLayer(conv.InputInfo, InceptionInfo.New(10, 10, 10, 10, 10, PoolingMode.Max, 10));
fixed (float* pw = inception.Weights)
Unsafe.InitBlock(pw, 0, (uint)(sizeof(float) * inception.Weights.Length));
@@ -85,11 +86,11 @@ public unsafe void Inception1x1()
[TestMethod]
public unsafe void Inception3x3Pipeline()
{
- float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(10), 32 * 32 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(10, 32 * 32 * 3);
+ float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(10), 32 * 32 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(10, 32 * 32 * 3);
CuDnnConvolutionalLayer
- conv1 = new CuDnnConvolutionalLayer(TensorInfo.CreateForRgbImage(32, 32), ConvolutionInfo.New(ConvolutionMode.CrossCorrelation), (1, 1), 10, ActivationFunctionType.ReLU, BiasInitializationMode.Gaussian),
+ conv1 = new CuDnnConvolutionalLayer(TensorInfo.Image<Rgb24>(32, 32), ConvolutionInfo.New(ConvolutionMode.CrossCorrelation), (1, 1), 10, ActivationFunctionType.ReLU, BiasInitializationMode.Gaussian),
conv2 = new CuDnnConvolutionalLayer(conv1.OutputInfo, ConvolutionInfo.New(ConvolutionMode.CrossCorrelation, 1, 1), (3, 3), 10, ActivationFunctionType.ReLU, BiasInitializationMode.Gaussian);
- CuDnnInceptionLayer inception = new CuDnnInceptionLayer(TensorInfo.CreateForRgbImage(32, 32), InceptionInfo.New(10, 10, 10, 10, 10, PoolingMode.Max, 10));
+ CuDnnInceptionLayer inception = new CuDnnInceptionLayer(TensorInfo.Image<Rgb24>(32, 32), InceptionInfo.New(10, 10, 10, 10, 10, PoolingMode.Max, 10));
fixed (float* pw = inception.Weights)
Unsafe.InitBlock(pw, 0, (uint)(sizeof(float) * inception.Weights.Length));
Buffer.BlockCopy(conv1.Weights, 0, inception.Weights, sizeof(float) * 3 * 10, sizeof(float) * conv1.Weights.Length);
@@ -154,11 +155,11 @@ public unsafe void Inception3x3Pipeline()
[TestMethod]
public unsafe void Inception5x5Pipeline()
{
- float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(10), 12 * 12 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(10, 12 * 12 * 3);
+ float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(10), 12 * 12 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(10, 12 * 12 * 3);
CuDnnConvolutionalLayer
- conv1 = new CuDnnConvolutionalLayer(TensorInfo.CreateForRgbImage(12, 12), ConvolutionInfo.New(ConvolutionMode.CrossCorrelation), (1, 1), 10, ActivationFunctionType.ReLU, BiasInitializationMode.Gaussian),
+ conv1 = new CuDnnConvolutionalLayer(TensorInfo.Image<Rgb24>(12, 12), ConvolutionInfo.New(ConvolutionMode.CrossCorrelation), (1, 1), 10, ActivationFunctionType.ReLU, BiasInitializationMode.Gaussian),
conv2 = new CuDnnConvolutionalLayer(conv1.OutputInfo, ConvolutionInfo.New(ConvolutionMode.CrossCorrelation, 2, 2), (5, 5), 10, ActivationFunctionType.ReLU, BiasInitializationMode.Gaussian);
- CuDnnInceptionLayer inception = new CuDnnInceptionLayer(TensorInfo.CreateForRgbImage(12, 12), InceptionInfo.New(3, 2, 2, 10, 10, PoolingMode.Max, 2));
+ CuDnnInceptionLayer inception = new CuDnnInceptionLayer(TensorInfo.Image<Rgb24>(12, 12), InceptionInfo.New(3, 2, 2, 10, 10, PoolingMode.Max, 2));
fixed (float* pw = inception.Weights)
Unsafe.InitBlock(pw, 0, (uint)(sizeof(float) * inception.Weights.Length));
Buffer.BlockCopy(conv1.Weights, 0, inception.Weights, sizeof(float) * (3 * 3 + 3 * 2 + 3 * 3 * 2 * 2), sizeof(float) * conv1.Weights.Length);
@@ -222,10 +223,10 @@ public unsafe void Inception5x5Pipeline()
[TestMethod]
public unsafe void InceptionPoolPipeline()
{
- float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(10), 12 * 12 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(10, 12 * 12 * 3);
- CuDnnPoolingLayer pool = new CuDnnPoolingLayer(TensorInfo.CreateForRgbImage(12, 12), PoolingInfo.New(PoolingMode.Max, 3, 3, 1, 1, 1, 1), ActivationFunctionType.ReLU);
+ float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(10), 12 * 12 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(10, 12 * 12 * 3);
+ CuDnnPoolingLayer pool = new CuDnnPoolingLayer(TensorInfo.Image<Rgb24>(12, 12), PoolingInfo.New(PoolingMode.Max, 3, 3, 1, 1, 1, 1), ActivationFunctionType.ReLU);
CuDnnConvolutionalLayer conv = new CuDnnConvolutionalLayer(pool.OutputInfo, ConvolutionInfo.New(ConvolutionMode.CrossCorrelation), (1, 1), 10, ActivationFunctionType.ReLU, BiasInitializationMode.Gaussian);
- CuDnnInceptionLayer inception = new CuDnnInceptionLayer(TensorInfo.CreateForRgbImage(12, 12), InceptionInfo.New(3, 2, 2, 2, 2, PoolingMode.Max, 10));
+ CuDnnInceptionLayer inception = new CuDnnInceptionLayer(TensorInfo.Image<Rgb24>(12, 12), InceptionInfo.New(3, 2, 2, 2, 2, PoolingMode.Max, 10));
fixed (float* pw = inception.Weights)
Unsafe.InitBlock(pw, 0, (uint)(sizeof(float) * inception.Weights.Length));
Buffer.BlockCopy(conv.Weights, 0, inception.Weights, sizeof(float) * (3 * 3 + 3 * 2 + 3 * 3 * 2 * 2 + 3 * 2 + 5 * 5 * 2 * 2), sizeof(float) * conv.Weights.Length);
diff --git a/Unit/NeuralNetwork.NET.Cuda.Unit/CuDnnLayersTest.cs b/Unit/NeuralNetwork.NET.Cuda.Unit/CuDnnLayersTest.cs
index 9b3d376..85c89f4 100644
--- a/Unit/NeuralNetwork.NET.Cuda.Unit/CuDnnLayersTest.cs
+++ b/Unit/NeuralNetwork.NET.Cuda.Unit/CuDnnLayersTest.cs
@@ -74,9 +74,9 @@ private static unsafe void TestGradient(WeightedLayerBase cpu, WeightedLayerBase
[TestMethod]
public void FullyConnectedForward()
{
- float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(400), 250, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 250);
+ float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(400), 250, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 250);
FullyConnectedLayer
- cpu = new FullyConnectedLayer(TensorInfo.CreateLinear(250), 127, ActivationFunctionType.LeCunTanh, WeightsInitializationMode.GlorotNormal, BiasInitializationMode.Gaussian),
+ cpu = new FullyConnectedLayer(TensorInfo.Linear(250), 127, ActivationFunctionType.LeCunTanh, WeightsInitializationMode.GlorotNormal, BiasInitializationMode.Gaussian),
gpu = new CuDnnFullyConnectedLayer(cpu.InputInfo, cpu.OutputInfo.Size, cpu.Weights, cpu.Biases, cpu.ActivationFunctionType);
TestForward(cpu, gpu, x);
}
@@ -85,10 +85,10 @@ public void FullyConnectedForward()
public void FullyConnectedBackward()
{
float[,]
- delta_1 = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(400), 127, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 127),
- z = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(400), 250, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 250);
+ delta_1 = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(400), 127, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 127),
+ z = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(400), 250, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 250);
FullyConnectedLayer
- cpu = new FullyConnectedLayer(TensorInfo.CreateLinear(250), 127, ActivationFunctionType.LeCunTanh, WeightsInitializationMode.GlorotNormal, BiasInitializationMode.Gaussian),
+ cpu = new FullyConnectedLayer(TensorInfo.Linear(250), 127, ActivationFunctionType.LeCunTanh, WeightsInitializationMode.GlorotNormal, BiasInitializationMode.Gaussian),
gpu = new CuDnnFullyConnectedLayer(cpu.InputInfo, cpu.OutputInfo.Size, cpu.Weights, cpu.Biases, cpu.ActivationFunctionType);
TestBackward(cpu, gpu, delta_1, z);
}
@@ -97,10 +97,10 @@ public void FullyConnectedBackward()
public void FullyConnectedGradient()
{
float[,]
- x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(400), 250, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 250),
- delta = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(400), 127, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 127);
+ x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(400), 250, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 250),
+ delta = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(400), 127, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 127);
FullyConnectedLayer
- cpu = new FullyConnectedLayer(TensorInfo.CreateLinear(250), 127, ActivationFunctionType.LeCunTanh, WeightsInitializationMode.GlorotNormal, BiasInitializationMode.Gaussian),
+ cpu = new FullyConnectedLayer(TensorInfo.Linear(250), 127, ActivationFunctionType.LeCunTanh, WeightsInitializationMode.GlorotNormal, BiasInitializationMode.Gaussian),
gpu = new CuDnnFullyConnectedLayer(cpu.InputInfo, cpu.OutputInfo.Size, cpu.Weights, cpu.Biases, cpu.ActivationFunctionType);
TestGradient(cpu, gpu, x, delta);
}
@@ -112,9 +112,9 @@ public void FullyConnectedGradient()
[TestMethod]
public void SoftmaxForward()
{
- float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(400), 250, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 250);
+ float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(400), 250, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 250);
SoftmaxLayer
- cpu = new SoftmaxLayer(TensorInfo.CreateLinear(250), 127, WeightsInitializationMode.GlorotNormal, BiasInitializationMode.Gaussian),
+ cpu = new SoftmaxLayer(TensorInfo.Linear(250), 127, WeightsInitializationMode.GlorotNormal, BiasInitializationMode.Gaussian),
gpu = new CuDnnSoftmaxLayer(cpu.InputInfo, cpu.OutputInfo.Size, cpu.Weights, cpu.Biases);
TestForward(cpu, gpu, x);
}
@@ -123,10 +123,10 @@ public void SoftmaxForward()
public void SoftmaxBackward()
{
float[,]
- delta_1 = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(400), 127, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 127),
- z = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(400), 250, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 250);
+ delta_1 = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(400), 127, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 127),
+ z = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(400), 250, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 250);
SoftmaxLayer
- cpu = new SoftmaxLayer(TensorInfo.CreateLinear(250), 127, WeightsInitializationMode.GlorotNormal, BiasInitializationMode.Gaussian),
+ cpu = new SoftmaxLayer(TensorInfo.Linear(250), 127, WeightsInitializationMode.GlorotNormal, BiasInitializationMode.Gaussian),
gpu = new CuDnnSoftmaxLayer(cpu.InputInfo, cpu.OutputInfo.Size, cpu.Weights, cpu.Biases);
TestBackward(cpu, gpu, delta_1, z);
}
@@ -135,10 +135,10 @@ public void SoftmaxBackward()
public void SoftmaxGradient()
{
float[,]
- a = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(400), 250, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 250),
- delta = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(400), 127, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 127);
+ a = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(400), 250, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 250),
+ delta = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(400), 127, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 127);
SoftmaxLayer
- cpu = new SoftmaxLayer(TensorInfo.CreateLinear(250), 127, WeightsInitializationMode.GlorotNormal, BiasInitializationMode.Gaussian),
+ cpu = new SoftmaxLayer(TensorInfo.Linear(250), 127, WeightsInitializationMode.GlorotNormal, BiasInitializationMode.Gaussian),
gpu = new CuDnnSoftmaxLayer(cpu.InputInfo, cpu.OutputInfo.Size, cpu.Weights, cpu.Biases);
TestGradient(cpu, gpu, a, delta);
}
@@ -147,12 +147,12 @@ public void SoftmaxGradient()
public unsafe void SoftmaxBackwardOutput()
{
float[,]
- x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(400), 250, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 250),
+ x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(400), 250, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 250),
y = new float[400, 127];
for (int i = 0; i < 400; i++)
y[i, ThreadSafeRandom.NextInt(max: 127)] = 1;
SoftmaxLayer
- cpu = new SoftmaxLayer(TensorInfo.CreateLinear(250), 127, WeightsInitializationMode.GlorotNormal, BiasInitializationMode.Gaussian),
+ cpu = new SoftmaxLayer(TensorInfo.Linear(250), 127, WeightsInitializationMode.GlorotNormal, BiasInitializationMode.Gaussian),
gpu = new CuDnnSoftmaxLayer(cpu.InputInfo, cpu.OutputInfo.Size, cpu.Weights, cpu.Biases);
fixed (float* px = x, py = y)
{
@@ -176,7 +176,7 @@ public unsafe void SoftmaxBackwardOutput()
[TestMethod]
public void ConvolutionForward()
{
- float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(127), 58 * 58 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(127, 58 * 58 * 3);
+ float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(127), 58 * 58 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(127, 58 * 58 * 3);
ConvolutionalLayer
cpu = new ConvolutionalLayer(new TensorInfo(58, 58, 3), ConvolutionInfo.Default, (5, 5), 20, ActivationFunctionType.LeakyReLU, BiasInitializationMode.Gaussian),
gpu = new CuDnnConvolutionalLayer(cpu.InputInfo, ConvolutionInfo.Default, cpu.KernelInfo, cpu.OutputInfo, cpu.Weights, cpu.Biases, cpu.ActivationFunctionType);
@@ -187,8 +187,8 @@ public void ConvolutionForward()
public unsafe void ConvolutionBackward()
{
float[,]
- delta_1 = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(127), 54 * 54 * 20, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(127, 54 * 54 * 20),
- z = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(127), 58 * 58 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(127, 58 * 58 * 3);
+ delta_1 = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(127), 54 * 54 * 20, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(127, 54 * 54 * 20),
+ z = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(127), 58 * 58 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(127, 58 * 58 * 3);
ConvolutionalLayer
cpu = new ConvolutionalLayer(new TensorInfo(58, 58, 3), ConvolutionInfo.Default, (5, 5), 20, ActivationFunctionType.LeCunTanh, BiasInitializationMode.Gaussian),
gpu = new CuDnnConvolutionalLayer(cpu.InputInfo, ConvolutionInfo.Default, cpu.KernelInfo, cpu.OutputInfo, cpu.Weights, cpu.Biases, ActivationFunctionType.LeCunTanh);
@@ -206,8 +206,8 @@ public unsafe void ConvolutionBackward()
public unsafe void ConvolutionGradient()
{
float[,]
- x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(127), 58 * 58 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(127, 58 * 58 * 3),
- delta = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(127), 54 * 54 * 5, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(127, 54 * 54 * 5);
+ x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(127), 58 * 58 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(127, 58 * 58 * 3),
+ delta = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(127), 54 * 54 * 5, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(127, 54 * 54 * 5);
ConvolutionalLayer
cpu = new ConvolutionalLayer(new TensorInfo(58, 58, 3), ConvolutionInfo.Default, (5, 5), 5, ActivationFunctionType.LeCunTanh, BiasInitializationMode.Gaussian),
gpu = new CuDnnConvolutionalLayer(cpu.InputInfo, ConvolutionInfo.Default, cpu.KernelInfo, cpu.OutputInfo, cpu.Weights, cpu.Biases, ActivationFunctionType.LeCunTanh);
@@ -228,7 +228,7 @@ public unsafe void ConvolutionGradient()
[TestMethod]
public void PoolingForward()
{
- float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(400), 58 * 58 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 58 * 58 * 3);
+ float[,] x = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(400), 58 * 58 * 3, WeightsInitializationMode.GlorotNormal).AsSpan().AsMatrix(400, 58 * 58 * 3);
PoolingLayer
cpu = new PoolingLayer(new TensorInfo(58, 58, 3), PoolingInfo.Default, ActivationFunctionType.LeakyReLU),
gpu = new CuDnnPoolingLayer(cpu.InputInfo, PoolingInfo.Default, ActivationFunctionType.LeakyReLU);
diff --git a/Unit/NeuralNetwork.NET.Cuda.Unit/SerializationTest.cs b/Unit/NeuralNetwork.NET.Cuda.Unit/SerializationTest.cs
index da7ab6c..9573aac 100644
--- a/Unit/NeuralNetwork.NET.Cuda.Unit/SerializationTest.cs
+++ b/Unit/NeuralNetwork.NET.Cuda.Unit/SerializationTest.cs
@@ -5,6 +5,7 @@
using NeuralNetworkNET.APIs.Structs;
using NeuralNetworkNET.Networks.Activations;
using System.IO;
+using SixLabors.ImageSharp.PixelFormats;
namespace NeuralNetworkNET.Cuda.Unit
{
@@ -18,7 +19,7 @@ public class SerializationTest
[TestMethod]
public void NetworkSerialization()
{
- INeuralNetwork network = NetworkManager.NewSequential(TensorInfo.CreateForRgbImage(120, 120),
+ INeuralNetwork network = NetworkManager.NewSequential(TensorInfo.Image<Rgb24>(120, 120),
CuDnnNetworkLayers.Convolutional(ConvolutionInfo.New(ConvolutionMode.CrossCorrelation), (10, 10), 20, ActivationFunctionType.AbsoluteReLU),
CuDnnNetworkLayers.Convolutional(ConvolutionInfo.New(ConvolutionMode.Convolution, 2, 2), (5, 5), 20, ActivationFunctionType.ELU),
CuDnnNetworkLayers.Convolutional(ConvolutionInfo.Default, (10, 10), 20, ActivationFunctionType.Identity),
diff --git a/Unit/NeuralNetwork.NET.Unit/ConvolutionExtensionsTest.cs b/Unit/NeuralNetwork.NET.Unit/ConvolutionExtensionsTest.cs
index 0a51190..047d81d 100644
--- a/Unit/NeuralNetwork.NET.Unit/ConvolutionExtensionsTest.cs
+++ b/Unit/NeuralNetwork.NET.Unit/ConvolutionExtensionsTest.cs
@@ -2,6 +2,7 @@
using NeuralNetworkNET.APIs.Structs;
using NeuralNetworkNET.cpuDNN;
using NeuralNetworkNET.Extensions;
+using SixLabors.ImageSharp.PixelFormats;
namespace NeuralNetworkNET.Unit
{
@@ -37,11 +38,11 @@ public unsafe void Pool1()
{
Tensor.Reshape(pm, 1, 16, out Tensor mTensor);
Tensor.New(1, 4, out Tensor result);
- CpuDnn.PoolingForward(mTensor, TensorInfo.CreateForGrayscaleImage(4, 4), result);
+ CpuDnn.PoolingForward(mTensor, TensorInfo.Image<Alpha8>(4, 4), result);
Assert.IsTrue(result.ToArray2D().ContentEquals(r));
// Upscale
- CpuDnn.PoolingBackward(mTensor, TensorInfo.CreateForGrayscaleImage(4, 4), result, mTensor);
+ CpuDnn.PoolingBackward(mTensor, TensorInfo.Image<Alpha8>(4, 4), result, mTensor);
float[,] expected =
{
{
@@ -86,7 +87,7 @@ public unsafe void Pool2()
{
Tensor.Reshape(pm, 1, 49, out Tensor mTensor);
Tensor.New(1, 16, out Tensor result);
- CpuDnn.PoolingForward(mTensor, TensorInfo.CreateForGrayscaleImage(7, 7), result);
+ CpuDnn.PoolingForward(mTensor, TensorInfo.Image<Alpha8>(7, 7), result);
Assert.IsTrue(result.ToArray2D().ContentEquals(r));
result.Free();
}
@@ -112,7 +113,7 @@ public unsafe void Pool3()
{
Tensor.Reshape(pm, 1, 4, out Tensor mTensor);
Tensor.New(1, 1, out Tensor result);
- CpuDnn.PoolingForward(mTensor, TensorInfo.CreateForGrayscaleImage(2, 2), result);
+ CpuDnn.PoolingForward(mTensor, TensorInfo.Image<Alpha8>(2, 2), result);
Assert.IsTrue(result.ToArray2D().ContentEquals(r));
result.Free();
}
@@ -153,7 +154,7 @@ public unsafe void Pool4()
{
Tensor.Reshape(pm, 2, 16, out Tensor mTensor);
Tensor.New(2, 4, out Tensor result);
- CpuDnn.PoolingForward(mTensor, TensorInfo.CreateForGrayscaleImage(4, 4), result);
+ CpuDnn.PoolingForward(mTensor, TensorInfo.Image<Alpha8>(4, 4), result);
Assert.IsTrue(result.ToArray2D().ContentEquals(r));
result.Free();
}
@@ -249,7 +250,7 @@ public unsafe void UpscalePool1()
{
Tensor.Reshape(pm, 1, 16, out Tensor mTensor);
Tensor.Reshape(pp, 1, 4, out Tensor pTensor);
- CpuDnn.PoolingBackward(mTensor, TensorInfo.CreateForGrayscaleImage(4, 4), pTensor, mTensor);
+ CpuDnn.PoolingBackward(mTensor, TensorInfo.Image<Alpha8>(4, 4), pTensor, mTensor);
Assert.IsTrue(mTensor.ToArray2D().ContentEquals(r));
}
}
@@ -509,7 +510,7 @@ public unsafe void Compress1()
{
Tensor.Reshape(pm, 1, 9, out Tensor mTensor);
Tensor.New(1, 1, out Tensor v);
- CpuDnn.ConvolutionBackwardBias(mTensor, TensorInfo.CreateForGrayscaleImage(3, 3), v);
+ CpuDnn.ConvolutionBackwardBias(mTensor, TensorInfo.Image<Alpha8>(3, 3), v);
Assert.IsTrue(v.ToArray().ContentEquals(r));
v.Free();
}
diff --git a/Unit/NeuralNetwork.NET.Unit/MiscTest.cs b/Unit/NeuralNetwork.NET.Unit/MiscTest.cs
index 91b806d..795db17 100644
--- a/Unit/NeuralNetwork.NET.Unit/MiscTest.cs
+++ b/Unit/NeuralNetwork.NET.Unit/MiscTest.cs
@@ -52,5 +52,26 @@ public void ThresholdAccuracyTest()
Assert.IsFalse(tester(yHat, y1));
Assert.IsTrue(tester(yHat, y2));
}
+
+ [TestMethod]
+ public void TrimVerbatim()
+ {
+ const String text = @"import matplotlib.pyplot as plt
+ x = [$VALUES$]
+ plt.grid(linestyle=""dashed"")
+ plt.ylabel(""$YLABEL$"")
+ plt.xlabel(""Epoch"")
+ plt.plot(x)
+ plt.show()";
+ const String expected = @"import matplotlib.pyplot as plt
+x = [$VALUES$]
+plt.grid(linestyle=""dashed"")
+plt.ylabel(""$YLABEL$"")
+plt.xlabel(""Epoch"")
+plt.plot(x)
+plt.show()
+";
+ Assert.IsTrue(text.TrimVerbatim().Equals(expected));
+ }
}
}
diff --git a/Unit/NeuralNetwork.NET.Unit/NetworkTest.cs b/Unit/NeuralNetwork.NET.Unit/NetworkTest.cs
index b557017..045d3ac 100644
--- a/Unit/NeuralNetwork.NET.Unit/NetworkTest.cs
+++ b/Unit/NeuralNetwork.NET.Unit/NetworkTest.cs
@@ -13,6 +13,7 @@
using NeuralNetworkNET.Networks.Activations;
using NeuralNetworkNET.Networks.Implementations;
using NeuralNetworkNET.SupervisedLearning.Data;
+using SixLabors.ImageSharp.PixelFormats;
namespace NeuralNetworkNET.Unit
{
@@ -88,7 +89,7 @@ public void GradientDescentTest1()
{
(var trainingSet, var testSet) = ParseMnistDataset();
BatchesCollection batches = BatchesCollection.From(trainingSet, 100);
- SequentialNetwork network = NetworkManager.NewSequential(TensorInfo.CreateForGrayscaleImage(28, 28),
+ SequentialNetwork network = NetworkManager.NewSequential(TensorInfo.Image<Alpha8>(28, 28),
NetworkLayers.FullyConnected(100, ActivationFunctionType.Sigmoid),
NetworkLayers.Softmax(10)).To<INeuralNetwork, SequentialNetwork>();
TrainingSessionResult result = NetworkTrainer.TrainNetwork(network, batches, 4, 0, TrainingAlgorithms.StochasticGradientDescent(), null, null, null, null, default);
diff --git a/Unit/NeuralNetwork.NET.Unit/SerializationTest.cs b/Unit/NeuralNetwork.NET.Unit/SerializationTest.cs
index 8696a16..2f1eb98 100644
--- a/Unit/NeuralNetwork.NET.Unit/SerializationTest.cs
+++ b/Unit/NeuralNetwork.NET.Unit/SerializationTest.cs
@@ -7,6 +7,7 @@
using NeuralNetworkNET.Extensions;
using NeuralNetworkNET.Networks.Activations;
using NeuralNetworkNET.Networks.Layers.Initialization;
+using SixLabors.ImageSharp.PixelFormats;
namespace NeuralNetworkNET.Unit
{
@@ -48,7 +49,7 @@ public void StreamSerialize()
{
using (MemoryStream stream = new MemoryStream())
{
- float[] w = WeightsProvider.NewFullyConnectedWeights(TensorInfo.CreateLinear(784), 30, WeightsInitializationMode.GlorotNormal);
+ float[] w = WeightsProvider.NewFullyConnectedWeights(TensorInfo.Linear(784), 30, WeightsInitializationMode.GlorotNormal);
stream.WriteShuffled(w);
Assert.IsTrue(stream.Position == sizeof(float) * w.Length);
stream.Seek(0, SeekOrigin.Begin);
@@ -60,7 +61,7 @@ public void StreamSerialize()
[TestMethod]
public void NetworkSerialization()
{
- INeuralNetwork network = NetworkManager.NewSequential(TensorInfo.CreateForRgbImage(120, 120),
+ INeuralNetwork network = NetworkManager.NewSequential(TensorInfo.Image<Rgb24>(120, 120),
NetworkLayers.Convolutional((10, 10), 20, ActivationFunctionType.AbsoluteReLU),
NetworkLayers.Convolutional((5, 5), 20, ActivationFunctionType.ELU),
NetworkLayers.Convolutional((10, 10), 20, ActivationFunctionType.Identity),