Auto merge of #35 - autumnai:develop, r=MichaelHirn
Improving tests and docs
homu committed Dec 2, 2015
2 parents 5226c74 + 8eae87f commit 89c31a9
Showing 10 changed files with 141 additions and 33 deletions.
1 change: 1 addition & 0 deletions .travis.yml
@@ -21,6 +21,7 @@ before_script:
script:
- |
travis-cargo build &&
travis-cargo test &&
travis-cargo bench &&
travis-cargo doc
addons:
8 changes: 5 additions & 3 deletions README.md
@@ -54,7 +54,7 @@ For more information,
If you're using Cargo, just add Leaf to your Cargo.toml:

[dependencies]
leaf = "0.1.0"
leaf = "0.1.1"

If you're using [Cargo Edit][cargo-edit], you can
call:
@@ -75,8 +75,10 @@ We design Leaf and all other crates for machine learning completely modular and
as extensible as possible. More helpful crates you can use with Leaf:

- [**Cuticula**][cuticula]: Preprocessing Framework for Machine Learning
- [**Phloem**][phloem]: Universal CPU/GPU Data Blob for Machine Learning
- [**Collenchyma**][collen]: Backend-agnostic parallel computation
- [**Phloem**][phloem]: Universal Data Blob for Machine Learning on CUDA, OpenCL
or common CPU
- [**Collenchyma**][collen]: Portable, High Performance Computation on CUDA,
OpenCL and common CPU

[cuticula]: https://github.com/autumnai/cuticula
[phloem]: https://github.com/autumnai/phloem
26 changes: 22 additions & 4 deletions src/layer.rs
@@ -31,12 +31,21 @@ use std::sync::{RwLockReadGuard, RwLockWriteGuard};
/// ```
/// extern crate phloem;
/// # extern crate leaf;
/// # extern crate collenchyma as co;
/// use phloem::Blob;
/// use std::sync::{RwLock, RwLockReadGuard};
/// # use leaf::layer::ReadBlob;
/// # use co::backend::{Backend, BackendConfig};
/// # use co::frameworks::Native;
/// # use co::framework::IFramework;
/// # use std::rc::Rc;
///
/// # fn main() {
/// let lock = RwLock::new(Box::new(Blob::<f32>::of_shape(vec![3])));
/// # let framework = Native::new();
/// # let hardwares = framework.hardwares();
/// # let backend_config = BackendConfig::new(framework, hardwares);
/// # let backend = Rc::new(Backend::new(backend_config).unwrap());
/// let lock = RwLock::new(Box::new(Blob::<f32>::of_shape(Some(backend.device()), &[3, 2, 3])));
/// let read_blob: ReadBlob = lock.read().unwrap();
/// # }
/// ```
@@ -60,12 +69,21 @@ pub type ReadBlob<'_> = RwLockReadGuard<'_, HeapBlob>;
/// ```
/// extern crate phloem;
/// # extern crate leaf;
/// # extern crate collenchyma as co;
/// use phloem::Blob;
/// use std::sync::{RwLock, RwLockWriteGuard};
/// # use leaf::layer::WriteBlob;
/// # use co::backend::{Backend, BackendConfig};
/// # use co::frameworks::Native;
/// # use co::framework::IFramework;
/// # use std::rc::Rc;
///
/// # fn main() {
/// let lock = RwLock::new(Box::new(Blob::<f32>::of_shape(vec![3])));
/// # let framework = Native::new();
/// # let hardwares = framework.hardwares();
/// # let backend_config = BackendConfig::new(framework, hardwares);
/// # let backend = Rc::new(Backend::new(backend_config).unwrap());
/// let lock = RwLock::new(Box::new(Blob::<f32>::of_shape(Some(backend.device()), &[4, 2, 1])));
/// let write_blob: WriteBlob = lock.write().unwrap();
/// # }
/// ```
@@ -455,7 +473,7 @@ pub trait ILayer {
/// [2]: ./type.ReadBlob.html
/// [3]: ./type.WriteBlob.html
/// [3]: #method.forward_cpu
#[allow(map_clone)]
#[cfg_attr(lint, allow(map_clone))]
fn forward(&self, bottom: &[ArcLock<HeapBlob>], top: &mut Vec<ArcLock<HeapBlob>>) -> f32 {
// Lock();
// Reshape(bottom, top); // Reshape the layer to fit top & bottom blob
@@ -495,7 +513,7 @@ pub trait ILayer {
/// [2]: ./type.ReadBlob.html
/// [3]: ./type.WriteBlob.html
/// [3]: #method.backward_cpu
#[allow(map_clone)]
#[cfg_attr(lint, allow(map_clone))]
fn backward(&self, top: &[ArcLock<HeapBlob>], propagate_down: &[bool], bottom: &mut Vec<ArcLock<HeapBlob>>) {
let tp: Vec<_> = top.iter().map(|b| b.read().unwrap()).collect();
let bt_ref = bottom.iter().cloned().collect::<Vec<_>>();
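The lint attributes in this file now read #[cfg_attr(lint, allow(map_clone))] instead of a bare #[allow(map_clone)]: the clippy-specific allow is only attached when a "lint" cfg flag is set, so builds without clippy never need to recognize the lint name. A minimal sketch of the pattern, assuming the flag is supplied externally (for example RUSTFLAGS="--cfg lint"); the copy_all function is hypothetical and only illustrates what map_clone flags:

// Without `--cfg lint`, cfg_attr strips the attribute entirely, so the
// clippy-specific lint name never reaches a plain rustc build.
#[cfg_attr(lint, allow(map_clone))]
fn copy_all(values: &[i32]) -> Vec<i32> {
    // `map(|v| *v)` is the pattern clippy's map_clone lint warns about.
    values.iter().map(|v| *v).collect()
}

fn main() {
    assert_eq!(copy_all(&[1, 2, 3]), vec![1, 2, 3]);
}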
65 changes: 52 additions & 13 deletions src/lib.rs
@@ -1,20 +1,58 @@
//! Leaf is an open, fast and well-designed, modular Framework for distributed
//! Deep Learning on {C, G}PUs.
//! Leaf is an open, modular and clearly designed Machine Intelligence Framework providing
//! state-of-the-art performance for distributed (Deep|Machine) Learning - sharing concepts from
//! Tensorflow and Caffe.
//!
//! ## Overview
//! An important module in Leaf is the backend-agnostic, high-performance computation Framework
//! [Collenchyma][collenchyma], which combines performance and usability for Leaf Networks.
//! This allows you to run and deploy Leaf Networks on servers, desktops or even mobile devices,
//! using the full available computation power of GPUs or other CUDA/OpenCL-supported
//! devices to train your Networks. If your machine does not have a GPU or you do
//! not want to install CUDA/OpenCL on your local machine, Leaf gracefully falls back to
//! the native host CPU.
//!
//! To build a Deep Neural Network you first need to create a
//! [Network][network] which is a container for all different types of
//! [Layers][layers]. These layers are grouped in different types such as
//! [Activation Layers][activation] and [Loss Layers][loss] (these state the
//! characteristics of the layer).
//! ## Architecture
//!
//! Now to train your network you can use one of the [Solvers][solvers]. The
//! Solver defines the [Optimization Method][optimization] and keeps track of
//! the learning progress.
//! Leaf's [Network][network] is a compositional model, representing a collection of connected
//! [layers][layers] that perform operations over numerical data.
//!
//! The operations can run on different Backends {CPU, GPU} and don't have
//! to be defined at compile time, which allows for easy backend swapping.
//! The Network defines the entire model by describing the hierarchical structure of layers from
//! bottom to top. At execution time, the Network passes the data flowing through it
//! from one layer to the next. The output of one layer is the input for the layer on top. On a
//! backward pass, the Network passes the derivatives back through the Network in inverse order.
//!
//! Layers, the building blocks of a Leaf Network, are small units describing computation over
//! numerical input data. Generally speaking, Layers take input and produce an output, but
//! essentially a Layer can describe any functionality, e.g. logging, as long as it obeys the
//! general behaviour specifications of a Layer. Every Layer falls into one of four
//! Layer types, which are defined in more detail on the [Layers page][layers]. Every
//! layer serves a special purpose and can occur zero, one or many times inside a Network.
//!
//! Leaf uses a Blob, provided by the [Phloem][phloem] module: an N-dimensional array that
//! offers a unified memory interface over the actual data and automatic synchronization between
//! different devices (CUDA, OpenCL, host CPU). A Blob stores the actual data as well as the
//! derivatives and is used both for the data flowing through the system and for the state
//! representation of Layers, which is important for portability and performance.
//! A Blob can be swapped from backend to backend and can be used for computations on CUDA, OpenCL
//! and the native host CPU. It provides performance optimizations and automatically takes care of
//! memory management and synchronization.
//!
//! The learning and optimization of the Network happens in the [Solver][solver] and is decoupled
//! from the Network, making the setup clean and flexible. One of the four layer types is a Loss
//! Layer, which is used for the interaction between Network and Solver. The Network produces the
//! loss and gradients, which the Solver uses to optimize the Network through parameter updates.
//! Besides that, the Solver provides housekeeping and other evaluations of the Network. All
//! operations on the Solver happen through Collenchyma and can therefore be executed on CUDA,
//! OpenCL or the native host CPU as well.
//!
//! Leaf provides a robust and modular design, which makes it possible to express almost any
//! numerical computation, including SVMs, RNNs and other popular learning algorithms. We hope
//! that Leaf can help future research and production development alike, as it combines
//! expressiveness, performance and usability.
//!
//! [network]: ./network/index.html
//! [layers]: ./layers/index.html
//! [phloem]: https://github.com/autumnai/phloem
//! [solver]: ./solvers/index.html
//!
//! ## Philosophy
//!
@@ -56,6 +94,7 @@
//! - [Issue #19 for Activation Layers][issue-activation]
//! - [Issue #20 for Common Layers][issue-common]
//!
//! [collenchyma]: https://github.com/autumnai/collenchyma
//! [network]: ./network/index.html
//! [layers]: ./layers/index.html
//! [activation]: ./layers/activation/index.html
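Taken together, the architecture described above maps onto the updated API roughly as follows. This is a sketch assembled only from calls that appear elsewhere in this commit (backend setup, network configuration, solver configuration); it illustrates the intended flow rather than being a verified program:

extern crate collenchyma as co;
extern crate leaf;

use std::rc::Rc;
use co::backend::{Backend, BackendConfig};
use co::framework::IFramework;
use co::frameworks::Native;
use leaf::network::*;
use leaf::solver::*;

fn main() {
    // Backend: enumerate the native hardware and build a CPU backend,
    // the same pattern used by the new doc-tests and test helpers.
    let framework = Native::new();
    let hardwares = framework.hardwares();
    let backend_config = BackendConfig::new(framework, hardwares);
    let backend = Rc::new(Backend::new(backend_config).unwrap());

    // Network: built from a config and now explicitly tied to a backend.
    let net_cfg = NetworkConfig::default();
    Network::from_config(backend, &net_cfg);

    // Solver: decoupled from the Network and configured with an
    // optimization method (SGD with momentum here).
    let solver_cfg = SolverConfig {
        solver: SolverKind::SGD(SGDKind::Momentum),
        ..SolverConfig::default()
    };
    let _solver = Solver::<Box<ISolver<Backend<Native>>>, Backend<Native>>::from_config(&solver_cfg);
}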
20 changes: 18 additions & 2 deletions src/network.rs
@@ -120,9 +120,25 @@ impl<B: IBackend + IBlas<f32>> Network<B> {
/// ## Examples
///
/// ```
/// # extern crate collenchyma;
/// # extern crate leaf;
///
/// # use leaf::network::*;
/// # use collenchyma::backend::{Backend, BackendConfig};
/// # use collenchyma::frameworks::Native;
/// # use collenchyma::framework::IFramework;
/// # use std::rc::Rc;
///
/// # fn main() {
/// // create backend
/// let framework = Native::new();
/// let hardwares = framework.hardwares();
/// let backend_config = BackendConfig::new(framework, hardwares);
/// let backend = Rc::new(Backend::new(backend_config).unwrap());
/// // create network
/// let cfg = NetworkConfig::default();
/// Network::from_config(&cfg);
/// Network::from_config(backend, &cfg);
/// # }
/// ```
pub fn from_config(backend: Rc<B>, param: &NetworkConfig) -> Network<B> {
let mut network = Network::default();
@@ -250,7 +266,7 @@ impl<B: IBackend + IBlas<f32>> Network<B> {
/// Used during initialization of the Network.
/// [1]: ../layer/struct.Layer.html
/// [2]: ../layer/struct.Layer.html#method.connect
#[allow(ptr_arg)]
#[cfg_attr(lint, allow(ptr_arg))]
fn init_input_blob(&mut self,
blob_name: &str,
input_shape: &Vec<usize>,
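ptr_arg, the lint silenced on init_input_blob above, is clippy's warning against &Vec<T> parameters; the usual fix is to accept a slice instead. A small illustration with a hypothetical shape_volume helper (not part of Leaf):

// clippy's ptr_arg lint flags `&Vec<usize>` parameters like the one on
// init_input_blob; a `&[usize]` slice accepts the same callers, since
// `&vec` coerces to a slice, without requiring a Vec.
fn shape_volume(input_shape: &[usize]) -> usize {
    input_shape.iter().product()
}

fn main() {
    let shape = vec![3, 2, 3];
    assert_eq!(shape_volume(&shape), 18);
}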
9 changes: 8 additions & 1 deletion src/solver.rs
@@ -31,11 +31,18 @@ impl<S, B: IBackend + IBlas<f32>> Solver<S, B> {
/// ## Example
///
/// ```
/// # extern crate leaf;
/// # extern crate collenchyma;
/// # use leaf::solver::*;
/// # use collenchyma::backend::Backend;
/// # use collenchyma::frameworks::Native;
///
/// # fn main() {
/// let cfg = SolverConfig{
/// solver: SolverKind::SGD(SGDKind::Momentum),
/// ..SolverConfig::default()};
/// let solver = Solver::<Box<ISolver>>::from_config(&cfg);
/// let solver = Solver::<Box<ISolver<Backend<Native>>>, Backend<Native>>::from_config(&cfg);
/// # }
/// ```
pub fn from_config(config: &SolverConfig) -> Solver<Box<ISolver<Backend<Native>>>, Backend<Native>> {
let framework = Native::new();
2 changes: 0 additions & 2 deletions src/solvers/sgd/momentum.rs
@@ -13,8 +13,6 @@
//! into the same direction you will reach the optimum faster.
//! It also makes solving more stable.
use co::backend::*;
use co::framework::*;
use co::frameworks::*;
use co::libraries::blas::IBlas;
use shared_memory::*;
use network::Network;
24 changes: 18 additions & 6 deletions tests/layer_specs.rs
@@ -1,27 +1,39 @@
extern crate leaf;
extern crate phloem;
extern crate collenchyma as co;

#[cfg(test)]
mod layer_spec {

use leaf::layer::*;
use phloem::Blob;
use std::rc::Rc;
use co::backend::{Backend, BackendConfig};
use co::frameworks::Native;
use co::framework::IFramework;

fn new_layer_config() -> LayerConfig {
LayerConfig::new("foo".to_owned(), LayerType::Sigmoid)
}

fn backend() -> Rc<Backend<Native>> {
let framework = Native::new();
let hardwares = framework.hardwares();
let backend_config = BackendConfig::new(framework, hardwares);
Rc::new(Backend::new(backend_config).unwrap())
}

#[test]
fn new_layer() {
let cfg = new_layer_config();
Layer::from_config(&cfg);
Layer::from_config(backend(), &cfg);
}

#[test]
fn dim_check_strict() {
let cfg = WeightConfig { share_mode: DimCheckMode::Strict, ..WeightConfig::default() };
let blob_one = Blob::<f32>::of_shape(vec![2, 3, 3]);
let blob_two = Blob::<f32>::of_shape(vec![3, 2, 3]);
let blob_one = Blob::<f32>::of_shape(Some(backend().device()), &[2, 3, 3]);
let blob_two = Blob::<f32>::of_shape(Some(backend().device()), &[3, 2, 3]);
let param_name = "foo".to_owned();
let owner_name = "owner".to_owned();
let layer_name = "layer".to_owned();
@@ -43,9 +55,9 @@ mod layer_spec {
#[test]
fn dim_check_permissive() {
let cfg = WeightConfig { share_mode: DimCheckMode::Permissive, ..WeightConfig::default() };
let blob_one = Blob::<f32>::of_shape(vec![2, 3, 3]);
let blob_two = Blob::<f32>::of_shape(vec![3, 2, 3]);
let blob_three = Blob::<f32>::of_shape(vec![3, 10, 3]);
let blob_one = Blob::<f32>::of_shape(Some(backend().device()), &[2, 3, 3]);
let blob_two = Blob::<f32>::of_shape(Some(backend().device()), &[3, 2, 3]);
let blob_three = Blob::<f32>::of_shape(Some(backend().device()), &[3, 10, 3]);
let param_name = "foo".to_owned();
let owner_name = "owner".to_owned();
let layer_name = "layer".to_owned();
14 changes: 13 additions & 1 deletion tests/network_specs.rs
@@ -1,13 +1,25 @@
extern crate leaf;
extern crate phloem;
extern crate collenchyma as co;

#[cfg(test)]
mod network_spec {
use std::rc::Rc;
use co::backend::{Backend, BackendConfig};
use co::framework::IFramework;
use co::frameworks::Native;
use leaf::network::*;

fn backend() -> Rc<Backend<Native>> {
let framework = Native::new();
let hardwares = framework.hardwares();
let backend_config = BackendConfig::new(framework, hardwares);
Rc::new(Backend::new(backend_config).unwrap())
}

#[test]
fn new_layer() {
let cfg = NetworkConfig::default();
Network::from_config(&cfg);
Network::from_config(backend(), &cfg);
}
}
5 changes: 4 additions & 1 deletion tests/solver_specs.rs
@@ -1,8 +1,11 @@
extern crate leaf;
extern crate collenchyma as co;

#[cfg(test)]
mod network_spec {
use leaf::solver::*;
use co::backend::Backend;
use co::frameworks::Native;

#[test]
// fixed: always return base_lr.
@@ -40,6 +43,6 @@ mod network_spec {
#[test]
fn instantiate_solver_sgd_momentum() {
let cfg = SolverConfig{ solver: SolverKind::SGD(SGDKind::Momentum), ..SolverConfig::default()};
Solver::<Box<ISolver>>::from_config(&cfg);
Solver::<Box<ISolver<Backend<Native>>>, Backend<Native>>::from_config(&cfg);
}
}
