diff --git a/Makefile b/Makefile
index 58eb0ab1e3..ee10b50c7d 100644
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,7 @@ test:
 	cargo test --release
 
 server:
-	RUST_LOG="info,tide=off,run-console=debug" \
+	RUST_LOG="info,tide=off,tracing=off,avm_server=off,run-console=debug" \
 	cargo run --release -p particle-node
 
 server-debug:
@@ -33,7 +33,9 @@ server-debug:
 	wasmer_interface_types_fl=info,\
 	async_std=info,\
 	async_io=info,\
-	polling=info" \
+	polling=info,\
+	avm_server=off,\
+	tracing=off" \
 	cargo run --release -p particle-node -- -c ./deploy/Config.default.toml
 
 .PHONY: server server-debug test release build deploy
diff --git a/crates/created-swarm/src/swarm.rs b/crates/created-swarm/src/swarm.rs
index eac7077762..10ae0d5668 100644
--- a/crates/created-swarm/src/swarm.rs
+++ b/crates/created-swarm/src/swarm.rs
@@ -242,6 +242,7 @@ pub struct SwarmConfig {
     pub builtins_dir: Option<PathBuf>,
     pub spell_base_dir: Option<PathBuf>,
     pub timer_resolution: Duration,
+    pub allowed_binaries: Vec<String>,
 }
 
 impl SwarmConfig {
@@ -261,6 +262,7 @@ impl SwarmConfig {
             builtins_dir: None,
             spell_base_dir: None,
             timer_resolution: default_script_storage_timer_resolution(),
+            allowed_binaries: vec![],
         }
     }
 }
@@ -351,6 +353,7 @@ pub fn create_swarm_with_runtime(
     resolved.node_config.particle_execution_timeout = EXECUTION_TIMEOUT;
     resolved.node_config.script_storage_timer_resolution = config.timer_resolution;
+    resolved.node_config.allowed_binaries = config.allowed_binaries.clone();
 
     let management_kp = fluence_keypair::KeyPair::generate_ed25519();
     let management_peer_id = libp2p::identity::Keypair::from(management_kp.clone())
diff --git a/crates/particle-node-tests/tests/modules.rs b/crates/particle-node-tests/tests/modules.rs
new file mode 100644
index 0000000000..15c46575fa
--- /dev/null
+++ b/crates/particle-node-tests/tests/modules.rs
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2023 Fluence Labs Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use base64::{engine::general_purpose::STANDARD as base64, Engine};
+use connected_client::ConnectedClient;
+use created_swarm::{make_swarms, make_swarms_with_cfg};
+use maplit::hashmap;
+use serde_json::json;
+use service_modules::load_module;
+
+#[tokio::test]
+async fn test_add_module_mounted_binaries() {
+    let swarms = make_swarms_with_cfg(1, |mut cfg| {
+        cfg.allowed_binaries = vec!["/usr/bin/curl".to_string()];
+        cfg
+    })
+    .await;
+
+    let mut client = ConnectedClient::connect_to(swarms[0].multiaddr.clone())
+        .await
+        .expect("connect client");
+    let module = load_module("tests/tetraplets/artifacts", "tetraplets").expect("load module");
+
+    let config = json!(
+    {
+        "name": "tetraplets",
+        "mem_pages_count": 100,
+        "logger_enabled": true,
+        "wasi": {
+            "envs": json!({}),
+            "preopened_files": vec!["/tmp"],
+            "mapped_dirs": json!({}),
+        },
+        "mounted_binaries": json!({"cmd": "/usr/bin/curl"})
+    });
+
+    let script = r#"
+    (xor
+        (seq
+            (call node ("dist" "add_module") [module_bytes module_config])
+            (call client ("return" "") ["ok"])
+        )
+        (call client ("return" "") [%last_error%.$.message])
+    )
+    "#;
+
+    let data = hashmap! {
+        "client" => json!(client.peer_id.to_string()),
+        "node" => json!(client.node.to_string()),
+        "module_bytes" => json!(base64.encode(&module)),
+        "module_config" => config,
+    };
+
+    let response = client.execute_particle(script, data).await.unwrap();
+    if let Some(result) = response[0].as_str() {
+        assert_eq!("ok", result);
+    } else {
+        panic!("can't receive response from node");
+    }
+}
+
+#[tokio::test]
+async fn test_add_module_mounted_binaries_forbidden() {
+    let swarms = make_swarms(1).await;
+
+    let mut client = ConnectedClient::connect_to(swarms[0].multiaddr.clone())
+        .await
+        .expect("connect client");
+    let module = load_module("tests/tetraplets/artifacts", "tetraplets").expect("load module");
+
+    let config = json!(
+    {
+        "name": "tetraplets",
+        "mem_pages_count": 100,
+        "logger_enabled": true,
+        "wasi": {
+            "envs": json!({}),
+            "preopened_files": vec!["/tmp"],
+            "mapped_dirs": json!({}),
+        },
+        "mounted_binaries": json!({"cmd": "/usr/bin/behbehbeh"})
+    });
+
+    let script = r#"
+    (xor
+        (seq
+            (call node ("dist" "add_module") [module_bytes module_config])
+            (call client ("return" "") ["ok"])
+        )
+        (call client ("return" "") [%last_error%.$.message])
+    )
+    "#;
+
+    let data = hashmap! {
+        "client" => json!(client.peer_id.to_string()),
+        "node" => json!(client.node.to_string()),
+        "module_bytes" => json!(base64.encode(&module)),
+        "module_config" => config,
+    };
+
+    let response = client.execute_particle(script, data).await.unwrap();
+    if let Some(result) = response[0].as_str() {
+        let expected = "Local service error, ret_code is 1, error message is '\"Error: Config error: requested mounted binary /usr/bin/behbehbeh is forbidden on this host\\nForbiddenMountedBinary { forbidden_path: \\\"/usr/bin/behbehbeh\\\" }\"'";
+        assert_eq!(expected, result);
+    } else {
+        panic!("can't receive response from node");
+    }
+}
diff --git a/crates/particle-node-tests/tests/script_storage.rs b/crates/particle-node-tests/tests/script_storage.rs
index 0a75fe2d50..edda99efd3 100644
--- a/crates/particle-node-tests/tests/script_storage.rs
+++ b/crates/particle-node-tests/tests/script_storage.rs
@@ -617,55 +617,6 @@ async fn add_script_delay_oneshot() {
     assert_eq!(list, vec![serde_json::Value::Array(vec![])]);
 }
 
-#[tokio::test]
-async fn add_script_random_delay() {
-    let swarms = make_swarms(1).await;
-
-    let interval = 3u64;
-
-    let mut client = ConnectedClient::connect_to(swarms[0].multiaddr.clone())
-        .await
-        .wrap_err("connect client")
-        .unwrap();
-
-    let script = f!(r#"
-        (seq
-            (call "{client.node}" ("peer" "timestamp_sec") [] result)
-            (call "{client.peer_id}" ("op" "return") [result])
-        )
-    "#);
-
-    let mut res = client
-        .execute_particle(
-            f!(r#"
-            (seq
-                (call relay ("peer" "timestamp_sec") [] now)
-                (seq
-                    (call relay ("script" "add") [script "{interval}"])
-                    (call %init_peer_id% ("op" "return") [now])
-                )
-            )
-            "#),
-            hashmap! {
-                "relay" => json!(client.node.to_string()),
-                "script" => json!(script),
-            },
-        )
-        .await
-        .unwrap();
-
-    let res = res.pop().unwrap();
-    let now = res.as_u64().unwrap();
-
-    let res = client.receive_args().await.wrap_err("receive").unwrap();
-    let res = res.into_iter().next().unwrap().as_u64().unwrap();
-    let eps = 2u64;
-    let expected = now + interval + eps;
-    log::info!("res {}", res);
-    log::info!("expected {}", expected);
-    assert!((now..=expected).contains(&res));
-}
-
 async fn create_file_share(client: &mut ConnectedClient) -> CreatedService {
     create_service(
         client,
diff --git a/crates/particle-node-tests/tests/services.rs b/crates/particle-node-tests/tests/services.rs
index 03c5788124..99dcd41e93 100644
--- a/crates/particle-node-tests/tests/services.rs
+++ b/crates/particle-node-tests/tests/services.rs
@@ -21,12 +21,16 @@ use serde_json::Value as JValue;
 
 use base64::{engine::general_purpose::STANDARD as base64, Engine};
 use connected_client::ConnectedClient;
-use created_swarm::make_swarms;
+use created_swarm::{make_swarms, make_swarms_with_cfg};
 use service_modules::load_module;
 
 #[tokio::test]
 async fn create_service_from_config() {
-    let swarms = make_swarms(1).await;
+    let swarms = make_swarms_with_cfg(1, |mut cfg| {
+        cfg.allowed_binaries = vec!["/does/not/exist".to_string()];
+        cfg
+    })
+    .await;
 
     let mut client = ConnectedClient::connect_to(swarms[0].multiaddr.clone())
         .await
@@ -64,11 +68,11 @@ async fn create_service_from_config() {
                     [
                         [
                             "abc",
-                            "/tmp"
+                            "/does/not/exist"
                         ],
                         [
                             "2222",
-                            "/tmp"
+                            "/does/not/exist"
                         ]
                     ]
                 ]
diff --git a/crates/server-config/src/defaults.rs b/crates/server-config/src/defaults.rs
index becf3223ec..8aeeb567e0 100644
--- a/crates/server-config/src/defaults.rs
+++ b/crates/server-config/src/defaults.rs
@@ -204,3 +204,7 @@ pub fn default_module_max_heap_size() -> bytesize::ByteSize {
 pub fn default_max_builtin_metrics_storage_size() -> usize {
     5
 }
+
+pub fn default_allowed_binaries() -> Vec<String> {
+    vec!["/usr/bin/curl".to_string(), "/usr/bin/ipfs".to_string()]
+}
diff --git a/crates/server-config/src/node_config.rs b/crates/server-config/src/node_config.rs
index f9ba8313c2..075b89c9f5 100644
--- a/crates/server-config/src/node_config.rs
+++ b/crates/server-config/src/node_config.rs
@@ -138,6 +138,9 @@ pub struct NodeConfig {
     #[serde(with = "peerid_serializer")]
     #[serde(default = "default_management_peer_id")]
     pub management_peer_id: PeerId,
+
+    #[serde(default = "default_allowed_binaries")]
+    pub allowed_binaries: Vec<String>,
 }
 
 #[derive(Clone, Deserialize, Derivative, Copy)]
diff --git a/crates/server-config/src/services_config.rs b/crates/server-config/src/services_config.rs
index 781129960f..02234f9399 100644
--- a/crates/server-config/src/services_config.rs
+++ b/crates/server-config/src/services_config.rs
@@ -18,8 +18,8 @@ use fs_utils::{create_dirs, set_write_only, to_abs_path};
 
 use bytesize::ByteSize;
 use libp2p::PeerId;
-use std::collections::HashMap;
-use std::path::PathBuf;
+use std::collections::{HashMap, HashSet};
+use std::path::{Path, PathBuf};
 
 #[derive(Debug, Clone)]
 pub struct ServicesConfig {
@@ -47,6 +47,8 @@ pub struct ServicesConfig {
     pub max_heap_size: ByteSize,
     /// Default heap size in bytes available for the module unless otherwise specified.
     pub default_heap_size: Option<ByteSize>,
+    /// List of allowed binary paths
+    pub allowed_binaries: HashSet<PathBuf>,
 }
 
 impl ServicesConfig {
@@ -60,9 +62,23 @@ impl ServicesConfig {
         builtins_management_peer_id: PeerId,
         max_heap_size: ByteSize,
         default_heap_size: Option<ByteSize>,
+        allowed_binaries: Vec<String>,
     ) -> Result<Self, std::io::Error> {
         let base_dir = to_abs_path(base_dir);
 
+        let allowed_binaries = allowed_binaries
+            .into_iter()
+            .map(|path_str| {
+                let path = Path::new(&path_str);
+                match path.try_exists() {
+                    Err(err) => log::warn!("cannot check binary `{path_str}`: {err}"),
+                    Ok(false) => log::warn!("binary `{path_str}` does not exist"),
+                    _ => {}
+                };
+                path.to_path_buf()
+            })
+            .collect::<_>();
+
         let this = Self {
             local_peer_id,
             blueprint_dir: config_utils::blueprint_dir(&base_dir),
@@ -75,6 +91,7 @@ impl ServicesConfig {
             builtins_management_peer_id,
             max_heap_size,
             default_heap_size,
+            allowed_binaries,
         };
 
         create_dirs(&[
diff --git a/particle-builtins/src/builtins.rs b/particle-builtins/src/builtins.rs
index c84a8fba53..257565d0b0 100644
--- a/particle-builtins/src/builtins.rs
+++ b/particle-builtins/src/builtins.rs
@@ -119,6 +119,7 @@ where
             vault_dir,
             config.max_heap_size,
             config.default_heap_size,
+            config.allowed_binaries.clone(),
         );
         let particles_vault_dir = vault_dir.to_path_buf();
         let management_peer_id = config.management_peer_id;
diff --git a/particle-modules/src/error.rs b/particle-modules/src/error.rs
index 319734171b..1602584479 100644
--- a/particle-modules/src/error.rs
+++ b/particle-modules/src/error.rs
@@ -179,6 +179,8 @@ pub enum ModuleError {
         max_heap_size_wanted: u64,
         max_heap_size_allowed: u64,
     },
+    #[error("Config error: requested mounted binary {forbidden_path} is forbidden on this host")]
+    ForbiddenMountedBinary { forbidden_path: String },
 }
 
 impl From<ModuleError> for JValue {
diff --git a/particle-modules/src/modules.rs b/particle-modules/src/modules.rs
index dd538db2d3..0f806ee366 100644
--- a/particle-modules/src/modules.rs
+++ b/particle-modules/src/modules.rs
@@ -14,7 +14,13 @@
  * limitations under the License.
  */
 
-use std::{collections::HashMap, iter, path::Path, path::PathBuf, sync::Arc};
+use std::{
+    collections::{HashMap, HashSet},
+    iter,
+    path::Path,
+    path::PathBuf,
+    sync::Arc,
+};
 
 use base64::{engine::general_purpose::STANDARD as base64, Engine};
 use bytesize::ByteSize;
@@ -36,9 +42,10 @@ use service_modules::{
 
 use crate::error::ModuleError::{
     BlueprintNotFound, BlueprintNotFoundInVault, ConfigNotFoundInVault, EmptyDependenciesList,
-    FacadeShouldBeHash, IncorrectVaultBlueprint, IncorrectVaultModuleConfig, InvalidBlueprintPath,
-    InvalidModuleConfigPath, InvalidModuleName, InvalidModulePath, MaxHeapSizeOverflow,
-    ModuleNotFoundInVault, ReadModuleInterfaceError, VaultDoesNotExist,
+    FacadeShouldBeHash, ForbiddenMountedBinary, IncorrectVaultBlueprint,
+    IncorrectVaultModuleConfig, InvalidBlueprintPath, InvalidModuleConfigPath, InvalidModuleName,
+    InvalidModulePath, MaxHeapSizeOverflow, ModuleNotFoundInVault, ReadModuleInterfaceError,
+    VaultDoesNotExist,
 };
 use crate::error::Result;
 use crate::files::{self, load_config_by_path, load_module_by_path, load_module_descriptor};
@@ -68,6 +75,7 @@ pub struct ModuleRepository {
     blueprints: Arc<Mutex<HashMap<String, Blueprint>>>,
     max_heap_size: ByteSize,
    default_heap_size: Option<ByteSize>,
+    allowed_binaries: HashSet<PathBuf>,
 }
 
 impl ModuleRepository {
@@ -77,6 +85,7 @@ impl ModuleRepository {
         particles_vault_dir: &Path,
         max_heap_size: ByteSize,
         default_heap_size: Option<ByteSize>,
+        allowed_binaries: HashSet<PathBuf>,
     ) -> Self {
         let modules_by_name: HashMap<_, _> = fs_utils::list_files(modules_dir)
             .into_iter()
@@ -117,6 +126,7 @@ impl ModuleRepository {
             particles_vault_dir: particles_vault_dir.to_path_buf(),
             max_heap_size,
             default_heap_size,
+            allowed_binaries,
         }
     }
@@ -166,11 +176,29 @@ impl ModuleRepository {
 
         Ok(())
     }
+
+    fn check_module_mounted_binaries(&self, config: &TomlMarineNamedModuleConfig) -> Result<()> {
+        if let Some(binaries) = &config.config.mounted_binaries {
+            for requested_binary in binaries.values() {
+                if let Some(requested_binary) = requested_binary.as_str() {
+                    let requested_binary_path = Path::new(requested_binary);
+                    if !self.allowed_binaries.contains(requested_binary_path) {
+                        return Err(ForbiddenMountedBinary {
+                            forbidden_path: requested_binary.to_string(),
+                        });
+                    }
+                }
+            }
+        }
+        Ok(())
+    }
+
     pub fn add_module(&self, module: Vec<u8>, config: TomlMarineNamedModuleConfig) -> Result<String> {
         let hash = Hash::new(&module);
         let mut config = files::add_module(&self.modules_dir, &hash, &module, config)?;
         self.check_module_heap_size(&mut config)?;
+        self.check_module_mounted_binaries(&config)?;
         self.modules_by_name
             .lock()
             .insert(config.name, hash.clone());
@@ -527,6 +555,7 @@ mod tests {
             vault_dir.path(),
             max_heap_size,
             None,
+            Default::default(),
         );
 
         let dep1 = Dependency::Hash(Hash::new(&[1, 2, 3]));
@@ -569,6 +598,7 @@ mod tests {
             vault_dir.path(),
             max_heap_size,
             None,
+            Default::default(),
         );
 
         let module = load_module(
@@ -627,6 +657,7 @@ mod tests {
             vault_dir.path(),
             max_heap_size,
             None,
+            Default::default(),
         );
 
         let module = load_module(
diff --git a/particle-node/src/node.rs b/particle-node/src/node.rs
index 3f1b29b85d..f23d3e64f6 100644
--- a/particle-node/src/node.rs
+++ b/particle-node/src/node.rs
@@ -118,6 +118,7 @@ impl Node {
             builtins_peer_id,
             config.node_config.module_max_heap_size,
             config.node_config.module_default_heap_size,
+            config.node_config.allowed_binaries.clone(),
         )
         .expect("create services config");
diff --git a/particle-services/src/app_services.rs b/particle-services/src/app_services.rs
index 7ff5430e01..4cfa366edf 100644
--- a/particle-services/src/app_services.rs
+++ b/particle-services/src/app_services.rs
@@ -881,6 +881,7 @@ mod tests {
             to_peer_id(&startup_kp),
             max_heap_size,
             None,
+            Default::default(),
         )
         .unwrap();
 
@@ -890,6 +891,7 @@ mod tests {
             &config.particles_vault_dir,
             max_heap_size,
             None,
+            Default::default(),
         );
 
         ParticleAppServices::new(config, repo, None)
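Note: allowed_binaries is deserialized into NodeConfig from the node's TOML config, and default_allowed_binaries() supplies /usr/bin/curl and /usr/bin/ipfs when the key is omitted. As a rough sketch, assuming the key sits at the level where NodeConfig is read (its exact placement in the TOML tree is not shown in this diff), an operator override in ./deploy/Config.default.toml could look like:

    # Hypothetical excerpt: binaries that module configs may reference via mounted_binaries.
    allowed_binaries = ["/usr/bin/curl", "/usr/bin/ipfs"]

A module whose mounted_binaries entry points outside this list is rejected by the dist add_module builtin with ForbiddenMountedBinary.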