add toolbox for ar compress and uncompress #529
Merged

Commits (5):
323f671 add toolbox for ar compress and uncompress (jingchen2222)
7bd5515 resolve comment (jingchen2222)
bc24015 Merge commit 'e1250ba495a87644616854bc7de896281da1691e' into feat/add… (jingchen2222)
535ee1e rust fmt (jingchen2222)
45ae660 convert batchrecord to mutation and ut (jingchen2222)

Files changed

@@ -40,3 +40,4 @@ tools/state_db
 tools/index_doc_db
 tools/index_meta_db
 bridge
+sdk/dist

Binary file not shown (presumably the resources/test/37829_37968.gz.parquet fixture referenced by the tests below).

@@ -0,0 +1,325 @@
use arrow::array::{
    ArrayRef, BinaryArray, BinaryBuilder, StringArray, StringBuilder, UInt32Array, UInt32Builder,
    UInt64Array, UInt64Builder,
};
use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
use arrow::record_batch::RecordBatch;
use db3_error::{DB3Error, Result};
use db3_proto::db3_mutation_v2_proto::{MutationBody, MutationHeader};
use db3_storage::ar_fs::{ArFileSystem, ArFileSystemConfig};
use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
use parquet::arrow::ArrowWriter;
use parquet::basic::{Compression, GzipLevel};
use parquet::file::properties::WriterProperties;
use std::fs::File;
use std::path::Path;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use tempdir::TempDir;
use tracing::info;
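
/// A toolbox for rolling Arrow record batches up to Arweave: it owns the
/// Arweave filesystem handle, the four-column schema (payload, signature,
/// block, order), and a scratch directory for temporary parquet files.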
pub struct ArToolBox {
    pub network_id: Arc<AtomicU64>,
    pub schema: SchemaRef,
    pub ar_filesystem: ArFileSystem,
    pub temp_data_path: String,
}

impl ArToolBox {
    pub fn new(
        key_root_path: String,
        arweave_url: String,
        temp_data_path: String,
        network_id: Arc<AtomicU64>,
    ) -> Result<Self> {
        let ar_fs_config = ArFileSystemConfig {
            key_root_path,
            arweave_url,
        };
        let ar_filesystem = ArFileSystem::new(ar_fs_config)?;
        let schema = Arc::new(Schema::new(vec![
            Field::new("payload", DataType::Binary, true),
            Field::new("signature", DataType::Utf8, true),
            Field::new("block", DataType::UInt64, true),
            Field::new("order", DataType::UInt32, true),
        ]));

        Ok(Self {
            network_id,
            schema,
            ar_filesystem,
            temp_data_path,
        })
    }

    pub async fn get_ar_account(&self) -> Result<(String, String)> {
        let addr = self.ar_filesystem.get_address();
        let balance = self.ar_filesystem.get_balance().await?;
        Ok((addr, balance.to_string()))
    }
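
    /// Dump the record batch to a gzip-compressed parquet file in a temp
    /// directory and upload it to Arweave.
    /// Returns (Arweave tx id, reward paid, rows written, file size in bytes).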
    pub async fn compress_and_upload_record_batch(
        &self,
        tx: String,
        last_end_block: u64,
        current_block: u64,
        recordbatch: &RecordBatch,
    ) -> Result<(String, u64, u64, u64)> {
        let tmp_dir = TempDir::new_in(&self.temp_data_path, "compression")
            .map_err(|e| DB3Error::RollupError(format!("{e}")))?;
        let file_path = tmp_dir.path().join("rollup.gz.parquet");
        let (num_rows, size) = Self::dump_recordbatch(&file_path, recordbatch)?;
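        // Rollup objects are named "<last_end_block>_<current_block>.gz.parquet",
        // so the uploaded file's name encodes the block range it covers.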
        let filename = format!("{}_{}.gz.parquet", last_end_block, current_block);
        //TODO add tx status confirmation
        let (id, reward) = self
            .ar_filesystem
            .upload_file(
                &file_path,
                tx.as_str(),
                last_end_block,
                current_block,
                self.network_id.load(Ordering::Relaxed),
                filename.as_str(),
            )
            .await?;
        Ok((id, reward, num_rows, size))
    }

    /// Compress a record batch to a gzip parquet file;
    /// returns (rows written, file size in bytes)
    pub fn dump_recordbatch(path: &Path, recordbatch: &RecordBatch) -> Result<(u64, u64)> {
        let properties = WriterProperties::builder()
            .set_compression(Compression::GZIP(GzipLevel::default()))
            .build();
        let fd = File::create(path).map_err(|e| DB3Error::RollupError(format!("{e}")))?;

        let mut writer = ArrowWriter::try_new(fd, recordbatch.schema(), Some(properties))
            .map_err(|e| DB3Error::RollupError(format!("{e}")))?;
        writer
            .write(recordbatch)
            .map_err(|e| DB3Error::RollupError(format!("{e}")))?;
        let meta = writer
            .close()
            .map_err(|e| DB3Error::RollupError(format!("{e}")))?;
        let metadata =
            std::fs::metadata(path).map_err(|e| DB3Error::RollupError(format!("{e}")))?;
        Ok((meta.num_rows as u64, metadata.len()))
    }

    /// Parse record batches from a gzip-compressed parquet file
    pub fn parse_gzip_file(path: &Path) -> Result<Vec<RecordBatch>> {
        let fd = File::open(path).map_err(|e| DB3Error::RollupError(format!("{e}")))?;
        // Create a sync parquet reader with batch_size.
        // batch_size is the number of rows to buffer from pages per read, defaults to 1024
        let parquet_reader = ParquetRecordBatchReaderBuilder::try_new(fd)
            .map_err(|e| DB3Error::RollupError(format!("{e}")))?
            .with_batch_size(8192)
            .build()
            .map_err(|e| DB3Error::RollupError(format!("{e}")))?;

        let mut batches = Vec::new();

        for batch in parquet_reader {
            let each = batch.map_err(|e| DB3Error::RollupError(format!("{e}")))?;
            batches.push(each);
        }
        Ok(batches)
    }

    /// Parse (mutation body, block, order) tuples from a record batch
    pub fn convert_recordbatch_to_mutation(
        record_batch: &RecordBatch,
    ) -> Result<Vec<(MutationBody, u64, u32)>> {
        let mut mutations = Vec::new();
        let payloads = record_batch
            .column_by_name("payload")
            .unwrap()
            .as_any()
            .downcast_ref::<BinaryArray>()
            .unwrap();
        let signatures = record_batch
            .column_by_name("signature")
            .unwrap()
            .as_any()
            .downcast_ref::<StringArray>()
            .unwrap();
        let blocks = record_batch
            .column_by_name("block")
            .unwrap()
            .as_any()
            .downcast_ref::<UInt64Array>()
            .unwrap();
        let orders = record_batch
            .column_by_name("order")
            .unwrap()
            .as_any()
            .downcast_ref::<UInt32Array>()
            .unwrap();

        for i in 0..record_batch.num_rows() {
            let payload = payloads.value(i);
            let signature = signatures.value(i);
            let block = blocks.value(i);
            let order = orders.value(i);
            let mutation = MutationBody {
                payload: payload.to_vec(),
                signature: signature.to_string(),
            };
            mutations.push((mutation, block, order));
        }
        Ok(mutations)
    }

    /// Convert mutations to a record batch, encoding mutation body,
    /// signature, block and order into the four schema columns
    pub fn convert_mutations_to_recordbatch(
        &self,
        mutations: &[(MutationHeader, MutationBody)],
    ) -> Result<RecordBatch> {
        //TODO limit the memory usage
        let mut payload_builder = BinaryBuilder::new();
        let mut signature_builder = StringBuilder::new();
        let mut block_builder = UInt64Builder::new();
        let mut order_builder = UInt32Builder::new();
        for (header, body) in mutations {
            let body_ref: &[u8] = &body.payload;
            payload_builder.append_value(body_ref);
            signature_builder.append_value(body.signature.as_str());
            block_builder.append_value(header.block_id);
            order_builder.append_value(header.order_id);
        }
        let array_refs: Vec<ArrayRef> = vec![
            Arc::new(payload_builder.finish()),
            Arc::new(signature_builder.finish()),
            Arc::new(block_builder.finish()),
            Arc::new(order_builder.finish()),
        ];
        let record_batch = RecordBatch::try_new(self.schema.clone(), array_refs)
            .map_err(|e| DB3Error::RollupError(format!("{e}")))?;
        info!(
            "convert {} mutations into a recordbatch with memory {}",
            mutations.len(),
            record_batch.get_array_memory_size()
        );
        Ok(record_batch)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use arrow::array::{Array, AsArray, BinaryArray, StringArray, UInt32Array, UInt64Array};
    use arrow::datatypes::{BinaryType, DataType, Field, Schema};
    use std::path::PathBuf;

    #[test]
    fn it_works() {}

    fn mock_batch_record() -> RecordBatch {
        let schema = Arc::new(Schema::new(vec![
            Field::new("payload", DataType::Binary, true),
            Field::new("signature", DataType::Utf8, true),
            Field::new("block", DataType::UInt64, true),
            Field::new("order", DataType::UInt32, true),
        ]));
        let mut payload_builder = BinaryBuilder::new();
        let mut signature_builder = StringBuilder::new();
        let mut block_builder = UInt64Builder::new();
        let mut order_builder = UInt32Builder::new();
        for block in 0..10 {
            let body_ref: &[u8] = "this is a payload sample".as_bytes();
            payload_builder.append_value(body_ref);
            signature_builder.append_value("0x1234567890");
            block_builder.append_value(block);
            order_builder.append_value((block * 10) as u32);
        }
        let array_refs: Vec<ArrayRef> = vec![
            Arc::new(payload_builder.finish()),
            Arc::new(signature_builder.finish()),
            Arc::new(block_builder.finish()),
            Arc::new(order_builder.finish()),
        ];
        RecordBatch::try_new(schema.clone(), array_refs).unwrap()
    }

    #[test]
    fn dump_recordbatch_ut() {
        let tmp_dir_path = TempDir::new("dump_recordbatch_ut").expect("create temp dir");

        let record_batch = mock_batch_record();
        let (num_rows, size) = ArToolBox::dump_recordbatch(
            Path::new(tmp_dir_path.path().join("test.parquet").to_str().unwrap()),
            &record_batch,
        )
        .unwrap();
        assert_eq!(num_rows, 10);
        assert_eq!(size, 1862);
    }

    #[test]
    fn parse_gzip_file_ut() {
        let tmp_dir_path = TempDir::new("parse_gzip_file_ut").expect("create temp dir");

        let parquet_file = tmp_dir_path.path().join("test.parquet");
        let record_batch = mock_batch_record();
        let (num_rows, size) = ArToolBox::dump_recordbatch(&parquet_file, &record_batch).unwrap();
        assert_eq!(num_rows, 10);
        assert_eq!(size, 1862);
        let res = ArToolBox::parse_gzip_file(parquet_file.as_path()).unwrap();
        assert_eq!(res.len(), 1);
        let rec = res[0].clone();
        println!("schema: {}", rec.schema());
        assert_eq!(rec.num_columns(), 4);
        assert_eq!(rec.num_rows(), 10);
        let payloads = rec
            .column_by_name("payload")
            .unwrap()
            .as_any()
            .downcast_ref::<BinaryArray>()
            .unwrap();
        assert_eq!(payloads.len(), 10);
        assert_eq!(payloads.value(5), "this is a payload sample".as_bytes());

        let signatures = rec
            .column_by_name("signature")
            .unwrap()
            .as_any()
            .downcast_ref::<StringArray>()
            .unwrap();
        assert_eq!(signatures.len(), 10);
        assert_eq!(signatures.value(5), "0x1234567890");

        let blocks = rec
            .column_by_name("block")
            .unwrap()
            .as_any()
            .downcast_ref::<UInt64Array>()
            .unwrap();
        assert_eq!(blocks.len(), 10);
        assert_eq!(blocks.value(5), 5);

        let orders = rec
            .column_by_name("order")
            .unwrap()
            .as_any()
            .downcast_ref::<UInt32Array>()
            .unwrap();
        assert_eq!(orders.len(), 10);
        assert_eq!(orders.value(5), 50);
    }

    #[test]
    fn parse_sample_ar_parquet_ut() {
        let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
        path.push("resources/test/37829_37968.gz.parquet");

        let res = ArToolBox::parse_gzip_file(path.as_path()).unwrap();
        assert_eq!(res.len(), 1);
        let rec = res[0].clone();
        println!("schema: {}", rec.schema());
        println!("num_rows: {}", rec.num_rows());
        assert_eq!(rec.num_columns(), 4);
        assert_eq!(rec.num_rows(), 204);

        let mutations = ArToolBox::convert_recordbatch_to_mutation(&rec).unwrap();
        assert_eq!(mutations.len(), 204);
        let (mutation, block, order) = mutations[0].clone();
        assert_eq!(block, 37829);
        assert_eq!(order, 1);
        assert_eq!(mutation.signature, "0xf6afe1165ae87fa09375eabccdedc61f3e5af4ed1e5c6456f1b63d397862252667e1f13f0f076f30609754f787c80135c52f7c249e95c9b8fab1b9ed27846c1b1c");
    }
}
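
For orientation, here is a minimal sketch of how the toolbox might be wired together end to end. It assumes the imports at the top of the file; the key root, gateway URL, scratch directory, block numbers, and the rollup_example name itself are hypothetical, and the call sequence is inferred from the method signatures above rather than taken from this PR.

async fn rollup_example(mutations: Vec<(MutationHeader, MutationBody)>) -> Result<()> {
    // All paths and URLs below are placeholder values, not from the PR.
    let toolbox = ArToolBox::new(
        "./ar_keys".to_string(),           // hypothetical Arweave key root
        "https://arweave.net".to_string(), // hypothetical gateway URL
        "./tmp".to_string(),               // scratch dir for temporary parquet files
        Arc::new(AtomicU64::new(1)),       // network id shared with the rest of the node
    )?;
    // Encode the pending mutations into the four-column Arrow layout.
    let batch = toolbox.convert_mutations_to_recordbatch(&mutations)?;
    // "prev_tx" stands in for the previous rollup's Arweave tx id.
    let (id, reward, rows, size) = toolbox
        .compress_and_upload_record_batch("prev_tx".to_string(), 100, 200, &batch)
        .await?;
    info!("uploaded {} rows ({} bytes) as {} for reward {}", rows, size, id, reward);
    Ok(())
}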
Review comment: unwrap will panic if there is an error.
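
A sketch of one way to address this in convert_recordbatch_to_mutation, replacing the unwrap calls with typed errors; the error message strings are illustrative, not part of the PR, and the other three columns would be handled the same way:

        let payloads = record_batch
            .column_by_name("payload")
            .ok_or_else(|| DB3Error::RollupError("missing payload column".to_string()))?
            .as_any()
            .downcast_ref::<BinaryArray>()
            .ok_or_else(|| DB3Error::RollupError("payload column is not a BinaryArray".to_string()))?;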