fix: casting when data to be written does not match table schema #1427

Merged (4 commits) on Jun 3, 2023
26 changes: 9 additions & 17 deletions rust/src/operations/delete.rs
@@ -37,6 +37,7 @@ use arrow::datatypes::Field;
use arrow::datatypes::Schema as ArrowSchema;
use arrow::error::ArrowError;
use arrow::record_batch::RecordBatch;
use arrow_cast::CastOptions;
use datafusion::datasource::file_format::{parquet::ParquetFormat, FileFormat};
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::MemTable;
@@ -461,6 +462,7 @@ async fn excute_non_empty_expr(
Some(snapshot.table_config().target_file_size() as usize),
None,
writer_properties,
&CastOptions { safe: false },
)
.await?;
metrics.rewrite_time_ms = Instant::now().duration_since(write_start).as_millis();
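
For context, the `CastOptions { safe: false }` passed above controls what arrow-cast does with values that cannot be converted. A minimal sketch of the two modes, assuming an arrow-cast version where `CastOptions` has only the `safe` field (as the construction in this diff implies):

```rust
use arrow_array::{Array, ArrayRef, StringArray};
use arrow_cast::{cast_with_options, CastOptions};
use arrow_schema::DataType;
use std::sync::Arc;

fn main() {
    let col: ArrayRef = Arc::new(StringArray::from(vec![Some("123"), Some("not a number")]));

    // safe: true silently turns values that fail to parse into nulls.
    let lenient = cast_with_options(&col, &DataType::Int32, &CastOptions { safe: true }).unwrap();
    assert_eq!(lenient.null_count(), 1);

    // safe: false surfaces the failure as an ArrowError instead.
    assert!(cast_with_options(&col, &DataType::Int32, &CastOptions { safe: false }).is_err());
}
```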
@@ -681,6 +683,7 @@ mod tests {

use crate::action::*;
use crate::operations::DeltaOps;
use crate::writer::test_utils::datafusion::get_data;
use crate::writer::test_utils::{get_arrow_schema, get_delta_schema};
use crate::DeltaTable;
use arrow::array::Int32Array;
@@ -704,17 +707,6 @@
table
}

- async fn get_data(table: DeltaTable) -> Vec<RecordBatch> {
Member: I think this is a good refactor 👍

- let ctx = SessionContext::new();
- ctx.register_table("test", Arc::new(table)).unwrap();
- ctx.sql("select * from test")
-     .await
-     .unwrap()
-     .collect()
-     .await
-     .unwrap()
- }

#[tokio::test]
async fn test_delete_default() {
let schema = get_arrow_schema(&None);
@@ -850,7 +842,7 @@ mod tests {
"+----+-------+------------+",
];

- let actual = get_data(table).await;
+ let actual = get_data(&table).await;
assert_batches_sorted_eq!(&expected, &actual);
}

@@ -898,7 +890,7 @@ mod tests {
"| 2 |",
"+-------+",
];
- let actual = get_data(table).await;
+ let actual = get_data(&table).await;
assert_batches_sorted_eq!(&expected, &actual);

// Validate behaviour of less than
@@ -919,7 +911,7 @@ mod tests {
"| 4 |",
"+-------+",
];
- let actual = get_data(table).await;
+ let actual = get_data(&table).await;
assert_batches_sorted_eq!(&expected, &actual);

// Validate behaviour of less plus not null
@@ -938,7 +930,7 @@ mod tests {
"| 4 |",
"+-------+",
];
- let actual = get_data(table).await;
+ let actual = get_data(&table).await;
assert_batches_sorted_eq!(&expected, &actual);
}

@@ -997,7 +989,7 @@ mod tests {
"+----+-------+------------+",
];

- let actual = get_data(table).await;
+ let actual = get_data(&table).await;
assert_batches_sorted_eq!(&expected, &actual);
}

@@ -1058,7 +1050,7 @@ mod tests {
"| B | 20 | 2021-02-03 |",
"+----+-------+------------+",
];
- let actual = get_data(table).await;
+ let actual = get_data(&table).await;
assert_batches_sorted_eq!(&expected, &actual);
}

134 changes: 129 additions & 5 deletions rust/src/operations/write.rs
@@ -32,7 +32,7 @@ use crate::writer::record_batch::divide_by_partition_values;
use crate::writer::utils::PartitionPath;

use arrow_array::RecordBatch;
- use arrow_cast::{can_cast_types, cast};
+ use arrow_cast::{can_cast_types, cast_with_options, CastOptions};
use arrow_schema::{Schema as ArrowSchema, SchemaRef as ArrowSchemaRef};
use datafusion::execution::context::{SessionContext, SessionState, TaskContext};
use datafusion::physical_plan::{memory::MemoryExec, ExecutionPlan};
@@ -96,6 +96,9 @@ pub struct WriteBuilder {
write_batch_size: Option<usize>,
/// RecordBatches to be written into the table
batches: Option<Vec<RecordBatch>>,
/// CastOptions determines how data types that do not match the underlying table are handled
/// By default an error is returned
cast_options: CastOptions,
}

impl WriteBuilder {
@@ -112,6 +115,7 @@
target_file_size: None,
write_batch_size: None,
batches: None,
cast_options: CastOptions { safe: false },
}
}

@@ -167,6 +171,12 @@ impl WriteBuilder {
self
}

/// Specify the cast options to use when casting columns that do not match the table's schema.
pub fn with_cast_options(mut self, cast_options: CastOptions) -> Self {
self.cast_options = cast_options;
self
}

async fn check_preconditions(&self) -> DeltaResult<Vec<Action>> {
match self.store.is_delta_table_location().await? {
true => {
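
A hedged usage sketch of the new `with_cast_options` hook, mirroring the test added further down; the `deltalake` crate-root re-exports (`DeltaOps`, `DeltaResult`) are assumptions:

```rust
use arrow_array::{Int32Array, RecordBatch, StringArray};
use arrow_cast::CastOptions;
use arrow_schema::{DataType, Field, Schema as ArrowSchema};
use deltalake::{DeltaOps, DeltaResult};
use std::sync::Arc;

async fn opt_in_to_lenient_casting() -> DeltaResult<()> {
    // Create a table with a single Int32 column...
    let ints = RecordBatch::try_new(
        Arc::new(ArrowSchema::new(vec![Field::new("value", DataType::Int32, true)])),
        vec![Arc::new(Int32Array::from(vec![Some(1)]))],
    )?;
    let table = DeltaOps::new_in_memory().write(vec![ints]).await?;

    // ...then write Utf8 data. With safe: true, a value that cannot be parsed
    // becomes null; with the default safe: false, the write fails instead.
    let strings = RecordBatch::try_new(
        Arc::new(ArrowSchema::new(vec![Field::new("value", DataType::Utf8, true)])),
        vec![Arc::new(StringArray::from(vec![Some("2"), Some("oops")]))],
    )?;
    DeltaOps::from(table)
        .write(vec![strings])
        .with_cast_options(CastOptions { safe: true })
        .await?;
    Ok(())
}
```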
@@ -216,35 +226,41 @@ pub(crate) async fn write_execution_plan(
target_file_size: Option<usize>,
write_batch_size: Option<usize>,
writer_properties: Option<WriterProperties>,
cast_options: &CastOptions,
) -> DeltaResult<Vec<Add>> {
let invariants = snapshot
.current_metadata()
.and_then(|meta| meta.schema.get_invariants().ok())
.unwrap_or_default();

// Use input schema to prevent wrapping partition columns into a dictionary.
let schema = snapshot.input_schema().unwrap_or(plan.schema());

let checker = DeltaDataChecker::new(invariants);

// Write data to disk
let mut tasks = vec![];
for i in 0..plan.output_partitioning().partition_count() {
let inner_plan = plan.clone();
let inner_schema = schema.clone();
let task_ctx = Arc::new(TaskContext::from(&state));
let inner_cast = cast_options.clone();
let config = WriterConfig::new(
- inner_plan.schema(),
+ inner_schema.clone(),
partition_columns.clone(),
writer_properties.clone(),
target_file_size,
write_batch_size,
);
let mut writer = DeltaWriter::new(object_store.clone(), config);
let checker_stream = checker.clone();
let schema = inner_plan.schema().clone();
let mut stream = inner_plan.execute(i, task_ctx)?;
let handle: tokio::task::JoinHandle<DeltaResult<Vec<Add>>> =
tokio::task::spawn(async move {
while let Some(maybe_batch) = stream.next().await {
let batch = maybe_batch?;
checker_stream.check_batch(&batch).await?;
- let arr = cast_record_batch(&batch, schema.clone())?;
+ let arr = cast_record_batch(&batch, inner_schema.clone(), &inner_cast)?;
writer.write(&arr).await?;
}
writer.close().await
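
The new `// Use input schema ...` comment above deserves a word: after a DataFusion scan, partition columns can come back dictionary-encoded, while the table schema declares plain values. A small illustration of that mismatch (the UInt16 key type is only an assumed example):

```rust
use arrow_schema::DataType;

fn main() {
    // What a scan may report for a partition column vs. what the table declares.
    let scanned = DataType::Dictionary(Box::new(DataType::UInt16), Box::new(DataType::Utf8));
    let declared = DataType::Utf8;

    // The logical values are the same strings, but the physical types differ,
    // which is why the writer config is built from the snapshot's input schema.
    assert!(!scanned.equals_datatype(&declared));
}
```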
@@ -375,6 +391,7 @@ impl std::future::IntoFuture for WriteBuilder {
this.target_file_size,
this.write_batch_size,
None,
&this.cast_options,
)
.await?;
actions.extend(add_actions.into_iter().map(Action::add));
@@ -463,14 +480,17 @@ fn can_cast_batch(from_schema: &ArrowSchema, to_schema: &ArrowSchema) -> bool {
fn cast_record_batch(
batch: &RecordBatch,
target_schema: ArrowSchemaRef,
cast_options: &CastOptions,
) -> DeltaResult<RecordBatch> {
//let cast_options = CastOptions { safe: false };

let columns = target_schema
.all_fields()
.iter()
.map(|f| {
let col = batch.column_by_name(f.name()).unwrap();
if !col.data_type().equals_datatype(f.data_type()) {
- cast(col, f.data_type())
+ cast_with_options(col, f.data_type(), cast_options)
} else {
Ok(col.clone())
}
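
To make `cast_record_batch` concrete, here is a self-contained re-enactment of its core loop using only public arrow APIs (the real helper walks `all_fields()` to cover nested columns); a sketch, not the crate's code:

```rust
use arrow_array::{ArrayRef, Int32Array, RecordBatch};
use arrow_cast::{cast_with_options, CastOptions};
use arrow_schema::{DataType, Field, Schema as ArrowSchema, SchemaRef};
use std::sync::Arc;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let batch = RecordBatch::try_new(
        Arc::new(ArrowSchema::new(vec![Field::new("value", DataType::Int32, true)])),
        vec![Arc::new(Int32Array::from(vec![Some(1), None])) as ArrayRef],
    )?;
    let target: SchemaRef = Arc::new(ArrowSchema::new(vec![Field::new("value", DataType::Int64, true)]));

    // Cast only the columns whose types differ from the target schema.
    let options = CastOptions { safe: false };
    let columns = target
        .fields()
        .iter()
        .map(|f| {
            let col = batch.column_by_name(f.name()).unwrap();
            if !col.data_type().equals_datatype(f.data_type()) {
                cast_with_options(col.as_ref(), f.data_type(), &options)
            } else {
                Ok(col.clone())
            }
        })
        .collect::<Result<Vec<_>, _>>()?;
    let cast = RecordBatch::try_new(target, columns)?;
    assert_eq!(cast.column(0).data_type(), &DataType::Int64);
    Ok(())
}
```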
@@ -483,7 +503,13 @@
mod tests {
use super::*;
use crate::operations::DeltaOps;
use crate::writer::test_utils::datafusion::get_data;
use crate::writer::test_utils::{get_delta_schema, get_record_batch};
use arrow::datatypes::Field;
use arrow::datatypes::Schema as ArrowSchema;
use arrow_array::{Int32Array, StringArray, TimestampMicrosecondArray};
use arrow_schema::{DataType, TimeUnit};
use datafusion::assert_batches_sorted_eq;
use serde_json::json;

#[tokio::test]
@@ -526,6 +552,104 @@ mod tests {
assert_eq!(table.get_file_uris().count(), 1)
}

#[tokio::test]
async fn test_write_different_types() {
// Ensure written data is cast when its type differs from the table schema.

// Validate String -> Int is err
let schema = Arc::new(ArrowSchema::new(vec![Field::new(
"value",
DataType::Int32,
true,
)]));

let batch = RecordBatch::try_new(
Arc::clone(&schema),
vec![Arc::new(Int32Array::from(vec![Some(0), None]))],
)
.unwrap();
let table = DeltaOps::new_in_memory().write(vec![batch]).await.unwrap();

let schema = Arc::new(ArrowSchema::new(vec![Field::new(
"value",
DataType::Utf8,
true,
)]));

let batch = RecordBatch::try_new(
Arc::clone(&schema),
vec![Arc::new(StringArray::from(vec![
Some("Test123".to_owned()),
Member: I'm confused, where does this value go in the expected table? 😕
Collaborator: Right below, in line 570?
Collaborator (author): Since it cannot be parsed as an int, it will result in a null value, which aligns with ANSI SQL. But maybe we should only allow that if the user opts in?
Collaborator: Oh, never mind. It seems like this is being inserted as null? That doesn't seem like what a user would want.
Collaborator: Yeah, I think we could disable that by passing safe: false in CastOptions: https://docs.rs/arrow-cast/40.0.0/arrow_cast/cast/struct.CastOptions.html

Some("123".to_owned()),
None,
]))],
)
.unwrap();

// Test cast options
let table = DeltaOps::from(table)
.write(vec![batch.clone()])
.with_cast_options(CastOptions { safe: true })
.await
.unwrap();

let expected = [
"+-------+",
"| value |",
"+-------+",
"| |",
"| |",
"| |",
"| 123 |",
"| 0 |",
"+-------+",
];
let actual = get_data(&table).await;
assert_batches_sorted_eq!(&expected, &actual);

let res = DeltaOps::from(table).write(vec![batch]).await;
assert!(res.is_err());

// Validate the datetime -> string behavior
let schema = Arc::new(ArrowSchema::new(vec![Field::new(
"value",
arrow::datatypes::DataType::Utf8,
true,
)]));

let batch = RecordBatch::try_new(
Arc::clone(&schema),
vec![Arc::new(StringArray::from(vec![Some(
"2023-06-03 15:35:00".to_owned(),
)]))],
)
.unwrap();
let table = DeltaOps::new_in_memory().write(vec![batch]).await.unwrap();

let schema = Arc::new(ArrowSchema::new(vec![Field::new(
"value",
DataType::Timestamp(TimeUnit::Microsecond, None),
true,
)]));
let batch = RecordBatch::try_new(
Arc::clone(&schema),
vec![Arc::new(TimestampMicrosecondArray::from(vec![Some(10000)]))],
)
.unwrap();

let _res = DeltaOps::from(table).write(vec![batch]).await.unwrap();
let expected = [
"+-------------------------+",
"| value |",
"+-------------------------+",
"| 1970-01-01T00:00:00.010 |",
"| 2023-06-03 15:35:00 |",
"+-------------------------+",
];
let actual = get_data(&_res).await;
assert_batches_sorted_eq!(&expected, &actual);
}

#[tokio::test]
async fn test_write_nonexistent() {
let batch = get_record_batch(None, false);
20 changes: 20 additions & 0 deletions rust/src/writer/test_utils.rs
@@ -211,3 +211,23 @@ pub async fn create_initialized_table(partition_cols: &[String]) -> DeltaTable {

table
}

#[cfg(feature = "datafusion")]
pub mod datafusion {
use crate::DeltaTable;
use arrow_array::RecordBatch;
use datafusion::prelude::SessionContext;
use std::sync::Arc;

pub async fn get_data(table: &DeltaTable) -> Vec<RecordBatch> {
let table = DeltaTable::new_with_state(table.object_store(), table.state.clone());
let ctx = SessionContext::new();
ctx.register_table("test", Arc::new(table)).unwrap();
ctx.sql("select * from test")
.await
.unwrap()
.collect()
.await
.unwrap()
}
}
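
A hedged sketch of a call site (paths taken from this diff); the clone through `new_with_state` lets the helper hand an owned table to `SessionContext::register_table` while only borrowing the caller's:

```rust
#[tokio::test]
async fn reads_back_written_rows() {
    use crate::operations::DeltaOps;
    use crate::writer::test_utils::{datafusion::get_data, get_record_batch};

    let batch = get_record_batch(None, false);
    let table = DeltaOps::new_in_memory().write(vec![batch]).await.unwrap();

    // The helper borrows `table`, so the test can keep using it afterwards.
    let actual = get_data(&table).await;
    assert!(!actual.is_empty());
}
```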