diff --git a/datafusion/core/tests/simplification.rs b/datafusion/core/tests/simplification.rs index 25f994d320c1..5a2f040c09d8 100644 --- a/datafusion/core/tests/simplification.rs +++ b/datafusion/core/tests/simplification.rs @@ -25,6 +25,7 @@ use datafusion_common::cast::as_int32_array; use datafusion_common::ScalarValue; use datafusion_common::{DFSchemaRef, ToDFSchema}; use datafusion_expr::expr::ScalarFunction; +use datafusion_expr::logical_plan::builder::table_scan_with_filters; use datafusion_expr::simplify::SimplifyInfo; use datafusion_expr::{ expr, table_scan, BuiltinScalarFunction, Cast, ColumnarValue, Expr, ExprSchemable, @@ -294,6 +295,45 @@ fn select_date_plus_interval() -> Result<()> { Ok(()) } +#[test] +fn simplify_project_scalar_fn() -> Result<()> { + // Issue https://github.com/apache/arrow-datafusion/issues/5996 + let schema = Schema::new(vec![Field::new("f", DataType::Float64, false)]); + let plan = table_scan(Some("test"), &schema, None)? + .project(vec![power(col("f"), lit(1.0))])? + .build()?; + + // before simplify: power(t.f, 1.0) + // after simplify: t.f as "power(t.f, 1.0)" + let expected = "Projection: test.f AS power(test.f,Float64(1))\ + \n TableScan: test"; + let actual = get_optimized_plan_formatted(&plan, &Utc::now()); + assert_eq!(expected, actual); + Ok(()) +} + +#[test] +fn simplify_scan_predicate() -> Result<()> { + let schema = Schema::new(vec![ + Field::new("f", DataType::Float64, false), + Field::new("g", DataType::Float64, false), + ]); + let plan = table_scan_with_filters( + Some("test"), + &schema, + None, + vec![col("g").eq(power(col("f"), lit(1.0)))], + )? + .build()?; + + // before simplify: t.g = power(t.f, 1.0) + // after simplify: (t.g = t.f) as "t.g = power(t.f, 1.0)" + let expected = "TableScan: test, full_filters=[g = f AS g = power(f,Float64(1))]"; + let actual = get_optimized_plan_formatted(&plan, &Utc::now()); + assert_eq!(expected, actual); + Ok(()) +} + #[test] fn test_const_evaluator() { // true --> true @@ -431,3 +471,99 @@ fn multiple_now() -> Result<()> { assert_eq!(expected, actual); Ok(()) } + +// ------------------------------ +// --- Simplifier tests ----- +// ------------------------------ + +fn expr_test_schema() -> DFSchemaRef { + Schema::new(vec![ + Field::new("c1", DataType::Utf8, true), + Field::new("c2", DataType::Boolean, true), + Field::new("c3", DataType::Int64, true), + Field::new("c4", DataType::UInt32, true), + Field::new("c1_non_null", DataType::Utf8, false), + Field::new("c2_non_null", DataType::Boolean, false), + Field::new("c3_non_null", DataType::Int64, false), + Field::new("c4_non_null", DataType::UInt32, false), + ]) + .to_dfschema_ref() + .unwrap() +} + +fn test_simplify(input_expr: Expr, expected_expr: Expr) { + let info: MyInfo = MyInfo { + schema: expr_test_schema(), + execution_props: ExecutionProps::new(), + }; + let simplifier = ExprSimplifier::new(info); + let simplified_expr = simplifier + .simplify(input_expr.clone()) + .expect("successfully evaluated"); + + assert_eq!( + simplified_expr, expected_expr, + "Mismatch evaluating {input_expr}\n Expected:{expected_expr}\n Got:{simplified_expr}" + ); +} + +#[test] +fn test_simplify_log() { + // Log(c3, 1) ===> 0 + { + let expr = log(col("c3_non_null"), lit(1)); + test_simplify(expr, lit(0i64)); + } + // Log(c3, c3) ===> 1 + { + let expr = log(col("c3_non_null"), col("c3_non_null")); + let expected = lit(1i64); + test_simplify(expr, expected); + } + // Log(c3, Power(c3, c4)) ===> c4 + { + let expr = log( + col("c3_non_null"), + 
power(col("c3_non_null"), col("c4_non_null")), + ); + let expected = col("c4_non_null"); + test_simplify(expr, expected); + } + // Log(c3, c4) ===> Log(c3, c4) + { + let expr = log(col("c3_non_null"), col("c4_non_null")); + let expected = log(col("c3_non_null"), col("c4_non_null")); + test_simplify(expr, expected); + } +} + +#[test] +fn test_simplify_power() { + // Power(c3, 0) ===> 1 + { + let expr = power(col("c3_non_null"), lit(0)); + let expected = lit(1i64); + test_simplify(expr, expected) + } + // Power(c3, 1) ===> c3 + { + let expr = power(col("c3_non_null"), lit(1)); + let expected = col("c3_non_null"); + test_simplify(expr, expected) + } + // Power(c3, Log(c3, c4)) ===> c4 + { + let expr = power( + col("c3_non_null"), + log(col("c3_non_null"), col("c4_non_null")), + ); + let expected = col("c4_non_null"); + test_simplify(expr, expected) + } + // Power(c3, c4) ===> Power(c3, c4) + { + let expr = power(col("c3_non_null"), col("c4_non_null")); + let expected = power(col("c3_non_null"), col("c4_non_null")); + test_simplify(expr, expected) + } +} diff --git a/datafusion/expr/src/built_in_function.rs b/datafusion/expr/src/built_in_function.rs index 7426ccd938e7..d98d7d0abfe2 100644 --- a/datafusion/expr/src/built_in_function.rs +++ b/datafusion/expr/src/built_in_function.rs @@ -47,12 +47,8 @@ pub enum BuiltinScalarFunction { Factorial, /// iszero Iszero, - /// log, same as log10 - Log, /// nanvl Nanvl, - /// power - Power, /// round Round, /// trunc @@ -128,9 +124,7 @@ impl BuiltinScalarFunction { BuiltinScalarFunction::Exp => Volatility::Immutable, BuiltinScalarFunction::Factorial => Volatility::Immutable, BuiltinScalarFunction::Iszero => Volatility::Immutable, - BuiltinScalarFunction::Log => Volatility::Immutable, BuiltinScalarFunction::Nanvl => Volatility::Immutable, - BuiltinScalarFunction::Power => Volatility::Immutable, BuiltinScalarFunction::Round => Volatility::Immutable, BuiltinScalarFunction::Cot => Volatility::Immutable, BuiltinScalarFunction::Trunc => Volatility::Immutable, @@ -176,16 +170,6 @@ impl BuiltinScalarFunction { BuiltinScalarFunction::Factorial => Ok(Int64), - BuiltinScalarFunction::Power => match &input_expr_types[0] { - Int64 => Ok(Int64), - _ => Ok(Float64), - }, - - BuiltinScalarFunction::Log => match &input_expr_types[0] { - Float32 => Ok(Float32), - _ => Ok(Float64), - }, - BuiltinScalarFunction::Nanvl => match &input_expr_types[0] { Float32 => Ok(Float32), _ => Ok(Float64), @@ -233,10 +217,6 @@ impl BuiltinScalarFunction { self.volatility(), ), BuiltinScalarFunction::Random => Signature::exact(vec![], self.volatility()), - BuiltinScalarFunction::Power => Signature::one_of( - vec![Exact(vec![Int64, Int64]), Exact(vec![Float64, Float64])], - self.volatility(), - ), BuiltinScalarFunction::Round => Signature::one_of( vec![ Exact(vec![Float64, Int64]), @@ -255,16 +235,6 @@ impl BuiltinScalarFunction { ], self.volatility(), ), - - BuiltinScalarFunction::Log => Signature::one_of( - vec![ - Exact(vec![Float32]), - Exact(vec![Float64]), - Exact(vec![Float32, Float32]), - Exact(vec![Float64, Float64]), - ], - self.volatility(), - ), BuiltinScalarFunction::Nanvl => Signature::one_of( vec![Exact(vec![Float32, Float32]), Exact(vec![Float64, Float64])], self.volatility(), @@ -302,8 +272,6 @@ impl BuiltinScalarFunction { | BuiltinScalarFunction::Trunc ) { Some(vec![Some(true)]) - } else if *self == BuiltinScalarFunction::Log { - Some(vec![Some(true), Some(false)]) } else { None } @@ -317,9 +285,7 @@ impl BuiltinScalarFunction { BuiltinScalarFunction::Exp => &["exp"], 
BuiltinScalarFunction::Factorial => &["factorial"], BuiltinScalarFunction::Iszero => &["iszero"], - BuiltinScalarFunction::Log => &["log"], BuiltinScalarFunction::Nanvl => &["nanvl"], - BuiltinScalarFunction::Power => &["power", "pow"], BuiltinScalarFunction::Random => &["random"], BuiltinScalarFunction::Round => &["round"], BuiltinScalarFunction::Trunc => &["trunc"], diff --git a/datafusion/expr/src/expr_fn.rs b/datafusion/expr/src/expr_fn.rs index 6c811ff06418..b554d87bade1 100644 --- a/datafusion/expr/src/expr_fn.rs +++ b/datafusion/expr/src/expr_fn.rs @@ -546,9 +546,6 @@ nary_scalar_expr!( ); scalar_expr!(Exp, exp, num, "exponential"); -scalar_expr!(Power, power, base exponent, "`base` raised to the power of `exponent`"); -scalar_expr!(Log, log, base x, "logarithm of a `x` for a particular `base`"); - scalar_expr!(InitCap, initcap, string, "converts the first letter of each word in `string` in uppercase and the remaining characters in lowercase"); scalar_expr!(EndsWith, ends_with, string suffix, "whether the `string` ends with the `suffix`"); nary_scalar_expr!(Coalesce, coalesce, "returns `coalesce(args...)`, which evaluates to the value of the first [Expr] which is not NULL"); diff --git a/datafusion/functions/src/macros.rs b/datafusion/functions/src/macros.rs index c92cb27ef5bb..5ee47bd3e8eb 100644 --- a/datafusion/functions/src/macros.rs +++ b/datafusion/functions/src/macros.rs @@ -357,6 +357,19 @@ macro_rules! make_math_binary_udf { }; } +macro_rules! make_function_scalar_inputs { + ($ARG: expr, $NAME:expr, $ARRAY_TYPE:ident, $FUNC: block) => {{ + let arg = downcast_arg!($ARG, $NAME, $ARRAY_TYPE); + + arg.iter() + .map(|a| match a { + Some(a) => Some($FUNC(a)), + _ => None, + }) + .collect::<$ARRAY_TYPE>() + }}; +} + macro_rules! make_function_inputs2 { ($ARG1: expr, $ARG2: expr, $NAME1:expr, $NAME2: expr, $ARRAY_TYPE:ident, $FUNC: block) => {{ let arg1 = downcast_arg!($ARG1, $NAME1, $ARRAY_TYPE); diff --git a/datafusion/functions/src/math/log.rs b/datafusion/functions/src/math/log.rs new file mode 100644 index 000000000000..2131b6aa6705 --- /dev/null +++ b/datafusion/functions/src/math/log.rs @@ -0,0 +1,259 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//! Math function: `log()`. 
+
+use arrow::datatypes::DataType;
+use datafusion_common::{exec_err, DataFusionError, Result, ScalarValue};
+use datafusion_expr::expr::ScalarFunction;
+use datafusion_expr::simplify::{ExprSimplifyResult, SimplifyInfo};
+use datafusion_expr::{ColumnarValue, Expr, FuncMonotonicity, ScalarFunctionDefinition};
+
+use arrow::array::{ArrayRef, Float32Array, Float64Array};
+use datafusion_expr::TypeSignature::*;
+use datafusion_expr::{ScalarUDFImpl, Signature, Volatility};
+use std::any::Any;
+use std::sync::Arc;
+
+use super::power::PowerFunc;
+
+#[derive(Debug)]
+pub struct LogFunc {
+    signature: Signature,
+}
+
+impl Default for LogFunc {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl LogFunc {
+    pub fn new() -> Self {
+        use DataType::*;
+        Self {
+            signature: Signature::one_of(
+                vec![
+                    Exact(vec![Float32]),
+                    Exact(vec![Float64]),
+                    Exact(vec![Float32, Float32]),
+                    Exact(vec![Float64, Float64]),
+                ],
+                Volatility::Immutable,
+            ),
+        }
+    }
+}
+
+impl ScalarUDFImpl for LogFunc {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+    fn name(&self) -> &str {
+        "log"
+    }
+
+    fn signature(&self) -> &Signature {
+        &self.signature
+    }
+
+    fn return_type(&self, arg_types: &[DataType]) -> Result<DataType> {
+        match &arg_types[0] {
+            DataType::Float32 => Ok(DataType::Float32),
+            _ => Ok(DataType::Float64),
+        }
+    }
+
+    fn monotonicity(&self) -> Result<Option<FuncMonotonicity>> {
+        Ok(Some(vec![Some(true), Some(false)]))
+    }
+
+    // Support overloaded log(base, x) and log(x) which defaults to log(10, x)
+    fn invoke(&self, args: &[ColumnarValue]) -> Result<ColumnarValue> {
+        let args = ColumnarValue::values_to_arrays(args)?;
+
+        let mut base = ColumnarValue::Scalar(ScalarValue::Float32(Some(10.0)));
+
+        let mut x = &args[0];
+        if args.len() == 2 {
+            x = &args[1];
+            base = ColumnarValue::Array(args[0].clone());
+        }
+        // note: f64::log takes its arguments in the opposite order from SQL, i.e. SQL log(base, x) == f64::log(x, base)
+        let arr: ArrayRef = match args[0].data_type() {
+            DataType::Float64 => match base {
+                ColumnarValue::Scalar(ScalarValue::Float32(Some(base))) => {
+                    Arc::new(make_function_scalar_inputs!(x, "x", Float64Array, {
+                        |value: f64| f64::log(value, base as f64)
+                    }))
+                }
+                ColumnarValue::Array(base) => Arc::new(make_function_inputs2!(
+                    x,
+                    base,
+                    "x",
+                    "base",
+                    Float64Array,
+                    { f64::log }
+                )),
+                _ => {
+                    return exec_err!("log function requires a scalar or array for base")
+                }
+            },
+
+            DataType::Float32 => match base {
+                ColumnarValue::Scalar(ScalarValue::Float32(Some(base))) => {
+                    Arc::new(make_function_scalar_inputs!(x, "x", Float32Array, {
+                        |value: f32| f32::log(value, base)
+                    }))
+                }
+                ColumnarValue::Array(base) => Arc::new(make_function_inputs2!(
+                    x,
+                    base,
+                    "x",
+                    "base",
+                    Float32Array,
+                    { f32::log }
+                )),
+                _ => {
+                    return exec_err!("log function requires a scalar or array for base")
+                }
+            },
+            other => {
+                return exec_err!("Unsupported data type {other:?} for function log")
+            }
+        };
+
+        Ok(ColumnarValue::Array(arr))
+    }
+
+    /// Simplify the `log` function by the relevant rules:
+    /// 1. Log(a, 1) ===> 0
+    /// 2. Log(a, Power(a, b)) ===> b
+    /// 3. Log(a, a) ===> 1
+    fn simplify(
+        &self,
+        args: Vec<Expr>,
+        info: &dyn SimplifyInfo,
+    ) -> Result<ExprSimplifyResult> {
+        let mut number = &args[0];
+        let mut base =
+            &Expr::Literal(ScalarValue::new_ten(&info.get_data_type(number)?)?);
+        if args.len() == 2 {
+            base = &args[0];
+            number = &args[1];
+        }
+
+        match number {
+            Expr::Literal(value)
+                if value == &ScalarValue::new_one(&info.get_data_type(number)?)? =>
+            {
+                Ok(ExprSimplifyResult::Simplified(Expr::Literal(
+                    ScalarValue::new_zero(&info.get_data_type(base)?)?,
+                )))
+            }
+            Expr::ScalarFunction(ScalarFunction {
+                func_def: ScalarFunctionDefinition::UDF(fun),
+                args,
+            }) if base == &args[0]
+                && fun
+                    .as_ref()
+                    .inner()
+                    .as_any()
+                    .downcast_ref::<PowerFunc>()
+                    .is_some() =>
+            {
+                Ok(ExprSimplifyResult::Simplified(args[1].clone()))
+            }
+            _ => {
+                if number == base {
+                    Ok(ExprSimplifyResult::Simplified(Expr::Literal(
+                        ScalarValue::new_one(&info.get_data_type(number)?)?,
+                    )))
+                } else {
+                    Ok(ExprSimplifyResult::Original(args))
+                }
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use datafusion_common::cast::{as_float32_array, as_float64_array};
+
+    use super::*;
+
+    #[test]
+    fn test_log_f64() {
+        let args = [
+            ColumnarValue::Array(Arc::new(Float64Array::from(vec![2.0, 2.0, 3.0, 5.0]))), // base
+            ColumnarValue::Array(Arc::new(Float64Array::from(vec![
+                8.0, 4.0, 81.0, 625.0,
+            ]))), // num
+        ];
+
+        let result = LogFunc::new()
+            .invoke(&args)
+            .expect("failed to initialize function log");
+
+        match result {
+            ColumnarValue::Array(arr) => {
+                let floats = as_float64_array(&arr)
+                    .expect("failed to convert result to a Float64Array");
+
+                assert_eq!(floats.len(), 4);
+                assert_eq!(floats.value(0), 3.0);
+                assert_eq!(floats.value(1), 2.0);
+                assert_eq!(floats.value(2), 4.0);
+                assert_eq!(floats.value(3), 4.0);
+            }
+            ColumnarValue::Scalar(_) => {
+                panic!("Expected an array value")
+            }
+        }
+    }
+
+    #[test]
+    fn test_log_f32() {
+        let args = [
+            ColumnarValue::Array(Arc::new(Float32Array::from(vec![2.0, 2.0, 3.0, 5.0]))), // base
+            ColumnarValue::Array(Arc::new(Float32Array::from(vec![
+                8.0, 4.0, 81.0, 625.0,
+            ]))), // num
+        ];
+
+        let result = LogFunc::new()
+            .invoke(&args)
+            .expect("failed to initialize function log");
+
+        match result {
+            ColumnarValue::Array(arr) => {
+                let floats = as_float32_array(&arr)
+                    .expect("failed to convert result to a Float32Array");
+
+                assert_eq!(floats.len(), 4);
+                assert_eq!(floats.value(0), 3.0);
+                assert_eq!(floats.value(1), 2.0);
+                assert_eq!(floats.value(2), 4.0);
+                assert_eq!(floats.value(3), 4.0);
+            }
+            ColumnarValue::Scalar(_) => {
+                panic!("Expected an array value")
+            }
+        }
+    }
+}
diff --git a/datafusion/functions/src/math/mod.rs b/datafusion/functions/src/math/mod.rs
index 3a1f7cc13bb7..2655edfe76dc 100644
--- a/datafusion/functions/src/math/mod.rs
+++ b/datafusion/functions/src/math/mod.rs
@@ -20,12 +20,16 @@
 pub mod abs;
 pub mod gcd;
 pub mod lcm;
+pub mod log;
 pub mod nans;
 pub mod pi;
+pub mod power;
 // Create UDFs
 make_udf_function!(nans::IsNanFunc, ISNAN, isnan);
 make_udf_function!(abs::AbsFunc, ABS, abs);
+make_udf_function!(log::LogFunc, LOG, log);
+make_udf_function!(power::PowerFunc, POWER, power);
 make_udf_function!(gcd::GcdFunc, GCD, gcd);
 make_udf_function!(lcm::LcmFunc, LCM, lcm);
 make_udf_function!(pi::PiFunc, PI, pi);
@@ -66,6 +70,8 @@ export_functions!(
         "returns true if a given number is +NaN or -NaN otherwise returns false"
     ),
     (abs, num, "returns the absolute value of a given number"),
+    (power, base exponent, "`base` raised to the power of `exponent`"),
+    (log, base num, "logarithm of a number for a particular `base`"),
     (log2, num, "base 2 logarithm of a number"),
     (log10, num, "base 10 logarithm of a number"),
     (ln, num, "natural logarithm (base e) of a number"),
diff --git a/datafusion/functions/src/math/power.rs b/datafusion/functions/src/math/power.rs
new file mode 100644
index 000000000000..8e3b2cf02405
--- /dev/null
+++ b/datafusion/functions/src/math/power.rs
@@ -0,0 +1,218 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Math function: `power()`.
+
+use arrow::datatypes::DataType;
+use datafusion_common::{exec_err, DataFusionError, Result, ScalarValue};
+use datafusion_expr::expr::ScalarFunction;
+use datafusion_expr::simplify::{ExprSimplifyResult, SimplifyInfo};
+use datafusion_expr::{ColumnarValue, Expr, ScalarFunctionDefinition};
+
+use arrow::array::{ArrayRef, Float64Array, Int64Array};
+use datafusion_expr::TypeSignature::*;
+use datafusion_expr::{ScalarUDFImpl, Signature, Volatility};
+use std::any::Any;
+use std::sync::Arc;
+
+use super::log::LogFunc;
+
+#[derive(Debug)]
+pub struct PowerFunc {
+    signature: Signature,
+    aliases: Vec<String>,
+}
+
+impl Default for PowerFunc {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl PowerFunc {
+    pub fn new() -> Self {
+        use DataType::*;
+        Self {
+            signature: Signature::one_of(
+                vec![Exact(vec![Int64, Int64]), Exact(vec![Float64, Float64])],
+                Volatility::Immutable,
+            ),
+            aliases: vec![String::from("pow")],
+        }
+    }
+}
+
+impl ScalarUDFImpl for PowerFunc {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+    fn name(&self) -> &str {
+        "power"
+    }
+
+    fn signature(&self) -> &Signature {
+        &self.signature
+    }
+
+    fn return_type(&self, arg_types: &[DataType]) -> Result<DataType> {
+        match arg_types[0] {
+            DataType::Int64 => Ok(DataType::Int64),
+            _ => Ok(DataType::Float64),
+        }
+    }
+
+    fn aliases(&self) -> &[String] {
+        &self.aliases
+    }
+
+    fn invoke(&self, args: &[ColumnarValue]) -> Result<ColumnarValue> {
+        let args = ColumnarValue::values_to_arrays(args)?;
+
+        let arr: ArrayRef = match args[0].data_type() {
+            DataType::Float64 => Arc::new(make_function_inputs2!(
+                &args[0],
+                &args[1],
+                "base",
+                "exponent",
+                Float64Array,
+                { f64::powf }
+            )),
+
+            DataType::Int64 => Arc::new(make_function_inputs2!(
+                &args[0],
+                &args[1],
+                "base",
+                "exponent",
+                Int64Array,
+                { i64::pow }
+            )),
+
+            other => {
+                return exec_err!(
+                    "Unsupported data type {other:?} for function {}",
+                    self.name()
+                )
+            }
+        };
+
+        Ok(ColumnarValue::Array(arr))
+    }
+
+    /// Simplify the `power` function by the relevant rules:
+    /// 1. Power(a, 0) ===> 1
+    /// 2. Power(a, 1) ===> a
+    /// 3. Power(a, Log(a, b)) ===> b
+    fn simplify(
+        &self,
+        args: Vec<Expr>,
+        info: &dyn SimplifyInfo,
+    ) -> Result<ExprSimplifyResult> {
+        let base = &args[0];
+        let exponent = &args[1];
+
+        match exponent {
+            Expr::Literal(value)
+                if value == &ScalarValue::new_zero(&info.get_data_type(exponent)?)? =>
+            {
+                Ok(ExprSimplifyResult::Simplified(Expr::Literal(
+                    ScalarValue::new_one(&info.get_data_type(base)?)?,
+                )))
+            }
+            Expr::Literal(value)
+                if value == &ScalarValue::new_one(&info.get_data_type(exponent)?)? =>
+            {
+                Ok(ExprSimplifyResult::Simplified(base.clone()))
+            }
+            Expr::ScalarFunction(ScalarFunction {
+                func_def: ScalarFunctionDefinition::UDF(fun),
+                args,
+            }) if base == &args[0]
+                && fun
+                    .as_ref()
+                    .inner()
+                    .as_any()
+                    .downcast_ref::<LogFunc>()
+                    .is_some() =>
+            {
+                Ok(ExprSimplifyResult::Simplified(args[1].clone()))
+            }
+            _ => Ok(ExprSimplifyResult::Original(args)),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use datafusion_common::cast::{as_float64_array, as_int64_array};
+
+    use super::*;
+
+    #[test]
+    fn test_power_f64() {
+        let args = [
+            ColumnarValue::Array(Arc::new(Float64Array::from(vec![2.0, 2.0, 3.0, 5.0]))), // base
+            ColumnarValue::Array(Arc::new(Float64Array::from(vec![3.0, 2.0, 4.0, 4.0]))), // exponent
+        ];
+
+        let result = PowerFunc::new()
+            .invoke(&args)
+            .expect("failed to initialize function power");
+
+        match result {
+            ColumnarValue::Array(arr) => {
+                let floats = as_float64_array(&arr)
+                    .expect("failed to convert result to a Float64Array");
+                assert_eq!(floats.len(), 4);
+                assert_eq!(floats.value(0), 8.0);
+                assert_eq!(floats.value(1), 4.0);
+                assert_eq!(floats.value(2), 81.0);
+                assert_eq!(floats.value(3), 625.0);
+            }
+            ColumnarValue::Scalar(_) => {
+                panic!("Expected an array value")
+            }
+        }
+    }
+
+    #[test]
+    fn test_power_i64() {
+        let args = [
+            ColumnarValue::Array(Arc::new(Int64Array::from(vec![2, 2, 3, 5]))), // base
+            ColumnarValue::Array(Arc::new(Int64Array::from(vec![3, 2, 4, 4]))), // exponent
+        ];
+
+        let result = PowerFunc::new()
+            .invoke(&args)
+            .expect("failed to initialize function power");
+
+        match result {
+            ColumnarValue::Array(arr) => {
+                let ints = as_int64_array(&arr)
+                    .expect("failed to convert result to a Int64Array");
+
+                assert_eq!(ints.len(), 4);
+                assert_eq!(ints.value(0), 8);
+                assert_eq!(ints.value(1), 4);
+                assert_eq!(ints.value(2), 81);
+                assert_eq!(ints.value(3), 625);
+            }
+            ColumnarValue::Scalar(_) => {
+                panic!("Expected an array value")
+            }
+        }
+    }
+}
diff --git a/datafusion/optimizer/src/simplify_expressions/expr_simplifier.rs b/datafusion/optimizer/src/simplify_expressions/expr_simplifier.rs
index 8b70f76617dd..3198807b04cf 100644
--- a/datafusion/optimizer/src/simplify_expressions/expr_simplifier.rs
+++ b/datafusion/optimizer/src/simplify_expressions/expr_simplifier.rs
@@ -1318,18 +1318,6 @@ impl<'a, S: SimplifyInfo> TreeNodeRewriter for Simplifier<'a, S> {
                 ExprSimplifyResult::Simplified(expr) => Transformed::yes(expr),
             },
-            // log
-            Expr::ScalarFunction(ScalarFunction {
-                func_def: ScalarFunctionDefinition::BuiltIn(BuiltinScalarFunction::Log),
-                args,
-            }) => Transformed::yes(simpl_log(args, info)?),
-
-            // power
-            Expr::ScalarFunction(ScalarFunction {
-                func_def: ScalarFunctionDefinition::BuiltIn(BuiltinScalarFunction::Power),
-                args,
-            }) => Transformed::yes(simpl_power(args, info)?),
-
             // concat
             Expr::ScalarFunction(ScalarFunction {
                 func_def: ScalarFunctionDefinition::BuiltIn(BuiltinScalarFunction::Concat),
@@ -2665,68 +2653,6 @@ mod tests {
         assert_eq!(simplify(expr_eq), lit(true));
     }
-    #[test]
-    fn test_simplify_log() {
-        // Log(c3, 1) ===> 0
-        {
-            let expr = log(col("c3_non_null"), lit(1));
-            let expected = lit(0i64);
-            assert_eq!(simplify(expr), expected);
-        }
-        // Log(c3, c3) ===> 1
-        {
-            let expr = log(col("c3_non_null"), col("c3_non_null"));
-            let expected = lit(1i64);
-            assert_eq!(simplify(expr), expected);
-        }
-        // Log(c3, Power(c3, c4)) ===> c4
-        {
-            let expr = log(
-                col("c3_non_null"),
-                power(col("c3_non_null"), col("c4_non_null")),
-            );
-            let expected = col("c4_non_null");
-            assert_eq!(simplify(expr), 
expected); - } - // Log(c3, c4) ===> Log(c3, c4) - { - let expr = log(col("c3_non_null"), col("c4_non_null")); - let expected = log(col("c3_non_null"), col("c4_non_null")); - assert_eq!(simplify(expr), expected); - } - } - - #[test] - fn test_simplify_power() { - // Power(c3, 0) ===> 1 - { - let expr = power(col("c3_non_null"), lit(0)); - let expected = lit(1i64); - assert_eq!(simplify(expr), expected); - } - // Power(c3, 1) ===> c3 - { - let expr = power(col("c3_non_null"), lit(1)); - let expected = col("c3_non_null"); - assert_eq!(simplify(expr), expected); - } - // Power(c3, Log(c3, c4)) ===> c4 - { - let expr = power( - col("c3_non_null"), - log(col("c3_non_null"), col("c4_non_null")), - ); - let expected = col("c4_non_null"); - assert_eq!(simplify(expr), expected); - } - // Power(c3, c4) ===> Power(c3, c4) - { - let expr = power(col("c3_non_null"), col("c4_non_null")); - let expected = power(col("c3_non_null"), col("c4_non_null")); - assert_eq!(simplify(expr), expected); - } - } - #[test] fn test_simplify_concat_ws() { let null = lit(ScalarValue::Utf8(None)); diff --git a/datafusion/optimizer/src/simplify_expressions/simplify_exprs.rs b/datafusion/optimizer/src/simplify_expressions/simplify_exprs.rs index 8213af76989f..4e06730133d9 100644 --- a/datafusion/optimizer/src/simplify_expressions/simplify_exprs.rs +++ b/datafusion/optimizer/src/simplify_expressions/simplify_exprs.rs @@ -144,7 +144,7 @@ mod tests { and, binary_expr, col, lit, logical_plan::builder::LogicalPlanBuilder, Expr, ExprSchemable, JoinType, }; - use datafusion_expr::{call_fn, or, BinaryExpr, Cast, Operator}; + use datafusion_expr::{or, BinaryExpr, Cast, Operator}; use crate::test::{assert_fields_eq, test_table_scan_with_name}; use crate::OptimizerContext; @@ -712,42 +712,6 @@ mod tests { assert_optimized_plan_eq(&plan, expected) } - #[test] - fn simplify_project_scalar_fn() -> Result<()> { - // Issue https://github.com/apache/arrow-datafusion/issues/5996 - let schema = Schema::new(vec![Field::new("f", DataType::Float64, false)]); - let plan = table_scan(Some("test"), &schema, None)? - .project(vec![call_fn("power", vec![col("f"), lit(1.0)])?])? - .build()?; - - // before simplify: power(t.f, 1.0) - // after simplify: t.f as "power(t.f, 1.0)" - let expected = "Projection: test.f AS power(test.f,Float64(1))\ - \n TableScan: test"; - - assert_optimized_plan_eq(&plan, expected) - } - - #[test] - fn simplify_scan_predicate() -> Result<()> { - let schema = Schema::new(vec![ - Field::new("f", DataType::Float64, false), - Field::new("g", DataType::Float64, false), - ]); - let plan = table_scan_with_filters( - Some("test"), - &schema, - None, - vec![col("g").eq(call_fn("power", vec![col("f"), lit(1.0)])?)], - )? - .build()?; - - // before simplify: t.g = power(t.f, 1.0) - // after simplify: (t.g = t.f) as "t.g = power(t.f, 1.0)" - let expected = "TableScan: test, full_filters=[g = f AS g = power(f,Float64(1))]"; - assert_optimized_plan_eq(&plan, expected) - } - #[test] fn simplify_is_not_null() -> Result<()> { let table_scan = test_table_scan(); diff --git a/datafusion/optimizer/src/simplify_expressions/utils.rs b/datafusion/optimizer/src/simplify_expressions/utils.rs index 1dd3a6162894..f0ad4738633f 100644 --- a/datafusion/optimizer/src/simplify_expressions/utils.rs +++ b/datafusion/optimizer/src/simplify_expressions/utils.rs @@ -18,11 +18,10 @@ //! 
Utility functions for expression simplification
 
 use datafusion_common::{internal_err, Result, ScalarValue};
-use datafusion_expr::simplify::SimplifyInfo;
 use datafusion_expr::{
     expr::{Between, BinaryExpr, InList, ScalarFunction},
     expr_fn::{and, bitwise_and, bitwise_or, concat_ws, or},
-    lit, BuiltinScalarFunction, Expr, Like, Operator, ScalarFunctionDefinition,
+    lit, BuiltinScalarFunction, Expr, Like, Operator,
 };
 
 pub static POWS_OF_TEN: [i128; 38] = [
@@ -343,77 +342,6 @@ pub fn distribute_negation(expr: Expr) -> Expr {
     }
 }
 
-/// Simplify the `log` function by the relevant rules:
-/// 1. Log(a, 1) ===> 0
-/// 2. Log(a, a) ===> 1
-/// 3. Log(a, Power(a, b)) ===> b
-pub fn simpl_log(current_args: Vec<Expr>, info: &dyn SimplifyInfo) -> Result<Expr> {
-    let mut number = &current_args[0];
-    let mut base = &Expr::Literal(ScalarValue::new_ten(&info.get_data_type(number)?)?);
-    if current_args.len() == 2 {
-        base = &current_args[0];
-        number = &current_args[1];
-    }
-
-    match number {
-        Expr::Literal(value)
-            if value == &ScalarValue::new_one(&info.get_data_type(number)?)? =>
-        {
-            Ok(Expr::Literal(ScalarValue::new_zero(
-                &info.get_data_type(base)?,
-            )?))
-        }
-        Expr::ScalarFunction(ScalarFunction {
-            func_def: ScalarFunctionDefinition::BuiltIn(BuiltinScalarFunction::Power),
-            args,
-        }) if base == &args[0] => Ok(args[1].clone()),
-        _ => {
-            if number == base {
-                Ok(Expr::Literal(ScalarValue::new_one(
-                    &info.get_data_type(number)?,
-                )?))
-            } else {
-                Ok(Expr::ScalarFunction(ScalarFunction::new(
-                    BuiltinScalarFunction::Log,
-                    vec![base.clone(), number.clone()],
-                )))
-            }
-        }
-    }
-}
-
-/// Simplify the `power` function by the relevant rules:
-/// 1. Power(a, 0) ===> 0
-/// 2. Power(a, 1) ===> a
-/// 3. Power(a, Log(a, b)) ===> b
-pub fn simpl_power(current_args: Vec<Expr>, info: &dyn SimplifyInfo) -> Result<Expr> {
-    let base = &current_args[0];
-    let exponent = &current_args[1];
-
-    match exponent {
-        Expr::Literal(value)
-            if value == &ScalarValue::new_zero(&info.get_data_type(exponent)?)? =>
-        {
-            Ok(Expr::Literal(ScalarValue::new_one(
-                &info.get_data_type(base)?,
-            )?))
-        }
-        Expr::Literal(value)
-            if value == &ScalarValue::new_one(&info.get_data_type(exponent)?)? =>
-        {
-            Ok(base.clone())
-        }
-        Expr::ScalarFunction(ScalarFunction {
-            func_def: ScalarFunctionDefinition::BuiltIn(BuiltinScalarFunction::Log),
-            args,
-        }) if base == &args[0] => Ok(args[1].clone()),
-        _ => Ok(Expr::ScalarFunction(ScalarFunction::new(
-            BuiltinScalarFunction::Power,
-            current_args,
-        ))),
-    }
-}
-
 /// Simplify the `concat` function by
 /// 1. filtering out all `null` literals
 /// 2. concatenating contiguous literal arguments
diff --git a/datafusion/physical-expr/src/functions.rs b/datafusion/physical-expr/src/functions.rs
index 79d69b273d2c..124acdc7ac78 100644
--- a/datafusion/physical-expr/src/functions.rs
+++ b/datafusion/physical-expr/src/functions.rs
@@ -197,12 +197,6 @@ pub fn create_physical_fun(
         BuiltinScalarFunction::Trunc => {
             Arc::new(|args| make_scalar_function_inner(math_expressions::trunc)(args))
         }
-        BuiltinScalarFunction::Power => {
-            Arc::new(|args| make_scalar_function_inner(math_expressions::power)(args))
-        }
-        BuiltinScalarFunction::Log => {
-            Arc::new(|args| make_scalar_function_inner(math_expressions::log)(args))
-        }
         BuiltinScalarFunction::Cot => {
             Arc::new(|args| make_scalar_function_inner(math_expressions::cot)(args))
         }
diff --git a/datafusion/physical-expr/src/math_expressions.rs b/datafusion/physical-expr/src/math_expressions.rs
index 384f8d87eb96..b29230de1f76 100644
--- a/datafusion/physical-expr/src/math_expressions.rs
+++ b/datafusion/physical-expr/src/math_expressions.rs
@@ -27,7 +27,7 @@ use arrow::datatypes::DataType;
 use arrow_array::Array;
 use rand::{thread_rng, Rng};
 
-use datafusion_common::ScalarValue::{Float32, Int64};
+use datafusion_common::ScalarValue::Int64;
 use datafusion_common::{exec_err, ScalarValue};
 use datafusion_common::{DataFusionError, Result};
 use datafusion_expr::ColumnarValue;
@@ -374,85 +374,6 @@ pub fn round(args: &[ArrayRef]) -> Result<ArrayRef> {
     }
 }
 
-/// Power SQL function
-pub fn power(args: &[ArrayRef]) -> Result<ArrayRef> {
-    match args[0].data_type() {
-        DataType::Float64 => Ok(Arc::new(make_function_inputs2!(
-            &args[0],
-            &args[1],
-            "base",
-            "exponent",
-            Float64Array,
-            { f64::powf }
-        )) as ArrayRef),
-
-        DataType::Int64 => Ok(Arc::new(make_function_inputs2!(
-            &args[0],
-            &args[1],
-            "base",
-            "exponent",
-            Int64Array,
-            { i64::pow }
-        )) as ArrayRef),
-
-        other => exec_err!("Unsupported data type {other:?} for function power"),
-    }
-}
-
-/// Log SQL function
-pub fn log(args: &[ArrayRef]) -> Result<ArrayRef> {
-    // Support overloaded log(base, x) and log(x) which defaults to log(10, x)
-    // note in f64::log params order is different than in sql. e.g in sql log(base, x) == f64::log(x, base)
-    let mut base = ColumnarValue::Scalar(Float32(Some(10.0)));
-
-    let mut x = &args[0];
-    if args.len() == 2 {
-        x = &args[1];
-        base = ColumnarValue::Array(args[0].clone());
-    }
-    match args[0].data_type() {
-        DataType::Float64 => match base {
-            ColumnarValue::Scalar(ScalarValue::Float32(Some(base))) => {
-                let base = base as f64;
-                Ok(
-                    Arc::new(make_function_scalar_inputs!(x, "x", Float64Array, {
-                        |value: f64| f64::log(value, base)
-                    })) as ArrayRef,
-                )
-            }
-            ColumnarValue::Array(base) => Ok(Arc::new(make_function_inputs2!(
-                x,
-                base,
-                "x",
-                "base",
-                Float64Array,
-                { f64::log }
-            )) as ArrayRef),
-            _ => exec_err!("log function requires a scalar or array for base"),
-        },
-
-        DataType::Float32 => match base {
-            ColumnarValue::Scalar(ScalarValue::Float32(Some(base))) => Ok(Arc::new(
-                make_function_scalar_inputs!(x, "x", Float32Array, {
-                    |value: f32| f32::log(value, base)
-                }),
-            )
-            as ArrayRef),
-            ColumnarValue::Array(base) => Ok(Arc::new(make_function_inputs2!(
-                x,
-                base,
-                "x",
-                "base",
-                Float32Array,
-                { f32::log }
-            )) as ArrayRef),
-            _ => exec_err!("log function requires a scalar or array for base"),
-        },
-
-        other => exec_err!("Unsupported data type {other:?} for function log"),
-    }
-}
-
 ///cot SQL function
 pub fn cot(args: &[ArrayRef]) -> Result<ArrayRef> {
     match args[0].data_type() {
@@ -571,78 +492,6 @@ mod tests {
         assert!(0.0 <= floats.value(0) && floats.value(0) < 1.0);
     }
 
-    #[test]
-    fn test_power_f64() {
-        let args: Vec<ArrayRef> = vec![
-            Arc::new(Float64Array::from(vec![2.0, 2.0, 3.0, 5.0])), // base
-            Arc::new(Float64Array::from(vec![3.0, 2.0, 4.0, 4.0])), // exponent
-        ];
-
-        let result = power(&args).expect("failed to initialize function power");
-        let floats =
-            as_float64_array(&result).expect("failed to initialize function power");
-
-        assert_eq!(floats.len(), 4);
-        assert_eq!(floats.value(0), 8.0);
-        assert_eq!(floats.value(1), 4.0);
-        assert_eq!(floats.value(2), 81.0);
-        assert_eq!(floats.value(3), 625.0);
-    }
-
-    #[test]
-    fn test_power_i64() {
-        let args: Vec<ArrayRef> = vec![
-            Arc::new(Int64Array::from(vec![2, 2, 3, 5])), // base
-            Arc::new(Int64Array::from(vec![3, 2, 4, 4])), // exponent
-        ];
-
-        let result = power(&args).expect("failed to initialize function power");
-        let floats =
-            as_int64_array(&result).expect("failed to initialize function power");
-
-        assert_eq!(floats.len(), 4);
-        assert_eq!(floats.value(0), 8);
-        assert_eq!(floats.value(1), 4);
-        assert_eq!(floats.value(2), 81);
-        assert_eq!(floats.value(3), 625);
-    }
-
-    #[test]
-    fn test_log_f64() {
-        let args: Vec<ArrayRef> = vec![
-            Arc::new(Float64Array::from(vec![2.0, 2.0, 3.0, 5.0])), // base
-            Arc::new(Float64Array::from(vec![8.0, 4.0, 81.0, 625.0])), // x
-        ];
-
-        let result = log(&args).expect("failed to initialize function log");
-        let floats =
-            as_float64_array(&result).expect("failed to initialize function log");
-
-        assert_eq!(floats.len(), 4);
-        assert_eq!(floats.value(0), 3.0);
-        assert_eq!(floats.value(1), 2.0);
-        assert_eq!(floats.value(2), 4.0);
-        assert_eq!(floats.value(3), 4.0);
-    }
-
-    #[test]
-    fn test_log_f32() {
-        let args: Vec<ArrayRef> = vec![
-            Arc::new(Float32Array::from(vec![2.0, 2.0, 3.0, 5.0])), // base
-            Arc::new(Float32Array::from(vec![8.0, 4.0, 81.0, 625.0])), // x
-        ];
-
-        let result = log(&args).expect("failed to initialize function log");
-        let floats =
-            as_float32_array(&result).expect("failed to initialize function log");
-
-        assert_eq!(floats.len(), 4);
-        assert_eq!(floats.value(0), 3.0);
-        assert_eq!(floats.value(1), 2.0);
-        
assert_eq!(floats.value(2), 4.0); - assert_eq!(floats.value(3), 4.0); - } - #[test] fn test_round_f32() { let args: Vec = vec![ diff --git a/datafusion/proto/proto/datafusion.proto b/datafusion/proto/proto/datafusion.proto index b656bededc07..0f245673f6cd 100644 --- a/datafusion/proto/proto/datafusion.proto +++ b/datafusion/proto/proto/datafusion.proto @@ -552,7 +552,7 @@ enum ScalarFunction { Exp = 8; // 9 was Floor // 10 was Ln - Log = 11; + // 11 was Log // 12 was Log10 // 13 was Log2 Round = 14; @@ -605,7 +605,7 @@ enum ScalarFunction { // Trim = 61; // Upper = 62; Coalesce = 63; - Power = 64; + // 64 was Power // 65 was StructFun // 66 was FromUnixtime // 67 Atan2 diff --git a/datafusion/proto/src/generated/pbjson.rs b/datafusion/proto/src/generated/pbjson.rs index c13ae045bdb5..0922fccc7917 100644 --- a/datafusion/proto/src/generated/pbjson.rs +++ b/datafusion/proto/src/generated/pbjson.rs @@ -22794,7 +22794,6 @@ impl serde::Serialize for ScalarFunction { Self::Unknown => "unknown", Self::Ceil => "Ceil", Self::Exp => "Exp", - Self::Log => "Log", Self::Round => "Round", Self::Trunc => "Trunc", Self::Concat => "Concat", @@ -22802,7 +22801,6 @@ impl serde::Serialize for ScalarFunction { Self::InitCap => "InitCap", Self::Random => "Random", Self::Coalesce => "Coalesce", - Self::Power => "Power", Self::Factorial => "Factorial", Self::Cot => "Cot", Self::Nanvl => "Nanvl", @@ -22822,7 +22820,6 @@ impl<'de> serde::Deserialize<'de> for ScalarFunction { "unknown", "Ceil", "Exp", - "Log", "Round", "Trunc", "Concat", @@ -22830,7 +22827,6 @@ impl<'de> serde::Deserialize<'de> for ScalarFunction { "InitCap", "Random", "Coalesce", - "Power", "Factorial", "Cot", "Nanvl", @@ -22879,7 +22875,6 @@ impl<'de> serde::Deserialize<'de> for ScalarFunction { "unknown" => Ok(ScalarFunction::Unknown), "Ceil" => Ok(ScalarFunction::Ceil), "Exp" => Ok(ScalarFunction::Exp), - "Log" => Ok(ScalarFunction::Log), "Round" => Ok(ScalarFunction::Round), "Trunc" => Ok(ScalarFunction::Trunc), "Concat" => Ok(ScalarFunction::Concat), @@ -22887,7 +22882,6 @@ impl<'de> serde::Deserialize<'de> for ScalarFunction { "InitCap" => Ok(ScalarFunction::InitCap), "Random" => Ok(ScalarFunction::Random), "Coalesce" => Ok(ScalarFunction::Coalesce), - "Power" => Ok(ScalarFunction::Power), "Factorial" => Ok(ScalarFunction::Factorial), "Cot" => Ok(ScalarFunction::Cot), "Nanvl" => Ok(ScalarFunction::Nanvl), diff --git a/datafusion/proto/src/generated/prost.rs b/datafusion/proto/src/generated/prost.rs index 092d5c59d081..db7614144983 100644 --- a/datafusion/proto/src/generated/prost.rs +++ b/datafusion/proto/src/generated/prost.rs @@ -2851,7 +2851,7 @@ pub enum ScalarFunction { Exp = 8, /// 9 was Floor /// 10 was Ln - Log = 11, + /// 11 was Log /// 12 was Log10 /// 13 was Log2 Round = 14, @@ -2904,7 +2904,7 @@ pub enum ScalarFunction { /// Trim = 61; /// Upper = 62; Coalesce = 63, - Power = 64, + /// 64 was Power /// 65 was StructFun /// 66 was FromUnixtime /// 67 Atan2 @@ -2989,7 +2989,6 @@ impl ScalarFunction { ScalarFunction::Unknown => "unknown", ScalarFunction::Ceil => "Ceil", ScalarFunction::Exp => "Exp", - ScalarFunction::Log => "Log", ScalarFunction::Round => "Round", ScalarFunction::Trunc => "Trunc", ScalarFunction::Concat => "Concat", @@ -2997,7 +2996,6 @@ impl ScalarFunction { ScalarFunction::InitCap => "InitCap", ScalarFunction::Random => "Random", ScalarFunction::Coalesce => "Coalesce", - ScalarFunction::Power => "Power", ScalarFunction::Factorial => "Factorial", ScalarFunction::Cot => "Cot", ScalarFunction::Nanvl => "Nanvl", @@ 
-3011,7 +3009,6 @@ impl ScalarFunction { "unknown" => Some(Self::Unknown), "Ceil" => Some(Self::Ceil), "Exp" => Some(Self::Exp), - "Log" => Some(Self::Log), "Round" => Some(Self::Round), "Trunc" => Some(Self::Trunc), "Concat" => Some(Self::Concat), @@ -3019,7 +3016,6 @@ impl ScalarFunction { "InitCap" => Some(Self::InitCap), "Random" => Some(Self::Random), "Coalesce" => Some(Self::Coalesce), - "Power" => Some(Self::Power), "Factorial" => Some(Self::Factorial), "Cot" => Some(Self::Cot), "Nanvl" => Some(Self::Nanvl), diff --git a/datafusion/proto/src/logical_plan/from_proto.rs b/datafusion/proto/src/logical_plan/from_proto.rs index 9c24a3941895..6a2e89fe00a3 100644 --- a/datafusion/proto/src/logical_plan/from_proto.rs +++ b/datafusion/proto/src/logical_plan/from_proto.rs @@ -39,9 +39,9 @@ use datafusion_expr::window_frame::{check_window_frame, regularize_window_order_ use datafusion_expr::{ ceil, coalesce, concat_expr, concat_ws_expr, cot, ends_with, exp, expr::{self, InList, Sort, WindowFunction}, - factorial, initcap, iszero, log, + factorial, initcap, iszero, logical_plan::{PlanType, StringifiedPlan}, - nanvl, power, random, round, trunc, AggregateFunction, Between, BinaryExpr, + nanvl, random, round, trunc, AggregateFunction, Between, BinaryExpr, BuiltInWindowFunction, BuiltinScalarFunction, Case, Cast, Expr, GetFieldAccess, GetIndexedField, GroupingSet, GroupingSet::GroupingSets, @@ -421,7 +421,6 @@ impl From<&protobuf::ScalarFunction> for BuiltinScalarFunction { ScalarFunction::Unknown => todo!(), ScalarFunction::Cot => Self::Cot, ScalarFunction::Exp => Self::Exp, - ScalarFunction::Log => Self::Log, ScalarFunction::Factorial => Self::Factorial, ScalarFunction::Ceil => Self::Ceil, ScalarFunction::Round => Self::Round, @@ -432,7 +431,6 @@ impl From<&protobuf::ScalarFunction> for BuiltinScalarFunction { ScalarFunction::InitCap => Self::InitCap, ScalarFunction::Random => Self::Random, ScalarFunction::Coalesce => Self::Coalesce, - ScalarFunction::Power => Self::Power, ScalarFunction::Nanvl => Self::Nanvl, ScalarFunction::Iszero => Self::Iszero, } @@ -1320,14 +1318,6 @@ pub fn parse_expr( ScalarFunction::Coalesce => { Ok(coalesce(parse_exprs(args, registry, codec)?)) } - ScalarFunction::Power => Ok(power( - parse_expr(&args[0], registry, codec)?, - parse_expr(&args[1], registry, codec)?, - )), - ScalarFunction::Log => Ok(log( - parse_expr(&args[0], registry, codec)?, - parse_expr(&args[1], registry, codec)?, - )), ScalarFunction::Cot => Ok(cot(parse_expr(&args[0], registry, codec)?)), ScalarFunction::Nanvl => Ok(nanvl( parse_expr(&args[0], registry, codec)?, diff --git a/datafusion/proto/src/logical_plan/to_proto.rs b/datafusion/proto/src/logical_plan/to_proto.rs index bd964b43d418..db9653e32346 100644 --- a/datafusion/proto/src/logical_plan/to_proto.rs +++ b/datafusion/proto/src/logical_plan/to_proto.rs @@ -1410,7 +1410,6 @@ impl TryFrom<&BuiltinScalarFunction> for protobuf::ScalarFunction { BuiltinScalarFunction::Cot => Self::Cot, BuiltinScalarFunction::Exp => Self::Exp, BuiltinScalarFunction::Factorial => Self::Factorial, - BuiltinScalarFunction::Log => Self::Log, BuiltinScalarFunction::Ceil => Self::Ceil, BuiltinScalarFunction::Round => Self::Round, BuiltinScalarFunction::Trunc => Self::Trunc, @@ -1420,7 +1419,6 @@ impl TryFrom<&BuiltinScalarFunction> for protobuf::ScalarFunction { BuiltinScalarFunction::InitCap => Self::InitCap, BuiltinScalarFunction::Random => Self::Random, BuiltinScalarFunction::Coalesce => Self::Coalesce, - BuiltinScalarFunction::Power => Self::Power, 
BuiltinScalarFunction::Nanvl => Self::Nanvl, BuiltinScalarFunction::Iszero => Self::Iszero, };
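For readers following the migration, here is a minimal usage sketch of how the relocated functions are reached as expressions through the `datafusion-functions` crate instead of `BuiltinScalarFunction::Power`/`Log`. The `power`/`log` helper paths below are assumed from the `export_functions!` entries added in datafusion/functions/src/math/mod.rs above; the exact re-export location may differ.

    // Hypothetical usage sketch, not part of the diff.
    use datafusion_expr::{col, lit, Expr};
    // Assumed to be the expression helpers generated by `export_functions!`
    // for the new PowerFunc / LogFunc UDFs.
    use datafusion_functions::math::{log, power};

    fn build_exprs() -> (Expr, Expr) {
        // power(f, 2.0): now a ScalarUDF call rather than BuiltinScalarFunction::Power
        let p = power(col("f"), lit(2.0));
        // log(2.0, f): base comes first, matching the SQL argument order; the
        // simplify() rules (e.g. log(a, power(a, b)) => b) live on the UDF itself
        let l = log(lit(2.0), col("f"));
        (p, l)
    }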