Commit 3b7cb4d: adjust timestamp casting
jdye64 committed May 2, 2022
1 parent 8beb813 · commit 3b7cb4d
Showing 4 changed files with 5 additions and 16 deletions.

dask_planner/src/expression.rs (6 changes: 4 additions & 2 deletions)

@@ -96,7 +96,6 @@ impl PyExpr {
             }
             _ => {
                 println!("Encountered a non-Aggregate type");
-
                 name.clone().to_ascii_uppercase()
             }
         }

@@ -109,7 +108,10 @@ impl PyExpr {
             }
             Expr::Column(column) => column.name.clone(),
             Expr::ScalarVariable(..) => unimplemented!("ScalarVariable!!!"),
-            Expr::Literal(..) => unimplemented!("Literal!!!"),
+            Expr::Literal(scalar_value) => {
+                println!("Scalar Value: {:?}", scalar_value);
+                unimplemented!("Literal!!!")
+            }
             Expr::BinaryExpr { .. } => {
                 // If the BinaryExpr does not have an Alias
                 // Ex: `df.a - Int64(1)` then use the String

dask_sql/physical/rel/logical/project.py (4 changes: 0 additions & 4 deletions)

@@ -38,10 +38,6 @@ def convert(self, rel: "LogicalPlan", context: "dask_sql.Context") -> DataContai
         new_columns = {}
         new_mappings = {}
 
-        # Debugging only
-        for key, expr in named_projects:
-            print(f"Key: {key} - Expr: {expr.toString()}", str(key), expr)
-
         # Collect all (new) columns this Projection will limit to
         for key, expr in named_projects:
 

dask_sql/physical/rex/core/call.py (10 changes: 1 addition & 9 deletions)

@@ -235,7 +235,7 @@ def cast(self, operand, rex=None) -> SeriesOrScalar:
         # TODO: ideally we don't want to directly access the datetimes,
         # but Pandas can't truncate timezone datetimes and cuDF can't
         # truncate datetimes
-        if output_type == "DATE":
+        if output_type == "DATE" or output_type == "TIMESTAMP":
             return return_column.dt.floor("D").astype(python_type)
 
         return return_column
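
Note on the change above: casting to TIMESTAMP now takes the same path as DATE, flooring the column to the day boundary before the dtype cast. A minimal standalone pandas sketch of that behavior (illustrative only, not dask_sql code; the sample values and the datetime64[ns] dtype standing in for python_type are assumptions):

import pandas as pd

# Floor datetimes to the day boundary, then cast to the target dtype,
# mirroring return_column.dt.floor("D").astype(python_type) above.
s = pd.Series(pd.to_datetime(["2022-05-02 13:45:12", "2022-05-03 00:30:00"]))
floored = s.dt.floor("D")                  # time-of-day is truncated
result = floored.astype("datetime64[ns]")  # stand-in for python_type
print(result)
# 0   2022-05-02
# 1   2022-05-03
# dtype: datetime64[ns]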

@@ -887,24 +887,16 @@ def convert(
         context: "dask_sql.Context",
     ) -> SeriesOrScalar:
 
-        print(f"\n\nEntering call.py convert for expr: {expr.toString()}")
-
-        for ex in expr.getOperands():
-            print(f"convert operand expr: {ex.toString()}")
-
         # Prepare the operands by turning the RexNodes into python expressions
         operands = [
             RexConverter.convert(rel, o, dc, context=context)
             for o in expr.getOperands()
         ]
 
-        print(f"\nOperands post conversion: {operands}")
-
         # Now use the operator name in the mapping
         # TODO: obviously this needs to not be hardcoded but not sure of the best place to pull the value from currently???
         schema_name = "root"
         operator_name = expr.getOperatorName().lower()
-        print(f"Operator Name: {operator_name}")
 
         try:
             operation = self.OPERATION_MAPPING[operator_name]
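
Note on the hunk above: after converting each operand RexNode, convert() lowercases the operator name and resolves the operation from OPERATION_MAPPING inside a try block (the except branch lies outside this hunk). A minimal sketch of that lookup-and-dispatch pattern (illustrative only; the table contents, the dispatch() helper, and the KeyError fallback are assumptions, only the lowercase-lookup shape mirrors the diff):

# Hypothetical, simplified dispatch table; the real OPERATION_MAPPING in
# call.py maps SQL operator names to dask_sql Operation objects.
OPERATION_MAPPING = {
    "+": lambda lhs, rhs: lhs + rhs,
    "lower": lambda s: s.lower(),
}

def dispatch(operator_name, *operands):
    # Operator names are normalized with .lower() before lookup,
    # matching expr.getOperatorName().lower() in the diff.
    try:
        operation = OPERATION_MAPPING[operator_name.lower()]
    except KeyError:
        raise NotImplementedError(f"Operator {operator_name} not supported")
    return operation(*operands)

print(dispatch("+", 2, 3))       # 5
print(dispatch("LOWER", "AbC"))  # abc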

tests/integration/test_groupby.py (1 change: 0 additions & 1 deletion)

@@ -22,7 +22,6 @@ def timeseries_df(c):
     return None
 
 
-@pytest.mark.skip(reason="WIP DataFusion")
 def test_group_by(c):
     return_df = c.sql(
         """
