Commit 59ab86e
partitioned-hash-gluejob altered - v1
madhu-k-sr2 committed Dec 13, 2024
1 parent ce10c3d commit 59ab86e
Showing 1 changed file with 2 additions and 2 deletions.
@@ -292,7 +292,7 @@ def write_rds_df_to_s3_parquet(df_rds_write: DataFrame,
         HASHBYTES('SHA2_256', CONCAT_WS('', {', '.join(all_columns_except_pkey)})), 1), 3, 66)) AS RowHash,
         YEAR({date_partition_column_name}) AS year,
         MONTH({date_partition_column_name}) AS month
-        FROM {rds_sqlserver_db_schema}.[{rds_sqlserver_db_table}]
+        FROM {rds_sqlserver_db}.{rds_sqlserver_db_schema}.{rds_sqlserver_db_table}
         """.strip()
 
     incremental_run_bool = args.get('incremental_run_bool', 'false')
@@ -301,7 +301,7 @@ def write_rds_df_to_s3_parquet(df_rds_write: DataFrame,
     if rds_query_where_clause is not None:
 
         rds_db_hash_cols_query_str = rds_db_hash_cols_query_str + \
-            f""" WHERE {rds_query_where_clause.lstrip()}"""
+            f""" WHERE {rds_query_where_clause.rstrip()}"""
 
     elif incremental_run_bool == 'true':
         existing_prq_hashed_rows_df = CustomPysparkMethods.get_s3_parquet_df_v2(
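For reference, here is a minimal, self-contained Python sketch of what the two amended lines produce. This is not the repository's code: the sample values are hypothetical, and the SELECT ... LOWER(SUBSTRING(CONVERT(...))) prefix is only a reconstruction inferred from the closing parentheses visible in the diff; the variable names and the two changed lines themselves come from the commit.

# Hypothetical sample values; only the variable names come from the diff.
rds_sqlserver_db = "MyDatabase"
rds_sqlserver_db_schema = "dbo"
rds_sqlserver_db_table = "my_table"
date_partition_column_name = "created_date"
all_columns_except_pkey = ["col_a", "col_b"]

# First change: the FROM clause now uses a fully qualified three-part
# SQL Server name (database.schema.table) instead of schema.[table],
# so the query resolves regardless of the connection's default database.
# The SELECT prefix below is an assumption reconstructed from the diff.
rds_db_hash_cols_query_str = f"""
SELECT LOWER(SUBSTRING(CONVERT(VARCHAR(66),
    HASHBYTES('SHA2_256', CONCAT_WS('', {', '.join(all_columns_except_pkey)})), 1), 3, 66)) AS RowHash,
    YEAR({date_partition_column_name}) AS year,
    MONTH({date_partition_column_name}) AS month
FROM {rds_sqlserver_db}.{rds_sqlserver_db_schema}.{rds_sqlserver_db_table}
""".strip()

# Second change: an optional caller-supplied filter is appended with
# rstrip() rather than lstrip(), trimming trailing whitespace from the
# clause; the literal " WHERE " already supplies the left-hand spacing.
rds_query_where_clause = "created_date >= '2024-01-01'   "
if rds_query_where_clause is not None:
    rds_db_hash_cols_query_str = rds_db_hash_cols_query_str + \
        f""" WHERE {rds_query_where_clause.rstrip()}"""

print(rds_db_hash_cols_query_str)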
