diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 7185cef3e..903507b7a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -70,7 +70,8 @@ To run functional tests we rely on [dagger](https://dagger.io/). This launches a
 
 ```sh
 pip install -r dagger/requirements.txt
-python dagger/run_dbt_spark_tests.py --profile apache_spark --test-path tests/functional/adapter/incremental_strategies/test_microbatch.py
+python dagger/run_dbt_spark_tests.py --profile databricks_sql_endpoint --test-path tests/functional/adapter/test_basic.py::TestSimpleMaterializationsSpark::test_base
+```
 
 `--profile`: required, this is the kind of spark connection to test against
diff --git a/dagger/run_dbt_spark_tests.py b/dagger/run_dbt_spark_tests.py
index 16192541d..67fa56587 100644
--- a/dagger/run_dbt_spark_tests.py
+++ b/dagger/run_dbt_spark_tests.py
@@ -150,8 +150,6 @@ async def test_spark(test_args):
     tst_container = tst_container.with_(env_variables(TESTING_ENV_VARS))
 
     test_path = test_args.test_path if test_args.test_path else "tests/functional/adapter"
-    # TODO: remove before merging!
-    test_path = "tests/functional/adapter/incremental_strategies/test_microbatch.py"
     result = await tst_container.with_exec(
         ["pytest", "-v", "--profile", test_profile, "-n", "auto", test_path]
     ).stdout()
diff --git a/dbt/include/spark/macros/materializations/incremental/strategies.sql b/dbt/include/spark/macros/materializations/incremental/strategies.sql
index 28ba8284e..4ffead6a0 100644
--- a/dbt/include/spark/macros/materializations/incremental/strategies.sql
+++ b/dbt/include/spark/macros/materializations/incremental/strategies.sql
@@ -79,8 +79,7 @@
     {#-- microbatch wraps insert_overwrite, and requires a partition_by config #}
     {% set missing_partition_key_microbatch_msg -%}
       dbt-spark 'microbatch' incremental strategy requires a `partition_by` config.
-      Ensure you are using a `partition_by` column that is of grain {{ config.get('batch_size') }}
-      for microbatch model {{ model.name }}.
+      Ensure you are using a `partition_by` column that is of grain {{ config.get('batch_size') }}.
     {%- endset %}
 
     {%- if not config.get('partition_by') -%}
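
For context on the `strategies.sql` hunk above: a minimal sketch of a microbatch model config that would satisfy the `partition_by` check guarded by `missing_partition_key_microbatch_msg`. This sketch is not part of the diff; the model body, the `date_day` column, and the `ref('events')` source are hypothetical.

```sql
{#- Hypothetical microbatch model: `partition_by` uses the same day-grain
    column as `event_time`, matching the grain named by `batch_size`. -#}
{{
    config(
        materialized='incremental',
        incremental_strategy='microbatch',
        event_time='date_day',
        batch_size='day',
        begin='2024-01-01',
        partition_by=['date_day'],
    )
}}

select * from {{ ref('events') }}
```

Omitting `partition_by` from a config like this would trip the `{%- if not config.get('partition_by') -%}` branch shown at the end of the hunk and surface the shortened error message.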