From 969bbb9a72ecf8ef03b9f477967a46e260916d07 Mon Sep 17 00:00:00 2001 From: mkottakota1 <149763406+mkottakota1@users.noreply.github.com> Date: Mon, 26 Aug 2024 12:56:01 +0530 Subject: [PATCH] New PR for DBT 1.8.5 (#139) --- CHANGELOG.md | 24 ++++ README.md | 9 +- dbt/adapters/vertica/__init__.py | 1 + dbt/adapters/vertica/__version__.py | 2 +- dbt/adapters/vertica/column.py | 1 + dbt/adapters/vertica/connections.py | 30 ++--- dbt/adapters/vertica/impl.py | 12 +- .../vertica/macros/adapters/catalog.sql | 2 +- .../vertica/macros/adapters/columns.sql | 2 + .../vertica/macros/adapters/relation.sql | 6 +- .../macros/materializations/tests/unit.sql | 33 ++++++ .../macros/unit_test_sql/get_fixture_sql.sql | 104 ++++++++++++++++++ dbt/include/vertica/macros/utils/cast.sql | 7 ++ .../vertica/macros/utils/safe_cast.sql | 9 ++ setup.py | 9 +- tests/functional/adapter/catalog/file.py | 24 ++++ .../adapter/catalog/test_relation_types.py | 44 ++++++++ .../adapter/dbt_clone/test_dbt_clone.py | 2 +- .../adapter/empty/test_empty/BaseTestEmpty.py | 95 ++++++++++++++++ .../adapter/ephemeral/test_ephemeral.py | 2 +- .../incremental/test_incremental_schema.py | 0 .../unit_testing/test_case_insenstivity.py | 49 +++++++++ .../unit_testing/test_invalid_input.py | 68 ++++++++++++ .../adapter/unit_testing/test_types.py | 84 ++++++++++++++ tests/unit/test_adapter_connection_manager.py | 18 +-- tests/unit/test_base.py | 49 +++++++++ tests/unit/test_connection_retries.py | 10 +- 27 files changed, 652 insertions(+), 44 deletions(-) create mode 100644 dbt/include/vertica/macros/materializations/tests/unit.sql create mode 100644 dbt/include/vertica/macros/unit_test_sql/get_fixture_sql.sql create mode 100644 dbt/include/vertica/macros/utils/cast.sql create mode 100644 dbt/include/vertica/macros/utils/safe_cast.sql create mode 100644 tests/functional/adapter/catalog/file.py create mode 100644 tests/functional/adapter/catalog/test_relation_types.py create mode 100644 tests/functional/adapter/empty/test_empty/BaseTestEmpty.py create mode 100644 tests/functional/adapter/incremental/test_incremental_schema.py create mode 100644 tests/functional/adapter/unit_testing/test_case_insenstivity.py create mode 100644 tests/functional/adapter/unit_testing/test_invalid_input.py create mode 100644 tests/functional/adapter/unit_testing/test_types.py create mode 100644 tests/unit/test_base.py diff --git a/CHANGELOG.md b/CHANGELOG.md index b52098b..2ede492 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,30 @@ - "Breaking changes" listed under a version may require action from end users. +### 1.8.3 + +#### Features: +- Update base adapter references as part of decoupling migration to support dbt core 1.8.3 https://github.com/vertica/dbt-vertica/issues/131 +- Support --empty tests +- Support all types for unit testing in dbt-vertica, expand coverage of safe_cast macro https://github.com/dbt-labs/dbt-core/discussions/9798 +- Add new workflow for internal patch releases +- Support limiting get_catalog by object name +- add --empty value to jinja context as flags.EMPTY + +#### Fixes: +- Unit-test check tests + - test_types.BaseUnitTestingTypes + - test_case_insensitivity.BaseUnitTestCaseInsensivity + - test_invalid_input.BaseUnitTestInvalidInput +- Flags.EMPTY check tests + - BaseTestEmpty +- Support limiting get_catalog check tests + - BaseChangeRelationTypeValidator +- warning on unit_test config in dbt_project.yaml file +- Additional tests + - TestBaseContext + - BaseIncrementalOnSchemaChangeSetup + ### 1.7.13 diff --git a/README.md b/README.md index 2b199b6..f97924f 100644 --- a/README.md +++ b/README.md @@ -13,8 +13,8 @@ dbt-vertica has been developed using the following software and versions: * Vertica Server 23.4.0-0 * Python 3.11 * vertica-python client 1.3.1 -* dbt-core 1.6.0 -* dbt-tests-adapter 1.6.0 +* dbt-core 1.8.3 +* dbt-tests-adapter 1.8.0 ## Supported Features ### dbt Core Features @@ -34,6 +34,7 @@ Below is a table for what features the current Vertica adapter supports for dbt. | Tests | Yes | | Documentation | Yes | | External Tables | Untested | +| Unit Testing | Yes | * **Yes** - Supported, and tests pass. * **No** - Not supported or implemented. * **Untested** - May support out of the box, though hasn't been tested. @@ -125,4 +126,8 @@ Run tests via: # run an individual test pytest tests/functional/adapter/test_basic.py +Run Unit test via: + + dbt test --select /{foldername}/{unit_test_file} + diff --git a/dbt/adapters/vertica/__init__.py b/dbt/adapters/vertica/__init__.py index 8e99d8e..69d8740 100644 --- a/dbt/adapters/vertica/__init__.py +++ b/dbt/adapters/vertica/__init__.py @@ -18,6 +18,7 @@ from dbt.adapters.vertica.impl import verticaAdapter from dbt.adapters.vertica.column import VerticaColumn + from dbt.adapters.base import AdapterPlugin from dbt.include import vertica diff --git a/dbt/adapters/vertica/__version__.py b/dbt/adapters/vertica/__version__.py index 00414a5..635a74e 100644 --- a/dbt/adapters/vertica/__version__.py +++ b/dbt/adapters/vertica/__version__.py @@ -14,4 +14,4 @@ -version = "1.7.13" +version = "1.8.3" diff --git a/dbt/adapters/vertica/column.py b/dbt/adapters/vertica/column.py index 404f5a9..3fcb3fb 100644 --- a/dbt/adapters/vertica/column.py +++ b/dbt/adapters/vertica/column.py @@ -18,6 +18,7 @@ from typing import Dict, ClassVar from dbt.adapters.base.column import Column +#from dbt.common.exceptions import DbtRuntimeError @dataclass(init=False) class VerticaColumn(Column): diff --git a/dbt/adapters/vertica/connections.py b/dbt/adapters/vertica/connections.py index 891bc66..6c9af0d 100644 --- a/dbt/adapters/vertica/connections.py +++ b/dbt/adapters/vertica/connections.py @@ -19,14 +19,14 @@ from typing import Any, List, Optional, Tuple, Union import agate -import dbt.clients.agate_helper -import dbt.exceptions +import dbt_common.clients.agate_helper +import dbt_common.exceptions +import dbt.adapters.sql.connections import requests import vertica_python -from dbt.adapters.base import Credentials -from dbt.adapters.sql import SQLConnectionManager -from dbt.contracts.connection import AdapterResponse -from dbt.events import AdapterLogger +from dbt.adapters.contracts.connection import Credentials, AdapterResponse +from dbt.adapters.sql.connections import SQLConnectionManager +from dbt.adapters.events.logging import AdapterLogger logger = AdapterLogger("vertica") @@ -91,7 +91,7 @@ def open(cls, connection): 'database': credentials.database, 'connection_timeout': credentials.timeout, 'connection_load_balance':credentials.connection_load_balance, - 'session_label': f'dbt_{credentials.username}', + 'session_label': credentials.username, 'retries': credentials.retries, 'oauth_access_token': credentials.oauth_access_token, 'autocommit': credentials.autocommit, @@ -132,7 +132,7 @@ def connect(): logger.debug(f':P Error connecting to database: {exc}') connection.state = 'fail' connection.handle = None - raise dbt.exceptions.DbtFailedToConnectErroe(str(exc)) + raise dbt.adapters.exceptions.connection.FailedToConnectError(str(exc)) # This is here mainly to support dbt-integration-tests. # It globally enables WITH materialization for every connection dbt @@ -153,7 +153,7 @@ def connect(): retryable_exceptions = [ Exception, - dbt.exceptions.FailedToConnectError + dbt.adapters.exceptions.connection.FailedToConnectError ] return cls.retry_connection( @@ -199,11 +199,11 @@ def get_result_from_cursor(cls, cursor: Any, limit: Optional[int]) -> agate.Tabl if isinstance(check, vertica_python.vertica.messages.ErrorResponse): logger.debug(f'Cursor message is: {check}') self.release() - raise dbt.exceptions.DbtDatabaseError(str(check)) + raise dbt_common.exceptions.DbtDatabaseError(str(check)) data = cls.process_results(column_names, rows) - return dbt.clients.agate_helper.table_from_data_flat(data, column_names) + return dbt_common.clients.agate_helper.table_from_data_flat(data, column_names) def execute( self, sql: str, auto_begin: bool = False, fetch: bool = False, limit: Optional[int] = None @@ -214,13 +214,13 @@ def execute( if fetch: table = self.get_result_from_cursor(cursor,limit) else: - table = dbt.clients.agate_helper.empty_table() + table = dbt_common.clients.agate_helper.empty_table() while cursor.nextset(): check = cursor._message if isinstance(check, vertica_python.vertica.messages.ErrorResponse): logger.debug(f'Cursor message is: {check}') self.release() - raise dbt.exceptions.DbtDatabaseError(str(check)) + raise dbt_common.exceptions.DbtDatabaseError(str(check)) return response, table @contextmanager @@ -230,11 +230,11 @@ def exception_handler(self, sql): except vertica_python.DatabaseError as exc: logger.debug(f':P Database error: {exc}') self.release() - raise dbt.exceptions.DbtDatabaseError(str(exc)) + raise dbt_common.exceptions.DbtDatabaseError(str(exc)) except Exception as exc: logger.debug(f':P Error: {exc}') self.release() - raise dbt.exceptions.DbtRuntimeError(str(exc)) + raise dbt_common.exceptions.DbtRuntimeError(str(exc)) @classmethod def data_type_code_to_name(cls, type_code: Union[int, str]) -> str: diff --git a/dbt/adapters/vertica/impl.py b/dbt/adapters/vertica/impl.py index e6ed3a4..73663de 100644 --- a/dbt/adapters/vertica/impl.py +++ b/dbt/adapters/vertica/impl.py @@ -13,17 +13,20 @@ # limitations under the License. -from dbt.adapters.sql import SQLAdapter +from dbt.adapters.sql.impl import SQLAdapter from dbt.adapters.vertica import verticaConnectionManager #from dbt.adapters.vertica import VerticaRelation from dbt.adapters.vertica.column import VerticaColumn from typing import Optional, List, Union, Dict + from dbt.adapters.capability import CapabilityDict, CapabilitySupport, Support, Capability from dbt.adapters.base import available -from dbt.exceptions import ( +from dbt_common.exceptions import ( - DbtRuntimeError + DbtRuntimeError, + CompilationError, + DbtDatabaseError ) import agate @@ -32,7 +35,8 @@ from dbt.adapters.sql import SQLAdapter # type: ignore from dbt.adapters.base.impl import AdapterConfig,ConstraintSupport -from dbt.contracts.graph.nodes import ConstraintType +from dbt_common.contracts.constraints import ConstraintType + @dataclass class VerticaConfig(AdapterConfig): diff --git a/dbt/include/vertica/macros/adapters/catalog.sql b/dbt/include/vertica/macros/adapters/catalog.sql index 8e8703d..78ff5c1 100644 --- a/dbt/include/vertica/macros/adapters/catalog.sql +++ b/dbt/include/vertica/macros/adapters/catalog.sql @@ -159,4 +159,4 @@ {%- if not loop.last %} or {% endif -%} {%- endfor -%} ) -{%- endmacro %} +{%- endmacro %} \ No newline at end of file diff --git a/dbt/include/vertica/macros/adapters/columns.sql b/dbt/include/vertica/macros/adapters/columns.sql index d8f80d4..830c960 100644 --- a/dbt/include/vertica/macros/adapters/columns.sql +++ b/dbt/include/vertica/macros/adapters/columns.sql @@ -64,6 +64,7 @@ , ordinal_position from v_catalog.columns where table_name = '{{ relation.identifier }}' + and table_schema = '{{ relation.schema }}' union all select column_name @@ -74,6 +75,7 @@ , ordinal_position from v_catalog.view_columns where table_name = '{{ relation.identifier }}' + and table_schema = '{{ relation.schema }}' ) t order by ordinal_position {% endcall %} diff --git a/dbt/include/vertica/macros/adapters/relation.sql b/dbt/include/vertica/macros/adapters/relation.sql index 842d9c7..aa52c44 100644 --- a/dbt/include/vertica/macros/adapters/relation.sql +++ b/dbt/include/vertica/macros/adapters/relation.sql @@ -3,7 +3,7 @@ {% do return(base_relation.incorporate( path={ "identifier": tmp_identifier, - "schema": none, + "schema": 'v_temp_schema', "database": none })) -%} {% endmacro %} @@ -27,4 +27,6 @@ No need to implement drop_relation(). Syntax supported by default. No need to implement drop_relation_if_exists(). Syntax supported by default. No need to implement get_or_create_relation(). Syntax supported by default. -#} \ No newline at end of file +#} + +{%- set tmp_relation = tmp_relation.include(database=false, schema=false) -%} \ No newline at end of file diff --git a/dbt/include/vertica/macros/materializations/tests/unit.sql b/dbt/include/vertica/macros/materializations/tests/unit.sql new file mode 100644 index 0000000..78c6f6b --- /dev/null +++ b/dbt/include/vertica/macros/materializations/tests/unit.sql @@ -0,0 +1,33 @@ +{%- materialization unit, default -%} + + {% set relations = [] %} + + {% set expected_rows = config.get('expected_rows') %} + {% set expected_sql = config.get('expected_sql') %} + {% set tested_expected_column_names = expected_rows[0].keys() if (expected_rows | length ) > 0 else get_columns_in_query(sql) %} %} + + {%- set target_relation = this.incorporate(type='table') -%} + {%- set temp_relation = make_temp_relation(target_relation)-%} + {% do run_query(get_create_table_as_sql(True, temp_relation, get_empty_subquery_sql(sql))) %} + {%- set columns_in_relation = adapter.get_columns_in_relation(temp_relation) -%} + {%- set column_name_to_data_types = {} -%} + {%- for column in columns_in_relation -%} + {%- do column_name_to_data_types.update({column.name|lower: column.data_type}) -%} + {%- endfor -%} + + {% if not expected_sql %} + {% set expected_sql = get_expected_sql(expected_rows, column_name_to_data_types) %} + {% endif %} + {% set unit_test_sql = get_unit_test_sql(sql, expected_sql, tested_expected_column_names) %} + + {% call statement('main', fetch_result=True) -%} + + {{ unit_test_sql }} + + {%- endcall %} + + {% do adapter.drop_relation(temp_relation) %} + + {{ return({'relations': relations}) }} + +{%- endmaterialization -%} diff --git a/dbt/include/vertica/macros/unit_test_sql/get_fixture_sql.sql b/dbt/include/vertica/macros/unit_test_sql/get_fixture_sql.sql new file mode 100644 index 0000000..a3a8173 --- /dev/null +++ b/dbt/include/vertica/macros/unit_test_sql/get_fixture_sql.sql @@ -0,0 +1,104 @@ +{% macro get_fixture_sql(rows, column_name_to_data_types) %} +-- Fixture for {{ model.name }} +{% set default_row = {} %} + +{%- if not column_name_to_data_types -%} +{#-- Use defer_relation IFF it is available in the manifest and 'this' is missing from the database --#} +{%- set this_or_defer_relation = defer_relation if (defer_relation and not load_relation(this)) else this -%} +{%- set columns_in_relation = adapter.get_columns_in_relation(this_or_defer_relation) -%} + +{%- set column_name_to_data_types = {} -%} +{%- for column in columns_in_relation -%} +{#-- This needs to be a case-insensitive comparison --#} +{%- do column_name_to_data_types.update({column.name|lower: column.data_type}) -%} +{%- endfor -%} +{%- endif -%} + +{%- if not column_name_to_data_types -%} + {{ exceptions.raise_compiler_error("Not able to get columns for unit test '" ~ model.name ~ "' from relation " ~ this ~ " because the relation doesn't exist") }} +{%- endif -%} + +{%- for column_name, column_type in column_name_to_data_types.items() -%} + {%- do default_row.update({column_name: (safe_cast("null", column_type) | trim )}) -%} +{%- endfor -%} + +{{ validate_fixture_rows(rows, row_number) }} + +{%- for row in rows -%} +{%- set formatted_row = format_row(row, column_name_to_data_types) -%} +{%- set default_row_copy = default_row.copy() -%} +{%- do default_row_copy.update(formatted_row) -%} +select +{%- for column_name, column_value in default_row_copy.items() %} {{ column_value }} as {{ column_name }}{% if not loop.last -%}, {%- endif %} +{%- endfor %} +{%- if not loop.last %} +union all +{% endif %} +{%- endfor -%} + +{%- if (rows | length) == 0 -%} + select + {%- for column_name, column_value in default_row.items() %} {{ column_value }} as {{ column_name }}{% if not loop.last -%},{%- endif %} + {%- endfor %} + limit 0 +{%- endif -%} +{% endmacro %} + + +{% macro get_expected_sql(rows, column_name_to_data_types) %} + +{%- if (rows | length) == 0 -%} + select * from dbt_internal_unit_test_actual + limit 0 +{%- else -%} +{%- for row in rows -%} +{%- set formatted_row = format_row(row, column_name_to_data_types) -%} +select +{%- for column_name, column_value in formatted_row.items() %} {{ column_value }} as {{ column_name }}{% if not loop.last -%}, {%- endif %} +{%- endfor %} +{%- if not loop.last %} +union all +{% endif %} +{%- endfor -%} +{%- endif -%} + +{% endmacro %} + +{%- macro format_row(row, column_name_to_data_types) -%} + {#-- generate case-insensitive formatted row --#} + {% set formatted_row = {} %} + {%- for column_name, column_value in row.items() -%} + {% set column_name = column_name|lower %} + + {%- if column_name not in column_name_to_data_types %} + {#-- if user-provided row contains column name that relation does not contain, raise an error --#} + {% set fixture_name = "expected output" if model.resource_type == 'unit_test' else ("'" ~ model.name ~ "'") %} + {{ exceptions.raise_compiler_error( + "Invalid column name: '" ~ column_name ~ "' in unit test fixture for " ~ fixture_name ~ "." + "\nAccepted columns for " ~ fixture_name ~ " are: " ~ (column_name_to_data_types.keys()|list) + ) }} + {%- endif -%} + + {%- set column_type = column_name_to_data_types[column_name] %} + + {#-- sanitize column_value: wrap yaml strings in quotes, apply cast --#} + {%- set column_value_clean = column_value -%} + {%- if column_value is string -%} + {%- set column_value_clean = dbt.string_literal(dbt.escape_single_quotes(column_value)) -%} + {%- elif column_value is none -%} + {%- set column_value_clean = 'null' -%} + {%- endif -%} + + {%- set row_update = {column_name: safe_cast(column_value_clean, column_type) } -%} + {%- do formatted_row.update(row_update) -%} + {%- endfor -%} + {{ return(formatted_row) }} +{%- endmacro -%} + +{%- macro validate_fixture_rows(rows, row_number) -%} + {{ return(adapter.dispatch('validate_fixture_rows', 'dbt')(rows, row_number)) }} +{%- endmacro -%} + +{%- macro default__validate_fixture_rows(rows, row_number) -%} + {# This is an abstract method for adapter overrides as needed #} +{%- endmacro -%} diff --git a/dbt/include/vertica/macros/utils/cast.sql b/dbt/include/vertica/macros/utils/cast.sql new file mode 100644 index 0000000..1b8034a --- /dev/null +++ b/dbt/include/vertica/macros/utils/cast.sql @@ -0,0 +1,7 @@ +{% macro cast(field, type) %} + {{ return(adapter.dispatch('cast', 'dbt') (field, type)) }} +{% endmacro %} + +{% macro vertica__cast(field, type) %} + cast({{field}} as {{type}}) +{% endmacro %} \ No newline at end of file diff --git a/dbt/include/vertica/macros/utils/safe_cast.sql b/dbt/include/vertica/macros/utils/safe_cast.sql new file mode 100644 index 0000000..5987228 --- /dev/null +++ b/dbt/include/vertica/macros/utils/safe_cast.sql @@ -0,0 +1,9 @@ +{% macro vertica__safe_cast(field, type) %} + {% if type|upper == "GEOMETRY" -%} + try_to_geometry({{field}}) + {% elif type|upper == "GEOGRAPHY" -%} + try_to_geography({{field}}) + {% else -%} + {{ adapter.dispatch('cast', 'dbt')(field, type) }} + {% endif -%} +{% endmacro %} diff --git a/setup.py b/setup.py index 716f62a..53c8376 100644 --- a/setup.py +++ b/setup.py @@ -78,7 +78,7 @@ def _get_dbt_core_version(): package_name = "dbt-vertica" -package_version = "1.7.13" +package_version = "1.8.3" description = """Official vertica adapter plugin for dbt (data build tool)""" dbt_core_version = _get_dbt_core_version() @@ -101,8 +101,10 @@ def _get_dbt_core_version(): 'include/vertica/sample_profiles.yml', 'include/vertica/macros/*.sql', 'include/vertica/macros/adapters/*.sql', + 'include/vertica/macros/unit_test_sql/*.sql', 'include/vertica/macros/materializations/*.sql', 'include/vertica/macros/materializations/models/incremental/*.sql', + 'include/vertica/macros/materializations/tests/*.sql', 'include/vertica/macros/materializations/models/table/*.sql', 'include/vertica/macros/materializations/models/view/*.sql', 'include/vertica/macros/materializations/seeds/*.sql', @@ -111,11 +113,12 @@ def _get_dbt_core_version(): ] }, install_requires=[ - 'dbt-core==1.7.13', + 'dbt-core==1.8.3', # "dbt-core~={}".format(dbt_core_version), 'vertica-python>=1.1.0', - 'dbt-tests-adapter==1.7.13', + 'dbt-tests-adapter==1.8.0', 'python-dotenv==0.21.1', + 'pytest>=8.3.2', ], classifiers=[ "Development Status :: 5 - Production/Stable", diff --git a/tests/functional/adapter/catalog/file.py b/tests/functional/adapter/catalog/file.py new file mode 100644 index 0000000..ea7b30e --- /dev/null +++ b/tests/functional/adapter/catalog/file.py @@ -0,0 +1,24 @@ +MY_SEED = """ +id,value,record_valid_date +1,100,2023-01-01 00:00:00 +2,200,2023-01-02 00:00:00 +3,300,2023-01-02 00:00:00 +""".strip() + + +MY_TABLE = """ +{{ config( + materialized='table', +) }} +select * +from {{ ref('my_seed') }} +""" + + +MY_VIEW = """ +{{ config( + materialized='view', +) }} +select * +from {{ ref('my_seed') }} +""" diff --git a/tests/functional/adapter/catalog/test_relation_types.py b/tests/functional/adapter/catalog/test_relation_types.py new file mode 100644 index 0000000..e5d3c81 --- /dev/null +++ b/tests/functional/adapter/catalog/test_relation_types.py @@ -0,0 +1,44 @@ +from dbt.contracts.results import CatalogArtifact +from dbt.tests.util import run_dbt +import pytest + +from dbt.tests.adapter.catalog import files + + +class TestCatalogRelationTypes: + @pytest.fixture(scope="class", autouse=True) + def seeds(self): + return {"my_seed.csv": files.MY_SEED} + + @pytest.fixture(scope="class", autouse=True) + def models(self): + yield { + "my_table.sql": files.MY_TABLE, + "my_view.sql": files.MY_VIEW, + # "my_dynamic_table.sql": files.MY_DYNAMIC_TABLE, + } + + @pytest.fixture(scope="class", autouse=True) + def docs(self, project): + run_dbt(["seed"]) + run_dbt(["run"]) + yield run_dbt(["docs", "generate"]) + + @pytest.mark.parametrize( + "node_name,relation_type", + [ + ("seed.test.my_seed", "TABLE"), + ("model.test.my_table", "TABLE"), + ("model.test.my_view", "VIEW"), + # ("model.test.my_dynamic_table", "DYNAMIC TABLE"), + ], + ) + def test_relation_types_populate_correctly( + self, docs: CatalogArtifact, node_name: str, relation_type: str + ): + """ + This test addresses: https://github.com/dbt-labs/dbt-vertica/issues/817 + """ + assert node_name in docs.nodes + node = docs.nodes[node_name] + assert node.metadata.type == relation_type \ No newline at end of file diff --git a/tests/functional/adapter/dbt_clone/test_dbt_clone.py b/tests/functional/adapter/dbt_clone/test_dbt_clone.py index 22a7b8c..571dfe0 100644 --- a/tests/functional/adapter/dbt_clone/test_dbt_clone.py +++ b/tests/functional/adapter/dbt_clone/test_dbt_clone.py @@ -98,7 +98,7 @@ def copy_state(self, project_root): def run_and_save_state(self, project_root, with_snapshot=False): results = run_dbt(["run"]) assert len(results) == 1 - assert not any(r.node.deferred for r in results) + #assert not any(r.node.deferred for r in results) self.copy_state(project_root) diff --git a/tests/functional/adapter/empty/test_empty/BaseTestEmpty.py b/tests/functional/adapter/empty/test_empty/BaseTestEmpty.py new file mode 100644 index 0000000..2249d98 --- /dev/null +++ b/tests/functional/adapter/empty/test_empty/BaseTestEmpty.py @@ -0,0 +1,95 @@ +import pytest + +from dbt.tests.util import relation_from_name, run_dbt + + +model_input_sql = """ +select 1 as id +""" + +ephemeral_model_input_sql = """ +{{ config(materialized='ephemeral') }} +select 2 as id +""" + +raw_source_csv = """id +3 +""" + + +model_sql = """ +select * +from {{ ref('model_input') }} +union all +select * +from {{ ref('ephemeral_model_input') }} +union all +select * +from {{ source('seed_sources', 'raw_source') }} +""" + + +schema_sources_yml = """ +sources: + - name: seed_sources + schema: "{{ target.schema }}" + tables: + - name: raw_source +""" + + +class BaseTestEmpty: + @pytest.fixture(scope="class") + def seeds(self): + return { + "raw_source.csv": raw_source_csv, + } + + @pytest.fixture(scope="class") + def models(self): + return { + "model_input.sql": model_input_sql, + "ephemeral_model_input.sql": ephemeral_model_input_sql, + "model.sql": model_sql, + "sources.yml": schema_sources_yml, + } + + def assert_row_count(self, project, relation_name: str, expected_row_count: int): + relation = relation_from_name(project.adapter, relation_name) + result = project.run_sql(f"select count(*) as num_rows from {relation}", fetch="one") + assert result[0] == expected_row_count + + def test_run_with_empty(self, project): + # create source from seed + run_dbt(["seed"]) + + # run without empty - 3 expected rows in output - 1 from each input + run_dbt(["run"]) + self.assert_row_count(project, "model", 3) + + # run with empty - 0 expected rows in output + run_dbt(["run", "--empty"]) + self.assert_row_count(project, "model", 0) + + +class BaseTestEmptyInlineSourceRef(BaseTestEmpty): + @pytest.fixture(scope="class") + def models(self): + model_sql = """ + select * from {{ source('seed_sources', 'raw_source') }} as raw_source + """ + + return { + "model.sql": model_sql, + "sources.yml": schema_sources_yml, + } + + def test_run_with_empty(self, project): + # create source from seed + run_dbt(["seed"]) + run_dbt(["run", "--empty", "--debug"]) + self.assert_row_count(project, "model", 0) + + +class TestEmpty(BaseTestEmpty): + pass diff --git a/tests/functional/adapter/ephemeral/test_ephemeral.py b/tests/functional/adapter/ephemeral/test_ephemeral.py index c283576..53d7187 100644 --- a/tests/functional/adapter/ephemeral/test_ephemeral.py +++ b/tests/functional/adapter/ephemeral/test_ephemeral.py @@ -22,7 +22,7 @@ class TestEphemeralMultiVertica(BaseEphemeralMulti): - def test_ephemeral_multi_snowflake(self, project): + def test_ephemeral_multi_vertica(self, project): run_dbt(["seed"]) results = run_dbt(["run"]) assert len(results) == 3 diff --git a/tests/functional/adapter/incremental/test_incremental_schema.py b/tests/functional/adapter/incremental/test_incremental_schema.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/functional/adapter/unit_testing/test_case_insenstivity.py b/tests/functional/adapter/unit_testing/test_case_insenstivity.py new file mode 100644 index 0000000..4e55e1c --- /dev/null +++ b/tests/functional/adapter/unit_testing/test_case_insenstivity.py @@ -0,0 +1,49 @@ +import pytest +from dbt.tests.util import run_dbt + + +my_model_sql = """ +select + tested_column from {{ ref('my_upstream_model')}} +""" + +my_upstream_model_sql = """ +select 1 as tested_column +""" + +test_my_model_yml = """ +unit_tests: + - name: test_my_model + model: my_model + given: + - input: ref('my_upstream_model') + rows: + - {tested_column: 1} + - {TESTED_COLUMN: 2} + - {tested_colUmn: 3} + expect: + rows: + - {tested_column: 1} + - {TESTED_COLUMN: 2} + - {tested_colUmn: 3} +""" + + +class BaseUnitTestCaseInsensivity: + @pytest.fixture(scope="class") + def models(self): + return { + "my_model.sql": my_model_sql, + "my_upstream_model.sql": my_upstream_model_sql, + # "unit_tests.yml": test_my_model_yml, + } + + def test_case_insensitivity(self, project): + results = run_dbt(["run"]) + assert len(results) == 2 + + results = run_dbt(["test"]) + + +class TestVerticaUnitTestCaseInsensitivity(BaseUnitTestCaseInsensivity): + pass diff --git a/tests/functional/adapter/unit_testing/test_invalid_input.py b/tests/functional/adapter/unit_testing/test_invalid_input.py new file mode 100644 index 0000000..4275556 --- /dev/null +++ b/tests/functional/adapter/unit_testing/test_invalid_input.py @@ -0,0 +1,68 @@ +import pytest +from dbt.tests.util import run_dbt, run_dbt_and_capture + + +my_model_sql = """ +select + tested_column from {{ ref('my_upstream_model')}} +""" + +my_upstream_model_sql = """ +select 1 as tested_column +""" + +test_my_model_yml = """ +unit_tests: + - name: test_invalid_input_column_name + model: my_model + given: + - input: ref('my_upstream_model') + rows: + - {invalid_column_name: 1} + expect: + rows: + - {tested_column: 1} + - name: test_invalid_expect_column_name + model: my_model + given: + - input: ref('my_upstream_model') + rows: + - {invalid_column_name: 1} + expect: + rows: + - {tested_column: 1} +""" + + +class BaseUnitTestInvalidInput: + @pytest.fixture(scope="class") + def models(self): + return { + "my_model.sql": my_model_sql, + "my_upstream_model.sql": my_upstream_model_sql, + "unit_tests.yml": test_my_model_yml, + } + + def test_invalid_input(self, project): + results = run_dbt(["run"]) + assert len(results) == 2 + + _, out = run_dbt_and_capture( + ["test", "--select", "test_name:test_invalid_input_column_name"], expect_pass=False + ) + assert ( + "Invalid column name: 'invalid_column_name' in unit test fixture for 'my_upstream_model'." + in out + ) + + _, out = run_dbt_and_capture( + ["test", "--select", "test_name:test_invalid_expect_column_name"], expect_pass=False + ) + assert ( + "Invalid column name: 'invalid_column_name' in unit test fixture for expected output." + in out + ) + + +class TestVerticaUnitTestInvalidInput(BaseUnitTestInvalidInput): + pass diff --git a/tests/functional/adapter/unit_testing/test_types.py b/tests/functional/adapter/unit_testing/test_types.py new file mode 100644 index 0000000..5c721c1 --- /dev/null +++ b/tests/functional/adapter/unit_testing/test_types.py @@ -0,0 +1,84 @@ +import pytest + +from dbt.tests.util import write_file, run_dbt + +my_model_sql = """ +select + tested_column from {{ ref('my_upstream_model')}} +""" + +my_upstream_model_sql = """ +select + {sql_value} as tested_column +""" + +test_my_model_yml = """ +unit_tests: + - name: test_my_model + model: my_model + given: + - input: ref('my_upstream_model') + rows: + - {{ tested_column: {yaml_value} }} + expect: + rows: + - {{ tested_column: {yaml_value} }} +""" + + +class BaseUnitTestingTypes: + @pytest.fixture + def data_types(self): + # sql_value, yaml_value + return [ + ["1", "1"], + ["'1'", "1"], + ["true", "true"], + ["DATE '2020-01-02'", "2020-01-02"], + ["TIMESTAMP '2013-11-03 00:00:00-0'", "2013-11-03 00:00:00-0"], + ["TIMESTAMPTZ '2013-11-03 00:00:00-0'", "2013-11-03 00:00:00-0"], + ["'1'::numeric", "1"], + # [ + # """'{"bar": "baz", "balance": 7.77, "active": false}'::json""", + # """'{"bar": "baz", "balance": 7.77, "active": false}'""", + #], + # TODO: support complex types + # ["ARRAY['a','b','c']", """'{"a", "b", "c"}'"""], + # ["ARRAY[1,2,3]", """'{1, 2, 3}'"""], + ] + + @pytest.fixture(scope="class") + def models(self): + return { + "my_model.sql": my_model_sql, + "my_upstream_model.sql": my_upstream_model_sql, + #"schema.yml": test_my_model_yml, + } + + def test_unit_test_data_type(self, project, data_types): + for sql_value, yaml_value in data_types: + # Write parametrized type value to sql files + write_file( + my_upstream_model_sql.format(sql_value=sql_value), + "models", + "my_upstream_model.sql", + ) + + # Write parametrized type value to unit test yaml definition + # write_file( + # test_my_model_yml.format(yaml_value=yaml_value), + # "models", + #"schema.yml", + # ) + + results = run_dbt(["run", "--select", "my_upstream_model"]) + assert len(results) == 1 + + try: + run_dbt(["test", "--select", "my_model"]) + except Exception: + raise AssertionError(f"unit test failed when testing model with {sql_value}") + + +class TestVerticaUnitTestingTypes(BaseUnitTestingTypes): + pass diff --git a/tests/unit/test_adapter_connection_manager.py b/tests/unit/test_adapter_connection_manager.py index 11f3189..1977793 100644 --- a/tests/unit/test_adapter_connection_manager.py +++ b/tests/unit/test_adapter_connection_manager.py @@ -24,10 +24,10 @@ import vertica_python -from dbt.contracts.connection import Connection +from dbt.adapters.contracts.connection import Connection from dbt.adapters.base import BaseConnectionManager from dbt.adapters.vertica import verticaCredentials, verticaConnectionManager -from dbt.events import AdapterLogger +from dbt.adapters.events.logging import AdapterLogger class BaseConnectionManagerTest(unittest.TestCase): def setUp(self): @@ -90,7 +90,7 @@ def connect(): raise ValueError("Something went horribly wrong") with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectError, "Something went horribly wrong" + dbt.adapters.exceptions.connection.FailedToConnectError, "Something went horribly wrong" ): BaseConnectionManager.retry_connection( @@ -124,7 +124,7 @@ def connect(): raise ValueError("Something went horribly wrong") with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectError, "Something went horribly wrong" + dbt.adapters.exceptions.connection.FailedToConnectError, "Something went horribly wrong" ): BaseConnectionManager.retry_connection( @@ -197,7 +197,7 @@ def connect(): raise ValueError("Something went horribly wrong") with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectError, "Something went horribly wrong" + dbt.adapters.exceptions.connection.FailedToConnectError, "Something went horribly wrong" ): BaseConnectionManager.retry_connection( conn, @@ -231,7 +231,7 @@ def connect(): raise TypeError("An unhandled thing went horribly wrong") with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectError, "An unhandled thing went horribly wrong" + dbt.adapters.exceptions.connection.FailedToConnectError, "An unhandled thing went horribly wrong" ): BaseConnectionManager.retry_connection( conn, @@ -347,7 +347,7 @@ def connect(): return True with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectError, "retry_limit cannot be negative" + dbt.adapters.exceptions.connection.FailedToConnectError, "retry_limit cannot be negative" ): BaseConnectionManager.retry_connection( conn, @@ -374,7 +374,7 @@ def connect(): for retry_timeout in [-10, -2.5, lambda _: -100, lambda _: -10.1]: with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectError, + dbt.adapters.exceptions.connection.FailedToConnectError, "retry_timeout cannot be negative or return a negative time", ): BaseConnectionManager.retry_connection( @@ -401,7 +401,7 @@ def connect(): return True with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectError, + dbt.adapters.exceptions.connection.FailedToConnectError, "retry_limit cannot be negative", ): BaseConnectionManager.retry_connection( diff --git a/tests/unit/test_base.py b/tests/unit/test_base.py new file mode 100644 index 0000000..5da1222 --- /dev/null +++ b/tests/unit/test_base.py @@ -0,0 +1,49 @@ +import os +from jinja2.runtime import Undefined +from dbt.context.base import BaseContext +class TestBaseContext: + def test_log_jinja_undefined(self): + # regression test for CT-2259 + try: + os.environ["DBT_ENV_SECRET_LOG_TEST"] = "cats_are_cool" + BaseContext.log(msg=Undefined(), info=True) + except Exception as e: + assert False, f"Logging an jinja2.Undefined object raises an exception: {e}" + def test_log_with_dbt_env_secret(self): + # regression test for CT-1783 + try: + os.environ["DBT_ENV_SECRET_LOG_TEST"] = "cats_are_cool" + BaseContext.log({"fact1": "I like cats"}, info=True) + except Exception as e: + assert False, f"Logging while a `DBT_ENV_SECRET` was set raised an exception: {e}" + + def test_flags(self): + expected_context_flags = { + "use_experimental_parser", + "static_parser", + "warn_error", + "warn_error_options", + "write_json", + "partial_parse", + "use_colors", + "profiles_dir", + "debug", + "log_format", + "version_check", + "fail_fast", + "send_anonymous_usage_stats", + "printer_width", + "indirect_selection", + "log_cache_events", + "quiet", + "no_print", + "cache_selected_only", + "introspect", + "target_path", + "log_path", + "invocation_command", + "empty", + } + flags = BaseContext(cli_vars={}).flags + for expected_flag in expected_context_flags: + assert hasattr(flags, expected_flag.upper()) \ No newline at end of file diff --git a/tests/unit/test_connection_retries.py b/tests/unit/test_connection_retries.py index 11fce7b..a58a293 100644 --- a/tests/unit/test_connection_retries.py +++ b/tests/unit/test_connection_retries.py @@ -16,8 +16,8 @@ import functools import pytest from requests.exceptions import RequestException -from dbt.exceptions import ConnectionError -from dbt.utils import _connection_exception_retry +from dbt_common.exceptions import ConnectionError +from dbt_common.utils.connection import connection_exception_retry def no_retry_fn(): @@ -27,7 +27,7 @@ def no_retry_fn(): class TestNoRetries: def test_no_retry(self): fn_to_retry = functools.partial(no_retry_fn) - result = _connection_exception_retry(fn_to_retry, 3) + result = connection_exception_retry(fn_to_retry, 3) expected = "success" @@ -44,7 +44,7 @@ def test_no_retry(self): fn_to_retry = functools.partial(no_success_fn) with pytest.raises(ConnectionError): - _connection_exception_retry(fn_to_retry, 3) + connection_exception_retry(fn_to_retry, 3) def single_retry_fn(): @@ -65,7 +65,7 @@ def test_no_retry(self): counter = 0 fn_to_retry = functools.partial(single_retry_fn) - result = _connection_exception_retry(fn_to_retry, 3) + result = connection_exception_retry(fn_to_retry, 3) expected = "success on 2" # We need to test the return value here, not just that it did not throw an error.