diff --git a/.changes/unreleased/Under the Hood-20230110-101919.yaml b/.changes/unreleased/Under the Hood-20230110-101919.yaml
new file mode 100644
index 000000000..2209fdb9f
--- /dev/null
+++ b/.changes/unreleased/Under the Hood-20230110-101919.yaml
@@ -0,0 +1,7 @@
+kind: Under the Hood
+body: Rename exceptions to match dbt-core.
+time: 2023-01-10T10:19:19.675879-06:00
+custom:
+  Author: emmyoop
+  Issue: "557"
+  PR: "585"
diff --git a/dbt/adapters/spark/connections.py b/dbt/adapters/spark/connections.py
index 66ca93d30..a606beb78 100644
--- a/dbt/adapters/spark/connections.py
+++ b/dbt/adapters/spark/connections.py
@@ -90,7 +90,7 @@ def cluster_id(self):
     def __post_init__(self):
         # spark classifies database and schema as the same thing
         if self.database is not None and self.database != self.schema:
-            raise dbt.exceptions.RuntimeException(
+            raise dbt.exceptions.DbtRuntimeError(
                 f"    schema: {self.schema} \n"
                 f"    database: {self.database} \n"
                 f"On Spark, database must be omitted or have the same value as"
@@ -102,7 +102,7 @@ def __post_init__(self):
             try:
                 import pyodbc  # noqa: F401
             except ImportError as e:
-                raise dbt.exceptions.RuntimeException(
+                raise dbt.exceptions.DbtRuntimeError(
                     f"{self.method} connection method requires "
                     "additional dependencies. \n"
                     "Install the additional required dependencies with "
@@ -111,7 +111,7 @@
         if self.method == SparkConnectionMethod.ODBC and self.cluster and self.endpoint:
-            raise dbt.exceptions.RuntimeException(
+            raise dbt.exceptions.DbtRuntimeError(
                 "`cluster` and `endpoint` cannot both be set when"
                 f" using {self.method} method to connect to Spark"
             )
@@ -120,7 +120,7 @@
             self.method == SparkConnectionMethod.HTTP
             or self.method == SparkConnectionMethod.THRIFT
         ) and not (ThriftState and THttpClient and hive):
-            raise dbt.exceptions.RuntimeException(
+            raise dbt.exceptions.DbtRuntimeError(
                 f"{self.method} connection method requires "
                 "additional dependencies. \n"
                 "Install the additional required dependencies with "
@@ -131,7 +131,7 @@ def __post_init__(self):
             try:
                 import pyspark  # noqa: F401
             except ImportError as e:
-                raise dbt.exceptions.RuntimeException(
+                raise dbt.exceptions.DbtRuntimeError(
                     f"{self.method} connection method requires "
                     "additional dependencies. \n"
                     "Install the additional required dependencies with "
@@ -233,12 +233,13 @@ def execute(self, sql, bindings=None):
         if poll_state.errorMessage:
             logger.debug("Poll response: {}".format(poll_state))
             logger.debug("Poll status: {}".format(state))
-            dbt.exceptions.raise_database_error(poll_state.errorMessage)
+            raise dbt.exceptions.DbtDatabaseError(poll_state.errorMessage)
 
         elif state not in STATE_SUCCESS:
             status_type = ThriftState._VALUES_TO_NAMES.get(state, "Unknown<{!r}>".format(state))
-
-            dbt.exceptions.raise_database_error("Query failed with status: {}".format(status_type))
+            raise dbt.exceptions.DbtDatabaseError(
+                "Query failed with status: {}".format(status_type)
+            )
 
         logger.debug("Poll status: {}, query complete".format(state))
 
@@ -293,9 +294,9 @@ def exception_handler(self, sql):
             thrift_resp = exc.args[0]
             if hasattr(thrift_resp, "status"):
                 msg = thrift_resp.status.errorMessage
-                raise dbt.exceptions.RuntimeException(msg)
+                raise dbt.exceptions.DbtRuntimeError(msg)
             else:
-                raise dbt.exceptions.RuntimeException(str(exc))
+                raise dbt.exceptions.DbtRuntimeError(str(exc))
 
     def cancel(self, connection):
         connection.handle.cancel()
@@ -462,7 +463,7 @@ def open(cls, connection):
                     msg = "Failed to connect"
                     if creds.token is not None:
                         msg += ", is your token valid?"
-                    raise dbt.exceptions.FailedToConnectException(msg) from e
+                    raise dbt.exceptions.FailedToConnectError(msg) from e
                 retryable_message = _is_retryable_error(e)
                 if retryable_message and creds.connect_retries > 0:
                     msg = (
@@ -483,7 +484,7 @@
                     logger.warning(msg)
                     time.sleep(creds.connect_timeout)
                 else:
-                    raise dbt.exceptions.FailedToConnectException("failed to connect") from e
+                    raise dbt.exceptions.FailedToConnectError("failed to connect") from e
 
         else:
             raise exc
diff --git a/dbt/adapters/spark/impl.py b/dbt/adapters/spark/impl.py
index f02025c7c..1abdfa62b 100644
--- a/dbt/adapters/spark/impl.py
+++ b/dbt/adapters/spark/impl.py
@@ -124,7 +124,7 @@ def add_schema_to_cache(self, schema) -> str:
         """Cache a new schema in dbt. It will show up in `list relations`."""
         if schema is None:
             name = self.nice_connection_name()
-            dbt.exceptions.raise_compiler_error(
+            raise dbt.exceptions.CompilationError(
                 "Attempted to cache a null schema for {}".format(name)
             )
         if dbt.flags.USE_CACHE:  # type: ignore
@@ -138,7 +138,7 @@ def list_relations_without_caching(
         kwargs = {"schema_relation": schema_relation}
         try:
             results = self.execute_macro(LIST_RELATIONS_MACRO_NAME, kwargs=kwargs)
-        except dbt.exceptions.RuntimeException as e:
+        except dbt.exceptions.DbtRuntimeError as e:
             errmsg = getattr(e, "msg", "")
             if f"Database '{schema_relation}' not found" in errmsg:
                 return []
@@ -150,7 +150,7 @@ def list_relations_without_caching(
         relations = []
         for row in results:
             if len(row) != 4:
-                raise dbt.exceptions.RuntimeException(
+                raise dbt.exceptions.DbtRuntimeError(
                     f'Invalid value from "show table extended ...", '
                     f"got {len(row)} values, expected 4"
                 )
@@ -222,7 +222,7 @@ def get_columns_in_relation(self, relation: Relation) -> List[SparkColumn]:
                 GET_COLUMNS_IN_RELATION_RAW_MACRO_NAME, kwargs={"relation": relation}
             )
             columns = self.parse_describe_extended(relation, rows)
-        except dbt.exceptions.RuntimeException as e:
+        except dbt.exceptions.DbtRuntimeError as e:
             # spark would throw error when table doesn't exist, where other
             # CDW would just return and empty list, normalizing the behavior here
             errmsg = getattr(e, "msg", "")
@@ -280,7 +280,7 @@ def get_properties(self, relation: Relation) -> Dict[str, str]:
     def get_catalog(self, manifest):
         schema_map = self._get_catalog_schemas(manifest)
         if len(schema_map) > 1:
-            dbt.exceptions.raise_compiler_error(
+            raise dbt.exceptions.CompilationError(
                 f"Expected only one database in get_catalog, found "
                 f"{list(schema_map)}"
             )
@@ -308,7 +308,7 @@ def _get_one_catalog(
         manifest,
     ) -> agate.Table:
         if len(schemas) != 1:
-            dbt.exceptions.raise_compiler_error(
+            raise dbt.exceptions.CompilationError(
                 f"Expected only one schema in spark _get_one_catalog, found "
                 f"{schemas}"
             )
diff --git a/dbt/adapters/spark/python_submissions.py b/dbt/adapters/spark/python_submissions.py
index 1e81c572a..47529e079 100644
--- a/dbt/adapters/spark/python_submissions.py
+++ b/dbt/adapters/spark/python_submissions.py
@@ -53,7 +53,7 @@ def _create_work_dir(self, path: str) -> None:
             },
         )
         if response.status_code != 200:
-            raise dbt.exceptions.RuntimeException(
+            raise dbt.exceptions.DbtRuntimeError(
                 f"Error creating work_dir for python notebooks\n {response.content!r}"
             )
 
@@ -71,7 +71,7 @@ def _upload_notebook(self, path: str, compiled_code: str) -> None:
             },
         )
         if response.status_code != 200:
-            raise dbt.exceptions.RuntimeException(
+            raise dbt.exceptions.DbtRuntimeError(
                 f"Error creating python notebook.\n {response.content!r}"
             )
 
@@ -99,7 +99,7 @@ def _submit_job(self, path: str, cluster_spec: dict) -> str:
             json=job_spec,
         )
         if submit_response.status_code != 200:
-            raise dbt.exceptions.RuntimeException(
+            raise dbt.exceptions.DbtRuntimeError(
                 f"Error creating python run.\n {submit_response.content!r}"
             )
         return submit_response.json()["run_id"]
@@ -135,7 +135,7 @@ def _submit_through_notebook(self, compiled_code: str, cluster_spec: dict) -> No
         json_run_output = run_output.json()
         result_state = json_run_output["metadata"]["state"]["result_state"]
         if result_state != "SUCCESS":
-            raise dbt.exceptions.RuntimeException(
+            raise dbt.exceptions.DbtRuntimeError(
                 "Python model failed with traceback as:\n"
                 "(Note that the line number here does not "
                 "match the line number in your code due to dbt templating)\n"
@@ -169,9 +169,9 @@ def polling(
             response = status_func(**status_func_kwargs)
             state = get_state_func(response)
         if exceeded_timeout:
-            raise dbt.exceptions.RuntimeException("python model run timed out")
+            raise dbt.exceptions.DbtRuntimeError("python model run timed out")
         if state != expected_end_state:
-            raise dbt.exceptions.RuntimeException(
+            raise dbt.exceptions.DbtRuntimeError(
                 "python model run ended in state"
                 f"{state} with state_message\n{get_state_msg_func(response)}"
             )
@@ -205,7 +205,7 @@ def create(self) -> str:
             },
         )
         if response.status_code != 200:
-            raise dbt.exceptions.RuntimeException(
+            raise dbt.exceptions.DbtRuntimeError(
                 f"Error creating an execution context.\n {response.content!r}"
             )
         return response.json()["id"]
@@ -221,7 +221,7 @@ def destroy(self, context_id: str) -> str:
             },
         )
         if response.status_code != 200:
-            raise dbt.exceptions.RuntimeException(
+            raise dbt.exceptions.DbtRuntimeError(
                 f"Error deleting an execution context.\n {response.content!r}"
             )
         return response.json()["id"]
@@ -246,7 +246,7 @@ def execute(self, context_id: str, command: str) -> str:
             },
         )
         if response.status_code != 200:
-            raise dbt.exceptions.RuntimeException(
+            raise dbt.exceptions.DbtRuntimeError(
                 f"Error creating a command.\n {response.content!r}"
             )
         return response.json()["id"]
@@ -263,7 +263,7 @@ def status(self, context_id: str, command_id: str) -> Dict[str, Any]:
             },
         )
         if response.status_code != 200:
-            raise dbt.exceptions.RuntimeException(
+            raise dbt.exceptions.DbtRuntimeError(
                 f"Error getting status of command.\n {response.content!r}"
             )
         return response.json()
@@ -298,7 +298,7 @@ def submit(self, compiled_code: str) -> None:
             get_state_msg_func=lambda response: response.json()["results"]["data"],
         )
         if response["results"]["resultType"] == "error":
-            raise dbt.exceptions.RuntimeException(
+            raise dbt.exceptions.DbtRuntimeError(
                 f"Python model failed with traceback as:\n"
                 f"{response['results']['cause']}"
             )
diff --git a/dbt/adapters/spark/relation.py b/dbt/adapters/spark/relation.py
index d3abb0d3a..0b0c58bc1 100644
--- a/dbt/adapters/spark/relation.py
+++ b/dbt/adapters/spark/relation.py
@@ -3,7 +3,7 @@
 from dataclasses import dataclass, field
 
 from dbt.adapters.base.relation import BaseRelation, Policy
-from dbt.exceptions import RuntimeException
+from dbt.exceptions import DbtRuntimeError
 
 
 @dataclass
@@ -31,11 +31,11 @@ class SparkRelation(BaseRelation):
 
     def __post_init__(self):
         if self.database != self.schema and self.database:
-            raise RuntimeException("Cannot set database in spark!")
+            raise DbtRuntimeError("Cannot set database in spark!")
 
     def render(self):
         if self.include_policy.database and self.include_policy.schema:
-            raise RuntimeException(
+            raise DbtRuntimeError(
                 "Got a spark relation with schema and database set to "
                 "include, but only one can be set"
             )
diff --git a/tests/integration/incremental_on_schema_change/test_incremental_on_schema_change.py b/tests/integration/incremental_on_schema_change/test_incremental_on_schema_change.py
index 2d967a4dc..4d1cd374e 100644
--- a/tests/integration/incremental_on_schema_change/test_incremental_on_schema_change.py
+++ b/tests/integration/incremental_on_schema_change/test_incremental_on_schema_change.py
@@ -1,6 +1,5 @@
 from cProfile import run
 from tests.integration.base import DBTIntegrationTest, use_profile
-import dbt.exceptions
 
 
 class TestIncrementalOnSchemaChange(DBTIntegrationTest):
diff --git a/tests/integration/incremental_strategies/test_incremental_strategies.py b/tests/integration/incremental_strategies/test_incremental_strategies.py
index 73bb6ba2b..cea8225e7 100644
--- a/tests/integration/incremental_strategies/test_incremental_strategies.py
+++ b/tests/integration/incremental_strategies/test_incremental_strategies.py
@@ -1,6 +1,5 @@
 from cProfile import run
 from tests.integration.base import DBTIntegrationTest, use_profile
-import dbt.exceptions
 
 
 class TestIncrementalStrategies(DBTIntegrationTest):
diff --git a/tests/integration/persist_docs/test_persist_docs.py b/tests/integration/persist_docs/test_persist_docs.py
index bc93f491b..527f548a3 100644
--- a/tests/integration/persist_docs/test_persist_docs.py
+++ b/tests/integration/persist_docs/test_persist_docs.py
@@ -1,8 +1,5 @@
 from cProfile import run
 from tests.integration.base import DBTIntegrationTest, use_profile
-import dbt.exceptions
-
-import json
 
 
 class TestPersistDocsDelta(DBTIntegrationTest):
diff --git a/tests/integration/seed_column_types/test_seed_column_types.py b/tests/integration/seed_column_types/test_seed_column_types.py
index 326c9f523..dc997d110 100644
--- a/tests/integration/seed_column_types/test_seed_column_types.py
+++ b/tests/integration/seed_column_types/test_seed_column_types.py
@@ -1,6 +1,5 @@
 from cProfile import run
 from tests.integration.base import DBTIntegrationTest, use_profile
-import dbt.exceptions
 
 
 class TestSeedColumnTypeCast(DBTIntegrationTest):
diff --git a/tests/unit/test_adapter.py b/tests/unit/test_adapter.py
index 53b95f731..d24bc8a2f 100644
--- a/tests/unit/test_adapter.py
+++ b/tests/unit/test_adapter.py
@@ -2,7 +2,7 @@
 from unittest import mock
 
 import dbt.flags as flags
-from dbt.exceptions import RuntimeException
+from dbt.exceptions import DbtRuntimeError
 from agate import Row
 from pyhive import hive
 from dbt.adapters.spark import SparkAdapter, SparkRelation
@@ -453,7 +453,7 @@ def test_relation_with_database(self):
         adapter = SparkAdapter(config)
         # fine
         adapter.Relation.create(schema='different', identifier='table')
-        with self.assertRaises(RuntimeException):
+        with self.assertRaises(DbtRuntimeError):
             # not fine - database set
             adapter.Relation.create(
                 database='something', schema='different', identifier='table')
@@ -476,7 +476,7 @@ def test_profile_with_database(self):
             },
             'target': 'test'
         }
-        with self.assertRaises(RuntimeException):
+        with self.assertRaises(DbtRuntimeError):
             config_from_parts_or_dicts(self.project_cfg, profile)
 
     def test_profile_with_cluster_and_sql_endpoint(self):
@@ -496,7 +496,7 @@ def test_profile_with_cluster_and_sql_endpoint(self):
             },
             'target': 'test'
         }
-        with self.assertRaises(RuntimeException):
+        with self.assertRaises(DbtRuntimeError):
             config_from_parts_or_dicts(self.project_cfg, profile)
 
     def test_parse_columns_from_information_with_table_type_and_delta_provider(self):
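
Note for code outside this diff: the renames map one-to-one onto the new dbt-core exception names used above (RuntimeException -> DbtRuntimeError, raise_database_error -> raise DbtDatabaseError, raise_compiler_error -> raise CompilationError, FailedToConnectException -> FailedToConnectError). A minimal sketch of the raise/catch pattern after the rename follows; validate_spark_namespace and its caller are hypothetical, shown only to illustrate the new class names:

import dbt.exceptions


def validate_spark_namespace(database, schema):
    # Hypothetical helper mirroring the check in SparkCredentials.__post_init__:
    # Spark treats database and schema as the same namespace.
    if database is not None and database != schema:
        # formerly dbt.exceptions.RuntimeException
        raise dbt.exceptions.DbtRuntimeError(
            f"    schema: {schema} \n"
            f"    database: {database} \n"
            "On Spark, database must be omitted or have the same value as schema."
        )


try:
    validate_spark_namespace(database="analytics", schema="reporting")
except dbt.exceptions.DbtRuntimeError as exc:  # formerly caught as RuntimeException
    print(f"invalid target configuration: {exc}")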