diff --git a/.zuul.yaml b/.zuul.yaml index 84bb45c2f858c4e54d39cadce50ede7e7390c34e..4066fb485dcffe5fa00d43fb7bf0120e30f09d3c 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -2,7 +2,7 @@ templates: - check-requirements - lib-forward-testing-python3 - - openstack-python3-xena-jobs + - openstack-python3-yoga-jobs - periodic-stable-jobs - publish-openstack-docs-pti - release-notes-jobs-python3 diff --git a/oslo_db/sqlalchemy/models.py b/oslo_db/sqlalchemy/models.py index 2bad0f570fba0107e5659c9f1453cbec4716ac41..72ac7cba9845bd80e08bae62880e85979a255506 100644 --- a/oslo_db/sqlalchemy/models.py +++ b/oslo_db/sqlalchemy/models.py @@ -34,18 +34,8 @@ class ModelBase(object): def save(self, session): """Save this object.""" - - # NOTE(boris-42): This part of code should be look like: - # session.add(self) - # session.flush() - # But there is a bug in sqlalchemy and eventlet that - # raises NoneType exception if there is no running - # transaction and rollback is called. As long as - # sqlalchemy has this bug we have to create transaction - # explicitly. 
- with session.begin(subtransactions=True): - session.add(self) - session.flush() + session.add(self) + session.flush() def __setitem__(self, key, value): setattr(self, key, value) diff --git a/oslo_db/sqlalchemy/provision.py b/oslo_db/sqlalchemy/provision.py index f8918623856ea3564484c9b3fd68aa54ce72d6ed..21eb90a603dc1a8321f6119b5c1123eefef340b5 100644 --- a/oslo_db/sqlalchemy/provision.py +++ b/oslo_db/sqlalchemy/provision.py @@ -518,12 +518,14 @@ class MySQLBackendImpl(BackendImpl): def create_named_database(self, engine, ident, conditional=False): with engine.connect() as conn: if not conditional or not self.database_exists(conn, ident): - conn.exec_driver_sql("CREATE DATABASE %s" % ident) + with conn.begin(): + conn.exec_driver_sql("CREATE DATABASE %s" % ident) def drop_named_database(self, engine, ident, conditional=False): with engine.connect() as conn: if not conditional or self.database_exists(conn, ident): - conn.exec_driver_sql("DROP DATABASE %s" % ident) + with conn.begin(): + conn.exec_driver_sql("DROP DATABASE %s" % ident) def database_exists(self, engine, ident): s = sql.text("SHOW DATABASES LIKE :ident") @@ -577,23 +579,26 @@ class SQLiteBackendImpl(BackendImpl): @BackendImpl.impl.dispatch_for("postgresql") class PostgresqlBackendImpl(BackendImpl): def create_opportunistic_driver_url(self): - return "postgresql://openstack_citest:openstack_citest"\ - "@localhost/postgres" + return "postgresql+psycopg2://openstack_citest:openstack_citest@localhost/postgres" # noqa: E501 def create_named_database(self, engine, ident, conditional=False): with engine.connect().execution_options( - isolation_level="AUTOCOMMIT") as conn: + isolation_level="AUTOCOMMIT", + ) as conn: if not conditional or not self.database_exists(conn, ident): - conn.exec_driver_sql("CREATE DATABASE %s" % ident) + with conn.begin(): + conn.exec_driver_sql("CREATE DATABASE %s" % ident) def drop_named_database(self, engine, ident, conditional=False): with engine.connect().execution_options( - 
isolation_level="AUTOCOMMIT") as conn: + isolation_level="AUTOCOMMIT", + ) as conn: self._close_out_database_users(conn, ident) - if conditional: - conn.exec_driver_sql("DROP DATABASE IF EXISTS %s" % ident) - else: - conn.exec_driver_sql("DROP DATABASE %s" % ident) + with conn.begin(): + if conditional: + conn.exec_driver_sql("DROP DATABASE IF EXISTS %s" % ident) + else: + conn.exec_driver_sql("DROP DATABASE %s" % ident) def drop_additional_objects(self, conn): enums = [e['name'] for e in sqlalchemy.inspect(conn).get_enums()] @@ -605,9 +610,11 @@ class PostgresqlBackendImpl(BackendImpl): return bool( engine.scalar( sqlalchemy.text( - "SELECT datname FROM pg_database " - "WHERE datname=:name"), name=ident) + "SELECT datname FROM pg_database WHERE datname=:name" + ), + {'name': ident}, ) + ) def _close_out_database_users(self, conn, ident): """Attempt to guarantee a database can be dropped. @@ -631,7 +638,9 @@ class PostgresqlBackendImpl(BackendImpl): "WHERE usename=current_user AND " "pid != pg_backend_pid() AND " "datname=:dname" - ), dname=ident) + ), + {'dname': ident}, + ) def _random_ident(): diff --git a/oslo_db/sqlalchemy/test_fixtures.py b/oslo_db/sqlalchemy/test_fixtures.py index 6b82f05a908d0834bf1dea5f59624e5c5c0fa71d..8b69d3f21bb92ca07ec0937de01eeba90d21cb40 100644 --- a/oslo_db/sqlalchemy/test_fixtures.py +++ b/oslo_db/sqlalchemy/test_fixtures.py @@ -254,22 +254,6 @@ class DeletesFromSchema(ResetsData): """ -class RollsBackTransaction(ResetsData): - """Fixture class that maintains a database transaction per test. - - """ - - def setup_for_reset(self, engine, facade): - conn = engine.connect() - engine = utils.NonCommittingEngine(conn) - self._reset_engine = enginefacade._TestTransactionFactory.apply_engine( - engine, facade) - - def reset_schema_data(self, engine, facade): - self._reset_engine() - engine._dispose() - - class SimpleDbFixture(BaseDbFixture): """Fixture which provides an engine from a fixed URL. 
diff --git a/oslo_db/sqlalchemy/utils.py b/oslo_db/sqlalchemy/utils.py index 99da62b0da5e5c48ac37c1dc0bee8958054430ae..3a6a993e1f04a89d72f3c186d063bb07669cdda5 100644 --- a/oslo_db/sqlalchemy/utils.py +++ b/oslo_db/sqlalchemy/utils.py @@ -490,32 +490,34 @@ def drop_old_duplicate_entries_from_table(engine, table_name, func.count(table.c.id) > 1 ) - for row in engine.execute(duplicated_rows_select).fetchall(): - # NOTE(boris-42): Do not remove row that has the biggest ID. - delete_condition = table.c.id != row[0] - is_none = None # workaround for pyflakes - delete_condition &= table.c.deleted_at == is_none - for name in uc_column_names: - delete_condition &= table.c[name] == row[name] - - rows_to_delete_select = sqlalchemy.sql.select( - table.c.id, - ).where(delete_condition) - for row in engine.execute(rows_to_delete_select).fetchall(): - LOG.info("Deleting duplicated row with id: %(id)s from table: " - "%(table)s", dict(id=row[0], table=table_name)) - - if use_soft_delete: - delete_statement = table.update().\ - where(delete_condition).\ - values({ - 'deleted': literal_column('id'), - 'updated_at': literal_column('updated_at'), - 'deleted_at': timeutils.utcnow() - }) - else: - delete_statement = table.delete().where(delete_condition) - engine.execute(delete_statement) + with engine.connect() as conn, conn.begin(): + for row in conn.execute(duplicated_rows_select).fetchall(): + # NOTE(boris-42): Do not remove row that has the biggest ID. 
+ delete_condition = table.c.id != row[0] + is_none = None # workaround for pyflakes + delete_condition &= table.c.deleted_at == is_none + for name in uc_column_names: + delete_condition &= table.c[name] == row._mapping[name] + + rows_to_delete_select = sqlalchemy.sql.select( + table.c.id, + ).where(delete_condition) + for row in conn.execute(rows_to_delete_select).fetchall(): + LOG.info( + "Deleting duplicated row with id: %(id)s from table: " + "%(table)s", dict(id=row[0], table=table_name)) + + if use_soft_delete: + delete_statement = table.update().\ + where(delete_condition).\ + values({ + 'deleted': literal_column('id'), + 'updated_at': literal_column('updated_at'), + 'deleted_at': timeutils.utcnow() + }) + else: + delete_statement = table.delete().where(delete_condition) + conn.execute(delete_statement) def _get_default_deleted_value(table): @@ -569,11 +571,12 @@ def change_deleted_column_type_to_boolean(engine, table_name, finally: table.metadata.bind = None - engine.execute( - table.update(). - where(table.c.deleted == table.c.id). 
- values(old_deleted=True) - ) + with engine.connect() as conn, conn.begin(): + conn.execute( + table.update().where( + table.c.deleted == table.c.id + ).values(old_deleted=True) + ) table.metadata.bind = engine try: @@ -607,39 +610,46 @@ def _change_deleted_column_type_to_boolean_sqlite(engine, table_name, # figure out how else to copy an arbitrary column schema constraints = [constraint._copy() for constraint in table.constraints] - meta = table.metadata - new_table = Table(table_name + "__tmp__", meta, - *(columns + constraints)) - new_table.create(engine) + with engine.connect() as conn: + meta = table.metadata + new_table = Table( + table_name + "__tmp__", meta, + *(columns + constraints)) - indexes = [] - for index in get_indexes(engine, table_name): - column_names = [new_table.c[c] for c in index['column_names']] - indexes.append(Index(index["name"], *column_names, - unique=index["unique"])) - - c_select = [] - for c in table.c: - if c.name != "deleted": - c_select.append(c) - else: - c_select.append(table.c.deleted == table.c.id) + with conn.begin(): + new_table.create(conn) - table.drop(engine) - for index in indexes: - index.create(engine) + indexes = [] + for index in get_indexes(engine, table_name): + column_names = [new_table.c[c] for c in index['column_names']] + indexes.append( + Index(index["name"], *column_names, unique=index["unique"]) + ) + + c_select = [] + for c in table.c: + if c.name != "deleted": + c_select.append(c) + else: + c_select.append(table.c.deleted == table.c.id) - table.metadata.bind = engine - try: - new_table.rename(table_name) - finally: - table.metadata.bind = None + with conn.begin(): + table.drop(conn) + for index in indexes: + index.create(conn) - engine.execute( - new_table.update(). - where(new_table.c.deleted == new_table.c.id). 
- values(deleted=True) - ) + table.metadata.bind = engine + try: + new_table.rename(table_name) + finally: + table.metadata.bind = None + + with conn.begin(): + conn.execute( + new_table.update().where( + new_table.c.deleted == new_table.c.id + ).values(deleted=True) + ) @debtcollector.removals.remove( @@ -664,21 +674,23 @@ def change_deleted_column_type_to_id_type(engine, table_name, finally: table.metadata.bind = None - deleted = True # workaround for pyflakes - engine.execute( - table.update(). - where(table.c.deleted == deleted). - values(new_deleted=table.c.id) - ) table.metadata.bind = engine try: + with engine.connect() as conn, conn.begin(): + deleted = True # workaround for pyflakes + conn.execute( + table.update().where( + table.c.deleted == deleted + ).values(new_deleted=table.c.id) + ) + table.c.deleted.drop() table.c.new_deleted.alter(name="deleted") + + _restore_indexes_on_deleted_columns(engine, table_name, indexes) finally: table.metadata.bind = None - _restore_indexes_on_deleted_columns(engine, table_name, indexes) - def _is_deleted_column_constraint(constraint): # NOTE(boris-42): There is no other way to check is CheckConstraint @@ -731,40 +743,48 @@ def _change_deleted_column_type_to_id_type_sqlite(engine, table_name, # figure out how else to copy an arbitrary constraint schema constraints.append(constraint._copy()) - new_table = Table(table_name + "__tmp__", meta, - *(columns + constraints)) - new_table.create(engine) - - indexes = [] - for index in get_indexes(engine, table_name): - column_names = [new_table.c[c] for c in index['column_names']] - indexes.append(Index(index["name"], *column_names, - unique=index["unique"])) - - table.drop(engine) - for index in indexes: - index.create(engine) - - new_table.metadata.bind = engine - try: - new_table.rename(table_name) - finally: - new_table.metadata.bind = None + with engine.connect() as conn: + # we need separate transactions, since we must create the table before + # we can copy entries into it 
(later) + with conn.begin(): + new_table = Table( + table_name + "__tmp__", meta, + *(columns + constraints)) + new_table.create(conn) - deleted = True # workaround for pyflakes - engine.execute( - new_table.update(). - where(new_table.c.deleted == deleted). - values(deleted=new_table.c.id) - ) - - # NOTE(boris-42): Fix value of deleted column: False -> "" or 0. - deleted = False # workaround for pyflakes - engine.execute( - new_table.update(). - where(new_table.c.deleted == deleted). - values(deleted=default_deleted_value) - ) + indexes = [] + for index in get_indexes(engine, table_name): + column_names = [new_table.c[c] for c in index['column_names']] + indexes.append( + Index(index["name"], *column_names, unique=index["unique"]) + ) + + with conn.begin(): + table.drop(conn) + for index in indexes: + index.create(conn) + + with conn.begin(): + new_table.metadata.bind = engine + try: + new_table.rename(table_name) + finally: + new_table.metadata.bind = None + + deleted = True # workaround for pyflakes + conn.execute( + new_table.update().where( + new_table.c.deleted == deleted + ).values(deleted=new_table.c.id) + ) + + # NOTE(boris-42): Fix value of deleted column: False -> "" or 0. + deleted = False # workaround for pyflakes + conn.execute( + new_table.update().where( + new_table.c.deleted == deleted + ).values(deleted=default_deleted_value) + ) def get_db_connection_info(conn_pieces): @@ -1110,7 +1130,9 @@ def get_non_innodb_tables(connectable, skip_tables=('migrate_version', params['database'] = connectable.engine.url.database query = text(query_str) - noninnodb = connectable.execute(query, **params) + # TODO(stephenfin): What about if this is already a Connection? 
+ with connectable.connect() as conn, conn.begin(): + noninnodb = conn.execute(query, params) return [i[0] for i in noninnodb] @@ -1220,145 +1242,22 @@ def suspend_fk_constraints_for_col_alter( ctx = MigrationContext.configure(conn) op = Operations(ctx) - for fk in fks: - op.drop_constraint( - fk['name'], fk['source_table'], type_="foreignkey") - yield - for fk in fks: - op.create_foreign_key( - fk['name'], fk['source_table'], - fk['referred_table'], - fk['constrained_columns'], - fk['referred_columns'], - onupdate=fk['options'].get('onupdate'), - ondelete=fk['options'].get('ondelete'), - deferrable=fk['options'].get('deferrable'), - initially=fk['options'].get('initially'), - ) - - -class NonCommittingConnectable(object): - """A ``Connectable`` substitute which rolls all operations back. - - ``NonCommittingConnectable`` forms the basis of mock - ``Engine`` and ``Connection`` objects within a test. It provides - only that part of the API that should reasonably be used within - a single-connection test environment (e.g. no engine.dispose(), - connection.invalidate(), etc. ). The connection runs both within - a transaction as well as a savepoint. The transaction is there - so that any operations upon the connection can be rolled back. - If the test calls begin(), a "pseduo" transaction is returned that - won't actually commit anything. The subtransaction is there to allow - a test to successfully call rollback(), however, where all operations - to that point will be rolled back and the operations can continue, - simulating a real rollback while still remaining within a transaction - external to the test. 
+ with conn.begin(): + for fk in fks: + op.drop_constraint( + fk['name'], fk['source_table'], type_="foreignkey") - """ - - _nested_trans = None - - def __init__(self, connection): - self.connection = connection - self._trans = connection.begin() - self._restart_nested() - - def _restart_nested(self): - if self._nested_trans is not None: - self._nested_trans.rollback() - self._nested_trans = self.connection.begin_nested() - - def _dispose(self): - if not self.connection.closed: - self._nested_trans.rollback() - self._trans.rollback() - self.connection.close() - - def execute(self, obj, *multiparams, **params): - """Executes the given construct and returns a :class:`.ResultProxy`.""" - - return self.connection.execute(obj, *multiparams, **params) - - def scalar(self, obj, *multiparams, **params): - """Executes and returns the first column of the first row.""" - - return self.connection.scalar(obj, *multiparams, **params) - - -class NonCommittingEngine(NonCommittingConnectable): - """``Engine`` -specific non committing connectbale.""" - - @property - def url(self): - return self.connection.engine.url - - @property - def engine(self): - return self - - def connect(self): - return NonCommittingConnection(self.connection) - - @contextlib.contextmanager - def begin(self): - conn = self.connect() - trans = conn.begin() - try: - yield conn - except Exception: - trans.rollback() - else: - trans.commit() - - -class NonCommittingConnection(NonCommittingConnectable): - """``Connection`` -specific non committing connectbale.""" - - def close(self): - """Close the 'Connection'. - - In this context, close() is a no-op. - - """ - pass - - def begin(self): - return NonCommittingTransaction(self, self.connection.begin()) - - def __enter__(self): - return self - - def __exit__(self, *arg): - pass - - -class NonCommittingTransaction(object): - """A wrapper for ``Transaction``. 
- - This is to accommodate being able to guaranteed start a new - SAVEPOINT when a transaction is rolled back. - - """ - def __init__(self, provisioned, transaction): - self.provisioned = provisioned - self.transaction = transaction - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - if type is None: - try: - self.commit() - except Exception: - self.rollback() - raise - else: - self.rollback() - - def commit(self): - self.transaction.commit() + yield - def rollback(self): - self.transaction.rollback() - self.provisioned._restart_nested() + with conn.begin(): + for fk in fks: + op.create_foreign_key( + fk['name'], fk['source_table'], + fk['referred_table'], + fk['constrained_columns'], + fk['referred_columns'], + onupdate=fk['options'].get('onupdate'), + ondelete=fk['options'].get('ondelete'), + deferrable=fk['options'].get('deferrable'), + initially=fk['options'].get('initially'), + ) diff --git a/oslo_db/tests/fixtures.py b/oslo_db/tests/fixtures.py index a1b692955609e1e745dbbbb4357e81b83d87a028..46462782d43752742e093d67d97e316e7a14d1d4 100644 --- a/oslo_db/tests/fixtures.py +++ b/oslo_db/tests/fixtures.py @@ -21,6 +21,9 @@ class WarningsFixture(fixtures.Fixture): def setUp(self): super().setUp() + + self._original_warning_filters = warnings.filters[:] + # Make deprecation warnings only happen once to avoid spamming warnings.simplefilter('once', DeprecationWarning) @@ -44,68 +47,33 @@ class WarningsFixture(fixtures.Fixture): warnings.filterwarnings( 'once', - message=r'The Session.begin.subtransactions flag is deprecated .*', - category=sqla_exc.SADeprecationWarning) - - warnings.filterwarnings( - 'once', - message=r'Using non-integer/slice indices on Row is deprecated .*', - category=sqla_exc.SADeprecationWarning) - - warnings.filterwarnings( - 'once', - message=r'The Engine.execute\(\) method is considered legacy .*', - category=sqla_exc.SADeprecationWarning) - - warnings.filterwarnings( - 'once', - message=r'The 
Executable.execute\(\) method is considered .*', - category=sqla_exc.SADeprecationWarning) - - warnings.filterwarnings( - 'once', - message=r'The Row.keys\(\) method is considered legacy .*', + message=r'Calling \.begin\(\) when a transaction is already .*', category=sqla_exc.SADeprecationWarning) - warnings.filterwarnings( - 'once', - message=r'Retrieving row members using strings or other .*', - category=sqla_exc.SADeprecationWarning) + # ...plus things that aren't our fault - warnings.filterwarnings( - 'once', - message=r'The connection.execute\(\) method in SQLAlchemy 2.0 .*', - category=sqla_exc.SADeprecationWarning) + # FIXME(stephenfin): These are caused by sqlalchemy-migrate, not us, + # and should be removed when we drop support for that library warnings.filterwarnings( - 'once', - message=r'Calling the mapper\(\) function directly outside .*', + 'ignore', + message=r'Passing a string to Connection.execute\(\) .*', + module='migrate', category=sqla_exc.SADeprecationWarning) warnings.filterwarnings( 'once', message=r'The current statement is being autocommitted .*', + module='migrate', category=sqla_exc.SADeprecationWarning) - warnings.filterwarnings( - 'once', - message=r'Calling \.begin\(\) when a transaction is already .*', - category=sqla_exc.SADeprecationWarning) - - warnings.filterwarnings( - 'once', - message=r'The Engine.scalar\(\) method is considered legacy .*', - category=sqla_exc.SADeprecationWarning) - - # ...plus things that aren't our fault - - # FIXME(stephenfin): These are caused by sqlalchemy-migrate, not us, - # and should be removed when we drop support for that library - warnings.filterwarnings( 'ignore', - message=r'Passing a string to Connection.execute\(\) .*', + message=r'The Engine.execute\(\) method is considered legacy .*', module='migrate', category=sqla_exc.SADeprecationWarning) - self.addCleanup(warnings.resetwarnings) + self.addCleanup(self._reset_warning_filters) + + def _reset_warning_filters(self): + warnings.filters[:] = 
self._original_warning_filters diff --git a/oslo_db/tests/sqlalchemy/test_enginefacade.py b/oslo_db/tests/sqlalchemy/test_enginefacade.py index b24892b85bcd4c8d517890c6a7eb94083e81c48e..a188d01e56b254d7d0f641c6e258e9ce987a469b 100644 --- a/oslo_db/tests/sqlalchemy/test_enginefacade.py +++ b/oslo_db/tests/sqlalchemy/test_enginefacade.py @@ -24,7 +24,7 @@ from oslo_context import context as oslo_context from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData -from sqlalchemy.orm import mapper +from sqlalchemy.orm import registry from sqlalchemy.orm import Session from sqlalchemy import select from sqlalchemy import String @@ -1671,11 +1671,13 @@ class LiveFacadeTest(db_test_base._DbTestCase): metadata.create_all(self.engine) self.addCleanup(metadata.drop_all, self.engine) + reg = registry() + class User(object): def __init__(self, name): self.name = name - mapper(User, user_table) + reg.map_imperatively(User, user_table) self.User = User def _assert_ctx_connection(self, context, connection): diff --git a/oslo_db/tests/sqlalchemy/test_exc_filters.py b/oslo_db/tests/sqlalchemy/test_exc_filters.py index 9075f2adf8a8d3b961f465b9de8db816fa2ad5ba..53789f560079a747a8c0d025b4dcdc3319a9d03d 100644 --- a/oslo_db/tests/sqlalchemy/test_exc_filters.py +++ b/oslo_db/tests/sqlalchemy/test_exc_filters.py @@ -23,7 +23,7 @@ from sqlalchemy.engine import url as sqla_url from sqlalchemy import event import sqlalchemy.exc from sqlalchemy.orm import declarative_base -from sqlalchemy.orm import mapper +from sqlalchemy.orm import registry from sqlalchemy import sql from oslo_db import exception @@ -270,14 +270,16 @@ class TestNonExistentConstraintPostgreSQL( ): def test_raise(self): - matched = self.assertRaises( - exception.DBNonExistentConstraint, - self.engine.execute, - sqla.schema.DropConstraint( - sqla.ForeignKeyConstraint(["id"], ["baz.id"], - name="bar_fkey", - table=self.table_1)), - ) + with self.engine.connect() as conn: + matched = 
self.assertRaises( + exception.DBNonExistentConstraint, + conn.execute, + sqla.schema.DropConstraint( + sqla.ForeignKeyConstraint(["id"], ["baz.id"], + name="bar_fkey", + table=self.table_1)), + ) + self.assertInnerException( matched, "ProgrammingError", @@ -295,14 +297,16 @@ class TestNonExistentConstraintMySQL( ): def test_raise(self): - matched = self.assertRaises( - exception.DBNonExistentConstraint, - self.engine.execute, - sqla.schema.DropConstraint( - sqla.ForeignKeyConstraint(["id"], ["baz.id"], - name="bar_fkey", - table=self.table_1)), - ) + with self.engine.connect() as conn: + matched = self.assertRaises( + exception.DBNonExistentConstraint, + conn.execute, + sqla.schema.DropConstraint( + sqla.ForeignKeyConstraint(["id"], ["baz.id"], + name="bar_fkey", + table=self.table_1)), + ) + # NOTE(jd) Cannot check precisely with assertInnerException since MySQL # error are not the same depending on its version… self.assertIsInstance(matched.inner_exception, @@ -332,11 +336,13 @@ class TestNonExistentTable( ) def test_raise(self): - matched = self.assertRaises( - exception.DBNonExistentTable, - self.engine.execute, - sqla.schema.DropTable(self.table_1), - ) + with self.engine.connect() as conn: + matched = self.assertRaises( + exception.DBNonExistentTable, + conn.execute, + sqla.schema.DropTable(self.table_1), + ) + self.assertInnerException( matched, "OperationalError", @@ -352,11 +358,13 @@ class TestNonExistentTablePostgreSQL( ): def test_raise(self): - matched = self.assertRaises( - exception.DBNonExistentTable, - self.engine.execute, - sqla.schema.DropTable(self.table_1), - ) + with self.engine.connect() as conn: + matched = self.assertRaises( + exception.DBNonExistentTable, + conn.execute, + sqla.schema.DropTable(self.table_1), + ) + self.assertInnerException( matched, "ProgrammingError", @@ -372,11 +380,13 @@ class TestNonExistentTableMySQL( ): def test_raise(self): - matched = self.assertRaises( - exception.DBNonExistentTable, - self.engine.execute, - 
sqla.schema.DropTable(self.table_1), - ) + with self.engine.connect() as conn: + matched = self.assertRaises( + exception.DBNonExistentTable, + conn.execute, + sqla.schema.DropTable(self.table_1), + ) + # NOTE(jd) Cannot check precisely with assertInnerException since MySQL # error are not the same depending on its version… self.assertIsInstance(matched.inner_exception, @@ -488,13 +498,20 @@ class TestReferenceErrorSQLite( self.table_2.create(self.engine) def test_raise(self): - self.engine.execute(sql.text("PRAGMA foreign_keys = ON")) + connection = self.engine.raw_connection() + try: + cursor = connection.cursor() + cursor.execute('PRAGMA foreign_keys = ON') + cursor.close() + finally: + connection.close() - matched = self.assertRaises( - exception.DBReferenceError, - self.engine.execute, - self.table_2.insert().values(id=1, foo_id=2) - ) + with self.engine.connect() as conn: + matched = self.assertRaises( + exception.DBReferenceError, + conn.execute, + self.table_2.insert().values(id=1, foo_id=2) + ) self.assertInnerException( matched, @@ -510,16 +527,25 @@ class TestReferenceErrorSQLite( self.assertIsNone(matched.key_table) def test_raise_delete(self): - self.engine.execute(sql.text("PRAGMA foreign_keys = ON")) + connection = self.engine.raw_connection() + try: + cursor = connection.cursor() + cursor.execute('PRAGMA foreign_keys = ON') + cursor.close() + finally: + connection.close() with self.engine.connect() as conn: - conn.execute(self.table_1.insert().values(id=1234, foo=42)) - conn.execute(self.table_2.insert().values(id=4321, foo_id=1234)) - matched = self.assertRaises( - exception.DBReferenceError, - self.engine.execute, - self.table_1.delete() - ) + with conn.begin(): + conn.execute(self.table_1.insert().values(id=1234, foo=42)) + conn.execute( + self.table_2.insert().values(id=4321, foo_id=1234)) + matched = self.assertRaises( + exception.DBReferenceError, + conn.execute, + self.table_1.delete() + ) + self.assertInnerException( matched, 
"IntegrityError", @@ -539,12 +565,14 @@ class TestReferenceErrorPostgreSQL( db_test_base._PostgreSQLOpportunisticTestCase, ): def test_raise(self): - params = {'id': 1, 'foo_id': 2} - matched = self.assertRaises( - exception.DBReferenceError, - self.engine.execute, - self.table_2.insert().values(**params) - ) + with self.engine.connect() as conn: + params = {'id': 1, 'foo_id': 2} + matched = self.assertRaises( + exception.DBReferenceError, + conn.execute, + self.table_2.insert().values(**params) + ) + self.assertInnerException( matched, "IntegrityError", @@ -563,13 +591,18 @@ class TestReferenceErrorPostgreSQL( def test_raise_delete(self): with self.engine.connect() as conn: - conn.execute(self.table_1.insert().values(id=1234, foo=42)) - conn.execute(self.table_2.insert().values(id=4321, foo_id=1234)) - matched = self.assertRaises( - exception.DBReferenceError, - self.engine.execute, - self.table_1.delete() - ) + with conn.begin(): + conn.execute(self.table_1.insert().values(id=1234, foo=42)) + conn.execute( + self.table_2.insert().values(id=4321, foo_id=1234)) + + with conn.begin(): + matched = self.assertRaises( + exception.DBReferenceError, + conn.execute, + self.table_1.delete() + ) + self.assertInnerException( matched, "IntegrityError", @@ -592,11 +625,12 @@ class TestReferenceErrorMySQL( db_test_base._MySQLOpportunisticTestCase, ): def test_raise(self): - matched = self.assertRaises( - exception.DBReferenceError, - self.engine.execute, - self.table_2.insert().values(id=1, foo_id=2) - ) + with self.engine.connect() as conn: + matched = self.assertRaises( + exception.DBReferenceError, + conn.execute, + self.table_2.insert().values(id=1, foo_id=2) + ) # NOTE(jd) Cannot check precisely with assertInnerException since MySQL # error are not the same depending on its version… @@ -632,14 +666,14 @@ class TestReferenceErrorMySQL( self.assertEqual("resource_foo", matched.key_table) def test_raise_delete(self): - with self.engine.connect() as conn: + with 
self.engine.connect() as conn, conn.begin(): conn.execute(self.table_1.insert().values(id=1234, foo=42)) conn.execute(self.table_2.insert().values(id=4321, foo_id=1234)) - matched = self.assertRaises( - exception.DBReferenceError, - self.engine.execute, - self.table_1.delete() - ) + matched = self.assertRaises( + exception.DBReferenceError, + conn.execute, + self.table_1.delete() + ) # NOTE(jd) Cannot check precisely with assertInnerException since MySQL # error are not the same depending on its version… self.assertIsInstance(matched.inner_exception, @@ -1046,10 +1080,13 @@ class IntegrationTest(db_test_base._DbTestCase): self.test_table.create(self.engine) self.addCleanup(self.test_table.drop, self.engine) + reg = registry() + class Foo(object): def __init__(self, counter): self.counter = counter - mapper(Foo, self.test_table) + + reg.map_imperatively(Foo, self.test_table) self.Foo = Foo def test_flush_wrapper_duplicate_entry(self): @@ -1162,11 +1199,14 @@ class TestDBDisconnected(TestsExceptionFilter): yield def _test_ping_listener_disconnected( - self, dialect_name, exc_obj, is_disconnect=True): + self, dialect_name, exc_obj, is_disconnect=True, + ): with self._fixture(dialect_name, exc_obj, 1, is_disconnect): conn = self.engine.connect() with conn.begin(): - self.assertEqual(1, conn.scalar(sqla.select(1))) + self.assertEqual( + 1, conn.execute(sqla.select(1)).scalars().first(), + ) self.assertFalse(conn.closed) self.assertFalse(conn.invalidated) self.assertTrue(conn.in_transaction()) @@ -1179,7 +1219,10 @@ class TestDBDisconnected(TestsExceptionFilter): # test implicit execution with self._fixture(dialect_name, exc_obj, 1): - self.assertEqual(1, self.engine.scalar(sqla.select(1))) + with self.engine.connect() as conn: + self.assertEqual( + 1, conn.execute(sqla.select(1)).scalars().first(), + ) def test_mariadb_error_1927(self): for code in [1927]: diff --git a/oslo_db/tests/sqlalchemy/test_provision.py b/oslo_db/tests/sqlalchemy/test_provision.py index 
f0ef94463bc0d5a2f4be7c541c19fbc42c5eca1f..a6cedceb3cb9e7c292f4c7fd062bf718ab255e0b 100644 --- a/oslo_db/tests/sqlalchemy/test_provision.py +++ b/oslo_db/tests/sqlalchemy/test_provision.py @@ -22,7 +22,6 @@ from oslo_db import exception from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import provision from oslo_db.sqlalchemy import test_fixtures -from oslo_db.sqlalchemy import utils from oslo_db.tests import base as test_base from oslo_db.tests.sqlalchemy import base as db_test_base @@ -149,81 +148,6 @@ class PostgreSQLDropAllObjectsTest( pass -class RetainSchemaTest(test_base.BaseTestCase): - DRIVER = "sqlite" - - def setUp(self): - super(RetainSchemaTest, self).setUp() - - metadata = schema.MetaData() - self.test_table = schema.Table( - 'test_table', metadata, - schema.Column('x', types.Integer), - schema.Column('y', types.Integer), - mysql_engine='InnoDB' - ) - - def gen_schema(engine): - metadata.create_all(engine, checkfirst=False) - self._gen_schema = gen_schema - - def test_once(self): - self._run_test() - - def test_twice(self): - self._run_test() - - def _run_test(self): - try: - database_resource = provision.DatabaseResource( - self.DRIVER, provision_new_database=True) - except exception.BackendNotAvailable: - self.skipTest("database not available") - - schema_resource = provision.SchemaResource( - database_resource, self._gen_schema) - - schema = schema_resource.getResource() - - conn = schema.database.engine.connect() - engine = utils.NonCommittingEngine(conn) - - with engine.connect() as conn: - rows = conn.execute(self.test_table.select()) - self.assertEqual([], rows.fetchall()) - - trans = conn.begin() - conn.execute( - self.test_table.insert(), - {"x": 1, "y": 2} - ) - trans.rollback() - - rows = conn.execute(self.test_table.select()) - self.assertEqual([], rows.fetchall()) - - trans = conn.begin() - conn.execute( - self.test_table.insert(), - {"x": 2, "y": 3} - ) - trans.commit() - - rows = conn.execute(self.test_table.select()) - 
self.assertEqual([(2, 3)], rows.fetchall()) - - engine._dispose() - schema_resource.finishedWith(schema) - - -class MySQLRetainSchemaTest(RetainSchemaTest): - DRIVER = "mysql" - - -class PostgresqlRetainSchemaTest(RetainSchemaTest): - DRIVER = "postgresql" - - class AdHocURLTest(test_base.BaseTestCase): def test_sqlite_setup_teardown(self): diff --git a/oslo_db/tests/sqlalchemy/test_sqlalchemy.py b/oslo_db/tests/sqlalchemy/test_sqlalchemy.py index 10def6b10b51073fbef2f97cc095547ad451ac0a..7b634f12fad9dcbce154b2d4107774a368319e91 100644 --- a/oslo_db/tests/sqlalchemy/test_sqlalchemy.py +++ b/oslo_db/tests/sqlalchemy/test_sqlalchemy.py @@ -109,7 +109,7 @@ class SQLiteSavepointTest(db_test_base._DbTestCase): ) self.assertEqual( [(1, 'data 1')], - self.engine.execute( + conn.execute( self.test_table.select(). order_by(self.test_table.c.id) ).fetchall() @@ -145,13 +145,13 @@ class SQLiteSavepointTest(db_test_base._DbTestCase): {'data': 'data 3'} ) - self.assertEqual( - [(1, 'data 1'), (2, 'data 3')], - self.engine.execute( - self.test_table.select(). - order_by(self.test_table.c.id) - ).fetchall() - ) + self.assertEqual( + [(1, 'data 1'), (2, 'data 3')], + conn.execute( + self.test_table.select(). + order_by(self.test_table.c.id) + ).fetchall() + ) def test_savepoint_beginning(self): with self.engine.begin() as conn: @@ -167,13 +167,13 @@ class SQLiteSavepointTest(db_test_base._DbTestCase): {'data': 'data 2'} ) - self.assertEqual( - [(1, 'data 2')], - self.engine.execute( - self.test_table.select(). - order_by(self.test_table.c.id) - ).fetchall() - ) + self.assertEqual( + [(1, 'data 2')], + conn.execute( + self.test_table.select(). 
+ order_by(self.test_table.c.id) + ).fetchall() + ) class FakeDBAPIConnection(object): @@ -314,20 +314,22 @@ class MySQLModeTestCase(db_test_base._MySQLOpportunisticTestCase): self.test_table = Table(_TABLE_NAME + "mode", meta, Column('id', Integer, primary_key=True), Column('bar', String(255))) - self.test_table.create(self.connection) + with self.connection.begin(): + self.test_table.create(self.connection) def cleanup(): - self.test_table.drop(self.connection) + with self.connection.begin(): + self.test_table.drop(self.connection) self.connection.close() mode_engine.dispose() + self.addCleanup(cleanup) def _test_string_too_long(self, value): with self.connection.begin(): - self.connection.execute(self.test_table.insert(), - bar=value) + self.connection.execute(self.test_table.insert(), {'bar': value}) result = self.connection.execute(self.test_table.select()) - return result.fetchone()['bar'] + return result.fetchone().bar def test_string_too_long(self): value = 'a' * 512 @@ -477,34 +479,42 @@ class SQLiteConnectTest(test_base.BaseTestCase): def test_sqlite_fk_listener(self): engine = self._fixture(sqlite_fk=True) - self.assertEqual( - 1, - engine.scalar(sql.text('pragma foreign_keys')) - ) + with engine.connect() as conn: + self.assertEqual( + 1, + conn.execute( + sql.text('pragma foreign_keys') + ).scalars().first(), + ) engine = self._fixture(sqlite_fk=False) - self.assertEqual( - 0, - engine.scalar(sql.text("pragma foreign_keys")) - ) + with engine.connect() as conn: + self.assertEqual( + 0, + conn.execute( + sql.text('pragma foreign_keys') + ).scalars().first(), + ) def test_sqlite_synchronous_listener(self): engine = self._fixture() # "The default setting is synchronous=FULL." (e.g. 
2) # http://www.sqlite.org/pragma.html#pragma_synchronous - self.assertEqual( - 2, - engine.scalar(sql.text('pragma synchronous')) - ) + with engine.connect() as conn: + self.assertEqual( + 2, + conn.execute(sql.text('pragma synchronous')).scalars().first(), + ) engine = self._fixture(sqlite_synchronous=False) - self.assertEqual( - 0, - engine.scalar(sql.text('pragma synchronous')) - ) + with engine.connect() as conn: + self.assertEqual( + 0, + conn.execute(sql.text('pragma synchronous')).scalars().first(), + ) class MysqlConnectTest(db_test_base._MySQLOpportunisticTestCase): @@ -513,9 +523,10 @@ class MysqlConnectTest(db_test_base._MySQLOpportunisticTestCase): return session.create_engine(self.engine.url, mysql_sql_mode=sql_mode) def _assert_sql_mode(self, engine, sql_mode_present, sql_mode_non_present): - mode = engine.execute( - sql.text("SHOW VARIABLES LIKE 'sql_mode'") - ).fetchone()[1] + with engine.connect() as conn: + mode = conn.execute( + sql.text("SHOW VARIABLES LIKE 'sql_mode'") + ).fetchone()[1] self.assertIn( sql_mode_present, mode ) @@ -539,9 +550,10 @@ class MysqlConnectTest(db_test_base._MySQLOpportunisticTestCase): # get the GLOBAL sql_mode, not the @@SESSION, so that # we get what is configured for the MySQL database, as opposed # to what our own session.create_engine() has set it to. 
- expected = self.engine.execute( - sql.text("SELECT @@GLOBAL.sql_mode") - ).scalar() + with self.engine.connect() as conn: + expected = conn.execute( + sql.text("SELECT @@GLOBAL.sql_mode") + ).scalar() engine = self._fixture(sql_mode=None) self._assert_sql_mode(engine, expected, None) @@ -593,9 +605,10 @@ class MysqlConnectTest(db_test_base._MySQLOpportunisticTestCase): engine = self._fixture(sql_mode='TRADITIONAL') - actual_mode = engine.execute( - sql.text("SHOW VARIABLES LIKE 'sql_mode'") - ).fetchone()[1] + with engine.connect() as conn: + actual_mode = conn.execute( + sql.text("SHOW VARIABLES LIKE 'sql_mode'") + ).fetchone()[1] self.assertIn('MySQL server mode set to %s' % actual_mode, log.output) diff --git a/oslo_db/tests/sqlalchemy/test_update_match.py b/oslo_db/tests/sqlalchemy/test_update_match.py index fdd1887ce39731b4a986c7dc3911345d5f00201c..b4c025befcb00c2ae07e1c1d10a9f4d36eb0a0cf 100644 --- a/oslo_db/tests/sqlalchemy/test_update_match.py +++ b/oslo_db/tests/sqlalchemy/test_update_match.py @@ -122,7 +122,7 @@ class UpdateMatchTest(db_test_base._DbTestCase): sql.select(MyModel.__table__).where(MyModel.__table__.c.id == pk) ).first() values['id'] = pk - self.assertEqual(values, dict(row)) + self.assertEqual(values, dict(row._mapping)) def test_update_specimen_successful(self): uuid = '136254d5-3869-408f-9da7-190e0072641a' diff --git a/oslo_db/tests/sqlalchemy/test_utils.py b/oslo_db/tests/sqlalchemy/test_utils.py index 75313323b691afdf6f6b8afb503328b347af5dc5..087f7ec1a78924f4ff89543196448511217d6056 100644 --- a/oslo_db/tests/sqlalchemy/test_utils.py +++ b/oslo_db/tests/sqlalchemy/test_utils.py @@ -699,7 +699,8 @@ class TestMigrationUtils(db_test_base._DbTestCase): Column('updated_at', DateTime)) test_table.create(engine) - engine.execute(test_table.insert(), values) + with engine.connect() as conn, conn.begin(): + conn.execute(test_table.insert(), values) return test_table, values def 
test_drop_old_duplicate_entries_from_table(self): @@ -719,10 +721,11 @@ class TestMigrationUtils(db_test_base._DbTestCase): uniq_values.add(uniq_value) expected_ids.append(value['id']) - real_ids = [ - row[0] for row in - self.engine.execute(select(test_table.c.id)).fetchall() - ] + with self.engine.connect() as conn, conn.begin(): + real_ids = [ + row[0] for row in + conn.execute(select(test_table.c.id)).fetchall() + ] self.assertEqual(len(expected_ids), len(real_ids)) for id_ in expected_ids: @@ -760,18 +763,21 @@ class TestMigrationUtils(db_test_base._DbTestCase): base_select = table.select() - rows_select = base_select.where(table.c.deleted != table.c.id) - row_ids = [row['id'] for row in - self.engine.execute(rows_select).fetchall()] - self.assertEqual(len(expected_values), len(row_ids)) - for value in expected_values: - self.assertIn(value['id'], row_ids) - - deleted_rows_select = base_select.where( - table.c.deleted == table.c.id) - deleted_rows_ids = [row['id'] for row in - self.engine.execute( - deleted_rows_select).fetchall()] + with self.engine.connect() as conn, conn.begin(): + rows_select = base_select.where(table.c.deleted != table.c.id) + row_ids = [ + row.id for row in conn.execute(rows_select).fetchall() + ] + self.assertEqual(len(expected_values), len(row_ids)) + for value in expected_values: + self.assertIn(value['id'], row_ids) + + deleted_rows_select = base_select.where( + table.c.deleted == table.c.id) + deleted_rows_ids = [ + row.id for row in + conn.execute(deleted_rows_select).fetchall() + ] self.assertEqual(len(values) - len(row_ids), len(deleted_rows_ids)) for value in soft_deleted_values: @@ -933,7 +939,7 @@ class TestMigrationUtils(db_test_base._DbTestCase): # NOTE(zzzeek): SQLAlchemy 1.2 Boolean type will disallow non 1/0 # value here, 1.1 also coerces to "1/0" so use raw SQL to test the # constraint - with self.engine.connect() as conn: + with self.engine.connect() as conn, conn.begin(): conn.exec_driver_sql( "INSERT INTO abc 
(deleted) VALUES (?)", (10, ), @@ -1647,43 +1653,51 @@ class TestDialectFunctionDispatcher(test_base.BaseTestCase): class TestGetInnoDBTables(db_test_base._MySQLOpportunisticTestCase): def test_all_tables_use_innodb(self): - self.engine.execute( - sql.text( - "CREATE TABLE customers " - "(a INT, b CHAR (20), INDEX (a)) ENGINE=InnoDB")) + with self.engine.connect() as conn, conn.begin(): + conn.execute( + sql.text( + "CREATE TABLE customers " + "(a INT, b CHAR (20), INDEX (a)) ENGINE=InnoDB")) self.assertEqual([], utils.get_non_innodb_tables(self.engine)) def test_all_tables_use_innodb_false(self): - self.engine.execute( - sql.text("CREATE TABLE employee (i INT) ENGINE=MEMORY")) + with self.engine.connect() as conn, conn.begin(): + conn.execute( + sql.text("CREATE TABLE employee (i INT) ENGINE=MEMORY") + ) self.assertEqual(['employee'], utils.get_non_innodb_tables(self.engine)) def test_skip_tables_use_default_value(self): - self.engine.execute( - sql.text("CREATE TABLE migrate_version (i INT) ENGINE=MEMORY")) + with self.engine.connect() as conn, conn.begin(): + conn.execute( + sql.text("CREATE TABLE migrate_version (i INT) ENGINE=MEMORY") + ) self.assertEqual([], utils.get_non_innodb_tables(self.engine)) def test_skip_tables_use_passed_value(self): - self.engine.execute( - sql.text("CREATE TABLE some_table (i INT) ENGINE=MEMORY")) + with self.engine.connect() as conn, conn.begin(): + conn.execute( + sql.text("CREATE TABLE some_table (i INT) ENGINE=MEMORY")) self.assertEqual([], utils.get_non_innodb_tables( self.engine, skip_tables=('some_table',))) def test_skip_tables_use_empty_list(self): - self.engine.execute( - sql.text("CREATE TABLE some_table_3 (i INT) ENGINE=MEMORY")) + with self.engine.connect() as conn, conn.begin(): + conn.execute( + sql.text("CREATE TABLE some_table_3 (i INT) ENGINE=MEMORY")) self.assertEqual(['some_table_3'], utils.get_non_innodb_tables( self.engine, skip_tables=())) def test_skip_tables_use_several_values(self): - self.engine.execute( 
- sql.text("CREATE TABLE some_table_1 (i INT) ENGINE=MEMORY")) - self.engine.execute( - sql.text("CREATE TABLE some_table_2 (i INT) ENGINE=MEMORY")) + with self.engine.connect() as conn, conn.begin(): + conn.execute( + sql.text("CREATE TABLE some_table_1 (i INT) ENGINE=MEMORY")) + conn.execute( + sql.text("CREATE TABLE some_table_2 (i INT) ENGINE=MEMORY")) self.assertEqual([], utils.get_non_innodb_tables( self.engine, diff --git a/releasenotes/notes/remove-NotCommitting-utils-fed6df0e2f85edfa.yaml b/releasenotes/notes/remove-NotCommitting-utils-fed6df0e2f85edfa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c57fbadb94b74a1710a5e8a7218fc27d0cb031f1 --- /dev/null +++ b/releasenotes/notes/remove-NotCommitting-utils-fed6df0e2f85edfa.yaml @@ -0,0 +1,15 @@ +--- +upgrade: + - | + The following helpers have been removed from the + ``oslo_db.sqlalchemy.utils`` module: + + - ``NonCommittingConnectable`` + - ``NonCommittingEngine`` + - ``NonCommittingConnection`` + - ``NonCommittingTransaction`` + + These were unused outside of oslo.db and were not compatible with + SQLAlchemy 2.0. In addition, the ``RollsBackTransaction`` fixture has + been removed from ``oslo_db.sqlalchemy.test_fixtures``. This was + similarly unused and presented similar compatibility issues. diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst index 45469b2a0dbce7c6e8b82c347bdab1ac277d64fa..2d9a2abe84266bd30a8a45e95366ce40efa3e6d8 100644 --- a/releasenotes/source/index.rst +++ b/releasenotes/source/index.rst @@ -6,6 +6,7 @@ :maxdepth: 1 unreleased + xena wallaby victoria ussuri diff --git a/releasenotes/source/xena.rst b/releasenotes/source/xena.rst new file mode 100644 index 0000000000000000000000000000000000000000..1be85be3ebf18741d311aa41096d417a99c970bf --- /dev/null +++ b/releasenotes/source/xena.rst @@ -0,0 +1,6 @@ +========================= +Xena Series Release Notes +========================= + +.. 
release-notes:: + :branch: stable/xena diff --git a/setup.cfg b/setup.cfg index 5fc1aef92349077cbc87dfb9eaf517c6253457ca..7acd07930a6f0f6e6b1441687f981341b890e280 100644 --- a/setup.cfg +++ b/setup.cfg @@ -18,6 +18,7 @@ classifier = Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 + Programming Language :: Python :: 3.9 Programming Language :: Python :: 3 :: Only Programming Language :: Python :: Implementation :: CPython