Fix broken migrations

On PostgreSQL these migrations fail outright. In addition, on MySQL
the b6ae66d05e3 migration can fail with the error
"Specified key was too long; max key length is 1000 bytes",
depending on the dialect version.

Fixes-Bug: #1219776

Change-Id: Id9b0e1eb0e685053291367f7eb62fef68b6b2f84
Svetlana Shturm 2013-09-05 12:50:03 +04:00
parent 3cd78fdec4
commit aae3f55304
2 changed files with 41 additions and 30 deletions
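
For context on the MySQL error quoted above: with the utf8 charset MySQL reserves up to 3 bytes per character, and MyISAM caps index keys at 1000 bytes, so an index over two wide VARCHAR columns cannot fit, while 100-character prefixes can. A back-of-the-envelope check in Python (the VARCHAR(255) column sizes are illustrative assumptions, not taken from the commit):

# Rough arithmetic behind "max key length is 1000 bytes" (assumed
# utf8 columns and MyISAM's 1000-byte key limit; sizes illustrative).
BYTES_PER_CHAR = 3                      # utf8 in MySQL: up to 3 bytes/char
MYISAM_KEY_LIMIT = 1000                 # bytes

full_key = 2 * 255 * BYTES_PER_CHAR     # two VARCHAR(255) columns -> 1530
prefix_key = 2 * 100 * BYTES_PER_CHAR   # two 100-char prefixes    -> 600

print(full_key > MYISAM_KEY_LIMIT)      # True: full index is rejected
print(prefix_key <= MYISAM_KEY_LIMIT)   # True: prefixed index fits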

@@ -32,13 +32,13 @@ from alembic import op
 TABLE_NAME = 'sourceassoc'
-OLD_NAME = 'uniq_sourceassoc0meter_id'
-NEW_NAME = 'uniq_sourceassoc0meter_id0user_id'
+UNIQ_NAME = 'uniq_sourceassoc0meter_id0user_id'
 COLUMNS = ('meter_id', 'user_id')
 
 
-def change_uniq(table_name, old_name, new_name, columns):
-    engine = op.get_bind().engine
+def change_uniq(table_name, uniq_name, columns, downgrade=False):
+    bind = op.get_bind()
+    engine = bind.engine
     if engine.name == 'sqlite':
         return
     if engine.name == 'mysql':
@@ -50,12 +50,10 @@ def change_uniq(table_name, old_name, new_name, columns):
         op.drop_constraint('fk_sourceassoc_user_id',
                            table_name,
                            type_='foreignkey')
-    try:
-        # For some versions of dialects constraint can be skipped.
-        op.drop_constraint(old_name, table_name=table_name, type_='unique')
-    except Exception:
-        pass
-    op.create_unique_constraint(new_name, table_name, columns)
+    if downgrade:
+        op.drop_constraint(uniq_name, table_name=table_name, type_='unique')
+    else:
+        op.create_unique_constraint(uniq_name, table_name, columns)
     if engine.name == 'mysql':
         op.create_foreign_key('fk_sourceassoc_meter_id', table_name, 'meter',
                               ['meter_id'], ['id'])
@@ -64,8 +62,8 @@ def change_uniq(table_name, old_name, new_name, columns):
 
 
 def upgrade():
-    change_uniq(TABLE_NAME, OLD_NAME, NEW_NAME, COLUMNS)
+    change_uniq(TABLE_NAME, UNIQ_NAME, COLUMNS)
 
 
 def downgrade():
-    change_uniq(TABLE_NAME, NEW_NAME, OLD_NAME, COLUMNS)
+    change_uniq(TABLE_NAME, UNIQ_NAME, COLUMNS, downgrade=True)
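
Two things are worth noting in the rewrite above. First, the old code swallowed any failure from drop_constraint with a bare try/except, which is exactly how broken schemas slipped through; with the explicit downgrade flag each direction performs one operation and a real failure surfaces. Second, the foreign-key dance around the change is MySQL-specific: MySQL refuses to drop an index that still backs a foreign key ("Cannot drop index ...: needed in a foreign key constraint"). A minimal sketch of that pattern, with hypothetical table and constraint names that are not from the commit:

from alembic import op


def replace_unique_constraint(table_name, uniq_name, columns):
    # Hypothetical helper illustrating the MySQL FK dance; the
    # fk_example_user_id constraint and 'user' table are made up.
    engine = op.get_bind().engine
    if engine.name == 'mysql':
        # Drop the FK before touching the index it depends on.
        op.drop_constraint('fk_example_user_id', table_name,
                           type_='foreignkey')
    op.create_unique_constraint(uniq_name, table_name, columns)
    if engine.name == 'mysql':
        # Recreate the FK once the new unique index exists.
        op.create_foreign_key('fk_example_user_id', table_name, 'user',
                              ['user_id'], ['id'])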

@@ -29,53 +29,66 @@ revision = 'b6ae66d05e3'
 down_revision = '17738166b91'
 
 from alembic import op
 import sqlalchemy as sa
 
 INDEXES = (
-    # ([dialects], table_name, index_name, create/delete, uniq/not uniq)
+    # ([dialects], table_name, index_name, create/delete, uniq/not_uniq,
+    #  length_limited)
     (['mysql', 'sqlite', 'postgresql'],
      'resource',
      'resource_user_id_project_id_key',
-     ('user_id', 'project_id'), True, False),
-    (['mysql'], 'source', 'id', ('id',), False, True))
+     ('user_id', 'project_id'), True, False, True),
+    (['mysql'], 'source', 'id', ('id',), False, True, False))
 
 
-def index_cleanup(engine_names, table_name, uniq_name, columns, create=True,
-                  unique=False):
-    engine = op.get_bind().engine
+def index_cleanup(engine_names, table_name, uniq_name, columns, create,
+                  unique, limited):
+    bind = op.get_bind()
+    engine = bind.engine
     if engine.name not in engine_names:
         return
     if create:
-        # We have unique constraint in postgres for `resource` table.
-        # But it should be a simple index. So, we should delete unique key
-        # before index creation.
-        if engine.name == 'postgresql':
-            op.drop_constraint(uniq_name, table_name, type_='unique')
-        op.create_index(uniq_name, table_name, columns, unique=unique)
+        if limited and engine.name == 'mysql':
+            # For some versions of mysql we can get an error
+            # "Specified key was too long; max key length is 1000 bytes".
+            # We should create an index by hand in this case with limited
+            # length of columns.
+            meta = sa.MetaData()
+            meta.bind = engine
+            table = sa.Table(table_name, meta, autoload=True)
+            columns_mysql = ",".join((c + "(100)" for c in columns))
+            sql = ("create index %s ON %s (%s)" % (uniq_name, table,
+                                                   columns_mysql))
+            engine.execute(sql)
+        else:
+            op.create_index(uniq_name, table_name, columns, unique=unique)
     else:
         if unique:
             op.drop_constraint(uniq_name, table_name, type_='unique')
         else:
             op.drop_index(uniq_name, table_name=table_name)
-        if engine.name == 'postgresql':
-            op.create_unique_constraint(uniq_name, table_name, columns)
 
 
 def upgrade():
-    for engine_names, table_name, uniq_name, columns, create, uniq in INDEXES:
+    for (engine_names, table_name, uniq_name, columns, create, uniq,
+         limited) in INDEXES:
         index_cleanup(engine_names,
                       table_name,
                       uniq_name,
                       columns,
                       create,
-                      uniq)
+                      uniq,
+                      limited)
 
 
 def downgrade():
-    for engine_names, table_name, uniq_name, columns, create, uniq in INDEXES:
+    for (engine_names, table_name, uniq_name, columns, create, uniq,
+         limited) in INDEXES:
         index_cleanup(engine_names,
                       table_name,
                       uniq_name,
                       columns,
                       not create,
-                      uniq)
+                      uniq,
+                      limited)
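
The hand-built "create index" SQL above works, but string formatting is easy to get wrong. For reference, SQLAlchemy can express MySQL prefix lengths natively through the mysql_length index option; a minimal sketch of the same idea using a modern SQLAlchemy (autoload_with is newer API than the code above, and the helper and its arguments are illustrative, not part of the commit):

import sqlalchemy as sa


def create_prefix_index(engine, table_name, index_name, columns, length=100):
    # Reflect the table, then build an index with a per-column prefix
    # length; on MySQL this renders as e.g. "user_id(100)".
    meta = sa.MetaData()
    table = sa.Table(table_name, meta, autoload_with=engine)
    index = sa.Index(index_name,
                     *[table.c[name] for name in columns],
                     mysql_length={name: length for name in columns})
    index.create(bind=engine)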