Copilot commented on code in PR #58149:
URL: https://github.com/apache/airflow/pull/58149#discussion_r3066472855
##########
airflow-core/src/airflow/migrations/versions/0060_3_0_0_add_try_id_to_ti_and_tih.py:
##########
@@ -127,34 +127,14 @@ def upgrade():
def downgrade():
"""Unapply Add try_id to TI and TIH."""
-    dialect_name = op.get_bind().dialect.name
     with op.batch_alter_table("task_instance_history", schema=None) as batch_op:
         batch_op.drop_constraint(batch_op.f("task_instance_history_pkey"), type_="primary")
-        batch_op.add_column(sa.Column("id", sa.INTEGER, nullable=True))
+        match batch_op.get_bind().dialect:
+            case "mysql":
+                batch_op.execute("ALTER TABLE task_instance_history ADD COLUMN id INTEGER PRIMARY KEY AUTO_INCREMENT;")
+            case "postgresql":
+                batch_op.execute("ALTER TABLE task_instance_history ADD COLUMN id SERIAL PRIMARY KEY;")
+            case "sqlite":
+                batch_op.execute("ALTER TABLE task_instance_history ADD COLUMN id INTEGER PRIMARY KEY AUTOINCREMENT;")
Review Comment:
`get_bind().dialect` returns a Dialect object, not a dialect name string, so
these `case "mysql"/"postgresql"/"sqlite"` branches won’t match and the
migration will silently skip adding `id`. Match on
`batch_op.get_bind().dialect.name` (or compute it once) and include a default
case that raises a clear error for unsupported dialects.
```suggestion
        dialect_name = batch_op.get_bind().dialect.name
        match dialect_name:
            case "mysql":
                batch_op.execute("ALTER TABLE task_instance_history ADD COLUMN id INTEGER PRIMARY KEY AUTO_INCREMENT;")
            case "postgresql":
                batch_op.execute("ALTER TABLE task_instance_history ADD COLUMN id SERIAL PRIMARY KEY;")
            case "sqlite":
                batch_op.execute("ALTER TABLE task_instance_history ADD COLUMN id INTEGER PRIMARY KEY AUTOINCREMENT;")
            case _:
                raise ValueError(f"Unsupported dialect for downgrade: {dialect_name}")
```
##########
airflow-core/src/airflow/migrations/versions/0060_3_0_0_add_try_id_to_ti_and_tih.py:
##########
@@ -127,34 +127,14 @@ def upgrade():
def downgrade():
"""Unapply Add try_id to TI and TIH."""
-    dialect_name = op.get_bind().dialect.name
     with op.batch_alter_table("task_instance_history", schema=None) as batch_op:
         batch_op.drop_constraint(batch_op.f("task_instance_history_pkey"), type_="primary")
-        batch_op.add_column(sa.Column("id", sa.INTEGER, nullable=True))
+        match batch_op.get_bind().dialect:
+            case "mysql":
+                batch_op.execute("ALTER TABLE task_instance_history ADD COLUMN id INTEGER PRIMARY KEY AUTO_INCREMENT;")
+            case "postgresql":
+                batch_op.execute("ALTER TABLE task_instance_history ADD COLUMN id SERIAL PRIMARY KEY;")
+            case "sqlite":
+                batch_op.execute("ALTER TABLE task_instance_history ADD COLUMN id INTEGER PRIMARY KEY AUTOINCREMENT;")
Review Comment:
SQLite does not support `ALTER TABLE ... ADD COLUMN ... PRIMARY KEY` (and
`AUTOINCREMENT` is only valid on an `INTEGER PRIMARY KEY` defined when the
table is created). This downgrade will fail on SQLite. Prefer using Alembic
batch operations to rebuild the table with the new primary key (e.g.,
`batch_op.add_column(..., primary_key=True, autoincrement=True)` plus
creating/dropping constraints via batch APIs), which is also consistent with
the PR’s goal of relying on Alembic autoincrement behavior.
##########
airflow-core/src/airflow/migrations/versions/0060_3_0_0_add_try_id_to_ti_and_tih.py:
##########
@@ -127,34 +127,14 @@ def upgrade():
def downgrade():
"""Unapply Add try_id to TI and TIH."""
-    dialect_name = op.get_bind().dialect.name
     with op.batch_alter_table("task_instance_history", schema=None) as batch_op:
         batch_op.drop_constraint(batch_op.f("task_instance_history_pkey"), type_="primary")
-        batch_op.add_column(sa.Column("id", sa.INTEGER, nullable=True))
+        match batch_op.get_bind().dialect:
+            case "mysql":
+                batch_op.execute("ALTER TABLE task_instance_history ADD COLUMN id INTEGER PRIMARY KEY AUTO_INCREMENT;")
+            case "postgresql":
+                batch_op.execute("ALTER TABLE task_instance_history ADD COLUMN id SERIAL PRIMARY KEY;")
+            case "sqlite":
+                batch_op.execute("ALTER TABLE task_instance_history ADD COLUMN id INTEGER PRIMARY KEY AUTOINCREMENT;")
Review Comment:
`batch_op.execute(...)` is not a commonly supported/portable batch operation
API in Alembic (batch mode typically expects `op.execute(...)` or schema
operations like `add_column/create_primary_key`). To avoid runtime failures and
improve portability across backends/batch modes, use `batch_op.add_column(...)`
+ `batch_op.create_primary_key(...)` (or `op.execute(...)` outside the batch
context if raw DDL is unavoidable).
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]