diff --git a/Cargo.lock b/Cargo.lock index 764abd78fb5..0e4dec8f47f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5153,6 +5153,7 @@ name = "spacetimedb-schema" version = "1.0.0-rc3" dependencies = [ "anyhow", + "colored", "enum-as-inner", "hashbrown 0.15.1", "indexmap 2.6.0", @@ -5160,6 +5161,7 @@ dependencies = [ "lazy_static", "petgraph", "proptest", + "regex", "serde_json", "serial_test", "smallvec", diff --git a/crates/cli/src/subcommands/publish.rs b/crates/cli/src/subcommands/publish.rs index 64183173d89..014c94b0006 100644 --- a/crates/cli/src/subcommands/publish.rs +++ b/crates/cli/src/subcommands/publish.rs @@ -179,6 +179,7 @@ pub async fn exec(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error domain, database_identity, op, + update_summary, } => { let op = match op { PublishOp::Created => "Created new", @@ -189,6 +190,9 @@ pub async fn exec(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error } else { println!("{} database with identity: {}", op, database_identity); } + if let Some(update_summary) = update_summary { + println!("{}", update_summary); + } } PublishResult::TldNotRegistered { domain } => { return Err(anyhow::anyhow!( diff --git a/crates/client-api-messages/src/name.rs b/crates/client-api-messages/src/name.rs index 0678992303b..b33b8f91f8e 100644 --- a/crates/client-api-messages/src/name.rs +++ b/crates/client-api-messages/src/name.rs @@ -57,6 +57,17 @@ pub enum PublishResult { /// or not. database_identity: Identity, op: PublishOp, + + /// If the database was updated, may contain a string describing the update. + /// Contains ANSI escape codes for color. You can use + /// `spacetimedb_schema::auto_migrate::pretty_print::strip_ansi_escape_codes` + /// to remove these if needed. + /// + /// TODO: it would be much better to put a more structured data type here. + /// This could then be formatted on client, possibly in a machine-readable form; + /// it could also allow, say, a GUI that shows migration histories on the website. + /// However, this requires reworking the `MigrationPlan` type to be serializable. 
+ update_summary: Option, }, // TODO: below variants are obsolete with control db module diff --git a/crates/client-api/src/routes/database.rs b/crates/client-api/src/routes/database.rs index 55412d7dada..46f364c5a32 100644 --- a/crates/client-api/src/routes/database.rs +++ b/crates/client-api/src/routes/database.rs @@ -658,7 +658,7 @@ pub async fn publish( .await .map_err(log_and_500)?; - if let Some(updated) = maybe_updated { + let update_summary = if let Some(updated) = maybe_updated { match updated { UpdateDatabaseResult::AutoMigrateError(errs) => { return Err((StatusCode::BAD_REQUEST, format!("Database update rejected: {errs}")).into()); @@ -670,14 +670,18 @@ pub async fn publish( ) .into()); } - UpdateDatabaseResult::NoUpdateNeeded | UpdateDatabaseResult::UpdatePerformed => {} + UpdateDatabaseResult::NoUpdateNeeded => None, + UpdateDatabaseResult::UpdatePerformed(summary) => Some(summary), } - } + } else { + None + }; Ok(axum::Json(PublishResult::Success { domain: db_name.as_ref().map(ToString::to_string), database_identity, op, + update_summary, })) } diff --git a/crates/core/src/db/update.rs b/crates/core/src/db/update.rs index 8b040dffca7..53d37ca6b99 100644 --- a/crates/core/src/db/update.rs +++ b/crates/core/src/db/update.rs @@ -7,7 +7,7 @@ use spacetimedb_lib::db::auth::StTableType; use spacetimedb_lib::identity::AuthCtx; use spacetimedb_lib::AlgebraicValue; use spacetimedb_primitives::ColSet; -use spacetimedb_schema::auto_migrate::{AutoMigratePlan, ManualMigratePlan, MigratePlan}; +use spacetimedb_schema::auto_migrate::{AutoMigratePlan, MigratePlan}; use spacetimedb_schema::def::TableDef; use spacetimedb_schema::schema::{IndexSchema, Schema, SequenceSchema, TableSchema}; use std::sync::Arc; @@ -45,22 +45,10 @@ pub fn update_database( } match plan { - MigratePlan::Manual(plan) => manual_migrate_database(stdb, tx, plan, system_logger, existing_tables), MigratePlan::Auto(plan) => auto_migrate_database(stdb, tx, auth_ctx, plan, system_logger, existing_tables), } } -/// Manually migrate a database. -fn manual_migrate_database( - _stdb: &RelationalDB, - _tx: &mut MutTxId, - _plan: ManualMigratePlan, - _system_logger: &SystemLogger, - _existing_tables: Vec>, -) -> anyhow::Result<()> { - unimplemented!("Manual database migrations are not yet implemented") -} - /// Automatically migrate a database. fn auto_migrate_database( stdb: &RelationalDB, diff --git a/crates/core/src/host/host_controller.rs b/crates/core/src/host/host_controller.rs index 7e6308779b4..270250bbb3b 100644 --- a/crates/core/src/host/host_controller.rs +++ b/crates/core/src/host/host_controller.rs @@ -426,7 +426,7 @@ impl HostController { ) .await?; match update_result { - UpdateDatabaseResult::NoUpdateNeeded | UpdateDatabaseResult::UpdatePerformed => { + UpdateDatabaseResult::NoUpdateNeeded | UpdateDatabaseResult::UpdatePerformed(_) => { *guard = Some(host); } UpdateDatabaseResult::AutoMigrateError(e) => { diff --git a/crates/core/src/host/module_host.rs b/crates/core/src/host/module_host.rs index 83758e86d5e..d4126d8f87d 100644 --- a/crates/core/src/host/module_host.rs +++ b/crates/core/src/host/module_host.rs @@ -448,7 +448,9 @@ pub struct WeakModuleHost { #[derive(Debug)] pub enum UpdateDatabaseResult { NoUpdateNeeded, - UpdatePerformed, + /// The string is a printable summary of the update that happened. + /// Contains ANSI escape sequences for color. 
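Aside (editor's sketch, not part of the patch): one way a consumer of `PublishResult` might handle the new `update_summary` field, stripping the ANSI colors when output is not going to a terminal. The `Option<String>` type is inferred from the doc comment above (the generic parameter is elided in this rendering of the patch), and the import paths are assumptions based on the file layout shown here.

```rust
use spacetimedb_client_api_messages::name::PublishResult;
use spacetimedb_schema::auto_migrate::pretty_print::strip_ansi_escape_codes;

/// Hypothetical helper: print the migration summary from a publish response,
/// dropping ANSI escape codes when the output is piped to a file or a plain console.
fn report_publish(result: &PublishResult, is_tty: bool) {
    if let PublishResult::Success {
        update_summary: Some(summary),
        ..
    } = result
    {
        if is_tty {
            println!("{summary}");
        } else {
            println!("{}", strip_ansi_escape_codes(summary));
        }
    }
}
```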
+ UpdatePerformed(String), AutoMigrateError(ErrorStream), ErrorExecutingMigration(anyhow::Error), } @@ -457,7 +459,7 @@ impl UpdateDatabaseResult { pub fn was_successful(&self) -> bool { matches!( self, - UpdateDatabaseResult::UpdatePerformed | UpdateDatabaseResult::NoUpdateNeeded + UpdateDatabaseResult::UpdatePerformed(_) | UpdateDatabaseResult::NoUpdateNeeded ) } } diff --git a/crates/core/src/host/wasm_common/module_host_actor.rs b/crates/core/src/host/wasm_common/module_host_actor.rs index 77f4bd7bf98..5195e35ac12 100644 --- a/crates/core/src/host/wasm_common/module_host_actor.rs +++ b/crates/core/src/host/wasm_common/module_host_actor.rs @@ -3,6 +3,7 @@ use bytes::Bytes; use spacetimedb_client_api_messages::timestamp::Timestamp; use spacetimedb_primitives::TableId; use spacetimedb_schema::auto_migrate::ponder_migrate; +use spacetimedb_schema::auto_migrate::pretty_print::pretty_print; use spacetimedb_schema::def::ModuleDef; use spacetimedb_schema::schema::{Schema, TableSchema}; use std::sync::Arc; @@ -340,6 +341,10 @@ impl ModuleInstance for WasmModuleInstance { return Ok(UpdateDatabaseResult::AutoMigrateError(errs)); } }; + let summary = pretty_print(&plan).unwrap_or_else(|_| { + log::warn!("Failed to pretty-print migration plan: {plan:#?}"); + "(plan not rendered, but succeeded)".to_string() + }); let stdb = &*self.replica_context().relational_db; let program_hash = program.hash; @@ -361,7 +366,7 @@ impl ModuleInstance for WasmModuleInstance { stdb.commit_tx(tx)?; self.system_logger().info("Database updated"); log::info!("Database updated, {}", stdb.database_identity()); - Ok(UpdateDatabaseResult::UpdatePerformed) + Ok(UpdateDatabaseResult::UpdatePerformed(summary)) } } } diff --git a/crates/schema/Cargo.toml b/crates/schema/Cargo.toml index 82fda50506e..3b58fbbb72c 100644 --- a/crates/schema/Cargo.toml +++ b/crates/schema/Cargo.toml @@ -16,7 +16,9 @@ spacetimedb-sats.workspace = true spacetimedb-data-structures.workspace = true spacetimedb-sql-parser.workspace = true +regex.workspace = true anyhow.workspace = true +colored.workspace = true indexmap.workspace = true itertools.workspace = true lazy_static.workspace = true diff --git a/crates/schema/src/auto_migrate.rs b/crates/schema/src/auto_migrate.rs index 977c2e7e77d..a98b16eb853 100644 --- a/crates/schema/src/auto_migrate.rs +++ b/crates/schema/src/auto_migrate.rs @@ -6,12 +6,13 @@ use spacetimedb_data_structures::{ use spacetimedb_lib::db::raw_def::v9::{RawRowLevelSecurityDefV9, TableType}; use spacetimedb_sats::WithTypespace; +pub mod pretty_print; + pub type Result = std::result::Result>; /// A plan for a migration. #[derive(Debug)] pub enum MigratePlan<'def> { - Manual(ManualMigratePlan<'def>), Auto(AutoMigratePlan<'def>), } @@ -19,7 +20,6 @@ impl<'def> MigratePlan<'def> { /// Get the old `ModuleDef` for this migration plan. pub fn old_def(&self) -> &'def ModuleDef { match self { - MigratePlan::Manual(plan) => plan.old, MigratePlan::Auto(plan) => plan.old, } } @@ -27,20 +27,11 @@ impl<'def> MigratePlan<'def> { /// Get the new `ModuleDef` for this migration plan. pub fn new_def(&self) -> &'def ModuleDef { match self { - MigratePlan::Manual(plan) => plan.new, MigratePlan::Auto(plan) => plan.new, } } } -/// A plan for a manual migration. -/// `new` must have a reducer marked with `Lifecycle::Update`. -#[derive(Debug)] -pub struct ManualMigratePlan<'def> { - pub old: &'def ModuleDef, - pub new: &'def ModuleDef, -} - /// A plan for an automatic migration. 
#[derive(Debug)] pub struct AutoMigratePlan<'def> { @@ -52,13 +43,13 @@ pub struct AutoMigratePlan<'def> { /// There is also an implied check: that the schema in the database is compatible with the old ModuleDef. pub prechecks: Vec>, /// The migration steps to perform. - /// Order should not matter, as the steps are independent. + /// Order matters: `Remove`s of a particular `Def` must be ordered before `Add`s. pub steps: Vec>, } /// Checks that must be performed before performing an automatic migration. /// These checks can access table contents and other database state. -#[derive(PartialEq, Eq, Debug)] +#[derive(PartialEq, Eq, Debug, PartialOrd, Ord)] pub enum AutoMigratePrecheck<'def> { /// Perform a check that adding a sequence is valid (the relevant column contains no values /// greater than the sequence's start value). @@ -66,31 +57,50 @@ pub enum AutoMigratePrecheck<'def> { } /// A step in an automatic migration. -#[derive(PartialEq, Eq, Debug)] +#[derive(PartialEq, Eq, Debug, PartialOrd, Ord)] pub enum AutoMigrateStep<'def> { + // It is important FOR CORRECTNESS that `Remove` variants are declared before `Add` variants in this enum! + // + // The ordering is used to sort the steps of an auto-migration. + // If adds go before removes, and the user tries to remove an index and then re-add it with new configuration, + // the following can occur: + // + // 1. `AddIndex("indexname")` + // 2. `RemoveIndex("indexname")` + // + // This results in the index being added -- which, at time of writing, does nothing -- and then removed, + // resulting in the intended index not being created. + // + // For now, we just ensure that we declare all `Remove` variants before `Add` variants + // and let `#[derive(PartialOrd)]` take care of the rest. + // TODO: when this enum is made serializable, a more durable fix will be needed here. + // Probably we will want to have separate arrays of add and remove steps. + // + /// Remove an index. + RemoveIndex(::Key<'def>), + /// Remove a constraint. + RemoveConstraint(::Key<'def>), + /// Remove a sequence. + RemoveSequence(::Key<'def>), + /// Remove a schedule annotation from a table. + RemoveSchedule(::Key<'def>), + /// Remove a row-level security query. + RemoveRowLevelSecurity(::Key<'def>), + /// Add a table, including all indexes, constraints, and sequences. /// There will NOT be separate steps in the plan for adding indexes, constraints, and sequences. AddTable(::Key<'def>), /// Add an index. AddIndex(::Key<'def>), - /// Remove an index. - RemoveIndex(::Key<'def>), - /// Remove a constraint. - RemoveConstraint(::Key<'def>), /// Add a sequence. AddSequence(::Key<'def>), - /// Remove a sequence. - RemoveSequence(::Key<'def>), - /// Change the access of a table. - ChangeAccess(::Key<'def>), /// Add a schedule annotation to a table. AddSchedule(::Key<'def>), - /// Remove a schedule annotation from a table. - RemoveSchedule(::Key<'def>), /// Add a row-level security query. AddRowLevelSecurity(::Key<'def>), - /// Remove a row-level security query. - RemoveRowLevelSecurity(::Key<'def>), + + /// Change the access of a table. + ChangeAccess(::Key<'def>), } /// Something that might prevent an automatic migration. 
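Aside (editor's sketch, not part of the patch): the comment above relies on `#[derive(PartialOrd, Ord)]` ordering enum variants by declaration order, so sorting a step list moves every `Remove*` ahead of every `Add*`. A minimal, self-contained illustration with hypothetical variant names:

```rust
// Derived Ord on an enum compares by variant declaration order first,
// then by the variants' fields.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Step {
    RemoveIndex(&'static str), // declared first, so it sorts first
    AddIndex(&'static str),
}

fn main() {
    let mut steps = vec![Step::AddIndex("idx"), Step::RemoveIndex("idx")];
    steps.sort();
    // The removal now precedes the re-add, which is the property the migration
    // planner needs when an index is dropped and re-created with a new configuration.
    assert_eq!(steps, [Step::RemoveIndex("idx"), Step::AddIndex("idx")]);
}
```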
@@ -181,6 +191,9 @@ pub fn ponder_auto_migrate<'def>(old: &'def ModuleDef, new: &'def ModuleDef) -> let ((), (), (), (), ()) = (tables_ok, indexes_ok, sequences_ok, constraints_ok, rls_ok).combine_errors()?; + plan.steps.sort(); + plan.prechecks.sort(); + Ok(plan) } @@ -418,9 +431,14 @@ fn auto_migrate_constraints(plan: &mut AutoMigratePlan, new_tables: &HashSet<&Id .collect_all_errors() } -// Because we can refer to many tables and fields on the row level-security query, we need to remove all of them, -// then add the new ones, instead of trying to track the graph of dependencies. fn auto_migrate_row_level_security(plan: &mut AutoMigratePlan) -> Result<()> { + // Because we can refer to many tables and fields on the row level-security query, we need to remove all of them, + // then add the new ones, instead of trying to track the graph of dependencies. + // When pretty-printing, steps to remove and re-add a row-level-security policy are not shown, + // since they are an implementation detail that will be surprising to users. + // TODO: change this to not add-and-remove unchanged policies, and hand the responsibility for reinitializing + // queries to the `core` crate instead. + // When you do this, you will need to update `pretty_print.rs`. for rls in plan.old.row_level_security() { plan.steps.push(AutoMigrateStep::RemoveRowLevelSecurity(rls.key())); } diff --git a/crates/schema/src/auto_migrate/pretty_print.rs b/crates/schema/src/auto_migrate/pretty_print.rs new file mode 100644 index 00000000000..2964eef1412 --- /dev/null +++ b/crates/schema/src/auto_migrate/pretty_print.rs @@ -0,0 +1,320 @@ +//! This module provides a function [`pretty_print`](pretty_print) that renders an automatic migration plan to a string. + +use super::{IndexAlgorithm, MigratePlan, TableDef}; +use crate::{auto_migrate::AutoMigrateStep, def::ConstraintData}; +use colored::{self, Colorize}; +use lazy_static::lazy_static; +use regex::Regex; +use spacetimedb_lib::{ + db::raw_def::v9::{RawRowLevelSecurityDefV9, TableAccess, TableType}, + AlgebraicType, +}; +use spacetimedb_primitives::{ColId, ColList}; +use spacetimedb_sats::{algebraic_type::fmt::fmt_algebraic_type, WithTypespace}; +use std::fmt::{self, Write}; + +lazy_static! { + // https://superuser.com/questions/380772/removing-ansi-color-codes-from-text-stream + static ref ANSI_ESCAPE_SEQUENCE: Regex = Regex::new( + r#"(?x) # verbose mode + (?: + \x1b \[ [\x30-\x3f]* [\x20-\x2f]* [\x40-\x7e] # CSI sequences (start with "ESC [") + | \x1b [PX^_] .*? \x1b \\ # String Terminator sequences (end with "ESC \") + | \x1b \] [^\x07]* (?: \x07 | \x1b \\ ) # Sequences ending in BEL ("\x07") + | \x1b [\x40-\x5f] + )"# + ) + .unwrap(); +} + +/// Strip ANSI escape sequences from a string. +/// This is needed when printing in a terminal without support for these sequences, +/// such as `CMD.exe`. +pub fn strip_ansi_escape_codes(s: &str) -> String { + ANSI_ESCAPE_SEQUENCE.replace_all(s, "").into_owned() +} + +/// Pretty print a migration plan, resulting in a string (containing ANSI escape codes). +/// If you are printing to a console without ANSI escape code support, call [`strip_ansi_escape_codes`] on the +/// resulting string. 
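Aside (editor's sketch, not part of the patch): a concrete instance of what the CSI branch of the regex above matches — `ESC` `[`, parameter bytes, then a final byte — using `strip_ansi_escape_codes` as defined above. The example strings are hypothetical:

```rust
use spacetimedb_schema::auto_migrate::pretty_print::strip_ansi_escape_codes;

fn main() {
    // "\x1b[1;32m" is a CSI sequence: parameters "1;32" (bold, green), final byte 'm'.
    // "\x1b[0m" resets attributes. Both fall under the first alternative of the regex.
    let colored = "\x1b[1;32mCreated\x1b[0m table `book`";
    assert_eq!(strip_ansi_escape_codes(colored), "Created table `book`");
}
```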
+pub fn pretty_print(plan: &MigratePlan) -> Result { + let MigratePlan::Auto(plan) = plan; + let mut out = String::new(); + let outr = &mut out; + + writeln!(outr, "--------------")?; + writeln!(outr, "{}", "Performed automatic migration".blue())?; + writeln!(outr, "--------------")?; + + let created = "Created".green().bold(); + let removed = "Removed".red().bold(); + + for step in &plan.steps { + match step { + AutoMigrateStep::AddTable(t) => { + let table = plan.new.table(*t).ok_or(fmt::Error)?; + + write!(outr, "- {} table: {}", created, table_name(&table.name))?; + if table.table_type == TableType::System { + write!(outr, " (system)")?; + } + match table.table_access { + TableAccess::Private => write!(outr, " (private)")?, + TableAccess::Public => write!(outr, " (public)")?, + } + writeln!(outr)?; + writeln!(outr, " - Columns:")?; + for column in &table.columns { + let resolved = WithTypespace::new(plan.new.typespace(), &column.ty) + .resolve_refs() + .map_err(|_| fmt::Error)?; + + writeln!( + outr, + " - {}: {}", + column_name(&column.name), + type_name(&resolved) + )?; + } + if !table.constraints.is_empty() { + writeln!(outr, " - Unique constraints:")?; + for constraint in table.constraints.values() { + match &constraint.data { + ConstraintData::Unique(unique) => { + writeln!( + outr, + " - {} on {}", + constraint_name(&constraint.name), + format_col_list(&unique.columns.clone().into(), table)? + )?; + } + } + } + } + if !table.indexes.is_empty() { + writeln!(outr, " - Indexes:")?; + for index in table.indexes.values() { + match &index.algorithm { + IndexAlgorithm::BTree(btree) => { + write!( + outr, + " - {} on {}", + index_name(&index.name), + format_col_list(&btree.columns, table)? + )?; + writeln!(outr)?; + } + } + } + } + if !table.sequences.is_empty() { + writeln!(outr, " - Auto-increment constraints:")?; + for sequence in table.sequences.values() { + let column = column_name_from_id(table, sequence.column); + writeln!(outr, " - {} on {}", sequence_name(&sequence.name), column)?; + } + } + if let Some(schedule) = &table.schedule { + let reducer = reducer_name(&schedule.reducer_name); + writeln!(outr, " - Scheduled, calling reducer: {}", reducer)?; + } + } + AutoMigrateStep::AddIndex(index) => { + let table_def = plan.new.stored_in_table_def(index).ok_or(fmt::Error)?; + let index_def = table_def.indexes.get(*index).ok_or(fmt::Error)?; + + writeln!( + outr, + "- {} index {} on columns {} of table {}", + created, + index_name(index), + format_col_list(index_def.algorithm.columns(), table_def)?, + table_name(&table_def.name), + )?; + } + AutoMigrateStep::RemoveIndex(index) => { + let table_def = plan.old.stored_in_table_def(index).ok_or(fmt::Error)?; + let index_def = table_def.indexes.get(*index).ok_or(fmt::Error)?; + + let col_list = match &index_def.algorithm { + IndexAlgorithm::BTree(b) => &b.columns, + }; + write!( + outr, + "- {} index {} on columns {} of table {}", + removed, + index_name(index), + format_col_list(col_list, table_def)?, + table_name(&table_def.name) + )?; + writeln!(outr)?; + } + AutoMigrateStep::RemoveConstraint(constraint) => { + let table_def = plan.old.stored_in_table_def(constraint).ok_or(fmt::Error)?; + let constraint_def = table_def.constraints.get(*constraint).ok_or(fmt::Error)?; + match &constraint_def.data { + ConstraintData::Unique(unique_constraint_data) => { + write!( + outr, + "- {} unique constraint {} on columns {} of table {}", + removed, + constraint_name(constraint), + format_col_list(&unique_constraint_data.columns, table_def)?, + 
table_name(&table_def.name) + )?; + writeln!(outr)?; + } + } + } + AutoMigrateStep::AddSequence(sequence) => { + let table_def = plan.new.stored_in_table_def(sequence).ok_or(fmt::Error)?; + let sequence_def = table_def.sequences.get(*sequence).ok_or(fmt::Error)?; + + write!( + outr, + "- {} auto-increment constraint {} on column {} of table {}", + created, + constraint_name(sequence), + column_name_from_id(table_def, sequence_def.column), + table_name(&table_def.name), + )?; + writeln!(outr)?; + } + AutoMigrateStep::RemoveSequence(sequence) => { + let table_def = plan.old.stored_in_table_def(sequence).ok_or(fmt::Error)?; + let sequence_def = table_def.sequences.get(*sequence).ok_or(fmt::Error)?; + + write!( + outr, + "- {} auto-increment constraint {} on column {} of table {}", + removed, + constraint_name(sequence), + column_name_from_id(table_def, sequence_def.column), + table_name(&table_def.name), + )?; + writeln!(outr)?; + } + AutoMigrateStep::ChangeAccess(table) => { + let table_def = plan.new.table(*table).ok_or(fmt::Error)?; + + write!(outr, "- Changed access for table {}", table_name(&table_def.name))?; + match table_def.table_access { + TableAccess::Private => write!(outr, " (public -> private)")?, + TableAccess::Public => write!(outr, " (private -> public)")?, + } + writeln!(outr)?; + } + AutoMigrateStep::AddSchedule(schedule) => { + let table_def = plan.new.table(*schedule).ok_or(fmt::Error)?; + let schedule_def = table_def.schedule.as_ref().ok_or(fmt::Error)?; + + let reducer = reducer_name(&schedule_def.reducer_name); + writeln!( + outr, + "- {} schedule for table {} calling reducer {}", + created, + table_name(&table_def.name), + reducer + )?; + } + AutoMigrateStep::RemoveSchedule(schedule) => { + let table_def = plan.old.table(*schedule).ok_or(fmt::Error)?; + let schedule_def = table_def.schedule.as_ref().ok_or(fmt::Error)?; + + let reducer = reducer_name(&schedule_def.reducer_name); + writeln!( + outr, + "- {} schedule for table {} calling reducer {}", + removed, + table_name(&table_def.name), + reducer + )?; + } + AutoMigrateStep::AddRowLevelSecurity(rls) => { + // Implementation detail: Row-level-security policies are always removed and re-added + // because the `core` crate needs to recompile some stuff. + // We hide this from the user. + if plan.old.lookup::(*rls) + == plan.new.lookup::(*rls) + { + continue; + } + writeln!(outr, "- {} row level security policy:", created)?; + writeln!(outr, " `{}`", rls.blue())?; + } + AutoMigrateStep::RemoveRowLevelSecurity(rls) => { + // Implementation detail: Row-level-security policies are always removed and re-added + // because the `core` crate needs to recompile some stuff. + // We hide this from the user. 
+ if plan.old.lookup::(*rls) + == plan.new.lookup::(*rls) + { + continue; + } + writeln!(outr, "- {} row level security policy:", removed)?; + writeln!(outr, " `{}`", rls.blue())?; + } + } + } + + Ok(out) +} + +fn reducer_name(name: &str) -> String { + format!("`{}`", name.yellow()) +} + +fn table_name(name: &str) -> String { + format!("`{}`", name.cyan()) +} + +fn column_name(name: &str) -> String { + table_name(name) +} + +fn column_name_from_id(table_def: &TableDef, col_id: ColId) -> String { + column_name( + table_def + .columns + .get(col_id.idx()) + .map(|def| &*def.name) + .unwrap_or("unknown_column"), + ) +} + +fn index_name(name: &str) -> String { + format!("`{}`", name.purple()) +} + +fn constraint_name(name: &str) -> String { + index_name(name) +} + +fn sequence_name(name: &str) -> String { + index_name(name) +} + +fn type_name(type_: &AlgebraicType) -> String { + format!("{}", fmt_algebraic_type(type_).to_string().green()) +} + +fn format_col_list(col_list: &ColList, table_def: &TableDef) -> Result { + let mut out = String::new(); + write!(&mut out, "[")?; + for (i, col) in col_list.iter().enumerate() { + let join = if i == 0 { "" } else { ", " }; + write!(&mut out, "{}{}", join, column_name_from_id(table_def, col))?; + } + write!(&mut out, "]")?; + Ok(out) +} + +#[cfg(test)] +mod tests { + #[test] + fn test_strip_ansi_escape_sequences() { + let input = "\x1b[31mHello, \x1b[32mworld!\x1b[0m"; + let expected = "Hello, world!"; + assert_eq!(super::strip_ansi_escape_codes(input), expected); + } +} diff --git a/crates/schema/src/def.rs b/crates/schema/src/def.rs index c43df56f361..f4020b9bb6c 100644 --- a/crates/schema/src/def.rs +++ b/crates/schema/src/def.rs @@ -122,6 +122,7 @@ pub struct ModuleDef { /// The row-level security policies. /// /// **Note**: Are only validated syntax-wise. + // TODO: for consistency, this should be changed to store a `RowLevelSecurityDef` instead. row_level_security_raw: HashMap, } diff --git a/smoketests/__init__.py b/smoketests/__init__.py index 883ce063df1..63254542403 100644 --- a/smoketests/__init__.py +++ b/smoketests/__init__.py @@ -191,6 +191,7 @@ def publish_module(self, domain=None, *, clear=True, capture_stderr=True): ) self.resolved_identity = re.search(r"identity: ([0-9a-fA-F]+)", publish_output)[1] self.database_identity = domain if domain is not None else self.resolved_identity + return publish_output @classmethod def reset_config(cls): diff --git a/smoketests/tests/auto_migration.py b/smoketests/tests/auto_migration.py index 69ec5e33586..5468947308f 100644 --- a/smoketests/tests/auto_migration.py +++ b/smoketests/tests/auto_migration.py @@ -1,6 +1,27 @@ from .. import Smoketest import sys import logging +import re + +# 7-bit C1 ANSI sequences +ansi_escape = re.compile( + r""" + \x1B # ESC + (?: # 7-bit C1 Fe (except CSI) + [@-Z\\-_] + | # or [ for CSI, followed by a control sequence + \[ + [0-?]* # Parameter bytes + [ -/]* # Intermediate bytes + [@-~] # Final byte + ) +""", + re.VERBOSE, +) + + +def strip_ansi_escape_codes(text: str) -> str: + return ansi_escape.sub("", text) class AddTableAutoMigration(Smoketest): @@ -25,7 +46,11 @@ class AddTableAutoMigration(Smoketest): } #[spacetimedb::table(name = point_mass)] +#[index(name = point_masses_by_mass, btree(columns = position))] pub struct PointMass { + #[primary_key] + #[auto_inc] + id: u64, mass: f64, /// This used to cause an error when check_compatible did not resolve types in a `ModuleDef`. 
position: Vector2, @@ -37,12 +62,80 @@ class AddTableAutoMigration(Smoketest): y: f64, } +#[spacetimedb::table(name = scheduled_table, scheduled(send_scheduled_message), public)] +pub struct ScheduledTable { + #[primary_key] + #[auto_inc] + scheduled_id: u64, + scheduled_at: spacetimedb::ScheduleAt, + text: String, +} + +#[spacetimedb::reducer] +fn send_scheduled_message(_ctx: &ReducerContext, arg: ScheduledTable) { + let _ = arg.text; + let _ = arg.scheduled_at; + let _ = arg.scheduled_id; +} + spacetimedb::filter!("SELECT * FROM person"); """ - MODULE_CODE_UPDATED = ( - MODULE_CODE - + """ + MODULE_CODE_UPDATED = """ +use spacetimedb::{log, ReducerContext, Table, SpacetimeType}; + +#[spacetimedb::table(name = person)] +pub struct Person { + name: String, +} + +#[spacetimedb::reducer] +pub fn add_person(ctx: &ReducerContext, name: String) { + ctx.db.person().insert(Person { name }); +} + +#[spacetimedb::reducer] +pub fn print_persons(ctx: &ReducerContext, prefix: String) { + for person in ctx.db.person().iter() { + log::info!("{}: {}", prefix, person.name); + } +} + +#[spacetimedb::table(name = point_mass, public)] // private -> public +// remove index +pub struct PointMass { + // remove primary_key and auto_inc + id: u64, + mass: f64, + /// This used to cause an error when check_compatible did not resolve types in a `ModuleDef`. + position: Vector2, +} + +#[derive(SpacetimeType, Clone, Copy)] +pub struct Vector2 { + x: f64, + y: f64, +} + +// TODO: once removing schedules is implemented, remove the schedule here. +#[spacetimedb::table(name = scheduled_table, scheduled(send_scheduled_message), public)] +pub struct ScheduledTable { + #[primary_key] + #[auto_inc] + scheduled_id: u64, + scheduled_at: spacetimedb::ScheduleAt, + text: String, +} + +#[spacetimedb::reducer] +fn send_scheduled_message(_ctx: &ReducerContext, arg: ScheduledTable) { + let _ = arg.text; + let _ = arg.scheduled_at; + let _ = arg.scheduled_id; +} + +spacetimedb::filter!("SELECT * FROM person"); + #[spacetimedb::table(name = book)] pub struct Book { isbn: String, @@ -61,8 +154,43 @@ class AddTableAutoMigration(Smoketest): } spacetimedb::filter!("SELECT * FROM book"); + +#[spacetimedb::table(name = parabolas)] +#[index(name = parabolas_by_b_c, btree(columns = [b, c]))] +pub struct Parabola { + #[primary_key] + #[auto_inc] + id: u64, + a: f64, + b: f64, + c: f64, +} """ - ) + + EXPECTED_MIGRATION_REPORT = """-------------- +Performed automatic migration +-------------- +- Removed index `point_mass_id_idx_btree` on columns [`id`] of table `point_mass` +- Removed unique constraint `point_mass_id_key` on columns [`id`] of table `point_mass` +- Removed auto-increment constraint `point_mass_id_seq` on column `id` of table `point_mass` +- Created table: `book` (private) + - Columns: + - `isbn`: String +- Created table: `parabolas` (private) + - Columns: + - `id`: U64 + - `a`: F64 + - `b`: F64 + - `c`: F64 + - Unique constraints: + - `parabolas_id_key` on [`id`] + - Indexes: + - `parabolas_id_idx_btree` on [`id`] + - Auto-increment constraints: + - `parabolas_id_seq` on `id` +- Created row level security policy: + `SELECT * FROM book` +- Changed access for table `point_mass` (private -> public)""" def assertSql(self, sql, expected): self.maxDiff = None @@ -75,11 +203,14 @@ def test_add_table_auto_migration(self): """This tests uploading a module with a schema change that should not require clearing the database.""" # Check the row-level SQL filter is created correctly - self.assertSql("SELECT sql FROM st_row_level_security", 
"""\ + self.assertSql( + "SELECT sql FROM st_row_level_security", + """\ sql ------------------------ "SELECT * FROM person" -""") +""", + ) logging.info("Initial publish complete") # initial module code is already published by test framework @@ -98,17 +229,28 @@ def test_add_table_auto_migration(self): ) self.write_module_code(self.MODULE_CODE_UPDATED) - self.publish_module(self.database_identity, clear=False) + output = self.publish_module(self.database_identity, clear=False) + output = strip_ansi_escape_codes(output) + + print("got output\n", output) + + # Remark: if this test ever fails mysteriously, + # try double-checking the pretty printing code for trailing spaces before newlines. + # Also make sure the pretty-printing is deterministic. + self.assertIn(self.EXPECTED_MIGRATION_REPORT, output) logging.info("Updated") # Check the row-level SQL filter is added correctly - self.assertSql("SELECT sql FROM st_row_level_security", """\ + self.assertSql( + "SELECT sql FROM st_row_level_security", + """\ sql ------------------------ - "SELECT * FROM person" "SELECT * FROM book" -""") + "SELECT * FROM person" +""", + ) self.logs(100)