diff --git a/crates/bench/src/bin/summarize.rs b/crates/bench/src/bin/summarize.rs index eb3040aeec9..7af14e2992f 100644 --- a/crates/bench/src/bin/summarize.rs +++ b/crates/bench/src/bin/summarize.rs @@ -1,5 +1,5 @@ //! Script to pack benchmark results into JSON files. -//! These are read by the benchmarks-viewer application: https://github.com/clockworklabs/benchmarks-viewer, +//! These are read by the benchmarks-viewer application: <https://github.com/clockworklabs/benchmarks-viewer>, //! which is used to generate reports on the benchmarks. //! See also: the github actions scripts that invoke this command, `SpacetimeDB/.github/workflows/benchmarks.yml` and `SpacetimeDB/.github/workflows/callgrind_benchmarks.yml`. use clap::{Parser, Subcommand}; @@ -110,7 +110,7 @@ mod criterion { /// /// Unfortunately, there is no published library for this, so we use the schema /// from `critcmp` under the MIT license: - /// https://github.com/BurntSushi/critcmp/blob/daaf0383c3981c98a6eaaef47142755e5bddb3c4/src/data.rs + /// <https://github.com/BurntSushi/critcmp/blob/daaf0383c3981c98a6eaaef47142755e5bddb3c4/src/data.rs> /// /// TODO(jgilles): update this if we update our Criterion version past 0.4. #[allow(unused)] diff --git a/crates/bindings-macro/src/lib.rs b/crates/bindings-macro/src/lib.rs index 1ebc56593c5..99a4a5dd5f1 100644 --- a/crates/bindings-macro/src/lib.rs +++ b/crates/bindings-macro/src/lib.rs @@ -112,11 +112,11 @@ pub fn reducer(args: StdTokenStream, item: StdTokenStream) -> StdTokenStream { } /// It turns out to be shockingly difficult to construct an [`Attribute`]. -/// That type is not [`Parse`], instead having two distinct methods +/// That type is not [`syn::parse::Parse`], instead having two distinct methods /// for parsing "inner" vs "outer" attributes. /// -/// We need this [`Attribute`] in [`table`] so that we can "pushnew" it -/// onto the end of a list of attributes. See comments within [`table`]. +/// We need this [`Attribute`] in [`crate::table()`] so that we can "pushnew" it +/// onto the end of a list of attributes. See comments within [`crate::table()`]. 
fn derive_table_helper_attr() -> Attribute { let source = quote!(#[derive(spacetimedb::__TableHelper)]); diff --git a/crates/cli/src/config.rs b/crates/cli/src/config.rs index 0165bcf9d80..1a8807a3eb5 100644 --- a/crates/cli/src/config.rs +++ b/crates/cli/src/config.rs @@ -28,14 +28,14 @@ pub struct ServerConfig { } impl ServerConfig { - /// Generate a new [`Table`] representing this [`ServerConfig`]. + /// Generate a new [`toml_edit::Table`] representing this [`ServerConfig`]. pub fn as_table(&self) -> toml_edit::Table { let mut table = toml_edit::Table::new(); Self::update_table(&mut table, self); table } - /// Update an existing [`Table`] with the values of a [`ServerConfig`]. + /// Update an existing [`toml_edit::Table`] with the values of a [`ServerConfig`]. pub fn update_table(edit: &mut toml_edit::Table, from: &ServerConfig) { set_table_opt_value(edit, NICKNAME_KEY, from.nickname.as_deref()); set_table_opt_value(edit, HOST_KEY, Some(&from.host)); diff --git a/crates/cli/src/subcommands/subscribe.rs b/crates/cli/src/subcommands/subscribe.rs index 6eb2039c787..40b6025752f 100644 --- a/crates/cli/src/subcommands/subscribe.rs +++ b/crates/cli/src/subcommands/subscribe.rs @@ -198,7 +198,7 @@ where ws.send(msg.into()).await } -/// Await the initial [`ServerMessage::SubscriptionUpdate`]. +/// Await the initial [`ws::ServerMessage::TransactionUpdateLight`] `|` [`ws::ServerMessage::TransactionUpdate`]. /// If `module_def` is `Some`, print a JSON representation to stdout. async fn await_initial_update(ws: &mut S, module_def: Option<&RawModuleDefV9>) -> anyhow::Result<()> where @@ -232,7 +232,7 @@ where Ok(()) } -/// Print `num` [`ServerMessage::TransactionUpdate`] messages as JSON. +/// Print `num` [`ws::ServerMessage::TransactionUpdateLight`] `|` [`ws::ServerMessage::TransactionUpdate`] messages as JSON. /// If `num` is `None`, keep going indefinitely. 
async fn consume_transaction_updates( ws: &mut S, diff --git a/crates/client-api/src/auth.rs b/crates/client-api/src/auth.rs index 7437e6ef8cf..345965dbb5c 100644 --- a/crates/client-api/src/auth.rs +++ b/crates/client-api/src/auth.rs @@ -65,7 +65,7 @@ impl SpacetimeCreds { /// The auth information in a request. /// -/// This is inserted as an extension by [`auth_middleware`]; make sure that's applied if you're making expecting +/// This is inserted as an extension by [`anon_auth_middleware`]; make sure that's applied if you're making expecting /// this to be present. #[derive(Clone)] pub struct SpacetimeAuth { diff --git a/crates/client-api/src/lib.rs b/crates/client-api/src/lib.rs index 0cb21d75f0e..1a3d00b737c 100644 --- a/crates/client-api/src/lib.rs +++ b/crates/client-api/src/lib.rs @@ -130,7 +130,7 @@ impl Host { /// Parameters for publishing a database. /// -/// See [`ControlStateDelegate::publish_database`]. +/// See [`ControlStateWriteAccess::publish_database`]. pub struct DatabaseDef { /// The [`Identity`] the database shall have. pub database_identity: Identity, diff --git a/crates/core/src/callgrind_flag.rs b/crates/core/src/callgrind_flag.rs index c1d90d951e1..e28fe24764a 100644 --- a/crates/core/src/callgrind_flag.rs +++ b/crates/core/src/callgrind_flag.rs @@ -13,10 +13,10 @@ use std::sync::atomic::{AtomicU32, Ordering}; /// Our solution is to wrap the code of interest in a function that is only called when the global flag is set. /// /// See: documentation on valgrind/callgrind/iai-callgrind's `toggle-collect` option (ctrl-f on these pages): -/// - https://github.com/iai-callgrind/iai-callgrind/ -/// - https://valgrind.org/docs/manual/cl-manual.html +/// - <https://github.com/iai-callgrind/iai-callgrind/> +/// - <https://valgrind.org/docs/manual/cl-manual.html> /// -/// We do NOT use the valgrind macros (or the crate https://github.com/2dav/crabgrind) because they are a pain to build. +/// We do NOT use the valgrind macros (or the crate <https://github.com/2dav/crabgrind>) because they are a pain to build. /// (Hours wasted here: 9.) 
/// Instead, we have a wrapper function which is only called when a global flag is set. /// diff --git a/crates/core/src/client/client_connection.rs b/crates/core/src/client/client_connection.rs index e4a5d21a8b1..59bd488ab4e 100644 --- a/crates/core/src/client/client_connection.rs +++ b/crates/core/src/client/client_connection.rs @@ -41,8 +41,8 @@ pub struct ClientConfig { pub protocol: Protocol, /// The client's desired (conditional) compression algorithm, if any. pub compression: Compression, - /// Whether the client prefers full [`TransactionUpdate`]s - /// rather than [`TransactionUpdateLight`]s on a successful update. + /// Whether the client prefers full [`spacetimedb_client_api_messages::websocket::TransactionUpdate`]s + /// rather than [`spacetimedb_client_api_messages::websocket::TransactionUpdateLight`]s on a successful update. // TODO(centril): As more knobs are added, make this into a bitfield (when there's time). pub tx_update_full: bool, } diff --git a/crates/core/src/db/datastore/locking_tx_datastore/committed_state.rs b/crates/core/src/db/datastore/locking_tx_datastore/committed_state.rs index 8e403f3429b..1f718432361 100644 --- a/crates/core/src/db/datastore/locking_tx_datastore/committed_state.rs +++ b/crates/core/src/db/datastore/locking_tx_datastore/committed_state.rs @@ -432,7 +432,7 @@ impl CommittedState { } /// When there's an index on `cols`, - /// returns an iterator over the [TableIndex] that yields all the [`RowRef`]s + /// returns an iterator over the [spacetimedb_table::table_index::TableIndex] that yields all the [`RowRef`]s /// that match the specified `range` in the indexed column. /// /// Matching is defined by `Ord for AlgebraicValue`. 
diff --git a/crates/core/src/db/datastore/locking_tx_datastore/datastore.rs b/crates/core/src/db/datastore/locking_tx_datastore/datastore.rs index 066e8256fb7..a695c5914f3 100644 --- a/crates/core/src/db/datastore/locking_tx_datastore/datastore.rs +++ b/crates/core/src/db/datastore/locking_tx_datastore/datastore.rs @@ -166,7 +166,7 @@ impl Locking { /// - Construct all the tables referenced by `snapshot`, computing their schemas /// either from known system table schemas or from `st_table` and friends. /// - Populate those tables with all rows in `snapshot`. - /// - Construct a [`HashMapBlobStore`] containing all the large blobs referenced by `snapshot`, + /// - Construct a [`spacetimedb_table::blob_store::HashMapBlobStore`] containing all the large blobs referenced by `snapshot`, /// with reference counts specified in `snapshot`. /// - Do [`CommittedState::reset_system_table_schemas`] to fix-up auto_inc IDs in the system tables, /// to ensure those schemas match what [`Self::bootstrap`] would install. diff --git a/crates/core/src/db/datastore/locking_tx_datastore/delete_table.rs b/crates/core/src/db/datastore/locking_tx_datastore/delete_table.rs index b2a241d76ac..7beffb60ad5 100644 --- a/crates/core/src/db/datastore/locking_tx_datastore/delete_table.rs +++ b/crates/core/src/db/datastore/locking_tx_datastore/delete_table.rs @@ -3,7 +3,7 @@ use spacetimedb_table::{ indexes::{max_rows_in_page, PageIndex, PageOffset, RowPointer, Size, SquashedOffset}, }; -/// A table recording which rows of a table in the [`CommittedState`] that have been deleted. +/// A table recording which rows of a table in the [`super::committed_state::CommittedState`] that have been deleted. pub struct DeleteTable { /// Keeps track of all the deleted row pointers. deleted: Vec>, @@ -27,7 +27,7 @@ impl DeleteTable { } } - /// Returns whether `ptr`, belonging to a table in [`CommittedState`], is recorded as deleted. 
+ /// Returns whether `ptr`, belonging to a table in [`super::committed_state::CommittedState`], is recorded as deleted. pub fn contains(&self, ptr: RowPointer) -> bool { let page_idx = ptr.page_index().idx(); match self.deleted.get(page_idx) { @@ -36,7 +36,7 @@ impl DeleteTable { } } - /// Marks `ptr`, belonging to a table in [`CommittedState`], as deleted. + /// Marks `ptr`, belonging to a table in [`super::committed_state::CommittedState`], as deleted. /// /// Returns `true` if `ptr` was not previously marked. pub fn insert(&mut self, ptr: RowPointer) -> bool { @@ -84,7 +84,7 @@ impl DeleteTable { } } - /// Un-marks `ptr`, belonging to a table in [`CommittedState`], as deleted. + /// Un-marks `ptr`, belonging to a table in [`super::committed_state::CommittedState`], as deleted. pub fn remove(&mut self, ptr: RowPointer) -> bool { let fixed_row_size = self.fixed_row_size; let page_idx = ptr.page_index().idx(); diff --git a/crates/core/src/db/datastore/system_tables.rs b/crates/core/src/db/datastore/system_tables.rs index 4c3d008ec41..487a5aaac63 100644 --- a/crates/core/src/db/datastore/system_tables.rs +++ b/crates/core/src/db/datastore/system_tables.rs @@ -8,7 +8,7 @@ //! - You will probably need to add a new ID type in `spacetimedb_primitives`, //! with trait implementations in `spacetimedb_sats::{typespace, de::impl, ser::impl}`. //! - Add it to [`system_tables`], and define a constant for its index there. -//! - Use [`st_fields_enum`] to define its column enum. +//! - Use `st_fields_enum` to define its column enum. //! - Register its schema in [`system_module_def`], making sure to call `validate_system_table` at the end of the function. use crate::db::relational_db::RelationalDB; @@ -216,7 +216,7 @@ st_fields_enum!(enum StIndexFields { }); // WARNING: For a stable schema, don't change the field names and discriminants. st_fields_enum!( - /// The fields that define the internal table [crate::db::relational_db::ST_SEQUENCES_NAME]. 
+ /// The fields that define the internal table [ST_SEQUENCE_NAME]. enum StSequenceFields { "sequence_id", SequenceId = 0, "sequence_name", SequenceName = 1, @@ -859,13 +859,13 @@ impl From for IdentityViaU256 { /// * `database_identity` is the [`Identity`] of the database. /// * `owner_identity` is the [`Identity`] of the owner of the database. /// * `program_kind` is the [`ModuleKind`] (currently always [`WASM_MODULE`]). -/// * `program_hash` is the [`Hash`] of the raw bytes of the (compiled) module. +/// * `program_hash` is the [`struct@Hash`] of the raw bytes of the (compiled) module. /// * `program_bytes` are the raw bytes of the (compiled) module. /// * `module_version` is the version of the module. /// /// | identity | owner_identity | program_kind | program_bytes | program_hash | module_version | /// |------------------|----------------|---------------|---------------|---------------------|----------------| -/// | <none> | <none> | 0 | <none> | <none> | <none> | +/// | `<none>` | `<none>` | 0 | `<none>` | `<none>` | `<none>` | #[derive(Clone, Debug, Eq, PartialEq, SpacetimeType)] #[sats(crate = spacetimedb_lib)] pub struct StModuleRow { @@ -898,9 +898,9 @@ pub fn read_identity_from_col(row: RowRef<'_>, col: impl StFields) -> Result, col: impl StFields) -> Result { Ok(Hash::from_u256(row.read_col(col.col_id())?)) } diff --git a/crates/core/src/db/datastore/traits.rs b/crates/core/src/db/datastore/traits.rs index 1f8b456f21a..3d3dc7cef0b 100644 --- a/crates/core/src/db/datastore/traits.rs +++ b/crates/core/src/db/datastore/traits.rs @@ -41,7 +41,7 @@ use spacetimedb_table::table::RowRef; /// nuanced example of how postgres deals with consistency guarantees at lower /// isolation levels. 
/// -/// - https://stackoverflow.com/questions/55254236/do-i-need-higher-transaction-isolation-to-make-constraints-work-reliably-in-post +/// - <https://stackoverflow.com/questions/55254236/do-i-need-higher-transaction-isolation-to-make-constraints-work-reliably-in-post> /// /// Thus from an application perspective, isolation anomalies may cause the data /// to be inconsistent or incorrect but will **not** cause it to violate the @@ -79,13 +79,13 @@ use spacetimedb_table::table::RowRef; /// However since then database researchers have identified and cataloged many /// more. See: /// -/// - https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-95-51.pdf -/// - https://pmg.csail.mit.edu/papers/adya-phd.pdf +/// - <https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-95-51.pdf> +/// - <https://pmg.csail.mit.edu/papers/adya-phd.pdf> /// /// See the following table of anomalies for a more complete list used as a /// reference for database implementers: /// -/// - https://github.com/ept/hermitage?tab=readme-ov-file#summary-of-test-results +/// - <https://github.com/ept/hermitage?tab=readme-ov-file#summary-of-test-results> /// /// The following anomalies are not part of the SQL standard, but are important: /// @@ -101,7 +101,7 @@ use spacetimedb_table::table::RowRef; /// PostgreSQL's documentation provides a good summary of the anomalies and /// isolation levels that it supports: /// -/// - https://www.postgresql.org/docs/current/transaction-iso.html +/// - <https://www.postgresql.org/docs/current/transaction-iso.html> /// /// IMPORTANT!!! The order of these isolation levels in the enum is significant /// because we often must check if one isolation level is higher (offers more diff --git a/crates/core/src/db/relational_db.rs b/crates/core/src/db/relational_db.rs index 96afa932824..c9d46580918 100644 --- a/crates/core/src/db/relational_db.rs +++ b/crates/core/src/db/relational_db.rs @@ -223,7 +223,7 @@ impl RelationalDB { /// /// If, however, a non-empty `history` was supplied, [`Metadata`] will /// already be be set. In this case, i.e. if either [`Self::metadata`] or - /// [`Self::program_bytes`] return a `Some` value, [`Self::set_initialized`] + /// [`StModuleRow::program_bytes`] return a `Some` value, [`Self::set_initialized`] /// should _not_ be called. 
/// /// Sometimes, one may want to obtain a database without a module (e.g. for @@ -652,7 +652,7 @@ impl RelationalDB { /// If `(tx_data, ctx)` should be appended to the commitlog, do so. /// /// Note that by this stage, - /// [`crate::db::datastore::locking_tx_datastore::committed_state::tx_consumes_offset`] + /// [`CommittedState::tx_consumes_offset`] /// has already decided based on the reducer and operations whether the transaction should be appended; /// this method is responsible only for reading its decision out of the `tx_data` /// and calling `durability.append_tx`. @@ -1012,14 +1012,14 @@ impl RelationalDB { self.inner.constraint_id_from_name(tx, constraint_name) } - /// Adds the index into the [ST_INDEXES_NAME] table + /// Adds the index into the [super::datastore::system_tables::ST_INDEX_NAME] table /// /// NOTE: It loads the data from the table into it before returning pub fn create_index(&self, tx: &mut MutTx, schema: IndexSchema, is_unique: bool) -> Result { self.inner.create_index_mut_tx(tx, schema, is_unique) } - /// Removes the [`TableIndex`] from the database by their `index_id` + /// Removes the [`super::datastore::system_tables::StIndexRow`] from the database by their `index_id` pub fn drop_index(&self, tx: &mut MutTx, index_id: IndexId) -> Result<(), DBError> { self.inner.drop_index_mut_tx(tx, index_id) } @@ -1167,7 +1167,7 @@ impl RelationalDB { self.inner.create_sequence_mut_tx(tx, sequence_schema) } - ///Removes the [Sequence] from database instance + ///Removes the [`super::datastore::system_tables::StSequenceRow`] from database instance pub fn drop_sequence(&self, tx: &mut MutTx, seq_id: SequenceId) -> Result<(), DBError> { self.inner.drop_sequence_mut_tx(tx, seq_id) } @@ -1401,7 +1401,7 @@ pub mod tests_utils { /// Create a [`TestDB`] which stores data in a local commitlog, /// initialized with pre-existing data from `history`. /// - /// [`TestHistory::from_txes`] is an easy-ish way to construct a non-empty [`History`]. 
+ /// [`TestHistory::from_txes`] is an easy-ish way to construct a non-empty [`durability::History`]. /// /// `expected_num_clients` is the expected size of the `connected_clients` return /// from [`RelationalDB::open`] after replaying `history`. diff --git a/crates/core/src/host/host_controller.rs b/crates/core/src/host/host_controller.rs index be4167a63a4..fface3f161a 100644 --- a/crates/core/src/host/host_controller.rs +++ b/crates/core/src/host/host_controller.rs @@ -669,7 +669,7 @@ struct Host { replica_ctx: Arc, /// Scheduler for repeating reducers, operating on the current `module`. scheduler: Scheduler, - /// Handle to the metrics collection task started via [`disk_monitor`]. + /// Handle to the metrics collection task started via [`storage_monitor`]. /// /// The task collects metrics from the `replica_ctx`, and so stays alive as long /// as the `replica_ctx` is live. The task is aborted when [`Host`] is dropped. diff --git a/crates/core/src/host/wasm_common/instrumentation.rs b/crates/core/src/host/wasm_common/instrumentation.rs index e2d3550e626..9759724d963 100644 --- a/crates/core/src/host/wasm_common/instrumentation.rs +++ b/crates/core/src/host/wasm_common/instrumentation.rs @@ -14,7 +14,7 @@ //! `noop` does nothing. //! `op` uses `std::time::Instant` and `std::time::Duration` to capture timings. //! Components which use the time-span interface will conditionally import one of the two modules, like: -//! ```no-run +//! ```no_run //! #[cfg(feature = "spacetimedb-wasm-instance-times)] //! use instrumentation::op as span; //! 
#[cfg(not(feature = "spacetimedb-wasm-instance-times)] diff --git a/crates/core/src/host/wasmtime/wasm_instance_env.rs b/crates/core/src/host/wasmtime/wasm_instance_env.rs index e9237907311..0862471bec9 100644 --- a/crates/core/src/host/wasmtime/wasm_instance_env.rs +++ b/crates/core/src/host/wasmtime/wasm_instance_env.rs @@ -251,7 +251,7 @@ impl WasmInstanceEnv { /// host call, to provide consistent error handling and instrumentation. /// /// This method should be used as opposed to a manual implementation, - /// as it helps with upholding the safety invariants of [`bindings_sys::call`]. + /// as it helps with upholding the safety invariants of `spacetimedb_bindings_sys::call`. /// /// Returns an error if writing `T` to `out` errors. fn cvt_ret( @@ -379,7 +379,7 @@ impl WasmInstanceEnv { /// Starts iteration on each row, as BSATN-encoded, of a table identified by `table_id`. /// /// On success, the iterator handle is written to the `out` pointer. - /// This handle can be advanced by [`row_iter_bsatn_advance`]. + /// This handle can be advanced by [`Self::row_iter_bsatn_advance`]. /// /// # Traps /// @@ -436,7 +436,7 @@ impl WasmInstanceEnv { /// which is unique for the module. /// /// On success, the iterator handle is written to the `out` pointer. - /// This handle can be advanced by [`row_iter_bsatn_advance`]. + /// This handle can be advanced by [`Self::row_iter_bsatn_advance`]. /// /// # Non-obvious queries /// @@ -783,7 +783,7 @@ impl WasmInstanceEnv { /// in WASM memory. /// /// This syscall will delete all the rows found by - /// [`datastore_index_scan_range_bsatn`] with the same arguments passed, + /// [`Self::datastore_index_scan_range_bsatn`] with the same arguments passed, /// including `prefix_elems`. /// See `datastore_index_scan_range_bsatn` for details. 
/// @@ -1089,7 +1089,7 @@ impl WasmInstanceEnv { } /// Logs at `level` a `message` message occuring in `filename:line_number` - /// with [`target`](target) being the module path at the `log!` invocation site. + /// with [`target`](https://docs.rs/log/latest/log/struct.Record.html#method.target) being the module path at the `log!` invocation site. /// /// These various pointers are interpreted lossily as UTF-8 strings with a corresponding `_len`. /// @@ -1107,8 +1107,6 @@ impl WasmInstanceEnv { /// - `target` is not NULL and `target_ptr[..target_len]` is not in bounds of WASM memory. /// - `filename` is not NULL and `filename_ptr[..filename_len]` is not in bounds of WASM memory. /// - `message` is not NULL and `message_ptr[..message_len]` is not in bounds of WASM memory. - /// - /// [target]: https://docs.rs/log/latest/log/struct.Record.html#method.target #[tracing::instrument(level = "trace", skip_all)] pub fn console_log( caller: Caller<'_, Self>, @@ -1154,7 +1152,7 @@ impl WasmInstanceEnv { /// Begins a timing span with `name = name_ptr[..name_len]`. /// - /// When the returned `ConsoleTimerId` is passed to [`console_timer_end`], + /// When the returned `ConsoleTimerId` is passed to [`Self::console_timer_end`], /// the duration between the calls will be printed to the module's logs. /// /// The `name` is interpreted lossily as a UTF-8 string. diff --git a/crates/core/src/messages/control_db.rs b/crates/core/src/messages/control_db.rs index f3c52cd22da..0c4c4f30bb6 100644 --- a/crates/core/src/messages/control_db.rs +++ b/crates/core/src/messages/control_db.rs @@ -32,7 +32,7 @@ pub struct Database { /// /// Valid only for as long as `initial_program` is valid. pub host_type: HostType, - /// [`Hash`] of the compiled module to initialize the database with. + /// [`struct@Hash`] of the compiled module to initialize the database with. /// /// Updating the database's module will **not** change this value. 
pub initial_program: Hash, diff --git a/crates/core/src/sql/ast.rs b/crates/core/src/sql/ast.rs index 4fd4fa4ab32..7145d7beaf7 100644 --- a/crates/core/src/sql/ast.rs +++ b/crates/core/src/sql/ast.rs @@ -348,7 +348,7 @@ fn infer_str_or_enum(field: Option<&AlgebraicType>, value: String) -> Result( tables: impl Clone + Iterator, field: Option<&'a AlgebraicType>, diff --git a/crates/core/src/startup.rs b/crates/core/src/startup.rs index e7f2b240981..847a7bdb32d 100644 --- a/crates/core/src/startup.rs +++ b/crates/core/src/startup.rs @@ -198,7 +198,7 @@ fn configure_rayon() { /// /// Other than entering the `rt`, this spawn handler behaves identitically to the default Rayon spawn handler, /// as documented in -/// https://docs.rs/rustc-rayon-core/0.5.0/rayon_core/struct.ThreadPoolBuilder.html#method.spawn_handler +/// <https://docs.rs/rustc-rayon-core/0.5.0/rayon_core/struct.ThreadPoolBuilder.html#method.spawn_handler> /// /// Having Rayon threads block on async operations is a code smell. /// We need to be careful that the Rayon threads never actually block, diff --git a/crates/core/src/subscription/execution_unit.rs b/crates/core/src/subscription/execution_unit.rs index c484b23b7fc..9cd1bd3d08e 100644 --- a/crates/core/src/subscription/execution_unit.rs +++ b/crates/core/src/subscription/execution_unit.rs @@ -69,7 +69,7 @@ enum EvalIncrPlan { Semijoin(IncrementalJoin), /// For single-table selects, store only one version of the plan, -/// which has a single source, an in-memory table, produced by [`query::query_to_mem_table`]. +/// which has a single source, an in-memory table, produced by [`ExecutionUnit`]. Select(QueryExpr), } @@ -88,7 +88,7 @@ pub struct ExecutionUnit { /// This is a direct compilation of the source query. eval_plan: QueryExpr, /// A version of the plan optimized for `eval_incr`, -/// whose source is an in-memory table, as if by [`query::to_mem_table`]. +/// whose source is an in-memory table. 
eval_incr_plan: EvalIncrPlan, } diff --git a/crates/core/src/subscription/query.rs b/crates/core/src/subscription/query.rs index 332dc9179b2..b23afe0b3db 100644 --- a/crates/core/src/subscription/query.rs +++ b/crates/core/src/subscription/query.rs @@ -25,7 +25,7 @@ pub const SUBSCRIBE_TO_ALL_QUERY: &str = "SELECT * FROM *"; /// rather than returning a new `SourceSet`. /// /// This is necessary when merging multiple SQL queries into a single query set, -/// as in [`crate::subscription::module_subscription_actor::ModuleSubscriptions::add_subscriber`]. +/// as in [`crate::subscription::module_subscription_actor::ModuleSubscriptions::add_multi_subscription`]. pub fn compile_read_only_queryset( relational_db: &RelationalDB, auth: &AuthCtx, @@ -88,11 +88,11 @@ pub fn compile_read_only_query(auth: &AuthCtx, tx: &Tx, input: &str) -> Result

SlimRawSlice { /// SAFETY: `self.ptr` and `self.len` /// must satisfy [`std::slice::from_raw_parts_mut`]'s requirements. /// That is, - /// * `self.ptr` must be [valid] for both reads and writes + /// * `self.ptr` must be *valid* for both reads and writes /// for `self.len * mem::size_of::()` many bytes, /// and it must be properly aligned. /// diff --git a/crates/execution/src/iter.rs b/crates/execution/src/iter.rs index a6da9a2a5f4..08f8444b045 100644 --- a/crates/execution/src/iter.rs +++ b/crates/execution/src/iter.rs @@ -197,7 +197,7 @@ impl<'a> Iter<'a> { } } -/// An iterator that always returns [RowRef]s +/// An iterator that always returns [Row]s pub enum RowRefIter<'a> { TableScan(TableScanIter<'a>), IndexScanPoint(IndexScanPointIter<'a>), @@ -353,7 +353,7 @@ impl<'a> Iterator for LeftDeepJoinIter<'a> { } /// A semijoin iterator. -/// Returns [RowRef]s if this is a right semijoin. +/// Returns [Row]s if this is a right semijoin. /// Returns [Tuple]s otherwise. pub enum SemiJoin { All(All), diff --git a/crates/physical-plan/src/plan.rs b/crates/physical-plan/src/plan.rs index 3748ce641da..36fb75a4eee 100644 --- a/crates/physical-plan/src/plan.rs +++ b/crates/physical-plan/src/plan.rs @@ -899,7 +899,7 @@ pub struct IxScan { pub arg: Sarg, } -/// An index [S]earch [arg]ument +/// An index \[S\]earch \[arg\]ument #[derive(Debug, Clone, PartialEq, Eq)] pub enum Sarg { Eq(ColId, AlgebraicValue), diff --git a/crates/primitives/src/attr.rs b/crates/primitives/src/attr.rs index 231ff39afeb..59600391dec 100644 --- a/crates/primitives/src/attr.rs +++ b/crates/primitives/src/attr.rs @@ -32,7 +32,7 @@ //! - PRIMARY_KEY_IDENTITY: Make it a `PRIMARY_KEY` + `IDENTITY` //! //! NOTE: We have [ConstraintKind] and [AttributeKind] intentionally semi-duplicated because -//! the first is for the [Constrains] that are per-table and the second is for markers of the column. +//! the first is for the `ConstraintSchema` that are per-table and the second is for markers of the column. 
//TODO: This needs a proper refactor, and use types for `column attributes` and `table tributes` /// The assigned constraint for a `Table` diff --git a/crates/sats/src/satn.rs b/crates/sats/src/satn.rs index f9a787190f5..c4908f8e356 100644 --- a/crates/sats/src/satn.rs +++ b/crates/sats/src/satn.rs @@ -481,9 +481,9 @@ struct PsqlNamedFormatter<'a, 'f> { f: EntryWrapper<'a, 'f, ','>, /// The index of the element. idx: usize, - /// If is not [Self::is_bytes_or_special] to control if we start with `(` + /// If is not [ProductType::is_special_tag] to control if we start with `(` start: bool, - /// For checking [Self::is_bytes_or_special] + /// For checking [ProductType::is_special_tag] ty: &'a ProductType, /// If the current element is a special type. is_special: bool, diff --git a/crates/schema/src/def.rs b/crates/schema/src/def.rs index 374a3ca5002..030388d586b 100644 --- a/crates/schema/src/def.rs +++ b/crates/schema/src/def.rs @@ -378,7 +378,7 @@ pub trait ModuleDefLookup: Sized + Debug + 'static { /// /// Validation rules: /// - The table name must be a valid identifier. -/// - The table's columns must be sorted according to [crate::db::ordering::canonical_ordering]. +/// - The table's columns must be sorted according to the [default element ordering](spacetimedb_lib::db::default_element_ordering). /// - The table's indexes, constraints, and sequences must be sorted by their keys. /// - The table's column types may refer only to types in the containing DatabaseDef's typespace. /// - The table's column names must be unique. @@ -387,14 +387,14 @@ pub trait ModuleDefLookup: Sized + Debug + 'static { pub struct TableDef { /// The name of the table. /// Unique within a module, acts as the table's identifier. - /// Must be a valid [crate::db::identifier::Identifier]. + /// Must be a valid [Identifier]. pub name: Identifier, /// A reference to a `ProductType` containing the columns of this table. /// This is the single source of truth for the table's columns. 
/// All elements of the `ProductType` must have names. /// - /// Like all types in the module, this must have the [default element ordering](crate::db::default_element_ordering), UNLESS a custom ordering is declared via `ModuleDef.misc_exports` for this type. + /// Like all types in the module, this must have the [default element ordering](spacetimedb_lib::db::default_element_ordering), UNLESS a custom ordering is declared via `ModuleDef.misc_exports` for this type. pub product_type_ref: AlgebraicTypeRef, /// The primary key of the table, if present. Must refer to a valid column. diff --git a/crates/schema/src/type_for_generate.rs b/crates/schema/src/type_for_generate.rs index 8ebb5ee2db0..46bf12f9b3b 100644 --- a/crates/schema/src/type_for_generate.rs +++ b/crates/schema/src/type_for_generate.rs @@ -305,7 +305,7 @@ pub enum AlgebraicTypeUse { Ref(AlgebraicTypeRef), /// The type of array values where elements are of a base type `elem_ty`. - /// Values [`AlgebraicValue::Array(array)`](crate::AlgebraicValue::Array) will have this type. + /// Values [`AlgebraicValue::Array(array)`](spacetimedb_sats::AlgebraicValue::Array) will have this type. Array(Arc), /// A standard structural option type. diff --git a/crates/sdk/src/client_cache.rs b/crates/sdk/src/client_cache.rs index 46fc31ce013..6f62e24147a 100644 --- a/crates/sdk/src/client_cache.rs +++ b/crates/sdk/src/client_cache.rs @@ -411,7 +411,7 @@ impl TableHandle { .into_iter() } - /// See [`DbContextImpl::queue_mutation`]. + /// See [`super::db_connection::DbContextImpl::queue_mutation`]. 
fn queue_mutation(&self, mutation: PendingMutation) { self.pending_mutations.unbounded_send(mutation).unwrap(); } diff --git a/crates/sdk/src/credentials.rs b/crates/sdk/src/credentials.rs index bdef761048c..048ec2e1150 100644 --- a/crates/sdk/src/credentials.rs +++ b/crates/sdk/src/credentials.rs @@ -65,12 +65,12 @@ struct Credentials { } impl File { - /// Get a handle on a file which stores a SpacetimeDB [`Identity`] and its private access token. + /// Get a handle on a file which stores a SpacetimeDB [`spacetimedb_lib::Identity`] and its private access token. /// /// This method does not create the file or check that it exists. /// /// Distinct applications running as the same user on the same machine - /// may share [`Identity`]/token pairs by supplying the same `key`. + /// may share [`spacetimedb_lib::Identity`]/token pairs by supplying the same `key`. /// Users who desire distinct credentials for their application /// should supply a unique `key` per application. /// diff --git a/crates/sdk/src/spacetime_module.rs b/crates/sdk/src/spacetime_module.rs index bd1c3a71106..de20c326f6d 100644 --- a/crates/sdk/src/spacetime_module.rs +++ b/crates/sdk/src/spacetime_module.rs @@ -52,7 +52,7 @@ pub trait SpacetimeModule: Send + Sync + 'static { /// Return type of [`crate::DbContext::set_reducer_flags`]. type SetReducerFlags: InModule + Send + 'static; - /// Parsed and typed analogue of [`crate::ws::DatabaseUpdate`]. + /// Parsed and typed analogue of [`ws::DatabaseUpdate`]. type DbUpdate: DbUpdate; /// The result of applying `Self::DbUpdate` to the client cache. @@ -67,7 +67,7 @@ pub trait SpacetimeModule: Send + Sync + 'static { } /// Implemented by the autogenerated `DbUpdate` type, -/// which is a parsed and typed analogue of [`crate::ws::DatabaseUpdate`]. +/// which is a parsed and typed analogue of [`spacetimedb_client_api_messages::websocket::DatabaseUpdate`]. 
pub trait DbUpdate: TryFrom, Error = crate::Error> + InModule + Send + 'static where diff --git a/crates/sdk/tests/connect_disconnect_client/src/module_bindings/mod.rs b/crates/sdk/tests/connect_disconnect_client/src/module_bindings/mod.rs index 99dce4ce3df..68bdb9d44e3 100644 --- a/crates/sdk/tests/connect_disconnect_client/src/module_bindings/mod.rs +++ b/crates/sdk/tests/connect_disconnect_client/src/module_bindings/mod.rs @@ -256,7 +256,7 @@ impl DbConnection { /// Returns an error if the connection is disconnected. /// If the disconnection in question was normal, /// i.e. the result of a call to [`__sdk::DbContext::disconnect`], - /// the returned error will be downcastable to [`__sdk::DisconnectedError`]. + /// the returned error will be downcastable to [`__sdk::Error::Disconnected`]. /// /// This is a low-level primitive exposed for power users who need significant control over scheduling. /// Most applications should call [`Self::frame_tick`] each frame @@ -270,7 +270,7 @@ impl DbConnection { /// Returns an error if the connection is disconnected. /// If the disconnection in question was normal, /// i.e. the result of a call to [`__sdk::DbContext::disconnect`], - /// the returned error will be downcastable to [`__sdk::DisconnectedError`]. + /// the returned error will be downcastable to [`__sdk::Error::Disconnected`]. /// /// This is a low-level primitive exposed for power users who need significant control over scheduling. /// Most applications should call [`Self::run_threaded`] to spawn a thread @@ -284,7 +284,7 @@ impl DbConnection { /// Returns an error if the connection is disconnected. /// If the disconnection in question was normal, /// i.e. the result of a call to [`__sdk::DbContext::disconnect`], - /// the returned error will be downcastable to [`__sdk::DisconnectedError`]. + /// the returned error will be downcastable to [`__sdk::Error::Disconnected`]. 
/// /// This is a low-level primitive exposed for power users who need significant control over scheduling. /// Most applications should call [`Self::run_async`] to run an `async` loop @@ -535,7 +535,7 @@ impl __sdk::DbContext for ReducerEventContext { impl __sdk::ReducerEventContext for ReducerEventContext {} -/// An [`__sdk::DbContext`] passed to [`__sdk::SubscriptionBuilder::on_applied`] and [`SubscriptionHandle::unsubscribe_then`] callbacks. +/// An [`__sdk::DbContext`] passed to subscription callbacks. pub struct SubscriptionEventContext { /// Access to tables defined by the module via extension traits implemented for [`RemoteTables`]. pub db: RemoteTables, diff --git a/crates/sdk/tests/test-client/src/module_bindings/mod.rs b/crates/sdk/tests/test-client/src/module_bindings/mod.rs index 269b3bc4d39..2859861fa45 100644 --- a/crates/sdk/tests/test-client/src/module_bindings/mod.rs +++ b/crates/sdk/tests/test-client/src/module_bindings/mod.rs @@ -3436,7 +3436,7 @@ impl DbConnection { /// Returns an error if the connection is disconnected. /// If the disconnection in question was normal, /// i.e. the result of a call to [`__sdk::DbContext::disconnect`], - /// the returned error will be downcastable to [`__sdk::DisconnectedError`]. + /// the returned error will be downcastable to [`__sdk::Error::Disconnected`]. /// /// This is a low-level primitive exposed for power users who need significant control over scheduling. /// Most applications should call [`Self::frame_tick`] each frame @@ -3450,7 +3450,7 @@ impl DbConnection { /// Returns an error if the connection is disconnected. /// If the disconnection in question was normal, /// i.e. the result of a call to [`__sdk::DbContext::disconnect`], - /// the returned error will be downcastable to [`__sdk::DisconnectedError`]. + /// the returned error will be downcastable to [`__sdk::Error::Disconnected`]. /// /// This is a low-level primitive exposed for power users who need significant control over scheduling. 
/// Most applications should call [`Self::run_threaded`] to spawn a thread @@ -3464,7 +3464,7 @@ impl DbConnection { /// Returns an error if the connection is disconnected. /// If the disconnection in question was normal, /// i.e. the result of a call to [`__sdk::DbContext::disconnect`], - /// the returned error will be downcastable to [`__sdk::DisconnectedError`]. + /// the returned error will be downcastable to [`__sdk::Error::Disconnected`]. /// /// This is a low-level primitive exposed for power users who need significant control over scheduling. /// Most applications should call [`Self::run_async`] to run an `async` loop @@ -3715,7 +3715,7 @@ impl __sdk::DbContext for ReducerEventContext { impl __sdk::ReducerEventContext for ReducerEventContext {} -/// An [`__sdk::DbContext`] passed to [`__sdk::SubscriptionBuilder::on_applied`] and [`SubscriptionHandle::unsubscribe_then`] callbacks. +/// An [`__sdk::DbContext`] passed to the [`__sdk::SubscriptionBuilder::on_applied`] callback. pub struct SubscriptionEventContext { /// Access to tables defined by the module via extension traits implemented for [`RemoteTables`]. pub db: RemoteTables, diff --git a/crates/snapshot/src/lib.rs b/crates/snapshot/src/lib.rs index 41341e5cfac..a59a85697ac 100644 --- a/crates/snapshot/src/lib.rs +++ b/crates/snapshot/src/lib.rs @@ -235,7 +235,7 @@ impl Snapshot { /// /// `hash` must be the content hash of `page`, and must be stored in `page.unmodified_hash()`. /// - /// Returns the `hash` for convenient use with [`Iter::map`] in [`Self::write_table`]. + /// Returns the `hash` for convenient use with [`core::slice::Iter::map`] in [`Self::write_table`]. /// /// If the `prev_snapshot` is supplied, this function will attempt to hardlink the page's on-disk object /// from that previous snapshot into `object_repo` rather than creating a fresh object.
@@ -606,7 +606,7 @@ impl SnapshotRepository { /// Given `snapshot_dir` as the result of [`Self::snapshot_dir_path`], /// get the [`DirTrie`] which contains serialized objects (pages and large blobs) - /// referenced by the [`Snapshot`] contained in the [`Self::snapshot_file_path`]. + /// referenced by the [`Snapshot`] contained in the [`SnapshotDirPath::snapshot_file`]. /// /// Consequences are unspecified if this method is called from outside this crate /// on a non-existent, locked or incomplete `snapshot_dir`. @@ -685,7 +685,7 @@ impl SnapshotRepository { /// Open a repository at `root`, failing if the `root` doesn't exist or isn't a directory. /// - /// Calls [`Path::is_dir`] and requires that the result is `true`. + /// Calls [`std::path::Path::is_dir`] and requires that the result is `true`. /// See that method for more detailed preconditions on this function. pub fn open(root: SnapshotsPath, database_identity: Identity, replica_id: u64) -> Result { if !root.is_dir() { diff --git a/crates/sqltest/src/main.rs b/crates/sqltest/src/main.rs index 936a11a944b..7674bbb8d40 100644 --- a/crates/sqltest/src/main.rs +++ b/crates/sqltest/src/main.rs @@ -327,7 +327,7 @@ async fn update_test_files(files: Vec, engine: DbType, format: bool) -> Ok(()) } -/// Different from [`sqllogictest::update_test_file`], we re-implement it here to print some +/// Different from [`sqllogictest`], we re-implement logic here to print some /// progress information. async fn update_test_file>( out: &mut T, diff --git a/crates/table/src/blob_store.rs b/crates/table/src/blob_store.rs index 52078072ba6..6ebbb21a8d8 100644 --- a/crates/table/src/blob_store.rs +++ b/crates/table/src/blob_store.rs @@ -77,7 +77,7 @@ pub trait BlobStore: Sync { /// Insert `bytes` into the blob store. /// /// Returns the content address of `bytes` a `BlobHash` - /// which can be used in [`retrieve_blob`] to fetch it. + /// which can be used in [`Self::retrieve_blob`] to fetch it. 
fn insert_blob(&mut self, bytes: &[u8]) -> BlobHash; /// Insert `hash` referring to `bytes` and mark its refcount as `uses`. diff --git a/crates/table/src/indexes.rs b/crates/table/src/indexes.rs index 8d58adb699f..30c42514d64 100644 --- a/crates/table/src/indexes.rs +++ b/crates/table/src/indexes.rs @@ -96,7 +96,7 @@ impl Mul for Size { } } -/// An offset into a [`Page`]. +/// An offset into a [`crate::page::Page`]. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Add, Sub, bytemuck::NoUninit)] #[repr(transparent)] #[cfg_attr(any(test, feature = "proptest"), derive(proptest_derive::Arbitrary))] @@ -208,7 +208,7 @@ pub fn max_rows_in_page(fixed_row_size: Size) -> usize { PageOffset::PAGE_END.idx().div_ceil(fixed_row_size.len()) } -/// The index of a [`Page`] within a [`Pages`]. +/// The index of a [`crate::page::Page`] within a [`crate::pages::Pages`]. #[cfg_attr(any(test, feature = "proptest"), derive(proptest_derive::Arbitrary))] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] pub struct PageIndex(#[cfg_attr(any(test, feature = "proptest"), proptest(strategy = "0..MASK_PI"))] pub u64); diff --git a/crates/table/src/layout.rs b/crates/table/src/layout.rs index 9c9b02b0d77..6197ae2995a 100644 --- a/crates/table/src/layout.rs +++ b/crates/table/src/layout.rs @@ -104,7 +104,7 @@ pub trait HasLayout { /// Supporting recursive types remains a TODO(future-work). /// Note that the previous Spacetime datastore did not support recursive types in tables. /// -/// - Scalar types (`ty.is_scalar()`) are separated into [`PrimitveType`] (atomically-sized types like integers). +/// - Scalar types (`ty.is_scalar()`) are separated into [`PrimitiveType`] (atomically-sized types like integers). /// - Variable length types are separated into [`VarLenType`] (strings, arrays, and maps). /// This separation allows cleaner pattern-matching, e.g. 
in `HasLayout::layout`, /// where `VarLenType` returns a static ref to [`VAR_LEN_REF_LAYOUT`], diff --git a/crates/table/src/page.rs b/crates/table/src/page.rs index a734199feb3..3da74d75c8b 100644 --- a/crates/table/src/page.rs +++ b/crates/table/src/page.rs @@ -24,12 +24,12 @@ //! but the value is not required to be logically meaningful, //! and no code may depend on the data within it to uphold any invariants. //! E.g. an unallocated [`VarLenGranule`] within a page stores valid-unconstrained bytes, -//! because the bytes are either 0 from the initial [`alloc_zeroed`] of the page, +//! because the bytes are either 0 from the initial [`std::alloc::alloc_zeroed`] of the page, //! or contain stale data from a previously freed [`VarLenGranule`]. //! //! - `unused` means that it is safe to overwrite a block of memory without cleaning up its previous value. //! -//! See the post [Two Kinds of Invariants: Safety and Validity][ralf_safe_valid] +//! See the post [Two Kinds of Invariants: Safety and Validity][ralfj_safe_valid] //! for a discussion on safety and validity invariants. use super::{ @@ -680,7 +680,7 @@ impl<'page> VarView<'page> { /// and returns a [`VarLenRef`] pointing to that granule. /// /// The granule is not initialized by this method, and contains valid-unconstrained bytes. - /// It is the caller's responsibility to initialize it with a [`BlobHash`](super::blob_hash::BlobHash). + /// It is the caller's responsibility to initialize it with a [`BlobHash`](super::blob_store::BlobHash). #[cold] fn alloc_blob_hash(&mut self) -> Result { // Var-len hashes are 32 bytes, which fits within a single granule. @@ -847,7 +847,7 @@ impl<'page> VarView<'page> { unsafe { get_ref(self.var_row_data, self.adjuster()(offset)) } } - /// Frees the blob pointed to by the [`BlobHash`] stored in the granule at `offset`. + /// Frees the blob pointed to by the [`crate::blob_store::BlobHash`] stored in the granule at `offset`. /// /// Panics when `offset` is NULL. 
/// @@ -993,7 +993,7 @@ fn assert_alignment(ptr: *const Byte) { ); } -/// Returns a reference to the [`T`] pointed to at by `offset`. +/// Returns a reference to the `T` pointed to at by `offset`. /// /// # Safety /// @@ -1008,7 +1008,7 @@ pub unsafe fn get_ref(row_data: &Bytes, offset: PageOffset) -> &T { unsafe { &*ptr } } -/// Returns a mutable reference to the [`T`] pointed to at by `offset`. +/// Returns a mutable reference to the `T` pointed to at by `offset`. /// /// # Safety /// @@ -1181,7 +1181,7 @@ impl Page { /// - Padding bytes within the fixed-length portion of the rows. /// - [`VarLenRef`] pointer-like portions of rows. /// - Unused trailing parts of partially-filled [`VarLenGranule`]s. - /// - [`VarLenGranule`]s used to store [`BlobHash`]es. + /// - [`VarLenGranule`]s used to store [`super::blob_store::BlobHash`]es. /// /// Note that large blobs themselves are not counted. /// The caller should obtain a count of the bytes used by large blobs @@ -1319,7 +1319,7 @@ impl Page { /// so always check `Self::has_space_for_row` before calling. /// /// This method is provided for testing the page store directly; - /// higher-level codepaths are expected to use [`crate::bflatn::ser::write_av_to_page`], + /// higher-level codepaths are expected to use [`crate::bflatn_to::write_row_to_page`], /// which performs similar operations to this method, /// but handles rollback on failure appropriately. /// diff --git a/crates/table/src/pointer_map.rs b/crates/table/src/pointer_map.rs index 7659ab76499..696eced948b 100644 --- a/crates/table/src/pointer_map.rs +++ b/crates/table/src/pointer_map.rs @@ -1,7 +1,7 @@ //! Provides [`PointerMap`] that deals with the //! association of a [`RowHash`] to a [`RowPointer`] -//! through operations [`insert`](self::PointerMap::insert) -//! and [`delete`](PointerMap::delete). +//! through operations [`insert`](PointerMap::insert) +//! and [`remove`](PointerMap::remove). //! //! These associations can then be queried through //! 
`map.pointers_for(hash)` and `map.pointers_for_mut(hash)`. diff --git a/crates/table/src/read_column.rs b/crates/table/src/read_column.rs index 1295d01e621..aea983c1e24 100644 --- a/crates/table/src/read_column.rs +++ b/crates/table/src/read_column.rs @@ -96,7 +96,7 @@ pub unsafe trait ReadColumn: Sized { /// A few highlights are included here: /// /// - Variable-length columns, i.e. `AlgebraicType::String`, `AlgebraicType::Array` and `AlgebraicType::Map` - /// are stored within the row as [`crate::var_len::VarLenRef`s], + /// are stored within the row as [`crate::var_len::VarLenRef`]'s, /// which refer to an intrusive linked list of 62-byte "granules", /// allocated separately in a space starting from the end of the page. /// Strings are stored as UTF-8 bytes; all other var-len types are stored as BSATN-encoded bytes. diff --git a/crates/table/src/row_type_visitor.rs b/crates/table/src/row_type_visitor.rs index dec882b8631..ea9379e9ff2 100644 --- a/crates/table/src/row_type_visitor.rs +++ b/crates/table/src/row_type_visitor.rs @@ -1,4 +1,4 @@ -//! A [`VarLenMembers`] visitor for [`AlgebraicType`], +//! A [`VarLenMembers`] visitor for [`spacetimedb_sats::AlgebraicType`], //! supporting any non-recursive `AlgebraicType`, //! including sums and products. //! diff --git a/crates/table/src/static_layout.rs b/crates/table/src/static_layout.rs index 81b626adf17..26b7a357ac6 100644 --- a/crates/table/src/static_layout.rs +++ b/crates/table/src/static_layout.rs @@ -16,7 +16,7 @@ //! which both traverse a [`RowTypeLayout`] and dispatch on the type of each column. //! //! For example, to serialize a row of type `(u64, u64, u32, u64)`, -//! [`bflatn_from`] will do four dispatches, three calls to `serialize_u64` and one to `serialize_u32`. +//! [`crate::bflatn_from`] will do four dispatches, three calls to `serialize_u64` and one to `serialize_u32`. //! This module will make 2 `memcpy`s (or actually, `<[u8]>::copy_from_slice`s): //! 
one of 20 bytes to copy the leading `(u64, u64, u32)`, which contains no padding, //! and then one of 8 bytes to copy the trailing `u64`, skipping over 4 bytes of padding in between. @@ -168,7 +168,7 @@ impl StaticLayout { /// Construct a `StaticLayout` for converting BFLATN rows of `row_type` <-> BSATN. /// /// Returns `None` if `row_type` contains a column which does not have a constant length in BSATN, - /// either a [`VarLenType`] + /// either a [`crate::layout::VarLenType`] /// or a [`SumTypeLayout`] whose variants do not have the same "live" unpadded length. pub fn for_row_type(row_type: &RowTypeLayout) -> Option { if !row_type.layout().fixed { diff --git a/crates/table/src/table.rs b/crates/table/src/table.rs index 43157bfa36d..31d260e266a 100644 --- a/crates/table/src/table.rs +++ b/crates/table/src/table.rs @@ -90,7 +90,7 @@ pub struct Table { pub row_count: u64, /// Stores the sum total number of bytes that each blob object in the table occupies. /// - /// Note that the [`HashMapBlobStore`] does ref-counting and de-duplication, + /// Note that the [`crate::blob_store::HashMapBlobStore`] does ref-counting and de-duplication, /// but this sum will count an object each time its hash is mentioned, rather than just once. blob_store_bytes: BlobNumBytes, /// Indicates whether this is a scheduler table or not. @@ -312,7 +312,7 @@ impl Table { /// without inserting it logically into the pointer map. /// /// This is useful when we need to insert a row temporarily to get back a `RowPointer`. - /// A call to this method should be followed by a call to [`delete_internal_skip_pointer_map`]. + /// A call to this method should be followed by a call to [`Self::delete_internal_skip_pointer_map`]. pub fn insert_physically_pv<'a>( &'a mut self, blob_store: &'a mut dyn BlobStore, @@ -345,7 +345,7 @@ impl Table { /// This does not check for set semantic or unique constraints. /// /// This is also useful when we need to insert a row temporarily to get back a `RowPointer`. 
- /// In this case, A call to this method should be followed by a call to [`delete_internal_skip_pointer_map`]. + /// In this case, a call to this method should be followed by a call to [`Self::delete_internal_skip_pointer_map`]. /// /// When `row` is not valid BSATN at the table's row type, /// an error is returned and there will be nothing for the caller to revert. @@ -1084,7 +1084,7 @@ impl Table { ret } - /// Returns an iterator over all the rows of `self`, yielded as [`RefRef`]s. + /// Returns an iterator over all the rows of `self`, yielded as [`RowRef`]s. pub fn scan_rows<'a>(&'a self, blob_store: &'a dyn BlobStore) -> TableScanIter<'a> { TableScanIter { current_page: None, // Will be filled by the iterator. @@ -1201,7 +1201,7 @@ impl Table { /// /// - Unallocated space within pages. /// - Per-page overhead (e.g. page headers). - /// - Table overhead (e.g. the [`RowTypeLayout`], [`PointerMap`], [`Schema`] &c). + /// - Table overhead (e.g. the [`RowTypeLayout`], [`PointerMap`], [`TableSchema`] &c). /// - Indexes. /// - Large blobs in the [`BlobStore`]. /// @@ -1245,7 +1245,7 @@ impl Table { /// which is intended to capture the number of live user-supplied bytes, /// not including representational overhead. /// This is distinct from the BFLATN size measured by [`Self::bytes_used_by_rows`]. - /// See the trait [`crate::btree_index::KeySize`] for specifics on the metric measured. + /// See the trait [`crate::table_index::KeySize`] for specifics on the metric measured. pub fn bytes_used_by_index_keys(&self) -> u64 { self.indexes.values().map(|idx| idx.num_key_bytes()).sum() } @@ -1611,7 +1611,7 @@ impl Table { } /// A combined table and index, -/// allowing direct extraction of a [`IndexScanIter`]. +/// allowing direct extraction of an [`IndexScanPointIter`]/[`IndexScanRangeIter`].
#[derive(Copy, Clone)] pub struct TableAndIndex<'a> { table: &'a Table, diff --git a/crates/table/src/table_index/mod.rs b/crates/table/src/table_index/mod.rs index 29edc4f0f15..d60f790f41d 100644 --- a/crates/table/src/table_index/mod.rs +++ b/crates/table/src/table_index/mod.rs @@ -913,7 +913,7 @@ impl TypedIndex { } } -/// An index on a set of [`ColId`]s of a table. +/// An index on a [`ColList`], i.e., a set of columns, of a table. #[derive(Debug, PartialEq, Eq)] pub struct TableIndex { /// The actual index, specialized for the appropriate key type. diff --git a/crates/table/src/table_index/unique_direct_index.rs b/crates/table/src/table_index/unique_direct_index.rs index a560afed306..e39b3771728 100644 --- a/crates/table/src/table_index/unique_direct_index.rs +++ b/crates/table/src/table_index/unique_direct_index.rs @@ -208,7 +208,7 @@ impl UniqueDirectIndex { } } -/// An iterator over the potential value in a [`UniqueDirectMap`] for a given key. +/// An iterator over the potential [`RowPointer`] value for a given key. pub struct UniqueDirectIndexPointIter { iter: IntoIter, } diff --git a/crates/table/src/var_len.rs b/crates/table/src/var_len.rs index 3c260ddec63..056b2cdc02d 100644 --- a/crates/table/src/var_len.rs +++ b/crates/table/src/var_len.rs @@ -148,7 +148,7 @@ impl VarLenGranuleHeader { /// but currently have no use for them. const LEN_BITMASK: u16 = (1 << Self::LEN_BITS) - 1; - /// The [`LEN_BITMASK`] will preserve all granule lengths possible. + /// The [`Self::LEN_BITMASK`] will preserve all granule lengths possible. #[allow(clippy::assertions_on_constants)] const _ASSERT_LEN_BITMASK_FITS_ALL_POSSIBLE_GRANULE_LENGTHS: () = assert!(VarLenGranule::DATA_SIZE <= Self::LEN_BITMASK as usize); diff --git a/crates/vm/src/expr.rs b/crates/vm/src/expr.rs index 499c9bb936c..4f508ae13af 100644 --- a/crates/vm/src/expr.rs +++ b/crates/vm/src/expr.rs @@ -620,8 +620,6 @@ impl SourceExpr { /// If `self` refers to a [`DbTable`], get a reference to it.
/// /// Returns `None` if `self` refers to a [`MemTable`]. - /// In that case, retrieving the [`MemTable`] requires inspecting the plan's corresponding [`SourceSet`] - /// via [`SourceSet::take_mem_table`] or [`SourceSet::take_table`]. pub fn get_db_table(&self) -> Option<&DbTable> { if let SourceExpr::DbTable(db_table) = self { Some(db_table) @@ -1753,7 +1751,7 @@ impl QueryExpr { /// which is fundamentally limited to operate on the first expr. /// Note that we still get to optimize incremental joins, because we first optimize the original query /// with [`DbTable`] sources, which results in an [`IndexJoin`] - /// then we replace the sources with [`MemTable`]s and go back to a [`JoinInner`] with `semi: true`. + /// then we replace the sources with [`MemTable`]s and go back to a [`Query::JoinInner`] with `semi: true`. /// - The `Project` must immediately follow the `JoinInner`, with no intervening exprs. /// Future work could search through intervening exprs to detect that the RHS table is unused. /// - The LHS/source table must be a [`DbTable`], not a [`MemTable`]. diff --git a/crates/vm/src/lib.rs b/crates/vm/src/lib.rs index a443a0ee0fd..02f023259a4 100644 --- a/crates/vm/src/lib.rs +++ b/crates/vm/src/lib.rs @@ -1,29 +1,3 @@ -//! Abstract Virtual Machine for execution of end-user logic -//! -//! It optimizes the code & include a more general "query planner" -//! -//! The execution is split in 3 "phases": -//! -//! 1- AST formation -//! -//! Generate the AST (that could be invalid according to the semantics). -//! -//! This step is outside the [vm] and can be done, for example, by the SQL layer. -//! -//! Use [dsl] to build the [expr:Expr] that build the AST. -//! -//! 2- AST validation -//! -//! Calling [eval::optimize] verify the code has the correct semantics (ie: It checks types, schemas, functions are valid, etc.), -//! and "desugar" the code in a more optimal form for later execution. -//! -//! 
This build [expr::Expr] that is what could be stored in the database, ie: Is like bytecode. -//! -//! 3- Execution -//! -//! Run the AST build from [expr::Expr]. It assumes is correct. -//! - pub use spacetimedb_lib::operator; pub mod errors; diff --git a/crates/vm/src/program.rs b/crates/vm/src/program.rs index d5166eb8edf..0b1d67bd548 100644 --- a/crates/vm/src/program.rs +++ b/crates/vm/src/program.rs @@ -1,6 +1,4 @@ //! Definition for a `Program` to run code. -//! -//! It carries an [EnvDb] with the functions, idents, types. use crate::errors::ErrorVm; use crate::expr::{Code, CrudExpr, SourceSet}; diff --git a/crates/vm/src/rel_ops.rs b/crates/vm/src/rel_ops.rs index b938b3a9629..0a592e7c565 100644 --- a/crates/vm/src/rel_ops.rs +++ b/crates/vm/src/rel_ops.rs @@ -10,9 +10,9 @@ pub trait RelOps<'a> { /// Advances the `iterator` and returns the next [RelValue]. fn next(&mut self) -> Option>; - /// Creates an `Iterator` which uses a closure to determine if a [RelValueRef] should be yielded. + /// Creates an `Iterator` which uses a closure to determine if a [RelValue] should be yielded. /// - /// Given a [RelValueRef] the closure must return true or false. + /// Given a [RelValue] the closure must return true or false. /// The returned iterator will yield only the elements for which the closure returns true. /// /// Note: /// @@ -31,7 +31,7 @@ pub trait RelOps<'a> { /// /// Given a [RelValue] the closure must return a subset of the current one. /// - /// The [Header] is pre-checked that all the fields exist and return a error if any field is not found. + /// The [spacetimedb_lib::relation::Header] is pre-checked to ensure that all the fields exist, and an error is returned if any field is not found.
/// /// Note: /// diff --git a/modules/sdk-test/src/lib.rs b/modules/sdk-test/src/lib.rs index 46f74998408..4ce63549a8f 100644 --- a/modules/sdk-test/src/lib.rs +++ b/modules/sdk-test/src/lib.rs @@ -134,7 +134,7 @@ pub struct EveryVecStruct { /// /// - fields is a comma-separated list of field specifiers, which are optional attribues, /// followed by a field name identifier and a type. -/// e.g. #[unique] name String +/// e.g. #\[unique\] name String /// /// A full table definition might be: /// @@ -142,8 +142,8 @@ pub struct EveryVecStruct { /// insert_or_panic insert_my_table, /// update_by update_my_table = update_by_name(name), /// delete_by delete_my_table = delete_by_name(name: String), -/// } #[primary_key] name String, -/// #[auto_inc] #[unique] id u32, +/// } #\[primary_key\] name String, +/// #\[auto_inc\] #\[unique\] id u32, /// count i64; // // Internal rules are prefixed with @.