From a206bf4fe0bbaa83e8223949d6a9b9be9d8f0992 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mario=20Alejandro=20Montoya=20Corte=CC=81s?=
Date: Tue, 11 Feb 2025 10:52:28 -0500
Subject: [PATCH] Mirror load test on C#

---
 crates/bench/load.py                       |   3 +-
 modules/benchmarks-cs/benchmarks-cs.csproj |   6 +
 modules/benchmarks-cs/synthetic.cs         | 506 +++++++++++++++++++++
 modules/benchmarks/src/synthetic.rs        | 294 ++++++------
 4 files changed, 660 insertions(+), 149 deletions(-)

diff --git a/crates/bench/load.py b/crates/bench/load.py
index aceb18d6c15..b1bafbf33a3 100644
--- a/crates/bench/load.py
+++ b/crates/bench/load.py
@@ -1,7 +1,6 @@
 # Mini-tool for executing load testing and call reducer functions.
 import argparse
 import subprocess
-import sys
 import time
 from datetime import datetime, timedelta
 
@@ -77,7 +76,7 @@ def run(cli: str, database: str, init: list, load: list, frequency: float, durat
     python load.py -d <database> -i <init> -l <load> [--no-cli] [-f <frequency>] [-s <seconds>]
 
     Example:
-        python load.py -d quickstart -f 2 -s 10 -i "insert_bulk_small_rows 100" -l "queries 'small, inserts:10,query:10,deletes:10';"
+        python load.py -d quickstart -f 2 -s 10 -i "insert_bulk_small_rows 100" -l "queries 'small, inserts:10,queries:10,deletes:10';"
     """
 
     parser = argparse.ArgumentParser()
diff --git a/modules/benchmarks-cs/benchmarks-cs.csproj b/modules/benchmarks-cs/benchmarks-cs.csproj
index d4caede5ffa..9068ccda478 100644
--- a/modules/benchmarks-cs/benchmarks-cs.csproj
+++ b/modules/benchmarks-cs/benchmarks-cs.csproj
@@ -10,4 +10,10 @@
   </ItemGroup>
 
+  <ItemGroup>
+    <PackageReference Include="Bogus" Version="35.6.1" />
+  </ItemGroup>
+
 </Project>
diff --git a/modules/benchmarks-cs/synthetic.cs b/modules/benchmarks-cs/synthetic.cs
index cf888fd7a56..b2a8b9932ff 100644
--- a/modules/benchmarks-cs/synthetic.cs
+++ b/modules/benchmarks-cs/synthetic.cs
@@ -1,7 +1,24 @@
+using Bogus;
 using SpacetimeDB;
 
 namespace Benchmarks;
 
+[SpacetimeDB.Type]
+public enum BenchLoad
+{
+    Tiny,
+    Small,
+    Medium,
+    Large
+}
+
+[SpacetimeDB.Type]
+public enum Index
+{
+    One,
+    Many
+}
+
 public static partial class synthetic
 {
     // ---------- schemas ----------
@@ -66,6 +83,142 @@ public partial struct btree_each_column_u32_u64_u64_t
         public ulong y;
     }
 
+    [SpacetimeDB.Table(Name = "tiny_rows")]
+    public partial struct tiny_rows_t
+    {
+        [SpacetimeDB.Index.BTree]
+        public byte id;
+    }
+
+    [SpacetimeDB.Table(Name = "small_rows")]
+    public partial struct small_rows_t
+    {
+        [SpacetimeDB.Index.BTree]
+        public ulong id;
+        public ulong x;
+        public ulong y;
+    }
+
+    [SpacetimeDB.Table(Name = "small_btree_each_column_rows")]
+    public partial struct small_rows_btree_each_column_t
+    {
+        [SpacetimeDB.Index.BTree]
+        public ulong id;
+        [SpacetimeDB.Index.BTree]
+        public ulong x;
+        [SpacetimeDB.Index.BTree]
+        public ulong y;
+    }
+
+    [SpacetimeDB.Table(Name = "medium_var_rows")]
+    public partial struct medium_var_rows_t
+    {
+        [SpacetimeDB.Index.BTree]
+        public ulong id;
+        public string name;
+        public string email;
+        public string password;
+        public Identity identity;
+        public ConnectionId connection;
+        public List<ulong> pos;
+    }
+
+    [SpacetimeDB.Table(Name = "medium_var_rows_btree_each_column")]
+    public partial struct medium_var_rows_btree_each_column_t
+    {
+        [SpacetimeDB.Index.BTree]
+        public ulong id;
+        [SpacetimeDB.Index.BTree]
+        public string name;
+        [SpacetimeDB.Index.BTree]
+        public string email;
+        [SpacetimeDB.Index.BTree]
+        public string password;
+        [SpacetimeDB.Index.BTree]
+        public Identity identity;
+        [SpacetimeDB.Index.BTree]
+        public ConnectionId connection;
+        //[SpacetimeDB.Index.BTree]: Not supported yet on C#
+        public List<ulong> pos;
+    }
+
+    [SpacetimeDB.Table(Name = "large_var_rows")]
+    public partial struct large_var_rows_t
+    {
+        [SpacetimeDB.Index.BTree]
+        public UInt128 id;
+        public string invoice_code;
+        public string status;
+        public Identity customer;
+        public ConnectionId company;
+        public string user_name;
+
+        public double price;
+        public double cost;
+        public double discount;
+        public List<double> taxes;
+        public double tax_total;
+        public double sub_total;
+        public double total;
+
+        public string country;
+        public string state;
+        public string city;
+        public string zip_code;
+        public string phone;
+
+        public string notes;
+        public List<string>? tags;
+    }
+
+    [SpacetimeDB.Table(Name = "large_var_rows_btree_each_column")]
+    public partial struct large_var_rows_btree_each_column_t
+    {
+        [SpacetimeDB.Index.BTree]
+        public UInt128 id;
+        [SpacetimeDB.Index.BTree]
+        public string invoice_code;
+        [SpacetimeDB.Index.BTree]
+        public string status;
+        [SpacetimeDB.Index.BTree]
+        public Identity customer;
+        [SpacetimeDB.Index.BTree]
+        public ConnectionId company;
+        [SpacetimeDB.Index.BTree]
+        public string user_name;
+
+        [SpacetimeDB.Index.BTree]
+        public double price;
+        [SpacetimeDB.Index.BTree]
+        public double cost;
+        [SpacetimeDB.Index.BTree]
+        public double discount;
+        //[SpacetimeDB.Index.BTree]: Not supported yet on C#
+        public List<double> taxes;
+        [SpacetimeDB.Index.BTree]
+        public double tax_total;
+        [SpacetimeDB.Index.BTree]
+        public double sub_total;
+        [SpacetimeDB.Index.BTree]
+        public double total;
+
+        [SpacetimeDB.Index.BTree]
+        public string country;
+        [SpacetimeDB.Index.BTree]
+        public string state;
+        [SpacetimeDB.Index.BTree]
+        public string city;
+        [SpacetimeDB.Index.BTree]
+        public string zip_code;
+        [SpacetimeDB.Index.BTree]
+        public string phone;
+        [SpacetimeDB.Index.BTree]
+        public string notes;
+        //[SpacetimeDB.Index.BTree]: Not supported yet on C#
+        public List<string>? tags;
+    }
+
     // ---------- empty ----------
 
     [SpacetimeDB.Reducer]
@@ -245,6 +398,169 @@ List people
         }
     }
 
+    public static Identity rand_identity(Faker fake)
+    {
+        return new Identity(fake.Random.Bytes(32));
+    }
+
+    public static ConnectionId rand_connection_id(Faker fake)
+    {
+        return ConnectionId.Random();
+    }
+
+    [SpacetimeDB.Reducer]
+    public static void insert_bulk_tiny_rows(ReducerContext ctx, byte rows)
+    {
+        for (byte id = 0; id < rows; id++)
+        {
+            ctx.Db.tiny_rows.Insert(new tiny_rows_t { id = id });
+        }
+        Log.Info($"Inserted on tiny_rows: {rows} rows");
+    }
+
+    [SpacetimeDB.Reducer]
+    public static void insert_bulk_small_rows(ReducerContext ctx, ulong rows)
+    {
+        var rng = new Random();
+        for (ulong id = 0; id < rows; id++)
+        {
+            ctx.Db.small_rows.Insert(new small_rows_t
+            {
+                id = id,
+                x = (ulong)rng.Next(),
+                y = (ulong)rng.Next()
+            });
+        }
+        Log.Info($"Inserted on small_rows: {rows} rows");
+    }
+
+    [SpacetimeDB.Reducer]
+    public static void insert_bulk_small_btree_each_column_rows(ReducerContext ctx, ulong rows)
+    {
+        var rng = new Random();
+        for (ulong id = 0; id < rows; id++)
+        {
+            ctx.Db.small_btree_each_column_rows.Insert(new small_rows_btree_each_column_t
+            {
+                id = id,
+                x = (ulong)rng.Next(),
+                y = (ulong)rng.Next()
+            });
+        }
+        Log.Info($"Inserted on small_btree_each_column_rows: {rows} rows");
+    }
+
+    [SpacetimeDB.Reducer]
+    public static void insert_bulk_medium_var_rows(ReducerContext ctx, ulong rows)
+    {
+        var faker = new Faker("en");
+        for (ulong id = 0; id < rows; id++)
+        {
+            ctx.Db.medium_var_rows.Insert(new medium_var_rows_t
+            {
+                id = id,
+                name = faker.Name.FullName(),
+                email = faker.Internet.Email(),
+                password = faker.Internet.Password(),
+                identity = rand_identity(faker),
+                connection = rand_connection_id(faker),
+                pos = new Faker<List<ulong>>().Generate()
+            });
+        }
+        Log.Info($"Inserted on medium_var_rows: {rows} rows");
+    }
+
+    [SpacetimeDB.Reducer]
+    public static void insert_bulk_medium_var_rows_btree_each_column(ReducerContext ctx, ulong rows)
+    {
+        var faker = new Faker("en");
+        for (ulong id = 0; id < rows; id++)
+        {
+            ctx.Db.medium_var_rows_btree_each_column.Insert(new medium_var_rows_btree_each_column_t
+            {
+                id = id,
+                name = faker.Name.FullName(),
+                email = faker.Internet.Email(),
+                password = faker.Internet.Password(length: 10),
+                identity = rand_identity(faker),
+                connection = rand_connection_id(faker),
+                pos = new Faker<List<ulong>>().Generate()
+            });
+        }
+        Log.Info($"Inserted on medium_var_rows_btree_each_column: {rows} rows");
+    }
+
+    [SpacetimeDB.Reducer]
+    public static void insert_bulk_large_var_rows(ReducerContext ctx, ulong rows)
+    {
+        var faker = new Faker("en");
+        for (ulong id = 0; id < rows; id++)
+        {
+            ctx.Db.large_var_rows.Insert(new large_var_rows_t
+            {
+                id = UInt128.CreateChecked(id),
+                invoice_code = faker.Random.String(),
+                status = faker.Random.String(),
+                customer = rand_identity(faker),
+                company = rand_connection_id(faker),
+                user_name = faker.Random.String(),
+
+                price = faker.Random.Double(),
+                cost = faker.Random.Double(),
+                discount = faker.Random.Double(),
+                taxes = new Faker<List<double>>().Generate(),
+                tax_total = faker.Random.Double(),
+                sub_total = faker.Random.Double(),
+                total = faker.Random.Double(),
+
+                country = faker.Address.Country(),
+                state = faker.Address.State(),
+                city = faker.Address.City(),
+                zip_code = faker.Address.ZipCode(),
+                phone = faker.Phone.PhoneNumber(),
+                notes = faker.Lorem.Paragraph(),
+                tags = [.. faker.Random.WordsArray(0, 3)]
+            });
+        }
+        Log.Info($"Inserted on large_var_rows: {rows} rows");
+    }
+
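+    // Usage sketch: these bulk inserts are what load.py's -i flag drives, in the
+    // same form as the example in its docstring (the row count is illustrative):
+    //
+    //   python load.py -d quickstart -i "insert_bulk_large_var_rows 1000"
+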
+    [SpacetimeDB.Reducer]
+    public static void insert_bulk_large_var_rows_btree_each_column(ReducerContext ctx, ulong rows)
+    {
+        var faker = new Faker("en");
+        for (ulong id = 0; id < rows; id++)
+        {
+            ctx.Db.large_var_rows_btree_each_column.Insert(new large_var_rows_btree_each_column_t
+            {
+                id = UInt128.CreateChecked(id),
+                invoice_code = faker.Random.String(),
+                status = faker.Random.String(),
+                customer = rand_identity(faker),
+                company = rand_connection_id(faker),
+                user_name = faker.Random.String(),
+
+                price = faker.Random.Double(),
+                cost = faker.Random.Double(),
+                discount = faker.Random.Double(),
+                taxes = new Faker<List<double>>().Generate(),
+                tax_total = faker.Random.Double(),
+                sub_total = faker.Random.Double(),
+                total = faker.Random.Double(),
+
+                country = faker.Address.Country(),
+                state = faker.Address.State(),
+                city = faker.Address.City(),
+                zip_code = faker.Address.ZipCode(),
+                phone = faker.Phone.PhoneNumber(),
+                notes = faker.Lorem.Paragraph(),
+                tags = [.. faker.Random.WordsArray(0, 3)]
+            });
+        }
+        Log.Info($"Inserted on large_var_rows_btree_each_column: {rows} rows");
+    }
+
     // ---------- update ----------
 
     [SpacetimeDB.Reducer]
@@ -409,6 +725,31 @@ public static void filter_btree_each_column_u32_u64_u64_by_y(ReducerContext ctx,
         Bench.BlackBox(ctx.Db.btree_each_column_u32_u64_u64.y.Filter(y));
     }
 
+    [SpacetimeDB.Reducer]
+    public static void filter_tiny_rows_by_id(ReducerContext ctx, byte id)
+    {
+        Bench.BlackBox(ctx.Db.tiny_rows.Iter().Where(row => row.id == id));
+    }
+
+    [SpacetimeDB.Reducer]
+    public static void filter_small_rows_by_id(ReducerContext ctx, ulong id)
+    {
+        Bench.BlackBox(ctx.Db.small_rows.Iter().Where(row => row.id == id));
+    }
+
+    [SpacetimeDB.Reducer]
+    public static void filter_medium_var_rows_by_id(ReducerContext ctx, ulong id)
+    {
+        Bench.BlackBox(ctx.Db.medium_var_rows.Iter().Where(row => row.id == id));
+    }
+
+    [SpacetimeDB.Reducer]
+    public static void filter_large_var_rows_by_id(ReducerContext ctx, ulong id)
+    {
+        Bench.BlackBox(ctx.Db.large_var_rows.Iter().Where(row => row.id == id));
+    }
+
     // ---------- delete ----------
 
     [SpacetimeDB.Reducer]
@@ -423,6 +764,30 @@ public static void delete_unique_0_u32_u64_u64_by_id(ReducerContext ctx, uint id
         ctx.Db.unique_0_u32_u64_u64.id.Delete(id);
     }
 
+    [SpacetimeDB.Reducer]
+    public static void delete_tiny_rows_by_id(ReducerContext ctx, byte id)
+    {
+        ctx.Db.tiny_rows.id.Delete(id);
+    }
+
+    [SpacetimeDB.Reducer]
+    public static void delete_small_rows_by_id(ReducerContext ctx, ulong id)
+    {
+        ctx.Db.small_rows.id.Delete(id);
+    }
+
+    [SpacetimeDB.Reducer]
+    public static void delete_medium_var_rows_by_id(ReducerContext ctx, ulong id)
+    {
+        ctx.Db.medium_var_rows.id.Delete(id);
+    }
+
+    [SpacetimeDB.Reducer]
+    public static void delete_large_var_rows_by_id(ReducerContext ctx, ulong id)
+    {
+        ctx.Db.large_var_rows.id.Delete(id);
+    }
+
     // ---------- clear table ----------
 
     [SpacetimeDB.Reducer]
@@ -550,4 +915,145 @@ public static void print_many_things(ReducerContext ctx, uint n)
             Log.Info("hello again!");
         }
     }
+
+    private static BenchLoad ParseLoad(string arg)
+    {
+        var load = arg switch
+        {
+            "tiny" => BenchLoad.Tiny,
+            "small" => BenchLoad.Small,
+            "medium" => BenchLoad.Medium,
+            "large" => BenchLoad.Large,
+            _ => throw new Exception($"Invalid load type: '{arg}', expected: tiny, small, medium, or large"),
+        };
+        return load;
+    }
+
+    /// This reducer is used to load synthetic data into the database for benchmarking purposes.
+    ///
+    /// The input is a string with the following format:
+    ///
+    /// `load_type`: [`BenchLoad`], `index_type`: [`Index`], `row_count`: `ulong`
+    [SpacetimeDB.Reducer]
+    public static void load(ReducerContext ctx, string input)
+    {
+        var args = input.Split(',').Select(x => x.Trim().ToLower()).ToList();
+        if (args.Count != 3)
+        {
+            throw new Exception($"Expected 3 arguments, got {args.Count}");
+        }
+        var load = ParseLoad(args[0]);
+
+        var index = args[1] switch
+        {
+            "one" => Index.One,
+            "many" => Index.Many,
+            _ => throw new Exception($"Invalid index type: '{args[1]}', expected: one or many"),
+        };
+        if (!ulong.TryParse(args[2], out var rows))
+        {
+            throw new Exception($"Invalid row count: {args[2]}");
+        }
+
+        switch (load)
+        {
+            case BenchLoad.Tiny when index == Index.One || index == Index.Many:
+                insert_bulk_tiny_rows(ctx, (byte)rows);
+                break;
+            case BenchLoad.Small when index == Index.One:
+                insert_bulk_small_rows(ctx, rows);
+                break;
+            case BenchLoad.Small when index == Index.Many:
+                insert_bulk_small_btree_each_column_rows(ctx, rows);
+                break;
+            case BenchLoad.Medium when index == Index.One:
+                insert_bulk_medium_var_rows(ctx, rows);
+                break;
+            case BenchLoad.Medium when index == Index.Many:
+                insert_bulk_medium_var_rows_btree_each_column(ctx, rows);
+                break;
+            case BenchLoad.Large when index == Index.One:
+                insert_bulk_large_var_rows(ctx, rows);
+                break;
+            case BenchLoad.Large when index == Index.Many:
+                insert_bulk_large_var_rows_btree_each_column(ctx, rows);
+                break;
+        }
+    }
+
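+    // Example inputs accepted by `load`, per the format above (values are
+    // illustrative, taken from the dispatch logic):
+    //   "tiny, one, 100"     -> insert_bulk_tiny_rows(100)
+    //   "medium, many, 500"  -> insert_bulk_medium_var_rows_btree_each_column(500)
+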
+    /// Used to execute a series of reducers in sequence for benchmarking purposes.
+    ///
+    /// The input is a string with the following format:
+    ///
+    /// `load_type`: [`BenchLoad`], `inserts`: `ulong`, `queries`: `ulong`, `deletes`: `ulong`
+    ///
+    /// The `inserts`, `queries`, and `deletes` arguments may appear in any order; execution
+    /// always runs inserts, then queries, then deletes.
+    [SpacetimeDB.Reducer]
+    public static void queries(ReducerContext ctx, string input)
+    {
+        var args = input.Split(',').Select(x => x.Trim().ToLower()).ToList();
+        if (args.Count < 2)
+        {
+            throw new ArgumentException($"Expected at least 2 arguments, got {args.Count}");
+        }
+
+        var load = ParseLoad(args[0]);
+
+        ulong inserts = 0, queries = 0, deletes = 0;
+
+        foreach (var arg in args.Skip(1))
+        {
+            var parts = arg.Split(':').Select(x => x.Trim()).ToList();
+            if (parts.Count != 2)
+            {
+                throw new ArgumentException($"Invalid argument: '{arg}', expected: 'operation:count'");
+            }
+
+            if (!ulong.TryParse(parts[1], out var count))
+            {
+                throw new ArgumentException($"Invalid count: {parts[1]}");
+            }
+
+            switch (parts[0])
+            {
+                case "inserts":
+                    inserts = count;
+                    break;
+                case "queries":
+                    queries = count;
+                    break;
+                case "deletes":
+                    deletes = count;
+                    break;
+                default:
+                    throw new ArgumentException($"Invalid operation: '{parts[0]}', expected: inserts, queries, or deletes");
+            }
+        }
+
+        Log.Info($"Executing queries: inserts: {inserts}, queries: {queries}, deletes: {deletes}");
+
+        switch (load)
+        {
+            case BenchLoad.Tiny:
+                if (inserts > 0) insert_bulk_tiny_rows(ctx, (byte)inserts);
+                for (ulong id = 0; id < queries; id++) filter_tiny_rows_by_id(ctx, (byte)id);
+                for (ulong id = 0; id < deletes; id++) delete_tiny_rows_by_id(ctx, (byte)id);
+                break;
+            case BenchLoad.Small:
+                if (inserts > 0) insert_bulk_small_rows(ctx, inserts);
+                for (ulong id = 0; id < queries; id++) filter_small_rows_by_id(ctx, id);
+                for (ulong id = 0; id < deletes; id++) delete_small_rows_by_id(ctx, id);
+                break;
+            case BenchLoad.Medium:
+                if (inserts > 0) insert_bulk_medium_var_rows(ctx, inserts);
+                for (ulong id = 0; id < queries; id++) filter_medium_var_rows_by_id(ctx, id);
+                for (ulong id = 0; id < deletes; id++) delete_medium_var_rows_by_id(ctx, id);
+                break;
+            case BenchLoad.Large:
+                if (inserts > 0) insert_bulk_large_var_rows(ctx, inserts);
+                for (ulong id = 0; id < queries; id++) filter_large_var_rows_by_id(ctx, id);
+                for (ulong id = 0; id < deletes; id++) delete_large_var_rows_by_id(ctx, id);
+                break;
+        }
+    }
 }
diff --git a/modules/benchmarks/src/synthetic.rs b/modules/benchmarks/src/synthetic.rs
index 282063d7996..696776936ab 100644
--- a/modules/benchmarks/src/synthetic.rs
+++ b/modules/benchmarks/src/synthetic.rs
@@ -33,6 +33,7 @@ use fake::{Fake, Faker};
 use spacetimedb::rand::Rng;
 use spacetimedb::{log, ConnectionId, Identity, ReducerContext, SpacetimeType, StdbRng, Table};
 use std::hint::black_box;
+use std::str::FromStr;
 
 // ---------- schemas ----------
 #[spacetimedb::table(name = unique_0_u32_u64_str)]
@@ -151,7 +152,7 @@ pub struct medium_var_rows_btree_each_column_t {
     identity: Identity,
     #[index(btree)]
     connection: ConnectionId,
-    #[index(btree)]
+    // #[index(btree)]: Not supported yet on C#
     pos: Vec<u64>,
 }
 
@@ -176,7 +177,7 @@ pub struct large_var_rows_t {
     country: String,
     state: String,
     city: String,
-    zip_code: Option<String>,
+    zip_code: String,
     phone: String,
 
     notes: String,
@@ -204,7 +205,7 @@ pub struct large_var_rows_btree_each_column_t {
     cost: f64,
     #[index(btree)]
     discount: f64,
-    #[index(btree)]
+    // #[index(btree)]: Not supported yet on C#
     taxes: Vec<f64>,
     #[index(btree)]
     tax_total: f64,
@@ -220,13 +221,13 @@ pub struct large_var_rows_btree_each_column_t {
     #[index(btree)]
     city: String,
     #[index(btree)]
-    zip_code: Option<String>,
+    zip_code: String,
     #[index(btree)]
     phone: String,
 
     #[index(btree)]
     notes: String,
-    #[index(btree)]
+    // #[index(btree)]: Not supported yet on C#
     tags: Option<Vec<String>>,
 }
 
@@ -470,148 +471,6 @@ pub fn insert_bulk_large_var_rows_btree_each_column(ctx: &ReducerContext, rows: u64) {
     log::info!("Inserted on large_var_rows_btree_each_column: {} rows", rows);
 }
 
-/// This reducer is used to load synthetic data into the database for benchmarking purposes.
-///
-/// The input is a string with the following format:
-///
-/// `load_type`: [`Load`], `index_type`: [`Index`], `row_count`: `u32`
-#[spacetimedb::reducer]
-pub fn load(ctx: &ReducerContext, input: String) -> Result<(), String> {
-    let args = input.split(',').map(|x| x.trim().to_lowercase()).collect::<Vec<_>>();
-    if args.len() != 3 {
-        return Err(format!("Expected 3 arguments, got {}", args.len()));
-    }
-    let load = match args[0].as_str() {
-        "tiny" => Load::Tiny,
-        "small" => Load::Small,
-        "medium" => Load::Medium,
-        "large" => Load::Large,
-        x => {
-            return Err(format!(
-                "Invalid load type: '{x}', expected: tiny, small, medium, or large"
-            ))
-        }
-    };
-    let index = match args[1].as_str() {
-        "one" => Index::One,
-        "many" => Index::Many,
-        x => return Err(format!("Invalid index type: '{x}', expected: one, or many")),
-    };
-    let rows = args[2]
-        .parse::<u64>()
-        .map_err(|e| format!("Invalid row count: {}", e))?;
-
-    match (load, index) {
-        (Load::Tiny, Index::One | Index::Many) => insert_bulk_tiny_rows(ctx, rows as u8),
-        (Load::Small, Index::One) => insert_bulk_small_rows(ctx, rows),
-        (Load::Small, Index::Many) => insert_bulk_small_btree_each_column_rows(ctx, rows),
-        (Load::Medium, Index::One) => insert_bulk_medium_var_rows(ctx, rows),
-        (Load::Medium, Index::Many) => insert_bulk_medium_var_rows_btree_each_column(ctx, rows),
-        (Load::Large, Index::One) => insert_bulk_large_var_rows(ctx, rows),
-        (Load::Large, Index::Many) => insert_bulk_large_var_rows_btree_each_column(ctx, rows),
-    }
-
-    Ok(())
-}
-
-/// Used to execute a series of reducers in sequence for benchmarking purposes.
-///
-/// The input is a string with the following format:
-///
-/// `load_type`: [`Load`], `inserts`: `u32`, `query`: `u32`, `deletes`: `u32`
-///
-/// The order of the `inserts`, `query`, and `deletes` can be changed and will be executed in that order.
-#[spacetimedb::reducer]
-pub fn queries(ctx: &ReducerContext, input: String) -> Result<(), String> {
-    let args = input.split(',').map(|x| x.trim().to_lowercase()).collect::<Vec<_>>();
-    if args.len() < 2 {
-        return Err(format!("Expected at least 2 arguments, got {}", args.len()));
-    }
-    let load = match args[0].as_str() {
-        "tiny" => Load::Tiny,
-        "small" => Load::Small,
-        "medium" => Load::Medium,
-        "large" => Load::Large,
-        x => {
-            return Err(format!(
-                "Invalid load type: '{x}', expected: tiny, small, medium, or large"
-            ))
-        }
-    };
-
-    let mut inserts = 0u64;
-    let mut queries = 0u64;
-    let mut deletes = 0u64;
-
-    for arg in args.iter().skip(1) {
-        let parts = arg.split(':').map(|x| x.trim()).collect::<Vec<_>>();
-        if parts.len() != 2 {
-            return Err(format!("Invalid argument: '{arg}', expected: 'operation:count'"));
-        }
-        let count = parts[1].parse::<u64>().map_err(|e| format!("Invalid count: {}", e))?;
-        match parts[0] {
-            "inserts" => inserts = count,
-            "query" => queries = count,
-            "deletes" => deletes = count,
-            x => {
-                return Err(format!(
-                    "Invalid operation: '{x}', expected: inserts, query, or deletes"
-                ))
-            }
-        }
-    }
-
-    log::info!("Executing queries: inserts: {inserts}, query: {queries}, deletes: {deletes}");
-    // To allow to insert duplicate rows, the `ids` not use `[unique]` attribute, causing to not be able to use `update` method
-    match load {
-        Load::Tiny => {
-            if inserts > 0 {
-                insert_bulk_tiny_rows(ctx, inserts as u8);
-            }
-            for id in 0..queries {
-                filter_tiny_rows_by_id(ctx, id as u8);
-            }
-            for id in 0..deletes {
-                delete_tiny_rows_by_id(ctx, id as u8);
-            }
-        }
-        Load::Small => {
-            if inserts > 0 {
-                insert_bulk_small_rows(ctx, inserts);
-            }
-            for id in 0..queries {
-                filter_small_rows_by_id(ctx, id);
-            }
-            for id in 0..deletes {
-                delete_small_rows_by_id(ctx, id);
-            }
-        }
-        Load::Medium => {
-            if inserts > 0 {
-                insert_bulk_medium_var_rows(ctx, inserts);
-            }
-            for id in 0..queries {
-                filter_medium_var_rows_by_id(ctx, id);
-            }
-            for id in 0..deletes {
-                delete_medium_var_rows_by_id(ctx, id);
-            }
-        }
-        Load::Large => {
-            if inserts > 0 {
-                insert_bulk_large_var_rows(ctx, inserts);
-            }
-            for id in 0..queries {
-                filter_large_var_rows_by_id(ctx, id as u128);
-            }
-            for id in 0..deletes {
-                delete_large_var_rows_by_id(ctx, id as u128);
-            }
-        }
-    }
-
-    Ok(())
-}
 
 // ---------- update ----------
 
 #[spacetimedb::reducer]
@@ -974,3 +833,144 @@ pub fn print_many_things(_ctx: &ReducerContext, n: u32) {
         log::info!("hello again!");
     }
 }
+
+impl FromStr for Load {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "tiny" => Ok(Load::Tiny),
+            "small" => Ok(Load::Small),
+            "medium" => Ok(Load::Medium),
+            "large" => Ok(Load::Large),
+            _ => Err(format!(
+                "Invalid load type: '{}', expected: tiny, small, medium, or large",
+                s
+            )),
+        }
+    }
+}
+
+/// This reducer is used to load synthetic data into the database for benchmarking purposes.
+///
+/// The input is a string with the following format:
+///
+/// `load_type`: [`Load`], `index_type`: [`Index`], `row_count`: `u64`
+#[spacetimedb::reducer]
+pub fn load(ctx: &ReducerContext, input: String) -> Result<(), String> {
+    let args = input.split(',').map(|x| x.trim().to_lowercase()).collect::<Vec<_>>();
+    if args.len() != 3 {
+        return Err(format!("Expected 3 arguments, got {}", args.len()));
+    }
+    let load = Load::from_str(args[0].as_str())?;
+
+    let index = match args[1].as_str() {
+        "one" => Index::One,
+        "many" => Index::Many,
+        x => return Err(format!("Invalid index type: '{x}', expected: one or many")),
+    };
+    let rows = args[2]
+        .parse::<u64>()
+        .map_err(|e| format!("Invalid row count: {}", e))?;
+
+    match (load, index) {
+        (Load::Tiny, Index::One | Index::Many) => insert_bulk_tiny_rows(ctx, rows as u8),
+        (Load::Small, Index::One) => insert_bulk_small_rows(ctx, rows),
+        (Load::Small, Index::Many) => insert_bulk_small_btree_each_column_rows(ctx, rows),
+        (Load::Medium, Index::One) => insert_bulk_medium_var_rows(ctx, rows),
+        (Load::Medium, Index::Many) => insert_bulk_medium_var_rows_btree_each_column(ctx, rows),
+        (Load::Large, Index::One) => insert_bulk_large_var_rows(ctx, rows),
+        (Load::Large, Index::Many) => insert_bulk_large_var_rows_btree_each_column(ctx, rows),
+    }
+
+    Ok(())
+}
+
+/// Used to execute a series of reducers in sequence for benchmarking purposes.
+///
+/// The input is a string with the following format:
+///
+/// `load_type`: [`Load`], `inserts`: `u64`, `queries`: `u64`, `deletes`: `u64`
+///
+/// The `inserts`, `queries`, and `deletes` arguments may appear in any order; execution
+/// always runs inserts, then queries, then deletes.
+#[spacetimedb::reducer]
+pub fn queries(ctx: &ReducerContext, input: String) -> Result<(), String> {
+    let args = input.split(',').map(|x| x.trim().to_lowercase()).collect::<Vec<_>>();
+    if args.len() < 2 {
+        return Err(format!("Expected at least 2 arguments, got {}", args.len()));
+    }
+    let load = Load::from_str(args[0].as_str())?;
+
+    let mut inserts = 0u64;
+    let mut queries = 0u64;
+    let mut deletes = 0u64;
+
+    for arg in &args[1..] {
+        let parts = arg.split(':').map(|x| x.trim()).collect::<Vec<_>>();
+        if parts.len() != 2 {
+            return Err(format!("Invalid argument: '{arg}', expected: 'operation:count'"));
+        }
+        let count = parts[1].parse::<u64>().map_err(|e| format!("Invalid count: {}", e))?;
+        match parts[0] {
+            "inserts" => inserts = count,
+            "queries" => queries = count,
+            "deletes" => deletes = count,
+            x => {
+                return Err(format!(
+                    "Invalid operation: '{x}', expected: inserts, queries, or deletes"
+                ))
+            }
+        }
+    }
+
+    log::info!("Executing queries: inserts: {inserts}, queries: {queries}, deletes: {deletes}");
+    // To allow inserting duplicate rows, the `id` columns do not use the `#[unique]`
+    // attribute; without a unique constraint the `update` method is not available.
+    match load {
+        Load::Tiny => {
+            if inserts > 0 {
+                insert_bulk_tiny_rows(ctx, inserts as u8);
+            }
+            for id in 0..queries {
+                filter_tiny_rows_by_id(ctx, id as u8);
+            }
+            for id in 0..deletes {
+                delete_tiny_rows_by_id(ctx, id as u8);
+            }
+        }
+        Load::Small => {
+            if inserts > 0 {
+                insert_bulk_small_rows(ctx, inserts);
+            }
+            for id in 0..queries {
+                filter_small_rows_by_id(ctx, id);
+            }
+            for id in 0..deletes {
+                delete_small_rows_by_id(ctx, id);
+            }
+        }
+        Load::Medium => {
+            if inserts > 0 {
+                insert_bulk_medium_var_rows(ctx, inserts);
+            }
+            for id in 0..queries {
+                filter_medium_var_rows_by_id(ctx, id);
+            }
+            for id in 0..deletes {
+                delete_medium_var_rows_by_id(ctx, id);
+            }
+        }
+        Load::Large => {
+            if inserts > 0 {
+                insert_bulk_large_var_rows(ctx, inserts);
+            }
+            for id in 0..queries {
+                filter_large_var_rows_by_id(ctx, id as u128);
+            }
+            for id in 0..deletes {
+                delete_large_var_rows_by_id(ctx, id as u128);
+            }
+        }
+    }
+
+    Ok(())
+}
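
Usage sketch: with a module published as `quickstart` (the database name used in
the load.py example above), the new reducers can be driven end to end; the
argument values here are illustrative, following the input formats documented
on the `load` and `queries` reducers:

    python load.py -d quickstart -f 2 -s 10 \
        -i "load 'small, one, 100'" \
        -l "queries 'small, inserts:10,queries:10,deletes:10';"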