From 4261679960cc59d0d74aa270513bdf44cf698e52 Mon Sep 17 00:00:00 2001
From: Nikhil Sinha
Date: Sun, 2 Mar 2025 12:40:33 -0500
Subject: [PATCH 1/5] feat: add api to execute clickbench benchmark

use API `/api/v1/benchmark/clickbench` to perform benchmark on the clickbench
dataset and its defined queries

add env `PARQUET_FILE` to provide the file path of hits.parquet
add env `QUERIES_FILE` to provide the file path of the queries file

3 tries for each query, 43 queries in the set in total
api response contains query number, iteration number and response time in
seconds

---
 src/handlers/http/clickbench.rs   | 123 ++++++++++++++++++++++++++++++
 src/handlers/http/mod.rs          |   1 +
 src/handlers/http/modal/server.rs |  14 +++-
 src/parseable/streams.rs          |   5 +-
 src/rbac/role.rs                  |   2 +
 5 files changed, 140 insertions(+), 5 deletions(-)
 create mode 100644 src/handlers/http/clickbench.rs

diff --git a/src/handlers/http/clickbench.rs b/src/handlers/http/clickbench.rs
new file mode 100644
index 000000000..433f4709d
--- /dev/null
+++ b/src/handlers/http/clickbench.rs
@@ -0,0 +1,123 @@
+/*
+ * Parseable Server (C) 2022 - 2024 Parseable, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+use std::{collections::HashMap, env, fs, time::Instant};
+
+use actix_web::{web::Json, Responder};
+use datafusion::{
+    common::plan_datafusion_err,
+    error::DataFusionError,
+    execution::{runtime_env::RuntimeEnvBuilder, SessionStateBuilder},
+    physical_plan::collect,
+    prelude::{ParquetReadOptions, SessionConfig, SessionContext},
+    sql::{parser::DFParser, sqlparser::dialect::dialect_from_str},
+};
+use serde_json::{json, Value};
+
+pub async fn clickbench_benchmark() -> Result<impl Responder, actix_web::Error> {
+    let results = tokio::task::spawn_blocking(run_benchmark)
+        .await
+        .map_err(actix_web::error::ErrorInternalServerError)?
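+        // spawn_blocking returns Result<Result<_, _>, JoinError>, so the first
+        // `?` above surfaces a join/panic error and the second `?` below
+        // unwraps the benchmark's own Result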
+        .map_err(actix_web::error::ErrorInternalServerError)?;
+    Ok(results)
+}
+
+#[tokio::main(flavor = "multi_thread")]
+pub async fn run_benchmark() -> Result<Json<Value>, anyhow::Error> {
+    let mut session_config = SessionConfig::from_env()?.with_information_schema(true);
+
+    session_config = session_config.with_batch_size(8192);
+
+    let rt_builder = RuntimeEnvBuilder::new();
+    // set memory pool size
+    let runtime_env = rt_builder.build_arc()?;
+    let state = SessionStateBuilder::new()
+        .with_default_features()
+        .with_config(session_config)
+        .with_runtime_env(runtime_env)
+        .build();
+    state
+        .catalog_list()
+        .catalog(&state.config_options().catalog.default_catalog)
+        .expect("default catalog is provided by datafusion");
+
+    let ctx = SessionContext::new_with_state(state);
+
+    let mut table_options = HashMap::new();
+    table_options.insert("binary_as_string", "true");
+
+    let parquet_file = env::var("PARQUET_FILE")?;
+    register_hits(&ctx, &parquet_file).await?;
+    let mut query_list = Vec::new();
+    let queries_file = env::var("QUERIES_FILE")?;
+    let queries = fs::read_to_string(queries_file)?;
+    for query in queries.lines() {
+        query_list.push(query.to_string());
+    }
+    execute_queries(&ctx, query_list).await
+}
+
+async fn register_hits(ctx: &SessionContext, parquet_file: &str) -> Result<(), anyhow::Error> {
+    let options: ParquetReadOptions<'_> = Default::default();
+    ctx.register_parquet("hits", parquet_file, options)
+        .await
+        .map_err(|e| {
+            DataFusionError::Context(format!("Registering 'hits' as {parquet_file}"), Box::new(e))
+        })?;
+    Ok(())
+}
+
+pub async fn execute_queries(
+    ctx: &SessionContext,
+    query_list: Vec<String>,
+) -> Result<Json<Value>, anyhow::Error> {
+    const TRIES: usize = 3;
+    let mut results = Vec::new();
+
+    for sql in query_list.iter() {
+        let mut elapsed_times = Vec::new();
+        for _iteration in 1..=TRIES {
+            let start = Instant::now();
+            let task_ctx = ctx.task_ctx();
+            let dialect = &task_ctx.session_config().options().sql_parser.dialect;
+            let dialect = dialect_from_str(dialect).ok_or_else(|| {
+                plan_datafusion_err!(
+                    "Unsupported SQL dialect: {dialect}. Available dialects: \
+                        Generic, MySQL, PostgreSQL, Hive, SQLite, Snowflake, Redshift, \
+                        MsSQL, ClickHouse, BigQuery, Ansi."
+                    )
+                })?;
+
+            let statements = DFParser::parse_sql_with_dialect(sql, dialect.as_ref())?;
+            let statement = statements.front().unwrap();
+            let plan = ctx.state().statement_to_plan(statement.clone()).await?;
+
+            let df = ctx.execute_logical_plan(plan).await?;
+            let physical_plan = df.create_physical_plan().await?;
+
+            let _ = collect(physical_plan, task_ctx.clone()).await?;
+            let elapsed = start.elapsed().as_secs_f64();
+            elapsed_times.push(elapsed);
+        }
+        results.push(elapsed_times);
+    }
+
+    let result_json = json!(results);
+
+    Ok(Json(result_json))
+}
diff --git a/src/handlers/http/mod.rs b/src/handlers/http/mod.rs
index f1f702d4b..c3b166a4a 100644
--- a/src/handlers/http/mod.rs
+++ b/src/handlers/http/mod.rs
@@ -30,6 +30,7 @@ use self::{cluster::get_ingestor_info, query::Query};
 pub mod about;
 pub mod alerts;
 mod audit;
+pub mod clickbench;
 pub mod cluster;
 pub mod correlation;
 pub mod health_check;
diff --git a/src/handlers/http/modal/server.rs b/src/handlers/http/modal/server.rs
index 27a4d30f4..1205d78f5 100644
--- a/src/handlers/http/modal/server.rs
+++ b/src/handlers/http/modal/server.rs
@@ -25,6 +25,7 @@ use crate::handlers;
 use crate::handlers::http::about;
 use crate::handlers::http::alerts;
 use crate::handlers::http::base_path;
+use crate::handlers::http::clickbench;
 use crate::handlers::http::health_check;
 use crate::handlers::http::query;
 use crate::handlers::http::users::dashboards;
@@ -87,7 +88,8 @@ impl ParseableServer for Server {
                     .service(Self::get_user_role_webscope())
                     .service(Self::get_counts_webscope())
                     .service(Self::get_alerts_webscope())
-                    .service(Self::get_metrics_webscope()),
+                    .service(Self::get_metrics_webscope())
+                    .service(Self::get_benchmark_webscope()),
             )
             .service(Self::get_ingest_otel_factory())
             .service(Self::get_generated());
@@ -160,6 +162,16 @@ impl Server {
         )
     }
 
+    pub fn get_benchmark_webscope() -> Scope {
+        web::scope("/benchmark/clickbench").service(
+            web::resource("").route(
+                web::get()
+                    .to(clickbench::clickbench_benchmark)
+                    .authorize(Action::Benchmark),
+            ),
+        )
+    }
+
     pub fn get_correlation_webscope() -> Scope {
         web::scope("/correlation")
             .service(
diff --git a/src/parseable/streams.rs b/src/parseable/streams.rs
index 088ca509d..009e01d2c 100644
--- a/src/parseable/streams.rs
+++ b/src/parseable/streams.rs
@@ -513,10 +513,7 @@ impl Stream {
             let file_size = match file.metadata() {
                 Ok(meta) => meta.len(),
                 Err(err) => {
-                    warn!(
-                        "File ({}) not found; Error = {err}",
-                        file.display()
-                    );
+                    warn!("File ({}) not found; Error = {err}", file.display());
                     continue;
                 }
             };
diff --git a/src/rbac/role.rs b/src/rbac/role.rs
index 00208631c..df7fbe15e 100644
--- a/src/rbac/role.rs
+++ b/src/rbac/role.rs
@@ -67,6 +67,7 @@ pub enum Action {
     CreateCorrelation,
     DeleteCorrelation,
     PutCorrelation,
+    Benchmark,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
@@ -108,6 +109,7 @@ impl RoleBuilder {
             ),
             Action::Login
             | Action::Metrics
+            | Action::Benchmark
             | Action::PutUser
             | Action::ListUser
             | Action::PutUserRoles

From d694a7004d643843329236d104e739848f63dcf7 Mon Sep 17 00:00:00 2001
From: Nikhil Sinha
Date: Sun, 2 Mar 2025 14:14:18 -0500
Subject: [PATCH 2/5] deepsource fix, coderabbitai suggestions

---
 src/handlers/http/clickbench.rs | 65 +++++++++++++++++++++++++++------
 1 file changed, 53 insertions(+), 12 deletions(-)

diff --git a/src/handlers/http/clickbench.rs b/src/handlers/http/clickbench.rs
index 433f4709d..c445948e1 100644
--- a/src/handlers/http/clickbench.rs
+++ b/src/handlers/http/clickbench.rs
@@ -16,7 +16,7 @@
  *
  */
 
-use std::{collections::HashMap, env, fs, time::Instant};
+use std::{collections::HashMap, env, fs, process::Command, time::Instant};
 
 use actix_web::{web::Json, Responder};
 use datafusion::{
@@ -28,8 +28,14 @@ use datafusion::{
     sql::{parser::DFParser, sqlparser::dialect::dialect_from_str},
 };
 use serde_json::{json, Value};
+use tracing::warn;
 
+static PARQUET_FILE: &str = "PARQUET_FILE";
+static QUERIES_FILE: &str = "QUERIES_FILE";
 pub async fn clickbench_benchmark() -> Result<impl Responder, actix_web::Error> {
+    drop_system_caches()
+        .await
+        .map_err(actix_web::error::ErrorInternalServerError)?;
     let results = tokio::task::spawn_blocking(run_benchmark)
         .await
         .map_err(actix_web::error::ErrorInternalServerError)?
@@ -37,6 +43,19 @@ pub async fn clickbench_benchmark() -> Result<impl Responder, actix_web::Error>
     Ok(results)
 }
 
+pub async fn drop_system_caches() -> Result<(), anyhow::Error> {
+    // Sync to flush file system buffers
+    Command::new("sync")
+        .status()
+        .expect("Failed to execute sync command");
+    let _ = Command::new("sudo")
+    .args(["sh", "-c", "echo 3 > /proc/sys/vm/drop_caches"])
+    .output()
+    .map_err(|e| anyhow::Error::msg(e.to_string()))?;
+
+    Ok(())
+}
+
 #[tokio::main(flavor = "multi_thread")]
 pub async fn run_benchmark() -> Result<Json<Value>, anyhow::Error> {
     let mut session_config = SessionConfig::from_env()?.with_information_schema(true);
@@ -61,10 +80,13 @@ pub async fn run_benchmark() -> Result<Json<Value>, anyhow::Error> {
     let mut table_options = HashMap::new();
     table_options.insert("binary_as_string", "true");
 
-    let parquet_file = env::var("PARQUET_FILE")?;
+    let parquet_file = env::var(PARQUET_FILE)
+    .map_err(|_| anyhow::anyhow!("PARQUET_FILE environment variable not set. Please set it to the path of the hits.parquet file."))?;
     register_hits(&ctx, &parquet_file).await?;
+    println!("hits registered");
     let mut query_list = Vec::new();
-    let queries_file = env::var("QUERIES_FILE")?;
+    let queries_file = env::var(QUERIES_FILE)
+    .map_err(|_| anyhow::anyhow!("QUERIES_FILE environment variable not set. Please set it to the path of the queries file."))?;
     let queries = fs::read_to_string(queries_file)?;
     for query in queries.lines() {
         query_list.push(query.to_string());
@@ -73,7 +95,7 @@ pub async fn run_benchmark() -> Result<Json<Value>, anyhow::Error> {
 }
 
 async fn register_hits(ctx: &SessionContext, parquet_file: &str) -> Result<(), anyhow::Error> {
-    let options: ParquetReadOptions<'_> = Default::default();
+    let options: ParquetReadOptions<'_> = ParquetReadOptions::default();
     ctx.register_parquet("hits", parquet_file, options)
         .await
         .map_err(|e| {
@@ -87,24 +109,28 @@ pub async fn execute_queries(
     query_list: Vec<String>,
 ) -> Result<Json<Value>, anyhow::Error> {
     const TRIES: usize = 3;
-    let mut results = Vec::new();
+    let mut results = Vec::with_capacity(query_list.len());
+    let mut query_count = 1;
+    let mut total_elapsed_per_iteration = [0.0; TRIES];
 
-    for sql in query_list.iter() {
-        let mut elapsed_times = Vec::new();
-        for _iteration in 1..=TRIES {
+    for (query_index, sql) in query_list.iter().enumerate() {
+        let mut elapsed_times = Vec::with_capacity(TRIES);
+        for iteration in 1..=TRIES {
             let start = Instant::now();
             let task_ctx = ctx.task_ctx();
             let dialect = &task_ctx.session_config().options().sql_parser.dialect;
             let dialect = dialect_from_str(dialect).ok_or_else(|| {
                 plan_datafusion_err!(
                     "Unsupported SQL dialect: {dialect}. Available dialects: \
-                        Generic, MySQL, PostgreSQL, Hive, SQLite, Snowflake, Redshift, \
-                        MsSQL, ClickHouse, BigQuery, Ansi."
-                    )
-                })?;
+                     Generic, MySQL, PostgreSQL, Hive, SQLite, Snowflake, Redshift, \
+                     MsSQL, ClickHouse, BigQuery, Ansi."
) })?; let statements = DFParser::parse_sql_with_dialect(sql, dialect.as_ref())?; - let statement = statements.front().unwrap(); + let statement = statements + .front() + .ok_or_else(|| anyhow::anyhow!("No SQL statement found in query: {}", sql))?; let plan = ctx.state().statement_to_plan(statement.clone()).await?; let df = ctx.execute_logical_plan(plan).await?; @@ -112,9 +138,24 @@ pub async fn execute_queries( let _ = collect(physical_plan, task_ctx.clone()).await?; let elapsed = start.elapsed().as_secs_f64(); + total_elapsed_per_iteration[iteration - 1] += elapsed; + + warn!("query {query_count} iteration {iteration} completed in {elapsed} secs"); elapsed_times.push(elapsed); } - results.push(elapsed_times); + query_count += 1; + results.push(json!({ + "query_index": query_index, + "query": sql, + "elapsed_times": elapsed_times + })); + } + for (iteration, total_elapsed) in total_elapsed_per_iteration.iter().enumerate() { + warn!( + "Total time for iteration {}: {} seconds", + iteration + 1, + total_elapsed + ); } let result_json = json!(results); From 07199eeef90646cb554102c6014d3eb420bd771c Mon Sep 17 00:00:00 2001 From: Nikhil Sinha Date: Sun, 2 Mar 2025 21:31:46 -0500 Subject: [PATCH 3/5] refactor, coderabbitai suggestions --- src/handlers/http/clickbench.rs | 79 +++++++++++++++++++-------------- 1 file changed, 45 insertions(+), 34 deletions(-) diff --git a/src/handlers/http/clickbench.rs b/src/handlers/http/clickbench.rs index c445948e1..e6e151cad 100644 --- a/src/handlers/http/clickbench.rs +++ b/src/handlers/http/clickbench.rs @@ -23,12 +23,11 @@ use datafusion::{ common::plan_datafusion_err, error::DataFusionError, execution::{runtime_env::RuntimeEnvBuilder, SessionStateBuilder}, - physical_plan::collect, prelude::{ParquetReadOptions, SessionConfig, SessionContext}, sql::{parser::DFParser, sqlparser::dialect::dialect_from_str}, }; use serde_json::{json, Value}; -use tracing::warn; +use tracing::{info, warn}; static PARQUET_FILE: &str = "PARQUET_FILE"; static QUERIES_FILE: &str = "QUERIES_FILE"; @@ -45,20 +44,24 @@ pub async fn clickbench_benchmark() -> Result pub async fn drop_system_caches() -> Result<(), anyhow::Error> { // Sync to flush file system buffers - Command::new("sync") - .status() - .expect("Failed to execute sync command"); + match Command::new("sync").status() { + Ok(_) => {} + Err(e) => warn!("Failed to execute sync command: {}", e), + } let _ = Command::new("sudo") - .args(["sh", "-c", "echo 3 > /proc/sys/vm/drop_caches"]) - .output() - .map_err(|e| anyhow::Error::msg(e.to_string()))?; + .args(["sh", "-c", "echo 3 > /proc/sys/vm/drop_caches"]) + .output() + .map_err(|e| { + warn!("Failed to drop system caches: {}", e); + anyhow::Error::msg("Failed to drop system caches. This might be expected if not running on Linux or without sudo privileges.") + })?; Ok(()) } #[tokio::main(flavor = "multi_thread")] pub async fn run_benchmark() -> Result, anyhow::Error> { - let mut session_config = SessionConfig::from_env()?.with_information_schema(true); + let mut session_config = SessionConfig::new().with_information_schema(true); session_config = session_config.with_batch_size(8192); @@ -81,12 +84,12 @@ pub async fn run_benchmark() -> Result, anyhow::Error> { table_options.insert("binary_as_string", "true"); let parquet_file = env::var(PARQUET_FILE) - .map_err(|_| anyhow::anyhow!("PARQUET_FILE environment variable not set. Please set it to the path of the hits.parquet file."))?; + .map_err(|_| anyhow::anyhow!("PARQUET_FILE environment variable not set. 
+        .map_err(|_| anyhow::anyhow!("PARQUET_FILE environment variable not set. Please set it to the path of the hits.parquet file."))?;
     register_hits(&ctx, &parquet_file).await?;
-    println!("hits registered");
+    info!("hits.parquet registered");
     let mut query_list = Vec::new();
     let queries_file = env::var(QUERIES_FILE)
-    .map_err(|_| anyhow::anyhow!("QUERIES_FILE environment variable not set. Please set it to the path of the queries file."))?;
+        .map_err(|_| anyhow::anyhow!("QUERIES_FILE environment variable not set. Please set it to the path of the queries file."))?;
     let queries = fs::read_to_string(queries_file)?;
     for query in queries.lines() {
         query_list.push(query.to_string());
@@ -110,9 +113,7 @@ pub async fn execute_queries(
 ) -> Result<Json<Value>, anyhow::Error> {
     const TRIES: usize = 3;
     let mut results = Vec::with_capacity(query_list.len());
-    let mut query_count = 1;
     let mut total_elapsed_per_iteration = [0.0; TRIES];
-
     for (query_index, sql) in query_list.iter().enumerate() {
         let mut elapsed_times = Vec::with_capacity(TRIES);
         for iteration in 1..=TRIES {
@@ -122,8 +123,8 @@ pub async fn execute_queries(
             let dialect = &task_ctx.session_config().options().sql_parser.dialect;
             let dialect = dialect_from_str(dialect).ok_or_else(|| {
                 plan_datafusion_err!(
                     "Unsupported SQL dialect: {dialect}. Available dialects: \
-                     Generic, MySQL, PostgreSQL, Hive, SQLite, Snowflake, Redshift, \
-                     MsSQL, ClickHouse, BigQuery, Ansi."
+                        Generic, MySQL, PostgreSQL, Hive, SQLite, Snowflake, Redshift, \
+                        MsSQL, ClickHouse, BigQuery, Ansi."
                 )
             })?;
@@ -134,31 +135,41 @@ pub async fn execute_queries(
             let plan = ctx.state().statement_to_plan(statement.clone()).await?;
 
             let df = ctx.execute_logical_plan(plan).await?;
-            let physical_plan = df.create_physical_plan().await?;
 
-            let _ = collect(physical_plan, task_ctx.clone()).await?;
+            let _ = df.collect().await?;
             let elapsed = start.elapsed().as_secs_f64();
             total_elapsed_per_iteration[iteration - 1] += elapsed;
-
-            warn!("query {query_count} iteration {iteration} completed in {elapsed} secs");
+            info!("query {query_index} iteration {iteration} completed in {elapsed} secs");
             elapsed_times.push(elapsed);
+
+            results.push(json!({
+                "query_index": query_index,
+                "query": sql,
+                "elapsed_times": {
+                    "iteration": iteration + 1,
+                    "elapsed_time": elapsed_times
+                }
+            }));
         }
-        query_count += 1;
-        results.push(json!({
-            "query_index": query_index,
-            "query": sql,
-            "elapsed_times": elapsed_times
-        }));
-    }
-    for (iteration, total_elapsed) in total_elapsed_per_iteration.iter().enumerate() {
-        warn!(
-            "Total time for iteration {}: {} seconds",
-            iteration + 1,
-            total_elapsed
-        );
     }
 
-    let result_json = json!(results);
+    let summary: Vec<Value> = total_elapsed_per_iteration
+        .iter()
+        .enumerate()
+        .map(|(iteration, &total_elapsed)| {
+            json!({
+                "iteration": iteration + 1,
+                "total_elapsed": total_elapsed
+            })
+        })
+        .collect();
+
+    info!("summary: {:?}", summary);
+
+    let result_json = json!({
+        "summary": summary,
+        "results": results
+    });
 
     Ok(Json(result_json))
 }

From 449ef80ecdc43bad1715ecd2b63f0b04501a4852 Mon Sep 17 00:00:00 2001
From: Nikhil Sinha
Date: Sun, 2 Mar 2025 21:35:37 -0500
Subject: [PATCH 4/5] iteration no. corrected
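
each entry in the per-query results now carries its iteration number and
elapsed time directly; one entry then looks roughly like this (values are
illustrative, not measured):

{ "query_index": 0, "query": "SELECT COUNT(*) FROM hits;", "iteration": 1, "elapsed_time": 0.42 }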

---
 src/handlers/http/clickbench.rs | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/src/handlers/http/clickbench.rs b/src/handlers/http/clickbench.rs
index e6e151cad..2b4afd5be 100644
--- a/src/handlers/http/clickbench.rs
+++ b/src/handlers/http/clickbench.rs
@@ -143,12 +143,10 @@ pub async fn execute_queries(
             elapsed_times.push(elapsed);
 
             results.push(json!({
-                "query_index": query_index,
-                "query": sql,
-                "elapsed_times": {
-                    "iteration": iteration + 1,
-                    "elapsed_time": elapsed_times
-                }
+                "query_index": query_index,
+                "query": sql,
+                "iteration": iteration,
+                "elapsed_time": elapsed
             }));
         }
     }

From 8ef201404e8b24ecb8dd6374158f208cc7f38029 Mon Sep 17 00:00:00 2001
From: Nikhil Sinha
Date: Sun, 2 Mar 2025 22:09:42 -0500
Subject: [PATCH 5/5] json structure change

---
 src/handlers/http/clickbench.rs | 48 ++++++++++++++++++++------------
 1 file changed, 30 insertions(+), 18 deletions(-)

diff --git a/src/handlers/http/clickbench.rs b/src/handlers/http/clickbench.rs
index 2b4afd5be..02df9d4e2 100644
--- a/src/handlers/http/clickbench.rs
+++ b/src/handlers/http/clickbench.rs
@@ -16,7 +16,7 @@
  *
  */
 
-use std::{collections::HashMap, env, fs, process::Command, time::Instant};
+use std::{env, fs, process::Command, time::Instant};
 
 use actix_web::{web::Json, Responder};
 use datafusion::{
@@ -35,9 +35,8 @@ pub async fn clickbench_benchmark() -> Result<impl Responder, actix_web::Error>
     drop_system_caches()
         .await
         .map_err(actix_web::error::ErrorInternalServerError)?;
-    let results = tokio::task::spawn_blocking(run_benchmark)
+    let results = run_benchmark()
         .await
-        .map_err(actix_web::error::ErrorInternalServerError)?
         .map_err(actix_web::error::ErrorInternalServerError)?;
     Ok(results)
 }
@@ -59,7 +58,6 @@ pub async fn drop_system_caches() -> Result<(), anyhow::Error> {
     Ok(())
 }
 
-#[tokio::main(flavor = "multi_thread")]
 pub async fn run_benchmark() -> Result<Json<Value>, anyhow::Error> {
     let mut session_config = SessionConfig::new().with_information_schema(true);
 
@@ -80,9 +78,6 @@ pub async fn run_benchmark() -> Result<Json<Value>, anyhow::Error> {
 
     let ctx = SessionContext::new_with_state(state);
 
-    let mut table_options = HashMap::new();
-    table_options.insert("binary_as_string", "true");
-
     let parquet_file = env::var(PARQUET_FILE)
         .map_err(|_| anyhow::anyhow!("PARQUET_FILE environment variable not set. Please set it to the path of the hits.parquet file."))?;
     register_hits(&ctx, &parquet_file).await?;
@@ -94,7 +89,12 @@ pub async fn run_benchmark() -> Result<Json<Value>, anyhow::Error> {
     let queries = fs::read_to_string(queries_file)?;
     for query in queries.lines() {
         query_list.push(query.to_string());
     }
-    execute_queries(&ctx, query_list).await
+    let results = tokio::task::spawn_blocking(move || execute_queries(&ctx, query_list))
+        .await
+        .map_err(|e| anyhow::anyhow!(e))?
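+        // first `?`: the JoinError from spawn_blocking; second `?`: the error
+        // returned by execute_queries itself (both converted to anyhow::Error)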
+        .map_err(|e| anyhow::anyhow!(e))?;
+
+    Ok(results)
 }
 
@@ -107,12 +107,13 @@ async fn register_hits(ctx: &SessionContext, parquet_file: &str) -> Result<(), a
     Ok(())
 }
 
+#[tokio::main(flavor = "multi_thread")]
 pub async fn execute_queries(
     ctx: &SessionContext,
     query_list: Vec<String>,
 ) -> Result<Json<Value>, anyhow::Error> {
     const TRIES: usize = 3;
-    let mut results = Vec::with_capacity(query_list.len());
+    let mut results = Vec::with_capacity(query_list.len() * TRIES);
     let mut total_elapsed_per_iteration = [0.0; TRIES];
     for (query_index, sql) in query_list.iter().enumerate() {
         let mut elapsed_times = Vec::with_capacity(TRIES);
@@ -141,14 +142,27 @@ pub async fn execute_queries(
             total_elapsed_per_iteration[iteration - 1] += elapsed;
             info!("query {query_index} iteration {iteration} completed in {elapsed} secs");
             elapsed_times.push(elapsed);
-
-            results.push(json!({
-                "query_index": query_index,
-                "query": sql,
-                "iteration": iteration,
-                "elapsed_time": elapsed
-            }));
         }
+        let iterations: Vec<Value> = elapsed_times
+            .iter()
+            .enumerate()
+            .map(|(iteration, &elapsed_time)| {
+                json!({
+                    "iteration": iteration + 1,
+                    "elapsed_time": elapsed_time
+                })
+            })
+            .collect();
+
+        let query_result = json!({
+            "query_details": {
+                "query_index": query_index + 1,
+                "query": sql
+            },
+            "iterations": iterations
+        });
+
+        results.push(query_result);
     }
 
     let summary: Vec<Value> = total_elapsed_per_iteration
@@ -162,8 +176,6 @@ pub async fn execute_queries(
             })
         })
         .collect();
 
-    info!("summary: {:?}", summary);
-
     let result_json = json!({
         "summary": summary,
         "results": results