feat: add sysinfo metrics #1139

Open · wants to merge 2 commits into main
7 changes: 3 additions & 4 deletions src/correlation.rs
@@ -129,10 +129,9 @@ impl Correlations {
             .await?;
 
         // Update in memory
-        self.write().await.insert(
-            correlation.id.to_owned(),
-            correlation.clone(),
-        );
+        self.write()
+            .await
+            .insert(correlation.id.to_owned(), correlation.clone());
 
         Ok(correlation)
     }
36 changes: 13 additions & 23 deletions src/handlers/http/cluster/mod.rs
@@ -35,7 +35,7 @@ use crate::storage::{ObjectStorageError, STREAM_ROOT_DIRECTORY};
 use crate::storage::{ObjectStoreFormat, PARSEABLE_ROOT_DIRECTORY};
 use crate::HTTP_CLIENT;
 use actix_web::http::header::{self, HeaderMap};
-use actix_web::web::Path;
+use actix_web::web::{Json, Path};
 use actix_web::Responder;
 use bytes::Bytes;
 use chrono::Utc;
@@ -729,6 +729,9 @@ async fn fetch_cluster_metrics() -> Result<Vec<Metrics>, PostError> {
     let mut dresses = vec![];
 
     for ingestor in ingestor_metadata {
+        if !utils::check_liveness(&ingestor.domain_name).await {
+            continue;
+        }
         let uri = Url::parse(&format!(
             "{}{}/metrics",
             &ingestor.domain_name,
@@ -749,11 +752,10 @@ async fn fetch_cluster_metrics() -> Result<Vec<Metrics>, PostError> {
             let text = res.text().await.map_err(PostError::NetworkError)?;
             let lines: Vec<Result<String, std::io::Error>> =
                 text.lines().map(|line| Ok(line.to_owned())).collect_vec();
-
             let sample = prometheus_parse::Scrape::parse(lines.into_iter())
                 .map_err(|err| PostError::CustomError(err.to_string()))?
                 .samples;
-            let ingestor_metrics = Metrics::from_prometheus_samples(sample, &ingestor)
+            let ingestor_metrics = Metrics::ingestor_prometheus_samples(sample, &ingestor)
                 .await
                 .map_err(|err| {
                     error!("Fatal: failed to get ingestor metrics: {:?}", err);
@@ -767,10 +769,11 @@ async fn fetch_cluster_metrics() -> Result<Vec<Metrics>, PostError> {
             );
         }
     }
+    dresses.push(Metrics::querier_prometheus_metrics().await);
     Ok(dresses)
 }
 
-pub fn init_cluster_metrics_schedular() -> Result<(), PostError> {
+pub async fn init_cluster_metrics_scheduler() -> Result<(), PostError> {
     info!("Setting up schedular for cluster metrics ingestion");
     let mut scheduler = AsyncScheduler::new();
     scheduler
@@ -779,25 +782,12 @@ pub fn init_cluster_metrics_schedular() -> Result<(), PostError> {
             let result: Result<(), PostError> = async {
                 let cluster_metrics = fetch_cluster_metrics().await;
                 if let Ok(metrics) = cluster_metrics {
-                    if !metrics.is_empty() {
-                        info!("Cluster metrics fetched successfully from all ingestors");
-                        if let Ok(metrics_bytes) = serde_json::to_vec(&metrics) {
-                            if matches!(
-                                ingest_internal_stream(
-                                    INTERNAL_STREAM_NAME.to_string(),
-                                    bytes::Bytes::from(metrics_bytes),
-                                )
-                                .await,
-                                Ok(())
-                            ) {
-                                info!("Cluster metrics successfully ingested into internal stream");
-                            } else {
-                                error!("Failed to ingest cluster metrics into internal stream");
-                            }
-                        } else {
-                            error!("Failed to serialize cluster metrics");
-                        }
-                    }
+                    let json_value = serde_json::to_value(metrics)
+                        .map_err(|e| anyhow::anyhow!("Failed to serialize metrics: {}", e))?;
+
+                    ingest_internal_stream(INTERNAL_STREAM_NAME.to_string(), Json(json_value))
+                        .await
+                        .map_err(|e| anyhow::anyhow!("Failed to ingest metrics: {}", e))?;
                 }
                 Ok(())
             }
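
For context on how the renamed scheduler runs: the diff keeps the `AsyncScheduler` wiring but elides the interval and the task that drives it. A minimal sketch of the pattern, assuming clokwerk's `AsyncScheduler` and a 10-minute interval (neither is shown in this diff):

```rust
// Sketch only: assumes clokwerk's AsyncScheduler and a 10-minute interval;
// the actual interval and error plumbing live in the elided lines above.
use clokwerk::{AsyncScheduler, TimeUnits};
use std::time::Duration;

pub async fn init_cluster_metrics_scheduler_sketch() {
    let mut scheduler = AsyncScheduler::new();
    scheduler.every(10.minutes()).run(move || async {
        // fetch_cluster_metrics() -> serde_json::to_value -> ingest_internal_stream(...)
    });
    // Drive the scheduler from a background task so server startup is not blocked.
    tokio::spawn(async move {
        loop {
            scheduler.run_pending().await;
            tokio::time::sleep(Duration::from_secs(10)).await;
        }
    });
}
```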
31 changes: 19 additions & 12 deletions src/handlers/http/ingest.rs
@@ -20,12 +20,8 @@ use super::logstream::error::{CreateStreamError, StreamError};
 use super::modal::utils::ingest_utils::{flatten_and_push_logs, push_logs};
 use super::users::dashboards::DashboardError;
 use super::users::filters::FiltersError;
-use crate::event::format::LogSource;
-use crate::event::{
-    self,
-    error::EventError,
-    format::{self, EventFormat},
-};
+use crate::event::format::{self, EventFormat, LogSource};
+use crate::event::{self, error::EventError};
 use crate::handlers::http::modal::utils::logstream_utils::create_stream_and_schema_from_storage;
 use crate::handlers::{LOG_SOURCE_KEY, STREAM_NAME_HEADER_KEY};
 use crate::metadata::error::stream_info::MetadataError;
@@ -36,12 +32,12 @@ use crate::otel::metrics::flatten_otel_metrics;
 use crate::otel::traces::flatten_otel_traces;
 use crate::storage::{ObjectStorageError, StreamType};
 use crate::utils::header_parsing::ParseHeaderError;
-use crate::utils::json::flatten::JsonFlattenError;
+use crate::utils::json::convert_array_to_object;
+use crate::utils::json::flatten::{convert_to_array, JsonFlattenError};
 use actix_web::web::{Json, Path};
 use actix_web::{http::header::ContentType, HttpRequest, HttpResponse};
 use arrow_array::RecordBatch;
 use arrow_schema::Schema;
-use bytes::Bytes;
 use chrono::Utc;
 use http::StatusCode;
 use opentelemetry_proto::tonic::logs::v1::LogsData;
@@ -77,18 +73,29 @@ pub async fn ingest(req: HttpRequest, Json(json): Json<Value>) -> Result<HttpRes
     Ok(HttpResponse::Ok().finish())
 }
 
-pub async fn ingest_internal_stream(stream_name: String, body: Bytes) -> Result<(), PostError> {
-    let size: usize = body.len();
+pub async fn ingest_internal_stream(
+    stream_name: String,
+    Json(json): Json<Value>,
+) -> Result<(), PostError> {
+    let size = serde_json::to_vec(&json).unwrap().len() as u64;
+    let data = convert_to_array(convert_array_to_object(
+        json,
+        None,
+        None,
+        None,
+        SchemaVersion::V0,
+        &LogSource::default(),
+    )?)?;
 
     let parsed_timestamp = Utc::now().naive_utc();
     let (rb, is_first) = {
-        let body_val: Value = serde_json::from_slice(&body)?;
         let hash_map = STREAM_INFO.read().unwrap();
         let schema = hash_map
            .get(&stream_name)
            .ok_or(PostError::StreamNotFound(stream_name.clone()))?
            .schema
            .clone();
-        let event = format::json::Event { data: body_val };
+        let event = format::json::Event { data };
+        // For internal streams, use old schema
        event.into_recordbatch(&schema, false, None, SchemaVersion::V0)?
     };
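
With this change, callers hand `ingest_internal_stream` already-parsed JSON instead of raw `Bytes`. A hedged usage sketch (the payload shape is illustrative, not the PR's actual metrics schema):

```rust
// Illustrative caller; the field names are made up, but the call matches
// the new signature: (String, Json<Value>) instead of (String, Bytes).
use actix_web::web::Json;
use serde_json::json;

async fn push_example_metrics() -> Result<(), PostError> {
    let payload = json!([
        { "event_type": "cluster-metrics", "address": "ingestor-0:8000" }
    ]);
    ingest_internal_stream(INTERNAL_STREAM_NAME.to_string(), Json(payload)).await
}
```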
1 change: 1 addition & 0 deletions src/handlers/http/modal/ingest_server.rs
@@ -113,6 +113,7 @@ impl ParseableServer for IngestServer {
 
         // set the ingestor metadata
         set_ingestor_metadata().await?;
+        metrics::init_system_metrics_scheduler().await?;
 
         // Ingestors shouldn't have to deal with OpenId auth flow
         let app = self.start(shutdown_rx, prometheus, None);
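
The body of `metrics::init_system_metrics_scheduler` is not part of this diff. Given the PR title, a speculative sketch of what a sysinfo-backed collector could look like (all names, gauges, and intervals here are assumptions, not the PR's code):

```rust
// Speculative sketch only: the real implementation is not shown in this diff.
// Assumes the sysinfo crate (>= 0.31) and prometheus gauges registered
// elsewhere in the metrics module.
use std::time::Duration;
use sysinfo::System;

pub async fn init_system_metrics_scheduler_sketch() {
    tokio::spawn(async move {
        let mut sys = System::new_all();
        loop {
            sys.refresh_memory();
            sys.refresh_cpu_usage();
            let used_memory_bytes = sys.used_memory();
            let cpu_usage_percent = sys.global_cpu_usage();
            // e.g. MEMORY_USAGE_GAUGE.set(used_memory_bytes as i64);
            //      CPU_USAGE_GAUGE.set(cpu_usage_percent as f64);
            tokio::time::sleep(Duration::from_secs(60)).await;
        }
    });
}
```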
10 changes: 4 additions & 6 deletions src/handlers/http/modal/query_server.rs
@@ -18,11 +18,10 @@
 
 use crate::correlation::CORRELATIONS;
 use crate::handlers::airplane;
-use crate::handlers::http::base_path;
-use crate::handlers::http::cluster::{self, init_cluster_metrics_schedular};
 use crate::handlers::http::logstream::create_internal_stream_if_not_exists;
 use crate::handlers::http::middleware::{DisAllowRootUser, RouteExt};
 use crate::handlers::http::{self, role};
+use crate::handlers::http::{base_path, cluster};
 use crate::handlers::http::{logstream, MAX_EVENT_PAYLOAD_SIZE};
 use crate::hottier::HotTierManager;
 use crate::rbac::role::Action;
@@ -35,7 +34,7 @@ use actix_web::{web, Scope};
 use async_trait::async_trait;
 use bytes::Bytes;
 use tokio::sync::oneshot;
-use tracing::{error, info};
+use tracing::error;
 
 use crate::{option::CONFIG, ParseableServer};
 
@@ -104,15 +103,14 @@ impl ParseableServer for QueryServer {
         // track all parquet files already in the data directory
         storage::retention::load_retention_from_global();
 
+        metrics::init_system_metrics_scheduler().await?;
+        cluster::init_cluster_metrics_scheduler().await?;
         // all internal data structures populated now.
         // start the analytics scheduler if enabled
         if CONFIG.options.send_analytics {
             analytics::init_analytics_scheduler()?;
         }
 
-        if matches!(init_cluster_metrics_schedular(), Ok(())) {
-            info!("Cluster metrics scheduler started successfully");
-        }
         if let Some(hot_tier_manager) = HotTierManager::global() {
             hot_tier_manager.put_internal_stream_hot_tier().await?;
             hot_tier_manager.download_from_s3()?;
2 changes: 2 additions & 0 deletions src/handlers/http/modal/server.rs
@@ -119,6 +119,8 @@ impl ParseableServer for Server {
         let (mut remote_sync_handler, mut remote_sync_outbox, mut remote_sync_inbox) =
             sync::object_store_sync().await;
 
+        metrics::init_system_metrics_scheduler().await?;
+
         if CONFIG.options.send_analytics {
             analytics::init_analytics_scheduler()?;
         }
Expand Down