Add Visualization Type to Descriptions (#75)
* Added visualization type to descriptions
* Modified description for script
* Modified and verified all script descriptions
* Changed wording of script visualization description

Signed-off-by: Kartik Pattaswamy <[email protected]>
1 parent: e85da77
Commit: 87879c2
Showing 12 changed files with 12 additions and 12 deletions.
@@ -1,5 +1,5 @@
 {
   "name": "HTTP Error Rate by Service (Wide Format)",
-  "description": "",
+  "description": "Use with the Table visualization. Query outputs HTTP error and total request count per service",
   "script": "'''\nThis query outputs a table of HTTP error and total request count per service.\n\nThis query is for use with Grafana's Pixie Datasource Plugin only,\nas it uses Grafana macros for adding Grafana dashboard context.\n'''\n\n# $pixieCluster - work around to update the panel if this dashboard variable is present\n\n# Import Pixie's module for querying data.\nimport px\n\n# Import HTTP events table.\ndf = px.DataFrame(table='http_events', start_time=__time_from)\n\n# Add columns for service, namespace info.\ndf.namespace = df.ctx['namespace']\ndf.service = df.ctx['service']\n\n# Filter out requests that don't have a service defined.\ndf = df[df.service != '']\n\n# Filter out requests from the Pixie (pl) namespace.\ndf = df[df.namespace != 'pl']\n\n# Add column for HTTP response status errors.\ndf.error = df.resp_status >= 400\n\n# Group HTTP events by service, counting errors and total HTTP events.\ndf = df.groupby(['service']).agg(\n error_count=('error', px.sum),\n total_requests=('resp_status', px.count)\n)\n\n# Output the DataFrame.\npx.display(df)\n"
 }
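This wide-format query leaves the error-rate division to the dashboard. As a rough sketch (not part of this commit), the same aggregation could emit a ready-made percentage column by reusing the px.mean-over-boolean and px.Percent pattern that the HTTP Service Map script later in this diff already uses:

# Sketch only: extends the script above; assumes it runs through the
# Pixie Datasource Plugin, which substitutes the __time_from macro.
import px

df = px.DataFrame(table='http_events', start_time=__time_from)
df.service = df.ctx['service']
df = df[df.service != '']
df.error = df.resp_status >= 400

# px.mean over the boolean 'error' column yields the error fraction.
df = df.groupby(['service']).agg(
    error_count=('error', px.sum),
    total_requests=('resp_status', px.count),
    error_rate=('error', px.mean)
)
df.error_rate = px.Percent(df.error_rate)
px.display(df)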
@@ -1,5 +1,5 @@
 {
   "name": "HTTP Request Throughput by Service",
-  "description": "Displays time series showing overall HTTP request throughput per service.",
+  "description": "Use with Table or Time series visualization. Displays overall HTTP request throughput per service.",
   "script": "# Copyright 2018- The Pixie Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n'''\nThis query outputs a table of time series data showing overall HTTP\nrequest throughput per service.\n\nThis query is for use with Grafana's Pixie Datasource Plugin only,\nas it uses Grafana macros for adding Grafana dashboard context.\n'''\n\n# $pixieCluster - work around to update the panel if this dashboard variable is present\n\n# Import Pixie's module for querying data.\nimport px\n\n# Load data from Pixie's `http_events` table into a Dataframe.\ndf = px.DataFrame(table='http_events', start_time=__time_from)\n\n# Add K8s metadata context.\ndf.service = df.ctx['service']\ndf.namespace = df.ctx['namespace']\n\n# Filter out requests that don't have a service defined.\ndf = df[df.service != '']\n\n# Bin the 'time_' column using the interval provided by Grafana.\ndf.timestamp = px.bin(df.time_, __interval)\n\n# Group data by unique pairings of 'timestamp' and 'service'\n# and count the total number of requests per unique pairing.\nper_ns_df = df.groupby(['timestamp', 'service']).agg(\n throughput_total=('latency', px.count)\n )\n\n# Calculate throughput by dividing # of requests by the time interval.\nper_ns_df.request_throughput = per_ns_df.throughput_total / __interval\nper_ns_df.request_throughput = per_ns_df.request_throughput * 1e9\n\n# Rename 'timestamp' column to 'time_'. The Grafana plugin expects a 'time_'\n# column to display data in a Graph or Time series.\nper_ns_df.time_ = per_ns_df.timestamp\n\n# Output select columns of the DataFrame.\npx.display(per_ns_df['time_', 'service', 'request_throughput'])"
 }
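The two-step throughput calculation in this script is easy to misread, so spelled out: assuming the plugin expands the __interval macro to the panel interval in nanoseconds (which is what the * 1e9 factor implies), the division yields a per-nanosecond rate and the multiplication rescales it to requests per second:

# Unit bookkeeping for the conversion above (assumption: __interval is
# the Grafana panel interval expressed in nanoseconds).
per_ns_df.request_throughput = per_ns_df.throughput_total / __interval  # requests per nanosecond
per_ns_df.request_throughput = per_ns_df.request_throughput * 1e9       # 1e9 ns per second -> requests per second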
@@ -1,5 +1,5 @@
 {
   "name": "Raw HTTP Requests",
-  "description": "Query outputs a table of time series data showing overall HTTP request throughput",
+  "description": "Use with Table or Time series visualization. Query outputs a table of time series data showing overall HTTP request throughput.",
   "script": "# Copyright 2018- The Pixie Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n'''\nThis query outputs a table of time series data showing overall HTTP request throughput.\n\nThis query is for use with Grafana's Pixie Datasource Plugin only,\nas it uses Grafana macros for adding Grafana dashboard context.\n'''\n\n# $pixieCluster - work around to update the panel if this dashboard variable is present\n\n# Import Pixie's module for querying data.\nimport px\n\n# Load data from Pixie's `http_events` table into a Dataframe.\ndf = px.DataFrame(table='http_events', start_time=__time_from)\n\n# Add context.\ndf.pod = df.ctx['pod']\ndf.service = df.ctx['service']\ndf.namespace = df.ctx['namespace']\ndf.node = df.ctx['node']\n\n# Add optional filtering.\n# df = df[df.service == 'px-sock-shop/front-end']\n# df = df[px.contains(df.pod, 'front-end')]\n\n# Bin the 'time_' column using the interval provided by Grafana.\ndf.timestamp = px.bin(df.time_, __interval)\n\n# Group data by unique 'timestamp' and count the total number of\n# requests per unique timestamp.\nper_ns_df = df.groupby(['timestamp']).agg(\n throughput_total=('latency', px.count)\n )\n\n# Calculate throughput by dividing # of requests by the time interval.\nper_ns_df.request_throughput = per_ns_df.throughput_total / __interval\n\n# Rename 'timestamp' column to 'time_'. The Grafana plugin expects a 'time_'\n# column to display data in a Graph or Time series.\nper_ns_df.time_ = per_ns_df.timestamp\nper_ns_df.request_throughput = per_ns_df.request_throughput * 1e9\n\n# Output select columns of the DataFrame.\npx.display(per_ns_df['time_', 'request_throughput'])"
 }
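The commented-out filters in this script are the intended hook for narrowing the raw stream to a single workload. Enabled, they look like the lines below; the px-sock-shop names are the demo values from the script's own comments, not required identifiers:

# Keep only requests served by the sock-shop front-end service.
df = df[df.service == 'px-sock-shop/front-end']
# Or keep any pod whose name contains 'front-end'.
df = df[px.contains(df.pod, 'front-end')]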
@@ -1,5 +1,5 @@
 {
   "name": "HTTP Service Map",
-  "description": "This query outputs a graph of the HTTP traffic between the services in your cluster.",
+  "description": "Use with the Node Graph visualization. This query outputs HTTP traffic between the services in your cluster.",
   "script": "# Copyright 2018- The Pixie Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n'''\nThis query outputs a graph of the HTTP traffic between the services in\nyour cluster. Use with Grafana's node graph panel.\n\nThis query is for use with Grafana's Pixie Datasource Plugin only,\nas it uses Grafana macros for adding Grafana dashboard context.\nThe functions in this query are pulled from the px/cluster script:\nhttps://github.com/pixie-io/pixie/tree/main/src/pxl_scripts/px/cluster\n'''\n\n# $pixieCluster - work around to update the panel if this dashboard variable is present\n\n# Import Pixie's module for querying data.\nimport px\n\n# Window size to use on time_ column for bucketing.\nns_per_s = 1000 * 1000 * 1000\nns_per_ms = 1000 * 1000\nwindow_ns = px.DurationNanos(10 * ns_per_s)\n\n# Flag to filter out health checks from the data.\nfilter_health_checks = True\n\n# Whether or not to include traffic from IPs that don't resolve to a known pod/service.\ninclude_ips = True\n\n\ndef http_stats():\n ''' Get a dataframe of HTTP events.\n Certain traffic (like health checks) are auto removed, and some standard fields are added.\n '''\n df = px.DataFrame(table='http_events', start_time=__time_from)\n\n # Add K8s metadata.\n df.namespace = df.ctx['namespace']\n df.service = df.ctx['service']\n df.pod = df.ctx['pod']\n\n # Add optional filters.\n df = df[df.namespace == 'px-sock-shop']\n # df = df[df.service == '']\n # df = df[df.pod == '']\n\n # Filter out non-k8s entities.\n df = df[df.pod != '']\n\n # Snap timestamps to bins.\n df.timestamp = px.bin(df.time_, window_ns)\n\n # Additional HTTP fields, pre-computed for convenience.\n df.failure = df.resp_status >= 400\n\n # Remove health checks, and anything with no remote address.\n health_check_req = ((df.req_path == '/healthz' or df.req_path == '/readyz') or\n df.req_path == '/livez')\n filter_out_conds = (health_check_req and filter_health_checks) or (df['remote_addr'] == '-')\n df = df[not filter_out_conds]\n\n return df\n\n\ndef service_let_graph():\n ''' Compute a summary of traffic by requesting service, for requests on services\n in the current cluster. Similar to `inbound_let_summary` but also breaks down\n by pod in addition to service.\n '''\n df = http_stats()\n df = df.groupby(['timestamp', 'service', 'remote_addr', 'pod', 'trace_role']).agg(\n latency_quantiles=('latency', px.quantiles),\n error_rate=('failure', px.mean),\n throughput_total=('latency', px.count),\n inbound_bytes_total=('req_body_size', px.sum),\n outbound_bytes_total=('resp_body_size', px.sum)\n )\n\n # Get the traced and remote pod/service/IP information.\n df.traced_pod = df.pod\n df.traced_svc = df.service\n df.traced_ip = px.pod_name_to_pod_ip(df.pod)\n df.remote_pod = px.pod_id_to_pod_name(px.ip_to_pod_id(df.remote_addr))\n df.remote_svc = px.service_id_to_service_name(px.ip_to_service_id(df.remote_addr))\n df.remote_ip = df.remote_addr\n # If external IPs are excluded in the service graph, then we also exclude any\n # traffic where we don't know the remote pod or remote service name.\n df = df[include_ips or (df.remote_pod != '' or df.remote_svc != '')]\n\n # Associate it with Client/Server roles, based on the trace role.\n df.is_server_side_tracing = df.trace_role == 2\n df.responder_pod = px.select(df.is_server_side_tracing, df.traced_pod, df.remote_pod)\n df.requestor_pod = px.select(df.is_server_side_tracing, df.remote_pod, df.traced_pod)\n df.responder_service = px.select(df.is_server_side_tracing, df.traced_svc, df.remote_svc)\n df.requestor_service = px.select(df.is_server_side_tracing, df.remote_svc, df.traced_svc)\n df.responder_ip = px.select(df.is_server_side_tracing, df.traced_ip, df.remote_ip)\n df.requestor_ip = px.select(df.is_server_side_tracing, df.remote_ip, df.traced_ip)\n\n # Compute statistics about each edge of the service graph.\n df.latency_p50 = px.DurationNanos(px.floor(px.pluck_float64(df.latency_quantiles, 'p50')))\n df.latency_p90 = px.DurationNanos(px.floor(px.pluck_float64(df.latency_quantiles, 'p90')))\n df.latency_p99 = px.DurationNanos(px.floor(px.pluck_float64(df.latency_quantiles, 'p99')))\n df.request_throughput = df.throughput_total / window_ns\n df.inbound_throughput = df.inbound_bytes_total / window_ns\n df.outbound_throughput = df.outbound_bytes_total / window_ns\n df.error_rate = px.Percent(df.error_rate)\n return df.groupby(['responder_pod', 'requestor_pod', 'responder_service',\n 'requestor_service', 'responder_ip', 'requestor_ip']).agg(\n latency_p50=('latency_p50', px.mean),\n latency_p90=('latency_p90', px.mean),\n latency_p99=('latency_p99', px.mean),\n request_throughput=('request_throughput', px.mean),\n error_rate=('error_rate', px.mean),\n inbound_throughput=('inbound_throughput', px.mean),\n outbound_throughput=('outbound_throughput', px.mean),\n throughput_total=('throughput_total', px.sum)\n )\n\n\ndef graphnode_sources():\n df = service_let_graph()\n # Use Pod name for source node id and title. If pod name is not available,\n # use service name or IP address.\n df.source_svc_ip = px.select(df.requestor_service != '', df.requestor_service, df.requestor_ip)\n df.id = px.select(df.requestor_pod != '', df.requestor_pod, df.source_svc_ip)\n df.title = df.id\n df = df.groupby(['id', 'title']).agg()\n return df\n\n\ndef graphnode_targets():\n df = service_let_graph()\n # Use Pod name for target node id and title. If pod name is not available,\n # use service name or IP address.\n df.target_svc_ip = px.select(df.responder_service != '', df.responder_service, df.responder_ip)\n df.id = px.select(df.responder_pod != '', df.responder_pod, df.target_svc_ip)\n df.title = df.id\n df = df.groupby(['id', 'title']).agg()\n return df\n\n\ndef nodes():\n node_sources = graphnode_sources()\n node_targets = graphnode_targets()\n df = node_sources.append(node_targets)\n return df\n\n\ndef edges():\n df = service_let_graph()\n df.source_svc_ip = px.select(df.requestor_service != '', df.requestor_service, df.requestor_ip)\n df.source = px.select(df.requestor_pod != '', df.requestor_pod, df.source_svc_ip)\n df.target_svc_ip = px.select(df.responder_service != '', df.responder_service, df.responder_ip)\n df.target = px.select(df.responder_pod != '', df.responder_pod, df.target_svc_ip)\n df.id = df.source + '-' + df.target\n df.mainStat = df.error_rate * 100\n df.secondaryStat = df.latency_p90 / ns_per_ms\n return df[['id', 'source', 'target', 'mainStat', 'secondaryStat']]\n\n\nnodes_table = nodes()\nedges_table = edges()\npx.display(nodes_table, \"nodes\")\npx.display(edges_table, \"edges\")"
 }
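For orientation, this script satisfies the Node Graph panel by emitting two named tables: a nodes table keyed by id with a display title, and an edges table whose mainStat and secondaryStat columns drive the edge labels. Condensed from the script itself (the stat semantics are this script's choices, not plugin requirements):

# nodes table: one row per pod, falling back to service name or IP.
#   columns: id, title
# edges table: one row per requestor -> responder pair.
#   id            = source + '-' + target
#   mainStat      = error_rate * 100         # percent of failed requests
#   secondaryStat = latency_p90 / ns_per_ms  # p90 latency in milliseconds
px.display(nodes_table, "nodes")
px.display(edges_table, "edges")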