From 0e2ba3bb95e73aaed31e5dfb60aa2061424de265 Mon Sep 17 00:00:00 2001 From: Ken Koch Date: Wed, 8 Jan 2025 17:31:01 -0500 Subject: [PATCH] KEP-4939: Support TLS in gRPC probe --- keps/prod-readiness/sig-node/4939.yaml | 6 + .../sig-node/4939-tls-in-grpc-probe/README.md | 682 ++++++++++++++++++ keps/sig-node/4939-tls-in-grpc-probe/kep.yaml | 42 ++ 3 files changed, 730 insertions(+) create mode 100644 keps/prod-readiness/sig-node/4939.yaml create mode 100644 keps/sig-node/4939-tls-in-grpc-probe/README.md create mode 100644 keps/sig-node/4939-tls-in-grpc-probe/kep.yaml diff --git a/keps/prod-readiness/sig-node/4939.yaml b/keps/prod-readiness/sig-node/4939.yaml new file mode 100644 index 00000000000..2cd9af881c1 --- /dev/null +++ b/keps/prod-readiness/sig-node/4939.yaml @@ -0,0 +1,6 @@ +# The KEP must have an approver from the +# "prod-readiness-approvers" group +# of http://git.k8s.io/enhancements/OWNERS_ALIASES +kep-number: 4939 +alpha: + approver: "@deads2k" diff --git a/keps/sig-node/4939-tls-in-grpc-probe/README.md b/keps/sig-node/4939-tls-in-grpc-probe/README.md new file mode 100644 index 00000000000..167692e8f44 --- /dev/null +++ b/keps/sig-node/4939-tls-in-grpc-probe/README.md @@ -0,0 +1,682 @@ + +# KEP-4939: Support TLS in GRPC Probe + + + + +- [Release Signoff Checklist](#release-signoff-checklist) +- [Summary](#summary) +- [Motivation](#motivation) + - [Goals](#goals) + - [Non-Goals](#non-goals) +- [Proposal](#proposal) + - [Risks and Mitigations](#risks-and-mitigations) +- [Design Details](#design-details) + - [Test Plan](#test-plan) + - [Prerequisite testing updates](#prerequisite-testing-updates) + - [Unit tests](#unit-tests) + - [Integration tests](#integration-tests) + - [e2e tests](#e2e-tests) + - [Graduation Criteria](#graduation-criteria) + - [Alpha](#alpha) + - [Beta](#beta) + - [GA](#ga) + - [Upgrade / Downgrade Strategy](#upgrade--downgrade-strategy) + - [Version Skew Strategy](#version-skew-strategy) +- [Production Readiness Review 
Questionnaire](#production-readiness-review-questionnaire) + - [Feature Enablement and Rollback](#feature-enablement-and-rollback) + - [Rollout, Upgrade and Rollback Planning](#rollout-upgrade-and-rollback-planning) + - [Monitoring Requirements](#monitoring-requirements) + - [Dependencies](#dependencies) + - [Scalability](#scalability) + - [Troubleshooting](#troubleshooting) +- [Implementation History](#implementation-history) +- [Drawbacks](#drawbacks) +- [Alternatives](#alternatives) + + +## Release Signoff Checklist + + + +Items marked with (R) are required *prior to targeting to a milestone / release*. + +- [ ] (R) Enhancement issue in release milestone, which links to KEP dir in [kubernetes/enhancements] (not the initial KEP PR) +- [ ] (R) KEP approvers have approved the KEP status as `implementable` +- [ ] (R) Design details are appropriately documented +- [ ] (R) Test plan is in place, giving consideration to SIG Architecture and SIG Testing input (including test refactors) + - [ ] e2e Tests for all Beta API Operations (endpoints) + - [ ] (R) Ensure GA e2e tests meet requirements for [Conformance Tests](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/conformance-tests.md) + - [ ] (R) Minimum Two Week Window for GA e2e tests to prove flake free +- [ ] (R) Graduation criteria is in place + - [ ] (R) [all GA Endpoints](https://github.com/kubernetes/community/pull/1806) must be hit by [Conformance Tests](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/conformance-tests.md) +- [ ] (R) Production readiness review completed +- [ ] (R) Production readiness review approved +- [ ] "Implementation History" section is up-to-date for milestone +- [ ] User-facing documentation has been created in [kubernetes/website], for publication to [kubernetes.io] +- [ ] Supporting documentation—e.g., additional design documents, links to mailing list discussions/SIG meetings, relevant PRs/issues, release 
notes + + + +[kubernetes.io]: https://kubernetes.io/ +[kubernetes/enhancements]: https://git.k8s.io/enhancements +[kubernetes/website]: https://git.k8s.io/website + +## Summary + + + +The new gRPC health probe enables developers to probe +[gRPC health servers](https://github.com/grpc-ecosystem/grpc-health-probe) +from the node. +This allows them to stop using workarounds such as this +[grpc-health-probe](https://github.com/grpc-ecosystem/grpc-health-probe) +paired with `exec` probes. + +It allows natively running health checks on gRPC services +without deploying additional binaries as well as other benefits +outlined in [the announcement](https://kubernetes.io/blog/2022/05/13/grpc-probes-now-in-beta/). + +A limitation in the current implementation is that it only supports +gRPC servers that do not leverage TLS connections. +Even if they are not concerned about certificate verification for the health check, +a connection cannot be established at all if the server is expecting TLS and the client +is not. + +This enhancement aims to add configuration options to enable TLS on the gRPC probe. + +## Motivation + + + +We often deploy internal gRPC services on our cluster. +These deployments provide internal services and it's simple to add +the health server to them so they can be verified through a single interface. + +It's also worth noting we have an internal CA that signs certs for communicating +with these servers and all of them use TLS. + +Currently, we are using the `exec` probe for readiness and liveness configured as: + +``` +liveness_probe { + exec { + command = [ + "/bin/grpc_health_probe", + "-addr=:8443", + "-tls", + "-tls-no-verify", + ] + } +} +``` + +We would really like to switch to the gRPC probes introduced in 1.24 +but are unable to do so since there is no way to configure it to use a TLS connection +when reaching out to the health server. 
+ +Instead we must continue to rely on the `exec` probe and cannot reap the benefits +described in [the announcement](https://kubernetes.io/blog/2022/05/13/grpc-probes-now-in-beta/). + +### Goals + + + +The primary goal is to support TLS connections when using the `grpc` probe. +The probe will use TLS but not verify the certificate. + +### Non-Goals + + + +It is not a goal of this KEP to support providing a certificate to verify the TLS +connection. + +## Proposal + + + +I would like to add new configuration fields alongside `port` and `service` in the +[Probe GRPCAction](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#Probe). + +They can be used to indicate whether or not TLS should be used +and can serve as a basis for future TLS related functionality if desired. + +### Risks and Mitigations + + + +1. Adds more code to Kubelet and surface area to `Pod.Spec` + +## Design Details + + + +We should add a new struct to +[GRPCAction](https://github.com/kubernetes/kubernetes/blob/f422a58398d457a88ac7b05afca85a0e6b805605/pkg/apis/core/types.go#L2665-L2676) +named `tls` which is optional. +It would have a single field `mode` which only will have one value currently `NoVerify`. + +The presence of the `tls` object would be enough to indicate a desire to use TLS. + +For example, these configurations would enable TLS with no verification: + +``` +grpcAction: + port: 12345 + tls: + mode: NoVerify +``` +``` +grpcAction: + port: 12345 + tls: +``` + +This configuration would disable TLS: + +``` +grpcAction: + port: 12345 +``` + +I have identified the relevant code changes once the configuration +structure is hammered out. + +Currently, the gRPC probe uses `insecure.NewCredentials()` when +[establishing the connection](https://github.com/kubernetes/kubernetes/blob/d9441212d3e11dc13198f9d4df273c3555ecad11/pkg/probe/grpc/grpc.go#L59). 
+ +If configured to use TLS, that `DialOption` should be replaced with: + +```go +tlsConfig := &tls.Config{ + InsecureSkipVerify: true, +} +tlsCredentials := credentials.NewTLS(tlsConfig) + +opts := []grpc.DialOption{ + // ... + grpc.WithTransportCredentials(tlsCredentials), + // ... +} +``` + +### Test Plan + + + +[x] I/we understand the owners of the involved components may require updates to +existing tests to make this code solid enough prior to committing the changes necessary +to implement this enhancement. + +##### Prerequisite testing updates + + + +##### Unit tests + + + + + +Plan is to add unit tests to `pkg/probe/grpc/grpc_test.go` +that can verify the flags are interpreted correctly +and that a TLS (with no verify) and non-TLS configurations work as expected. + +- `k8s.io/kubernetes/pkg/probe/grpc`: TBD + +##### Integration tests + +N/A, only unit tests and e2e coverage. + +##### e2e tests + + + +Tests in test/e2e/common/node/container_probe.go: +- should not be restarted with a GRPC liveness probe: TBD +- should be restarted with a GRPC liveness probe: TBD + +### Graduation Criteria + +#### Alpha + +- Implement the feature. +- Add unit and e2e tests for the feature. + +#### Beta + +- Solicit feedback from the Alpha. +- Ensure tests are stable and passing. + +#### GA +- [ ] Address feedback from beta usage +- [ ] Validate that API is appropriate for users +- [ ] Close on any remaining open issues & bugs + +### Upgrade / Downgrade Strategy + +Upgrade: default values for new configurables should default to the current state so upgrade should not require anything. + +Downgrade: gRPC probes will not support TLS in a downgrade from alpha + +### Version Skew Strategy + +Generally, the feature should not be used until it is fully available +on the control plane as well as all nodes. 
Primarily, this is because +unless the user is very careful to configure their health check servers +to support TLS and non-TLS, partial availability of the feature will +result in failed health checks. + +In all situations where the new feature is partially available or the feature flag is +disabled on the nodes and they are configured with the `tls` flag, +the older nodes will receive the `tls` config but ignore it. +This would cause the probing gRPC connections to be made without TLS, +and unless the health check server can handle TLS and non-TLS connections, +it will fail and mark the pod unhealthy. + +## Production Readiness Review Questionnaire + + + +### Feature Enablement and Rollback + + + +###### How can this feature be enabled / disabled in a live cluster? + + + +- [x] Feature gate (also fill in values in `kep.yaml`) + - Feature gate name: `GRPCContainerProbeTLS` + - Components depending on the feature gate: + - `kubelet` (probing) + - API server (API changes) + +###### Does enabling the feature change any default behavior? + +No, it should be designed so that omitting the `tls` +configuration causes it to not use TLS, which is the default behavior. + +###### Can the feature be disabled once it has been enabled (i.e. can we roll back the enablement)? + +Yes. Removing the `tls` configuration should cause the probe to work as it did before. + +###### What happens if we reenable the feature if it was previously rolled back? + +Re-applying the `tls` configuration would cause the probe to start using TLS again. + +###### Are there any tests for feature enablement/disablement? + +Unit tests can be implemented to verify the gRPC probe +behaves correctly with the feature enabled or disabled. + +### Rollout, Upgrade and Rollback Planning + + + +###### How can a rollout or rollback fail? Can it impact already running workloads? + +Enabling or disabling the `tls` feature should only affect the next run of the probe. 
+It wouldn't "fail" but it would revert to not using TLS which would be an issue if the running gRPC server is expecting TLS. + +###### What specific metrics should inform a rollback? + +Rollback wouldn't address issues. Pods will need to stop using the new probe +type. + +###### Were upgrade and rollback tested? Was the upgrade->downgrade->upgrade path tested? + +N/A + +###### Is the rollout accompanied by any deprecations and/or removals of features, APIs, fields of API types, flags, etc.? + +No + +### Monitoring Requirements + + + +###### How can an operator determine if the feature is in use by workloads? + + + +###### How can someone using this feature know that it is working for their instance? + +When gRPC probe is configured, Pod must be scheduled and, the metric +`probe_total` can be observed to see the result of probe execution. + +Event will be emitted for the failed probe and logs available in `kubelet.log` +to troubleshoot the failing probes. + +###### What are the reasonable SLOs (Service Level Objectives) for the enhancement? + +Probe must leverage TLS as configured and succeed +whenever service has returned the correct response +in defined timeout, and fail otherwise. + +###### What are the SLIs (Service Level Indicators) an operator can use to determine the health of the service? + +The metric `probe_total` can be used to check for the probe result. Event and +`kubelet.log` log entries can be observed to troubleshoot issues. + +###### Are there any missing metrics that would be useful to have to improve observability of this feature? + +N/A + +### Dependencies + + + +###### Does this feature depend on any specific services running in the cluster? + +No + +### Scalability + + + +###### Will enabling / using this feature result in any new API calls? + +No + +###### Will enabling / using this feature result in introducing new API types? + +A new type for the `tls` config will be added to the `grpcAction` type. 
+ +###### Will enabling / using this feature result in any new calls to the cloud provider? + +No + +###### Will enabling / using this feature result in increasing size or count of the existing API objects? + +If configured, they will increase the config for the `grpcProbe` to include the `tls` option. + +###### Will enabling / using this feature result in increasing time taken by any operations covered by existing SLIs/SLOs? + +There could be a marginal increase due to TLS vs. the default Non-TLS + +###### Will enabling / using this feature result in non-negligible increase of resource usage (CPU, RAM, disk, IO, ...) in any components? + +There could be a marginal increase due to TLS vs. the default Non-TLS + +###### Can enabling / using this feature result in resource exhaustion of some node resources (PIDs, sockets, inodes, etc.)? + +Taken from the +[original KEP](https://github.com/kubernetes/enhancements/blob/4855713ab20b9653b3b715ded82d772bb38a8108/keps/sig-node/2727-grpc-probe/README.md#can-enabling--using-this-feature-result-in-resource-exhaustion-of-some-node-resources-pids-sockets-inodes-etc) +for the gRPC probe since it's still applicable: + +Yes, gRPC probes use node resources to establish connection. +This may lead to issue like [kubernetes/kubernetes#89898](https://github.com/kubernetes/kubernetes/issues/89898). + +The node resources for gRPC probes can be exhausted by a Pod with HostPort +making many connections to different destinations or any other process on a node. +This problem cannot be addressed generically. + +However, the design where node resources are being used for gRPC probes works +for the most setups. The default pods maximum is `110`. There are currently +no limits on number of containers. The number of containers is limited by the +amount of resources requested by these containers. 
With the fix limiting +the `TIME_WAIT` for the socket to 1 second, +[this calculation](https://github.com/kubernetes/kubernetes/issues/89898#issuecomment-1383207322) +demonstrates it will be hard to reach the limits on sockets. + +### Troubleshooting + + + +###### How does this feature react if the API server and/or etcd is unavailable? + +No dependency on etcd availability. + +###### What are other known failure modes? + +None + +###### What steps should be taken if SLOs are not being met to determine the problem? + +N/A + +## Implementation History + +* 2021-11-17: [Original gRPC Probe implementation](https://github.com/kubernetes/kubernetes/commit/b7affcced15923b8a45510301a90542eec232c49) + +## Drawbacks + +N/A + +## Alternatives + +[Discussed](https://github.com/kubernetes/enhancements/pull/5029#discussion_r1936341743) +using a boolean parameter instead of the `tls` struct. +Decided to use this approach in order to leave flexibility for +future TLS-related configuration such as certificate verification. + diff --git a/keps/sig-node/4939-tls-in-grpc-probe/kep.yaml b/keps/sig-node/4939-tls-in-grpc-probe/kep.yaml new file mode 100644 index 00000000000..e9cadca5773 --- /dev/null +++ b/keps/sig-node/4939-tls-in-grpc-probe/kep.yaml @@ -0,0 +1,42 @@ +title: TLS in gRPC Probe +kep-number: 4939 +authors: + - "@kkoch986" +owning-sig: sig-node +participating-sigs: + - sig-node + - sig-network +status: provisional +creation-date: 2025-01-08 +reviewers: + - aojea +approvers: + - mrunalp + +see-also: + - "/keps/sig-node/2727-grpc-probe" +replaces: + +# The target maturity stage in the current dev cycle for this KEP. +stage: alpha + +# The most recent milestone for which work toward delivery of this KEP has been +# done. This can be the current (upcoming) milestone, if it is being actively +# worked on. +latest-milestone: "v1.33" + +# The milestone at which this feature was, or is targeted to be, at each stage. 
+milestone: + alpha: "v1.33" + +# The following PRR answers are required at alpha release +# List the feature gate name and the components for which it must be enabled +feature-gates: + - name: GRPCContainerProbeTLS + components: + - kube-apiserver + - kubelet +disable-supported: true +# The following PRR answers are required at beta release +# metrics: +# - my_feature_metric