From 624f2d019368dbd3263881b783d2fffce627e42e Mon Sep 17 00:00:00 2001
From: Matthias Wessendorf
Date: Fri, 15 Nov 2024 09:10:14 +0100
Subject: [PATCH 1/3] Adding cert-manager bits

Basics for Cert-manager

Signed-off-by: Matthias Wessendorf
---
 go.mod | 2 +
 go.sum | 4 +
 hack/update-codegen.sh | 14 +
 .../clientset/versioned/clientset.go | 133 ++
 .../versioned/fake/clientset_generated.go | 92 ++
 .../clientset/versioned/fake/doc.go | 20 +
 .../clientset/versioned/fake/register.go | 58 +
 .../clientset/versioned/scheme/doc.go | 20 +
 .../clientset/versioned/scheme/register.go | 58 +
 .../versioned/typed/acme/v1/acme_client.go | 112 ++
 .../versioned/typed/acme/v1/challenge.go | 195 +++
 .../clientset/versioned/typed/acme/v1/doc.go | 20 +
 .../versioned/typed/acme/v1/fake/doc.go | 20 +
 .../typed/acme/v1/fake/fake_acme_client.go | 44 +
 .../typed/acme/v1/fake/fake_challenge.go | 141 ++
 .../typed/acme/v1/fake/fake_order.go | 141 ++
 .../typed/acme/v1/generated_expansion.go | 23 +
 .../versioned/typed/acme/v1/order.go | 195 +++
 .../typed/certmanager/v1/certificate.go | 195 +++
 .../certmanager/v1/certificaterequest.go | 195 +++
 .../certmanager/v1/certmanager_client.go | 122 ++
 .../typed/certmanager/v1/clusterissuer.go | 184 +++
 .../versioned/typed/certmanager/v1/doc.go | 20 +
 .../typed/certmanager/v1/fake/doc.go | 20 +
 .../certmanager/v1/fake/fake_certificate.go | 141 ++
 .../v1/fake/fake_certificaterequest.go | 141 ++
 .../v1/fake/fake_certmanager_client.go | 52 +
 .../certmanager/v1/fake/fake_clusterissuer.go | 132 ++
 .../typed/certmanager/v1/fake/fake_issuer.go | 141 ++
 .../certmanager/v1/generated_expansion.go | 27 +
 .../versioned/typed/certmanager/v1/issuer.go | 195 +++
 .../externalversions/acme/interface.go | 46 +
 .../externalversions/acme/v1/challenge.go | 90 ++
 .../externalversions/acme/v1/interface.go | 52 +
 .../externalversions/acme/v1/order.go | 90 ++
 .../externalversions/certmanager/interface.go | 46 +
 .../certmanager/v1/certificate.go | 90 ++
 .../certmanager/v1/certificaterequest.go | 90 ++
 .../certmanager/v1/clusterissuer.go | 89 ++
 .../certmanager/v1/interface.go | 66 +
 .../externalversions/certmanager/v1/issuer.go | 90 ++
 .../informers/externalversions/factory.go | 267 ++++
 .../informers/externalversions/generic.go | 75 +
 .../internalinterfaces/factory_interfaces.go | 40 +
 .../certmanager/injection/client/client.go | 57 +
 .../certmanager/injection/client/fake/fake.go | 57 +
 .../informers/acme/v1/challenge/challenge.go | 47 +
 .../informers/acme/v1/challenge/fake/fake.go | 40 +
 .../acme/v1/challenge/filtered/challenge.go | 65 +
 .../acme/v1/challenge/filtered/fake/fake.go | 52 +
 .../informers/acme/v1/order/fake/fake.go | 40 +
 .../acme/v1/order/filtered/fake/fake.go | 52 +
 .../informers/acme/v1/order/filtered/order.go | 65 +
 .../informers/acme/v1/order/order.go | 47 +
 .../certmanager/v1/certificate/certificate.go | 47 +
 .../certmanager/v1/certificate/fake/fake.go | 40 +
 .../v1/certificate/filtered/certificate.go | 65 +
 .../v1/certificate/filtered/fake/fake.go | 52 +
 .../certificaterequest/certificaterequest.go | 47 +
 .../v1/certificaterequest/fake/fake.go | 40 +
 .../filtered/certificaterequest.go | 65 +
 .../certificaterequest/filtered/fake/fake.go | 52 +
 .../v1/clusterissuer/clusterissuer.go | 47 +
 .../certmanager/v1/clusterissuer/fake/fake.go | 40 +
 .../clusterissuer/filtered/clusterissuer.go | 65 +
 .../v1/clusterissuer/filtered/fake/fake.go | 52 +
 .../certmanager/v1/issuer/fake/fake.go | 40 +
 .../v1/issuer/filtered/fake/fake.go | 52 +
 .../certmanager/v1/issuer/filtered/issuer.go | 65 +
.../informers/certmanager/v1/issuer/issuer.go | 47 + .../injection/informers/factory/factory.go | 56 + .../injection/informers/factory/fake/fake.go | 45 + .../filtered/fake/fake_filtered_factory.go | 60 + .../factory/filtered/filtered_factory.go | 78 + .../certmanager/listers/acme/v1/challenge.go | 99 ++ .../listers/acme/v1/expansion_generated.go | 35 + .../certmanager/listers/acme/v1/order.go | 99 ++ .../listers/certmanager/v1/certificate.go | 99 ++ .../certmanager/v1/certificaterequest.go | 99 ++ .../listers/certmanager/v1/clusterissuer.go | 68 + .../certmanager/v1/expansion_generated.go | 47 + .../listers/certmanager/v1/issuer.go | 99 ++ .../cert-manager/cert-manager/LICENSE | 202 +++ .../cert-manager/cert-manager/LICENSES | 169 +++ .../cert-manager/pkg/apis/acme/doc.go | 22 + .../cert-manager/pkg/apis/acme/v1/const.go | 21 + .../cert-manager/pkg/apis/acme/v1/doc.go | 20 + .../cert-manager/pkg/apis/acme/v1/register.go | 58 + .../cert-manager/pkg/apis/acme/v1/types.go | 57 + .../pkg/apis/acme/v1/types_challenge.go | 146 ++ .../pkg/apis/acme/v1/types_issuer.go | 650 +++++++++ .../pkg/apis/acme/v1/types_order.go | 240 ++++ .../pkg/apis/acme/v1/zz_generated.deepcopy.go | 919 ++++++++++++ .../cert-manager/pkg/apis/certmanager/doc.go | 23 + .../pkg/apis/certmanager/v1/const.go | 43 + .../pkg/apis/certmanager/v1/doc.go | 21 + .../pkg/apis/certmanager/v1/generic_issuer.go | 85 ++ .../pkg/apis/certmanager/v1/register.go | 62 + .../pkg/apis/certmanager/v1/types.go | 300 ++++ .../apis/certmanager/v1/types_certificate.go | 598 ++++++++ .../v1/types_certificaterequest.go | 249 ++++ .../pkg/apis/certmanager/v1/types_issuer.go | 376 +++++ .../certmanager/v1/zz_generated.deepcopy.go | 1047 ++++++++++++++ .../cert-manager/pkg/apis/meta/doc.go | 22 + .../cert-manager/pkg/apis/meta/v1/doc.go | 21 + .../cert-manager/pkg/apis/meta/v1/register.go | 51 + .../cert-manager/pkg/apis/meta/v1/types.go | 79 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 71 + vendor/modules.txt | 11 + vendor/sigs.k8s.io/gateway-api/LICENSE | 201 +++ .../gateway-api/apis/v1beta1/doc.go | 22 + .../gateway-api/apis/v1beta1/gateway_types.go | 946 ++++++++++++ .../apis/v1beta1/gatewayclass_types.go | 212 +++ .../apis/v1beta1/httproute_types.go | 1115 +++++++++++++++ .../apis/v1beta1/object_reference_types.go | 147 ++ .../apis/v1beta1/referencegrant_types.go | 143 ++ .../gateway-api/apis/v1beta1/shared_types.go | 643 +++++++++ .../apis/v1beta1/zz_generated.deepcopy.go | 1271 +++++++++++++++++ .../apis/v1beta1/zz_generated.register.go | 73 + 119 files changed, 16546 insertions(+) create mode 100644 pkg/client/certmanager/clientset/versioned/clientset.go create mode 100644 pkg/client/certmanager/clientset/versioned/fake/clientset_generated.go create mode 100644 pkg/client/certmanager/clientset/versioned/fake/doc.go create mode 100644 pkg/client/certmanager/clientset/versioned/fake/register.go create mode 100644 pkg/client/certmanager/clientset/versioned/scheme/doc.go create mode 100644 pkg/client/certmanager/clientset/versioned/scheme/register.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/acme/v1/acme_client.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/acme/v1/challenge.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/acme/v1/doc.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/doc.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/fake_acme_client.go create mode 100644 
pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/fake_challenge.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/fake_order.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/acme/v1/generated_expansion.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/acme/v1/order.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/certificate.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/certificaterequest.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/certmanager_client.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/clusterissuer.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/doc.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/doc.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_certificate.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_certificaterequest.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_certmanager_client.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_clusterissuer.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_issuer.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/generated_expansion.go create mode 100644 pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/issuer.go create mode 100644 pkg/client/certmanager/informers/externalversions/acme/interface.go create mode 100644 pkg/client/certmanager/informers/externalversions/acme/v1/challenge.go create mode 100644 pkg/client/certmanager/informers/externalversions/acme/v1/interface.go create mode 100644 pkg/client/certmanager/informers/externalversions/acme/v1/order.go create mode 100644 pkg/client/certmanager/informers/externalversions/certmanager/interface.go create mode 100644 pkg/client/certmanager/informers/externalversions/certmanager/v1/certificate.go create mode 100644 pkg/client/certmanager/informers/externalversions/certmanager/v1/certificaterequest.go create mode 100644 pkg/client/certmanager/informers/externalversions/certmanager/v1/clusterissuer.go create mode 100644 pkg/client/certmanager/informers/externalversions/certmanager/v1/interface.go create mode 100644 pkg/client/certmanager/informers/externalversions/certmanager/v1/issuer.go create mode 100644 pkg/client/certmanager/informers/externalversions/factory.go create mode 100644 pkg/client/certmanager/informers/externalversions/generic.go create mode 100644 pkg/client/certmanager/informers/externalversions/internalinterfaces/factory_interfaces.go create mode 100644 pkg/client/certmanager/injection/client/client.go create mode 100644 pkg/client/certmanager/injection/client/fake/fake.go create mode 100644 pkg/client/certmanager/injection/informers/acme/v1/challenge/challenge.go create mode 100644 pkg/client/certmanager/injection/informers/acme/v1/challenge/fake/fake.go create mode 100644 pkg/client/certmanager/injection/informers/acme/v1/challenge/filtered/challenge.go create mode 100644 pkg/client/certmanager/injection/informers/acme/v1/challenge/filtered/fake/fake.go create mode 100644 pkg/client/certmanager/injection/informers/acme/v1/order/fake/fake.go create mode 100644 
pkg/client/certmanager/injection/informers/acme/v1/order/filtered/fake/fake.go create mode 100644 pkg/client/certmanager/injection/informers/acme/v1/order/filtered/order.go create mode 100644 pkg/client/certmanager/injection/informers/acme/v1/order/order.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/certificate/certificate.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/certificate/fake/fake.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/certificate/filtered/certificate.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/certificate/filtered/fake/fake.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/certificaterequest.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/fake/fake.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/filtered/certificaterequest.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/filtered/fake/fake.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/clusterissuer.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/fake/fake.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/filtered/clusterissuer.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/filtered/fake/fake.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/issuer/fake/fake.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/issuer/filtered/fake/fake.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/issuer/filtered/issuer.go create mode 100644 pkg/client/certmanager/injection/informers/certmanager/v1/issuer/issuer.go create mode 100644 pkg/client/certmanager/injection/informers/factory/factory.go create mode 100644 pkg/client/certmanager/injection/informers/factory/fake/fake.go create mode 100644 pkg/client/certmanager/injection/informers/factory/filtered/fake/fake_filtered_factory.go create mode 100644 pkg/client/certmanager/injection/informers/factory/filtered/filtered_factory.go create mode 100644 pkg/client/certmanager/listers/acme/v1/challenge.go create mode 100644 pkg/client/certmanager/listers/acme/v1/expansion_generated.go create mode 100644 pkg/client/certmanager/listers/acme/v1/order.go create mode 100644 pkg/client/certmanager/listers/certmanager/v1/certificate.go create mode 100644 pkg/client/certmanager/listers/certmanager/v1/certificaterequest.go create mode 100644 pkg/client/certmanager/listers/certmanager/v1/clusterissuer.go create mode 100644 pkg/client/certmanager/listers/certmanager/v1/expansion_generated.go create mode 100644 pkg/client/certmanager/listers/certmanager/v1/issuer.go create mode 100644 vendor/github.com/cert-manager/cert-manager/LICENSE create mode 100644 vendor/github.com/cert-manager/cert-manager/LICENSES create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/doc.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/const.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/doc.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/register.go create mode 100644 
vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/doc.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/const.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/doc.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/generic_issuer.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/register.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/doc.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/doc.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/register.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go create mode 100644 vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/zz_generated.deepcopy.go create mode 100644 vendor/sigs.k8s.io/gateway-api/LICENSE create mode 100644 vendor/sigs.k8s.io/gateway-api/apis/v1beta1/doc.go create mode 100644 vendor/sigs.k8s.io/gateway-api/apis/v1beta1/gateway_types.go create mode 100644 vendor/sigs.k8s.io/gateway-api/apis/v1beta1/gatewayclass_types.go create mode 100644 vendor/sigs.k8s.io/gateway-api/apis/v1beta1/httproute_types.go create mode 100644 vendor/sigs.k8s.io/gateway-api/apis/v1beta1/object_reference_types.go create mode 100644 vendor/sigs.k8s.io/gateway-api/apis/v1beta1/referencegrant_types.go create mode 100644 vendor/sigs.k8s.io/gateway-api/apis/v1beta1/shared_types.go create mode 100644 vendor/sigs.k8s.io/gateway-api/apis/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/sigs.k8s.io/gateway-api/apis/v1beta1/zz_generated.register.go diff --git a/go.mod b/go.mod index 073113f6014..597f228b97e 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.22.7 require ( github.com/ahmetb/gen-crd-api-reference-docs v0.3.1-0.20210420163308-c1402a70e2f1 + github.com/cert-manager/cert-manager v1.13.3 github.com/cloudevents/conformance v0.2.0 github.com/cloudevents/sdk-go/observability/opencensus/v2 v2.15.2 github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20240508060731-1ed9471c98bd @@ -121,6 +122,7 @@ require ( k8s.io/klog v1.0.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8 // indirect + sigs.k8s.io/gateway-api v0.8.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum 
b/go.sum index a4367964462..0e5c992651d 100644 --- a/go.sum +++ b/go.sum @@ -64,6 +64,8 @@ github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cert-manager/cert-manager v1.13.3 h1:3R4G0RI7K0OkTZhWlVOC5SGZMYa2NwqmQJoyKydrz/M= +github.com/cert-manager/cert-manager v1.13.3/go.mod h1:BM2+Pt/NmSv1Zr25/MHv6BgIEF9IUxA1xAjp80qkxgc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -863,6 +865,8 @@ knative.dev/reconciler-test v0.0.0-20250129131157-3424ad806aa1/go.mod h1:rpvG6Am rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/gateway-api v0.8.0 h1:isQQ3Jx2qFP7vaA3ls0846F0Amp9Eq14P08xbSwVbQg= +sigs.k8s.io/gateway-api v0.8.0/go.mod h1:okOnjPNBFbIS/Rw9kAhuIUaIkLhTKEu+ARIuXk2dgaM= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index dc5206ae11d..5022868f8a4 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -44,6 +44,13 @@ kube::codegen::gen_client \ --with-watch \ "${REPO_ROOT_DIR}/pkg/apis" +kube::codegen::gen_client \ + --boilerplate "${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt" \ + --output-dir "${REPO_ROOT_DIR}/pkg/client/certmanager" \ + --output-pkg "knative.dev/eventing/pkg/client/certmanager" \ + --with-watch \ + "${REPO_ROOT_DIR}/vendor/github.com/cert-manager/cert-manager/pkg/apis" + group "Knative Codegen" # Knative Injection @@ -52,6 +59,13 @@ ${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \ "sinks:v1alpha1 eventing:v1alpha1 eventing:v1beta1 eventing:v1beta2 eventing:v1beta3 eventing:v1 messaging:v1 flows:v1 sources:v1alpha1 sources:v1beta2 sources:v1 duck:v1beta1 duck:v1" \ --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt +# Knative Injection (for cert-manager) +${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \ + knative.dev/eventing/pkg/client/certmanager github.com/cert-manager/cert-manager/pkg/apis \ + "certmanager:v1 acme:v1" \ + --disable-informer-init \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + group "Generating API reference docs" ${REPO_ROOT_DIR}/hack/update-reference-docs.sh diff --git a/pkg/client/certmanager/clientset/versioned/clientset.go b/pkg/client/certmanager/clientset/versioned/clientset.go new file mode 100644 index 00000000000..6997909367d --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/clientset.go @@ -0,0 +1,133 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in 
compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + "net/http" + + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" + acmev1 "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/typed/acme/v1" + certmanagerv1 "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + AcmeV1() acmev1.AcmeV1Interface + CertmanagerV1() certmanagerv1.CertmanagerV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + acmeV1 *acmev1.AcmeV1Client + certmanagerV1 *certmanagerv1.CertmanagerV1Client +} + +// AcmeV1 retrieves the AcmeV1Client +func (c *Clientset) AcmeV1() acmev1.AcmeV1Interface { + return c.acmeV1 +} + +// CertmanagerV1 retrieves the CertmanagerV1Client +func (c *Clientset) CertmanagerV1() certmanagerv1.CertmanagerV1Interface { + return c.certmanagerV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. 
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.acmeV1, err = acmev1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.certmanagerV1, err = certmanagerv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.acmeV1 = acmev1.New(c) + cs.certmanagerV1 = certmanagerv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/pkg/client/certmanager/clientset/versioned/fake/clientset_generated.go b/pkg/client/certmanager/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 00000000000..417cffbfe22 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,92 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" + clientset "knative.dev/eventing/pkg/client/certmanager/clientset/versioned" + acmev1 "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/typed/acme/v1" + fakeacmev1 "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake" + certmanagerv1 "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1" + fakecertmanagerv1 "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
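[Illustrative aside, not part of the patch: a minimal sketch of how a consumer might build the generated clientset above and list cert-manager Certificates. The in-cluster config and the "knative-eventing" namespace are assumptions for the example.]

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"

	versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned"
)

func main() {
	// Assumes the process runs in-cluster; use clientcmd for a kubeconfig instead.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	// NewForConfig wires up the AcmeV1 and CertmanagerV1 typed clients shown above.
	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// List cert-manager Certificates in an example namespace.
	certs, err := cs.CertmanagerV1().Certificates("knative-eventing").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, c := range certs.Items {
		fmt.Println(c.Name)
	}
}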
+func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// AcmeV1 retrieves the AcmeV1Client +func (c *Clientset) AcmeV1() acmev1.AcmeV1Interface { + return &fakeacmev1.FakeAcmeV1{Fake: &c.Fake} +} + +// CertmanagerV1 retrieves the CertmanagerV1Client +func (c *Clientset) CertmanagerV1() certmanagerv1.CertmanagerV1Interface { + return &fakecertmanagerv1.FakeCertmanagerV1{Fake: &c.Fake} +} diff --git a/pkg/client/certmanager/clientset/versioned/fake/doc.go b/pkg/client/certmanager/clientset/versioned/fake/doc.go new file mode 100644 index 00000000000..51ebc03dc3a --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/pkg/client/certmanager/clientset/versioned/fake/register.go b/pkg/client/certmanager/clientset/versioned/fake/register.go new file mode 100644 index 00000000000..447bef5420b --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/fake/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
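[Illustrative aside, not part of the patch: the fake clientset above is typically used in unit tests roughly as follows. The Certificate fixture and namespace are made-up examples.]

package example_test

import (
	"context"
	"testing"

	cmv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"knative.dev/eventing/pkg/client/certmanager/clientset/versioned/fake"
)

func TestGetCertificate(t *testing.T) {
	// Seed the fake's object tracker with a pre-existing Certificate.
	cert := &cmv1.Certificate{ObjectMeta: metav1.ObjectMeta{Name: "tls-cert", Namespace: "default"}}
	cs := fake.NewSimpleClientset(cert)

	// The fake serves the seeded object through the same typed interface.
	got, err := cs.CertmanagerV1().Certificates("default").Get(context.Background(), "tls-cert", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if got.Name != "tls-cert" {
		t.Fatalf("unexpected certificate: %v", got)
	}
}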
+ +package fake + +import ( + acmev1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1" + certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + acmev1.AddToScheme, + certmanagerv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/pkg/client/certmanager/clientset/versioned/scheme/doc.go b/pkg/client/certmanager/clientset/versioned/scheme/doc.go new file mode 100644 index 00000000000..5768b36ee37 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/pkg/client/certmanager/clientset/versioned/scheme/register.go b/pkg/client/certmanager/clientset/versioned/scheme/register.go new file mode 100644 index 00000000000..373049da0d7 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/scheme/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package scheme + +import ( + acmev1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1" + certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + acmev1.AddToScheme, + certmanagerv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/acme/v1/acme_client.go b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/acme_client.go new file mode 100644 index 00000000000..bd95bb6764c --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/acme_client.go @@ -0,0 +1,112 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + v1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1" + rest "k8s.io/client-go/rest" + "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/scheme" +) + +type AcmeV1Interface interface { + RESTClient() rest.Interface + ChallengesGetter + OrdersGetter +} + +// AcmeV1Client is used to interact with features provided by the acme.cert-manager.io group. +type AcmeV1Client struct { + restClient rest.Interface +} + +func (c *AcmeV1Client) Challenges(namespace string) ChallengeInterface { + return newChallenges(c, namespace) +} + +func (c *AcmeV1Client) Orders(namespace string) OrderInterface { + return newOrders(c, namespace) +} + +// NewForConfig creates a new AcmeV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
+func NewForConfig(c *rest.Config) (*AcmeV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AcmeV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AcmeV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &AcmeV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AcmeV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AcmeV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AcmeV1Client for the given RESTClient. +func New(c rest.Interface) *AcmeV1Client { + return &AcmeV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AcmeV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/acme/v1/challenge.go b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/challenge.go new file mode 100644 index 00000000000..e8a0f26f4ad --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/challenge.go @@ -0,0 +1,195 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/scheme" +) + +// ChallengesGetter has a method to return a ChallengeInterface. +// A group's client should implement this interface. +type ChallengesGetter interface { + Challenges(namespace string) ChallengeInterface +} + +// ChallengeInterface has methods to work with Challenge resources. 
+type ChallengeInterface interface { + Create(ctx context.Context, challenge *v1.Challenge, opts metav1.CreateOptions) (*v1.Challenge, error) + Update(ctx context.Context, challenge *v1.Challenge, opts metav1.UpdateOptions) (*v1.Challenge, error) + UpdateStatus(ctx context.Context, challenge *v1.Challenge, opts metav1.UpdateOptions) (*v1.Challenge, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Challenge, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ChallengeList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Challenge, err error) + ChallengeExpansion +} + +// challenges implements ChallengeInterface +type challenges struct { + client rest.Interface + ns string +} + +// newChallenges returns a Challenges +func newChallenges(c *AcmeV1Client, namespace string) *challenges { + return &challenges{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the challenge, and returns the corresponding challenge object, and an error if there is any. +func (c *challenges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Challenge, err error) { + result = &v1.Challenge{} + err = c.client.Get(). + Namespace(c.ns). + Resource("challenges"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Challenges that match those selectors. +func (c *challenges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ChallengeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ChallengeList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("challenges"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested challenges. +func (c *challenges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("challenges"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a challenge and creates it. Returns the server's representation of the challenge, and an error, if there is any. +func (c *challenges) Create(ctx context.Context, challenge *v1.Challenge, opts metav1.CreateOptions) (result *v1.Challenge, err error) { + result = &v1.Challenge{} + err = c.client.Post(). + Namespace(c.ns). + Resource("challenges"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(challenge). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a challenge and updates it. Returns the server's representation of the challenge, and an error, if there is any. 
+func (c *challenges) Update(ctx context.Context, challenge *v1.Challenge, opts metav1.UpdateOptions) (result *v1.Challenge, err error) { + result = &v1.Challenge{} + err = c.client.Put(). + Namespace(c.ns). + Resource("challenges"). + Name(challenge.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(challenge). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *challenges) UpdateStatus(ctx context.Context, challenge *v1.Challenge, opts metav1.UpdateOptions) (result *v1.Challenge, err error) { + result = &v1.Challenge{} + err = c.client.Put(). + Namespace(c.ns). + Resource("challenges"). + Name(challenge.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(challenge). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the challenge and deletes it. Returns an error if one occurs. +func (c *challenges) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("challenges"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *challenges) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("challenges"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched challenge. +func (c *challenges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Challenge, err error) { + result = &v1.Challenge{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("challenges"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/acme/v1/doc.go b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/doc.go new file mode 100644 index 00000000000..2ce17146a02 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
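[Illustrative aside, not part of the patch: a sketch of consuming the Challenge Watch method generated above; the namespace handling is an assumption.]

package example

import (
	"context"
	"log"

	acmev1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned"
)

// watchChallenges logs add/update/delete events for ACME Challenges in one
// namespace, using the generated Watch method.
func watchChallenges(ctx context.Context, cs versioned.Interface, namespace string) error {
	w, err := cs.AcmeV1().Challenges(namespace).Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		if ch, ok := ev.Object.(*acmev1.Challenge); ok {
			log.Printf("challenge %s/%s: %s", ch.Namespace, ch.Name, ev.Type)
		}
	}
	return nil
}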
+package v1 diff --git a/pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/doc.go b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/doc.go new file mode 100644 index 00000000000..40528db3a52 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/fake_acme_client.go b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/fake_acme_client.go new file mode 100644 index 00000000000..7e52c617993 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/fake_acme_client.go @@ -0,0 +1,44 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1 "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/typed/acme/v1" +) + +type FakeAcmeV1 struct { + *testing.Fake +} + +func (c *FakeAcmeV1) Challenges(namespace string) v1.ChallengeInterface { + return &FakeChallenges{c, namespace} +} + +func (c *FakeAcmeV1) Orders(namespace string) v1.OrderInterface { + return &FakeOrders{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAcmeV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/fake_challenge.go b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/fake_challenge.go new file mode 100644 index 00000000000..02f6a0fe6de --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/fake_challenge.go @@ -0,0 +1,141 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeChallenges implements ChallengeInterface +type FakeChallenges struct { + Fake *FakeAcmeV1 + ns string +} + +var challengesResource = v1.SchemeGroupVersion.WithResource("challenges") + +var challengesKind = v1.SchemeGroupVersion.WithKind("Challenge") + +// Get takes name of the challenge, and returns the corresponding challenge object, and an error if there is any. +func (c *FakeChallenges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Challenge, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(challengesResource, c.ns, name), &v1.Challenge{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Challenge), err +} + +// List takes label and field selectors, and returns the list of Challenges that match those selectors. +func (c *FakeChallenges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ChallengeList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(challengesResource, challengesKind, c.ns, opts), &v1.ChallengeList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ChallengeList{ListMeta: obj.(*v1.ChallengeList).ListMeta} + for _, item := range obj.(*v1.ChallengeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested challenges. +func (c *FakeChallenges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(challengesResource, c.ns, opts)) + +} + +// Create takes the representation of a challenge and creates it. Returns the server's representation of the challenge, and an error, if there is any. +func (c *FakeChallenges) Create(ctx context.Context, challenge *v1.Challenge, opts metav1.CreateOptions) (result *v1.Challenge, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(challengesResource, c.ns, challenge), &v1.Challenge{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Challenge), err +} + +// Update takes the representation of a challenge and updates it. Returns the server's representation of the challenge, and an error, if there is any. +func (c *FakeChallenges) Update(ctx context.Context, challenge *v1.Challenge, opts metav1.UpdateOptions) (result *v1.Challenge, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(challengesResource, c.ns, challenge), &v1.Challenge{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Challenge), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeChallenges) UpdateStatus(ctx context.Context, challenge *v1.Challenge, opts metav1.UpdateOptions) (*v1.Challenge, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(challengesResource, "status", c.ns, challenge), &v1.Challenge{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Challenge), err +} + +// Delete takes name of the challenge and deletes it. Returns an error if one occurs. +func (c *FakeChallenges) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(challengesResource, c.ns, name, opts), &v1.Challenge{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeChallenges) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionAction(challengesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1.ChallengeList{}) + return err +} + +// Patch applies the patch and returns the patched challenge. +func (c *FakeChallenges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Challenge, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(challengesResource, c.ns, name, pt, data, subresources...), &v1.Challenge{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Challenge), err +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/fake_order.go b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/fake_order.go new file mode 100644 index 00000000000..2135c177468 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/fake/fake_order.go @@ -0,0 +1,141 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeOrders implements OrderInterface +type FakeOrders struct { + Fake *FakeAcmeV1 + ns string +} + +var ordersResource = v1.SchemeGroupVersion.WithResource("orders") + +var ordersKind = v1.SchemeGroupVersion.WithKind("Order") + +// Get takes name of the order, and returns the corresponding order object, and an error if there is any. +func (c *FakeOrders) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Order, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(ordersResource, c.ns, name), &v1.Order{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Order), err +} + +// List takes label and field selectors, and returns the list of Orders that match those selectors. +func (c *FakeOrders) List(ctx context.Context, opts metav1.ListOptions) (result *v1.OrderList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(ordersResource, ordersKind, c.ns, opts), &v1.OrderList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.OrderList{ListMeta: obj.(*v1.OrderList).ListMeta} + for _, item := range obj.(*v1.OrderList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested orders. +func (c *FakeOrders) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(ordersResource, c.ns, opts)) + +} + +// Create takes the representation of a order and creates it. Returns the server's representation of the order, and an error, if there is any. +func (c *FakeOrders) Create(ctx context.Context, order *v1.Order, opts metav1.CreateOptions) (result *v1.Order, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(ordersResource, c.ns, order), &v1.Order{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Order), err +} + +// Update takes the representation of a order and updates it. Returns the server's representation of the order, and an error, if there is any. +func (c *FakeOrders) Update(ctx context.Context, order *v1.Order, opts metav1.UpdateOptions) (result *v1.Order, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(ordersResource, c.ns, order), &v1.Order{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Order), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeOrders) UpdateStatus(ctx context.Context, order *v1.Order, opts metav1.UpdateOptions) (*v1.Order, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(ordersResource, "status", c.ns, order), &v1.Order{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Order), err +} + +// Delete takes name of the order and deletes it. Returns an error if one occurs. +func (c *FakeOrders) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(ordersResource, c.ns, name, opts), &v1.Order{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeOrders) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionAction(ordersResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1.OrderList{}) + return err +} + +// Patch applies the patch and returns the patched order. +func (c *FakeOrders) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Order, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(ordersResource, c.ns, name, pt, data, subresources...), &v1.Order{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Order), err +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/acme/v1/generated_expansion.go b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/generated_expansion.go new file mode 100644 index 00000000000..a5254e0e303 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type ChallengeExpansion interface{} + +type OrderExpansion interface{} diff --git a/pkg/client/certmanager/clientset/versioned/typed/acme/v1/order.go b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/order.go new file mode 100644 index 00000000000..fce94c2f1c2 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/acme/v1/order.go @@ -0,0 +1,195 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/scheme" +) + +// OrdersGetter has a method to return a OrderInterface. +// A group's client should implement this interface. +type OrdersGetter interface { + Orders(namespace string) OrderInterface +} + +// OrderInterface has methods to work with Order resources. 
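+//
+// A minimal usage sketch (illustrative only; NewForConfig comes from this
+// package's AcmeV1Client, and cfg and ctx are assumed to exist):
+//
+//	client, err := NewForConfig(cfg)
+//	if err != nil {
+//		// handle the error
+//	}
+//	order, err := client.Orders("default").Get(ctx, "my-order", metav1.GetOptions{})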
+type OrderInterface interface { + Create(ctx context.Context, order *v1.Order, opts metav1.CreateOptions) (*v1.Order, error) + Update(ctx context.Context, order *v1.Order, opts metav1.UpdateOptions) (*v1.Order, error) + UpdateStatus(ctx context.Context, order *v1.Order, opts metav1.UpdateOptions) (*v1.Order, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Order, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.OrderList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Order, err error) + OrderExpansion +} + +// orders implements OrderInterface +type orders struct { + client rest.Interface + ns string +} + +// newOrders returns a Orders +func newOrders(c *AcmeV1Client, namespace string) *orders { + return &orders{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the order, and returns the corresponding order object, and an error if there is any. +func (c *orders) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Order, err error) { + result = &v1.Order{} + err = c.client.Get(). + Namespace(c.ns). + Resource("orders"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Orders that match those selectors. +func (c *orders) List(ctx context.Context, opts metav1.ListOptions) (result *v1.OrderList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.OrderList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("orders"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested orders. +func (c *orders) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("orders"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a order and creates it. Returns the server's representation of the order, and an error, if there is any. +func (c *orders) Create(ctx context.Context, order *v1.Order, opts metav1.CreateOptions) (result *v1.Order, err error) { + result = &v1.Order{} + err = c.client.Post(). + Namespace(c.ns). + Resource("orders"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(order). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a order and updates it. Returns the server's representation of the order, and an error, if there is any. +func (c *orders) Update(ctx context.Context, order *v1.Order, opts metav1.UpdateOptions) (result *v1.Order, err error) { + result = &v1.Order{} + err = c.client.Put(). + Namespace(c.ns). + Resource("orders"). + Name(order.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(order). + Do(ctx). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *orders) UpdateStatus(ctx context.Context, order *v1.Order, opts metav1.UpdateOptions) (result *v1.Order, err error) { + result = &v1.Order{} + err = c.client.Put(). + Namespace(c.ns). + Resource("orders"). + Name(order.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(order). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the order and deletes it. Returns an error if one occurs. +func (c *orders) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("orders"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *orders) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("orders"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched order. +func (c *orders) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Order, err error) { + result = &v1.Order{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("orders"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/certificate.go b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/certificate.go new file mode 100644 index 00000000000..33456feb601 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/certificate.go @@ -0,0 +1,195 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/scheme" +) + +// CertificatesGetter has a method to return a CertificateInterface. +// A group's client should implement this interface. +type CertificatesGetter interface { + Certificates(namespace string) CertificateInterface +} + +// CertificateInterface has methods to work with Certificate resources. 
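+//
+// A minimal usage sketch (illustrative only; assumes a *CertmanagerV1Client
+// named client, e.g. obtained from NewForConfig, and an existing ctx):
+//
+//	certs, err := client.Certificates("default").List(ctx, metav1.ListOptions{})
+//	if err != nil {
+//		// handle the error
+//	}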
+type CertificateInterface interface { + Create(ctx context.Context, certificate *v1.Certificate, opts metav1.CreateOptions) (*v1.Certificate, error) + Update(ctx context.Context, certificate *v1.Certificate, opts metav1.UpdateOptions) (*v1.Certificate, error) + UpdateStatus(ctx context.Context, certificate *v1.Certificate, opts metav1.UpdateOptions) (*v1.Certificate, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Certificate, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CertificateList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Certificate, err error) + CertificateExpansion +} + +// certificates implements CertificateInterface +type certificates struct { + client rest.Interface + ns string +} + +// newCertificates returns a Certificates +func newCertificates(c *CertmanagerV1Client, namespace string) *certificates { + return &certificates{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the certificate, and returns the corresponding certificate object, and an error if there is any. +func (c *certificates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Certificate, err error) { + result = &v1.Certificate{} + err = c.client.Get(). + Namespace(c.ns). + Resource("certificates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Certificates that match those selectors. +func (c *certificates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CertificateList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("certificates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested certificates. +func (c *certificates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("certificates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a certificate and creates it. Returns the server's representation of the certificate, and an error, if there is any. +func (c *certificates) Create(ctx context.Context, certificate *v1.Certificate, opts metav1.CreateOptions) (result *v1.Certificate, err error) { + result = &v1.Certificate{} + err = c.client.Post(). + Namespace(c.ns). + Resource("certificates"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(certificate). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a certificate and updates it. Returns the server's representation of the certificate, and an error, if there is any. 
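+// Update is subject to optimistic concurrency: if the certificate's
+// resourceVersion is stale, the API server rejects the request with a
+// Conflict error, and callers typically re-read the object and retry (for
+// example with k8s.io/client-go/util/retry.RetryOnConflict).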
+func (c *certificates) Update(ctx context.Context, certificate *v1.Certificate, opts metav1.UpdateOptions) (result *v1.Certificate, err error) { + result = &v1.Certificate{} + err = c.client.Put(). + Namespace(c.ns). + Resource("certificates"). + Name(certificate.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(certificate). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *certificates) UpdateStatus(ctx context.Context, certificate *v1.Certificate, opts metav1.UpdateOptions) (result *v1.Certificate, err error) { + result = &v1.Certificate{} + err = c.client.Put(). + Namespace(c.ns). + Resource("certificates"). + Name(certificate.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(certificate). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the certificate and deletes it. Returns an error if one occurs. +func (c *certificates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("certificates"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *certificates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("certificates"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched certificate. +func (c *certificates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Certificate, err error) { + result = &v1.Certificate{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("certificates"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/certificaterequest.go b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/certificaterequest.go new file mode 100644 index 00000000000..3bd45a86b89 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/certificaterequest.go @@ -0,0 +1,195 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/scheme" +) + +// CertificateRequestsGetter has a method to return a CertificateRequestInterface. +// A group's client should implement this interface. +type CertificateRequestsGetter interface { + CertificateRequests(namespace string) CertificateRequestInterface +} + +// CertificateRequestInterface has methods to work with CertificateRequest resources. +type CertificateRequestInterface interface { + Create(ctx context.Context, certificateRequest *v1.CertificateRequest, opts metav1.CreateOptions) (*v1.CertificateRequest, error) + Update(ctx context.Context, certificateRequest *v1.CertificateRequest, opts metav1.UpdateOptions) (*v1.CertificateRequest, error) + UpdateStatus(ctx context.Context, certificateRequest *v1.CertificateRequest, opts metav1.UpdateOptions) (*v1.CertificateRequest, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CertificateRequest, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CertificateRequestList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateRequest, err error) + CertificateRequestExpansion +} + +// certificateRequests implements CertificateRequestInterface +type certificateRequests struct { + client rest.Interface + ns string +} + +// newCertificateRequests returns a CertificateRequests +func newCertificateRequests(c *CertmanagerV1Client, namespace string) *certificateRequests { + return &certificateRequests{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the certificateRequest, and returns the corresponding certificateRequest object, and an error if there is any. +func (c *certificateRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CertificateRequest, err error) { + result = &v1.CertificateRequest{} + err = c.client.Get(). + Namespace(c.ns). + Resource("certificaterequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CertificateRequests that match those selectors. +func (c *certificateRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateRequestList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CertificateRequestList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("certificaterequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested certificateRequests. 
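+//
+// A sketch of draining the returned watch (illustrative only; client and ctx
+// are assumed to exist):
+//
+//	w, err := client.CertificateRequests("default").Watch(ctx, metav1.ListOptions{})
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer w.Stop()
+//	for event := range w.ResultChan() {
+//		if cr, ok := event.Object.(*v1.CertificateRequest); ok {
+//			_ = cr // inspect event.Type and the object
+//		}
+//	}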
+func (c *certificateRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("certificaterequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a certificateRequest and creates it. Returns the server's representation of the certificateRequest, and an error, if there is any. +func (c *certificateRequests) Create(ctx context.Context, certificateRequest *v1.CertificateRequest, opts metav1.CreateOptions) (result *v1.CertificateRequest, err error) { + result = &v1.CertificateRequest{} + err = c.client.Post(). + Namespace(c.ns). + Resource("certificaterequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(certificateRequest). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a certificateRequest and updates it. Returns the server's representation of the certificateRequest, and an error, if there is any. +func (c *certificateRequests) Update(ctx context.Context, certificateRequest *v1.CertificateRequest, opts metav1.UpdateOptions) (result *v1.CertificateRequest, err error) { + result = &v1.CertificateRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("certificaterequests"). + Name(certificateRequest.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(certificateRequest). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *certificateRequests) UpdateStatus(ctx context.Context, certificateRequest *v1.CertificateRequest, opts metav1.UpdateOptions) (result *v1.CertificateRequest, err error) { + result = &v1.CertificateRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("certificaterequests"). + Name(certificateRequest.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(certificateRequest). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the certificateRequest and deletes it. Returns an error if one occurs. +func (c *certificateRequests) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("certificaterequests"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *certificateRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("certificaterequests"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched certificateRequest. +func (c *certificateRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateRequest, err error) { + result = &v1.CertificateRequest{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("certificaterequests"). + Name(name). + SubResource(subresources...). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/certmanager_client.go b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/certmanager_client.go new file mode 100644 index 00000000000..13e86184c1b --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/certmanager_client.go @@ -0,0 +1,122 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + v1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + rest "k8s.io/client-go/rest" + "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/scheme" +) + +type CertmanagerV1Interface interface { + RESTClient() rest.Interface + CertificatesGetter + CertificateRequestsGetter + ClusterIssuersGetter + IssuersGetter +} + +// CertmanagerV1Client is used to interact with features provided by the cert-manager.io group. +type CertmanagerV1Client struct { + restClient rest.Interface +} + +func (c *CertmanagerV1Client) Certificates(namespace string) CertificateInterface { + return newCertificates(c, namespace) +} + +func (c *CertmanagerV1Client) CertificateRequests(namespace string) CertificateRequestInterface { + return newCertificateRequests(c, namespace) +} + +func (c *CertmanagerV1Client) ClusterIssuers() ClusterIssuerInterface { + return newClusterIssuers(c) +} + +func (c *CertmanagerV1Client) Issuers(namespace string) IssuerInterface { + return newIssuers(c, namespace) +} + +// NewForConfig creates a new CertmanagerV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*CertmanagerV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CertmanagerV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CertmanagerV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &CertmanagerV1Client{client}, nil +} + +// NewForConfigOrDie creates a new CertmanagerV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CertmanagerV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CertmanagerV1Client for the given RESTClient. 
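+// Unlike NewForConfig, New performs no defaulting: the supplied RESTClient
+// must already be configured for the cert-manager.io/v1 group, the /apis
+// path, and a negotiated serializer (see setConfigDefaults below).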
+func New(c rest.Interface) *CertmanagerV1Client { + return &CertmanagerV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CertmanagerV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/clusterissuer.go b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/clusterissuer.go new file mode 100644 index 00000000000..37de6f73ebe --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/clusterissuer.go @@ -0,0 +1,184 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/scheme" +) + +// ClusterIssuersGetter has a method to return a ClusterIssuerInterface. +// A group's client should implement this interface. +type ClusterIssuersGetter interface { + ClusterIssuers() ClusterIssuerInterface +} + +// ClusterIssuerInterface has methods to work with ClusterIssuer resources. 
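+//
+// ClusterIssuer is cluster scoped, so the getter takes no namespace
+// (illustrative sketch; client and ctx are assumed to exist):
+//
+//	ci, err := client.ClusterIssuers().Get(ctx, "letsencrypt", metav1.GetOptions{})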
+type ClusterIssuerInterface interface { + Create(ctx context.Context, clusterIssuer *v1.ClusterIssuer, opts metav1.CreateOptions) (*v1.ClusterIssuer, error) + Update(ctx context.Context, clusterIssuer *v1.ClusterIssuer, opts metav1.UpdateOptions) (*v1.ClusterIssuer, error) + UpdateStatus(ctx context.Context, clusterIssuer *v1.ClusterIssuer, opts metav1.UpdateOptions) (*v1.ClusterIssuer, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterIssuer, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterIssuerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterIssuer, err error) + ClusterIssuerExpansion +} + +// clusterIssuers implements ClusterIssuerInterface +type clusterIssuers struct { + client rest.Interface +} + +// newClusterIssuers returns a ClusterIssuers +func newClusterIssuers(c *CertmanagerV1Client) *clusterIssuers { + return &clusterIssuers{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterIssuer, and returns the corresponding clusterIssuer object, and an error if there is any. +func (c *clusterIssuers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterIssuer, err error) { + result = &v1.ClusterIssuer{} + err = c.client.Get(). + Resource("clusterissuers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterIssuers that match those selectors. +func (c *clusterIssuers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterIssuerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ClusterIssuerList{} + err = c.client.Get(). + Resource("clusterissuers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterIssuers. +func (c *clusterIssuers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clusterissuers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterIssuer and creates it. Returns the server's representation of the clusterIssuer, and an error, if there is any. +func (c *clusterIssuers) Create(ctx context.Context, clusterIssuer *v1.ClusterIssuer, opts metav1.CreateOptions) (result *v1.ClusterIssuer, err error) { + result = &v1.ClusterIssuer{} + err = c.client.Post(). + Resource("clusterissuers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterIssuer). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterIssuer and updates it. Returns the server's representation of the clusterIssuer, and an error, if there is any. 
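+// Because the resource is cluster scoped, the request path carries no
+// namespace segment (/apis/cert-manager.io/v1/clusterissuers/<name>).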
+func (c *clusterIssuers) Update(ctx context.Context, clusterIssuer *v1.ClusterIssuer, opts metav1.UpdateOptions) (result *v1.ClusterIssuer, err error) { + result = &v1.ClusterIssuer{} + err = c.client.Put(). + Resource("clusterissuers"). + Name(clusterIssuer.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterIssuer). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *clusterIssuers) UpdateStatus(ctx context.Context, clusterIssuer *v1.ClusterIssuer, opts metav1.UpdateOptions) (result *v1.ClusterIssuer, err error) { + result = &v1.ClusterIssuer{} + err = c.client.Put(). + Resource("clusterissuers"). + Name(clusterIssuer.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterIssuer). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterIssuer and deletes it. Returns an error if one occurs. +func (c *clusterIssuers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterissuers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterIssuers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clusterissuers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterIssuer. +func (c *clusterIssuers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterIssuer, err error) { + result = &v1.ClusterIssuer{} + err = c.client.Patch(pt). + Resource("clusterissuers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/doc.go b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/doc.go new file mode 100644 index 00000000000..2ce17146a02 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
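+//
+// Clients in this package are normally obtained via NewForConfig (or through
+// the parent clientset) rather than constructed by hand.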
+package v1 diff --git a/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/doc.go b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/doc.go new file mode 100644 index 00000000000..40528db3a52 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_certificate.go b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_certificate.go new file mode 100644 index 00000000000..b87a96a156f --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_certificate.go @@ -0,0 +1,141 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCertificates implements CertificateInterface +type FakeCertificates struct { + Fake *FakeCertmanagerV1 + ns string +} + +var certificatesResource = v1.SchemeGroupVersion.WithResource("certificates") + +var certificatesKind = v1.SchemeGroupVersion.WithKind("Certificate") + +// Get takes name of the certificate, and returns the corresponding certificate object, and an error if there is any. +func (c *FakeCertificates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Certificate, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(certificatesResource, c.ns, name), &v1.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Certificate), err +} + +// List takes label and field selectors, and returns the list of Certificates that match those selectors. +func (c *FakeCertificates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(certificatesResource, certificatesKind, c.ns, opts), &v1.CertificateList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.CertificateList{ListMeta: obj.(*v1.CertificateList).ListMeta} + for _, item := range obj.(*v1.CertificateList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested certificates. +func (c *FakeCertificates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(certificatesResource, c.ns, opts)) + +} + +// Create takes the representation of a certificate and creates it. Returns the server's representation of the certificate, and an error, if there is any. +func (c *FakeCertificates) Create(ctx context.Context, certificate *v1.Certificate, opts metav1.CreateOptions) (result *v1.Certificate, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(certificatesResource, c.ns, certificate), &v1.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Certificate), err +} + +// Update takes the representation of a certificate and updates it. Returns the server's representation of the certificate, and an error, if there is any. +func (c *FakeCertificates) Update(ctx context.Context, certificate *v1.Certificate, opts metav1.UpdateOptions) (result *v1.Certificate, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(certificatesResource, c.ns, certificate), &v1.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Certificate), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCertificates) UpdateStatus(ctx context.Context, certificate *v1.Certificate, opts metav1.UpdateOptions) (*v1.Certificate, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(certificatesResource, "status", c.ns, certificate), &v1.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Certificate), err +} + +// Delete takes name of the certificate and deletes it. Returns an error if one occurs. +func (c *FakeCertificates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(certificatesResource, c.ns, name, opts), &v1.Certificate{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCertificates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionAction(certificatesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1.CertificateList{}) + return err +} + +// Patch applies the patch and returns the patched certificate. +func (c *FakeCertificates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Certificate, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(certificatesResource, c.ns, name, pt, data, subresources...), &v1.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Certificate), err +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_certificaterequest.go b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_certificaterequest.go new file mode 100644 index 00000000000..fd1cd2f32d8 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_certificaterequest.go @@ -0,0 +1,141 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCertificateRequests implements CertificateRequestInterface +type FakeCertificateRequests struct { + Fake *FakeCertmanagerV1 + ns string +} + +var certificaterequestsResource = v1.SchemeGroupVersion.WithResource("certificaterequests") + +var certificaterequestsKind = v1.SchemeGroupVersion.WithKind("CertificateRequest") + +// Get takes name of the certificateRequest, and returns the corresponding certificateRequest object, and an error if there is any. +func (c *FakeCertificateRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CertificateRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(certificaterequestsResource, c.ns, name), &v1.CertificateRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.CertificateRequest), err +} + +// List takes label and field selectors, and returns the list of CertificateRequests that match those selectors. +func (c *FakeCertificateRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateRequestList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(certificaterequestsResource, certificaterequestsKind, c.ns, opts), &v1.CertificateRequestList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.CertificateRequestList{ListMeta: obj.(*v1.CertificateRequestList).ListMeta} + for _, item := range obj.(*v1.CertificateRequestList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested certificateRequests. +func (c *FakeCertificateRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewWatchAction(certificaterequestsResource, c.ns, opts)) + +} + +// Create takes the representation of a certificateRequest and creates it. Returns the server's representation of the certificateRequest, and an error, if there is any. +func (c *FakeCertificateRequests) Create(ctx context.Context, certificateRequest *v1.CertificateRequest, opts metav1.CreateOptions) (result *v1.CertificateRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(certificaterequestsResource, c.ns, certificateRequest), &v1.CertificateRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.CertificateRequest), err +} + +// Update takes the representation of a certificateRequest and updates it. Returns the server's representation of the certificateRequest, and an error, if there is any. +func (c *FakeCertificateRequests) Update(ctx context.Context, certificateRequest *v1.CertificateRequest, opts metav1.UpdateOptions) (result *v1.CertificateRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(certificaterequestsResource, c.ns, certificateRequest), &v1.CertificateRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.CertificateRequest), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCertificateRequests) UpdateStatus(ctx context.Context, certificateRequest *v1.CertificateRequest, opts metav1.UpdateOptions) (*v1.CertificateRequest, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(certificaterequestsResource, "status", c.ns, certificateRequest), &v1.CertificateRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.CertificateRequest), err +} + +// Delete takes name of the certificateRequest and deletes it. Returns an error if one occurs. +func (c *FakeCertificateRequests) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(certificaterequestsResource, c.ns, name, opts), &v1.CertificateRequest{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCertificateRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionAction(certificaterequestsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1.CertificateRequestList{}) + return err +} + +// Patch applies the patch and returns the patched certificateRequest. +func (c *FakeCertificateRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateRequest, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(certificaterequestsResource, c.ns, name, pt, data, subresources...), &v1.CertificateRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.CertificateRequest), err +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_certmanager_client.go b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_certmanager_client.go new file mode 100644 index 00000000000..8972cfb15bd --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_certmanager_client.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1 "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1" +) + +type FakeCertmanagerV1 struct { + *testing.Fake +} + +func (c *FakeCertmanagerV1) Certificates(namespace string) v1.CertificateInterface { + return &FakeCertificates{c, namespace} +} + +func (c *FakeCertmanagerV1) CertificateRequests(namespace string) v1.CertificateRequestInterface { + return &FakeCertificateRequests{c, namespace} +} + +func (c *FakeCertmanagerV1) ClusterIssuers() v1.ClusterIssuerInterface { + return &FakeClusterIssuers{c} +} + +func (c *FakeCertmanagerV1) Issuers(namespace string) v1.IssuerInterface { + return &FakeIssuers{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeCertmanagerV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_clusterissuer.go b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_clusterissuer.go new file mode 100644 index 00000000000..fd06fd9e0a0 --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_clusterissuer.go @@ -0,0 +1,132 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterIssuers implements ClusterIssuerInterface +type FakeClusterIssuers struct { + Fake *FakeCertmanagerV1 +} + +var clusterissuersResource = v1.SchemeGroupVersion.WithResource("clusterissuers") + +var clusterissuersKind = v1.SchemeGroupVersion.WithKind("ClusterIssuer") + +// Get takes name of the clusterIssuer, and returns the corresponding clusterIssuer object, and an error if there is any. +func (c *FakeClusterIssuers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterIssuer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clusterissuersResource, name), &v1.ClusterIssuer{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ClusterIssuer), err +} + +// List takes label and field selectors, and returns the list of ClusterIssuers that match those selectors. +func (c *FakeClusterIssuers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterIssuerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(clusterissuersResource, clusterissuersKind, opts), &v1.ClusterIssuerList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ClusterIssuerList{ListMeta: obj.(*v1.ClusterIssuerList).ListMeta} + for _, item := range obj.(*v1.ClusterIssuerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterIssuers. +func (c *FakeClusterIssuers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clusterissuersResource, opts)) +} + +// Create takes the representation of a clusterIssuer and creates it. Returns the server's representation of the clusterIssuer, and an error, if there is any. +func (c *FakeClusterIssuers) Create(ctx context.Context, clusterIssuer *v1.ClusterIssuer, opts metav1.CreateOptions) (result *v1.ClusterIssuer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterissuersResource, clusterIssuer), &v1.ClusterIssuer{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ClusterIssuer), err +} + +// Update takes the representation of a clusterIssuer and updates it. Returns the server's representation of the clusterIssuer, and an error, if there is any. +func (c *FakeClusterIssuers) Update(ctx context.Context, clusterIssuer *v1.ClusterIssuer, opts metav1.UpdateOptions) (result *v1.ClusterIssuer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterissuersResource, clusterIssuer), &v1.ClusterIssuer{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ClusterIssuer), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeClusterIssuers) UpdateStatus(ctx context.Context, clusterIssuer *v1.ClusterIssuer, opts metav1.UpdateOptions) (*v1.ClusterIssuer, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateSubresourceAction(clusterissuersResource, "status", clusterIssuer), &v1.ClusterIssuer{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ClusterIssuer), err +} + +// Delete takes name of the clusterIssuer and deletes it. Returns an error if one occurs. +func (c *FakeClusterIssuers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(clusterissuersResource, name, opts), &v1.ClusterIssuer{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterIssuers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterissuersResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1.ClusterIssuerList{}) + return err +} + +// Patch applies the patch and returns the patched clusterIssuer. +func (c *FakeClusterIssuers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterIssuer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clusterissuersResource, name, pt, data, subresources...), &v1.ClusterIssuer{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ClusterIssuer), err +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_issuer.go b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_issuer.go new file mode 100644 index 00000000000..78053a6bf7d --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/fake/fake_issuer.go @@ -0,0 +1,141 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeIssuers implements IssuerInterface +type FakeIssuers struct { + Fake *FakeCertmanagerV1 + ns string +} + +var issuersResource = v1.SchemeGroupVersion.WithResource("issuers") + +var issuersKind = v1.SchemeGroupVersion.WithKind("Issuer") + +// Get takes name of the issuer, and returns the corresponding issuer object, and an error if there is any. +func (c *FakeIssuers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Issuer, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(issuersResource, c.ns, name), &v1.Issuer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Issuer), err +} + +// List takes label and field selectors, and returns the list of Issuers that match those selectors. 
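Every method on these fakes funnels through Invokes on the embedded testing.Fake, which makes behavior injectable: a test can prepend a reactor that intercepts actions before the default tracker-backed handling. A sketch, again assuming the generated fake package; the verb, resource, and error are illustrative.

    import (
        "context"
        "errors"

        cmv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        k8stesting "k8s.io/client-go/testing"

        fake "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/fake"
    )

    // failingCreate returns the injected error because PrependReactor
    // runs before the default object-tracker reactor.
    func failingCreate() error {
        client := fake.NewSimpleClientset()
        client.PrependReactor("create", "clusterissuers",
            func(action k8stesting.Action) (bool, runtime.Object, error) {
                return true, nil, errors.New("injected failure")
            })

        _, err := client.CertmanagerV1().ClusterIssuers().Create(
            context.Background(),
            &cmv1.ClusterIssuer{ObjectMeta: metav1.ObjectMeta{Name: "ci"}},
            metav1.CreateOptions{})
        return err
    }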
+func (c *FakeIssuers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IssuerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(issuersResource, issuersKind, c.ns, opts), &v1.IssuerList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.IssuerList{ListMeta: obj.(*v1.IssuerList).ListMeta} + for _, item := range obj.(*v1.IssuerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested issuers. +func (c *FakeIssuers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(issuersResource, c.ns, opts)) + +} + +// Create takes the representation of a issuer and creates it. Returns the server's representation of the issuer, and an error, if there is any. +func (c *FakeIssuers) Create(ctx context.Context, issuer *v1.Issuer, opts metav1.CreateOptions) (result *v1.Issuer, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(issuersResource, c.ns, issuer), &v1.Issuer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Issuer), err +} + +// Update takes the representation of a issuer and updates it. Returns the server's representation of the issuer, and an error, if there is any. +func (c *FakeIssuers) Update(ctx context.Context, issuer *v1.Issuer, opts metav1.UpdateOptions) (result *v1.Issuer, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(issuersResource, c.ns, issuer), &v1.Issuer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Issuer), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeIssuers) UpdateStatus(ctx context.Context, issuer *v1.Issuer, opts metav1.UpdateOptions) (*v1.Issuer, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(issuersResource, "status", c.ns, issuer), &v1.Issuer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Issuer), err +} + +// Delete takes name of the issuer and deletes it. Returns an error if one occurs. +func (c *FakeIssuers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(issuersResource, c.ns, name, opts), &v1.Issuer{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeIssuers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionAction(issuersResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1.IssuerList{}) + return err +} + +// Patch applies the patch and returns the patched issuer. +func (c *FakeIssuers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Issuer, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(issuersResource, c.ns, name, pt, data, subresources...), &v1.Issuer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Issuer), err +} diff --git a/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/generated_expansion.go b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/generated_expansion.go new file mode 100644 index 00000000000..566c1dc77fb --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type CertificateExpansion interface{} + +type CertificateRequestExpansion interface{} + +type ClusterIssuerExpansion interface{} + +type IssuerExpansion interface{} diff --git a/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/issuer.go b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/issuer.go new file mode 100644 index 00000000000..6776ba0ef3d --- /dev/null +++ b/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1/issuer.go @@ -0,0 +1,195 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/scheme" +) + +// IssuersGetter has a method to return a IssuerInterface. +// A group's client should implement this interface. +type IssuersGetter interface { + Issuers(namespace string) IssuerInterface +} + +// IssuerInterface has methods to work with Issuer resources. 
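The typed client defined below is reached through the generated versioned clientset rather than constructed directly. A sketch of that wiring, assuming the NewForConfig constructor from the generated clientset.go in this patch; the namespace is illustrative.

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/rest"

        versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned"
    )

    func listIssuers(cfg *rest.Config) error {
        cs, err := versioned.NewForConfig(cfg) // builds per-group/version REST clients
        if err != nil {
            return err
        }
        issuers, err := cs.CertmanagerV1().Issuers("default").List(context.Background(), metav1.ListOptions{})
        if err != nil {
            return err
        }
        for i := range issuers.Items {
            _ = issuers.Items[i].Name // inspect or log each Issuer
        }
        return nil
    }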
+type IssuerInterface interface { + Create(ctx context.Context, issuer *v1.Issuer, opts metav1.CreateOptions) (*v1.Issuer, error) + Update(ctx context.Context, issuer *v1.Issuer, opts metav1.UpdateOptions) (*v1.Issuer, error) + UpdateStatus(ctx context.Context, issuer *v1.Issuer, opts metav1.UpdateOptions) (*v1.Issuer, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Issuer, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.IssuerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Issuer, err error) + IssuerExpansion +} + +// issuers implements IssuerInterface +type issuers struct { + client rest.Interface + ns string +} + +// newIssuers returns a Issuers +func newIssuers(c *CertmanagerV1Client, namespace string) *issuers { + return &issuers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the issuer, and returns the corresponding issuer object, and an error if there is any. +func (c *issuers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Issuer, err error) { + result = &v1.Issuer{} + err = c.client.Get(). + Namespace(c.ns). + Resource("issuers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Issuers that match those selectors. +func (c *issuers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IssuerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.IssuerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("issuers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested issuers. +func (c *issuers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("issuers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a issuer and creates it. Returns the server's representation of the issuer, and an error, if there is any. +func (c *issuers) Create(ctx context.Context, issuer *v1.Issuer, opts metav1.CreateOptions) (result *v1.Issuer, err error) { + result = &v1.Issuer{} + err = c.client.Post(). + Namespace(c.ns). + Resource("issuers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(issuer). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a issuer and updates it. Returns the server's representation of the issuer, and an error, if there is any. +func (c *issuers) Update(ctx context.Context, issuer *v1.Issuer, opts metav1.UpdateOptions) (result *v1.Issuer, err error) { + result = &v1.Issuer{} + err = c.client.Put(). + Namespace(c.ns). + Resource("issuers"). + Name(issuer.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(issuer). 
+ Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *issuers) UpdateStatus(ctx context.Context, issuer *v1.Issuer, opts metav1.UpdateOptions) (result *v1.Issuer, err error) { + result = &v1.Issuer{} + err = c.client.Put(). + Namespace(c.ns). + Resource("issuers"). + Name(issuer.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(issuer). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the issuer and deletes it. Returns an error if one occurs. +func (c *issuers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("issuers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *issuers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("issuers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched issuer. +func (c *issuers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Issuer, err error) { + result = &v1.Issuer{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("issuers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/certmanager/informers/externalversions/acme/interface.go b/pkg/client/certmanager/informers/externalversions/acme/interface.go new file mode 100644 index 00000000000..ee63cb3af2f --- /dev/null +++ b/pkg/client/certmanager/informers/externalversions/acme/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package acme + +import ( + v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/acme/v1" + internalinterfaces "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
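Patch is the one method above whose trailing variadic argument selects a subresource; with no subresource it patches the object itself. A sketch of a merge patch against an Issuer (the clientset, issuer name, and label are illustrative):

    import (
        "context"

        cmv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"

        versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned"
    )

    func labelIssuer(cs versioned.Interface) (*cmv1.Issuer, error) {
        patch := []byte(`{"metadata":{"labels":{"example":"true"}}}`)
        // Appending "status" as a final argument would target the
        // status subresource instead of the main resource.
        return cs.CertmanagerV1().Issuers("default").Patch(
            context.Background(), "my-issuer", types.MergePatchType, patch, metav1.PatchOptions{})
    }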
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/pkg/client/certmanager/informers/externalversions/acme/v1/challenge.go b/pkg/client/certmanager/informers/externalversions/acme/v1/challenge.go new file mode 100644 index 00000000000..c5ae421974f --- /dev/null +++ b/pkg/client/certmanager/informers/externalversions/acme/v1/challenge.go @@ -0,0 +1,90 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + acmev1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned" + internalinterfaces "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/internalinterfaces" + v1 "knative.dev/eventing/pkg/client/certmanager/listers/acme/v1" +) + +// ChallengeInformer provides access to a shared informer and lister for +// Challenges. +type ChallengeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ChallengeLister +} + +type challengeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewChallengeInformer constructs a new informer for Challenge type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewChallengeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredChallengeInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredChallengeInformer constructs a new informer for Challenge type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredChallengeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcmeV1().Challenges(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcmeV1().Challenges(namespace).Watch(context.TODO(), options) + }, + }, + &acmev1.Challenge{}, + resyncPeriod, + indexers, + ) +} + +func (f *challengeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredChallengeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *challengeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&acmev1.Challenge{}, f.defaultInformer) +} + +func (f *challengeInformer) Lister() v1.ChallengeLister { + return v1.NewChallengeLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/certmanager/informers/externalversions/acme/v1/interface.go b/pkg/client/certmanager/informers/externalversions/acme/v1/interface.go new file mode 100644 index 00000000000..56c0b55fd6b --- /dev/null +++ b/pkg/client/certmanager/informers/externalversions/acme/v1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Challenges returns a ChallengeInformer. + Challenges() ChallengeInformer + // Orders returns a OrderInformer. + Orders() OrderInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Challenges returns a ChallengeInformer. +func (v *version) Challenges() ChallengeInformer { + return &challengeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Orders returns a OrderInformer. 
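The tweakListOptions hook threaded through NewFilteredChallengeInformer above is how the factory narrows every List and Watch an informer issues. A sketch of wiring it via the factory options defined later in this patch; the label selector is illustrative.

    import (
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned"
        externalversions "knative.dev/eventing/pkg/client/certmanager/informers/externalversions"
    )

    func filteredFactory(cs versioned.Interface) externalversions.SharedInformerFactory {
        factory := externalversions.NewSharedInformerFactoryWithOptions(cs, 10*time.Minute,
            externalversions.WithTweakListOptions(func(opts *metav1.ListOptions) {
                // Applied to every List/Watch issued by informers from this factory.
                opts.LabelSelector = "example.knative.dev/watched=true"
            }))
        factory.Acme().V1().Challenges().Informer() // register the Challenge informer
        return factory
    }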
+func (v *version) Orders() OrderInformer { + return &orderInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/pkg/client/certmanager/informers/externalversions/acme/v1/order.go b/pkg/client/certmanager/informers/externalversions/acme/v1/order.go new file mode 100644 index 00000000000..92ef07b6808 --- /dev/null +++ b/pkg/client/certmanager/informers/externalversions/acme/v1/order.go @@ -0,0 +1,90 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + acmev1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned" + internalinterfaces "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/internalinterfaces" + v1 "knative.dev/eventing/pkg/client/certmanager/listers/acme/v1" +) + +// OrderInformer provides access to a shared informer and lister for +// Orders. +type OrderInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.OrderLister +} + +type orderInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewOrderInformer constructs a new informer for Order type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewOrderInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredOrderInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredOrderInformer constructs a new informer for Order type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredOrderInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcmeV1().Orders(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcmeV1().Orders(namespace).Watch(context.TODO(), options) + }, + }, + &acmev1.Order{}, + resyncPeriod, + indexers, + ) +} + +func (f *orderInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredOrderInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *orderInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&acmev1.Order{}, f.defaultInformer) +} + +func (f *orderInformer) Lister() v1.OrderLister { + return v1.NewOrderLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/certmanager/informers/externalversions/certmanager/interface.go b/pkg/client/certmanager/informers/externalversions/certmanager/interface.go new file mode 100644 index 00000000000..5822fc2133d --- /dev/null +++ b/pkg/client/certmanager/informers/externalversions/certmanager/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package certmanager + +import ( + v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1" + internalinterfaces "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. 
+func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/pkg/client/certmanager/informers/externalversions/certmanager/v1/certificate.go b/pkg/client/certmanager/informers/externalversions/certmanager/v1/certificate.go new file mode 100644 index 00000000000..371c02956d4 --- /dev/null +++ b/pkg/client/certmanager/informers/externalversions/certmanager/v1/certificate.go @@ -0,0 +1,90 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned" + internalinterfaces "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/internalinterfaces" + v1 "knative.dev/eventing/pkg/client/certmanager/listers/certmanager/v1" +) + +// CertificateInformer provides access to a shared informer and lister for +// Certificates. +type CertificateInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.CertificateLister +} + +type certificateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCertificateInformer constructs a new informer for Certificate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCertificateInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCertificateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCertificateInformer constructs a new informer for Certificate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCertificateInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1().Certificates(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1().Certificates(namespace).Watch(context.TODO(), options) + }, + }, + &certmanagerv1.Certificate{}, + resyncPeriod, + indexers, + ) +} + +func (f *certificateInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCertificateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *certificateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&certmanagerv1.Certificate{}, f.defaultInformer) +} + +func (f *certificateInformer) Lister() v1.CertificateLister { + return v1.NewCertificateLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/certmanager/informers/externalversions/certmanager/v1/certificaterequest.go b/pkg/client/certmanager/informers/externalversions/certmanager/v1/certificaterequest.go new file mode 100644 index 00000000000..38e7d1d2b61 --- /dev/null +++ b/pkg/client/certmanager/informers/externalversions/certmanager/v1/certificaterequest.go @@ -0,0 +1,90 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned" + internalinterfaces "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/internalinterfaces" + v1 "knative.dev/eventing/pkg/client/certmanager/listers/certmanager/v1" +) + +// CertificateRequestInformer provides access to a shared informer and lister for +// CertificateRequests. +type CertificateRequestInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.CertificateRequestLister +} + +type certificateRequestInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCertificateRequestInformer constructs a new informer for CertificateRequest type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCertificateRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCertificateRequestInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCertificateRequestInformer constructs a new informer for CertificateRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCertificateRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1().CertificateRequests(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1().CertificateRequests(namespace).Watch(context.TODO(), options) + }, + }, + &certmanagerv1.CertificateRequest{}, + resyncPeriod, + indexers, + ) +} + +func (f *certificateRequestInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCertificateRequestInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *certificateRequestInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&certmanagerv1.CertificateRequest{}, f.defaultInformer) +} + +func (f *certificateRequestInformer) Lister() v1.CertificateRequestLister { + return v1.NewCertificateRequestLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/certmanager/informers/externalversions/certmanager/v1/clusterissuer.go b/pkg/client/certmanager/informers/externalversions/certmanager/v1/clusterissuer.go new file mode 100644 index 00000000000..e9db2121e5b --- /dev/null +++ b/pkg/client/certmanager/informers/externalversions/certmanager/v1/clusterissuer.go @@ -0,0 +1,89 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + time "time" + + certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned" + internalinterfaces "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/internalinterfaces" + v1 "knative.dev/eventing/pkg/client/certmanager/listers/certmanager/v1" +) + +// ClusterIssuerInformer provides access to a shared informer and lister for +// ClusterIssuers. +type ClusterIssuerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterIssuerLister +} + +type clusterIssuerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterIssuerInformer constructs a new informer for ClusterIssuer type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterIssuerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterIssuerInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterIssuerInformer constructs a new informer for ClusterIssuer type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterIssuerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1().ClusterIssuers().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1().ClusterIssuers().Watch(context.TODO(), options) + }, + }, + &certmanagerv1.ClusterIssuer{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterIssuerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterIssuerInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterIssuerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&certmanagerv1.ClusterIssuer{}, f.defaultInformer) +} + +func (f *clusterIssuerInformer) Lister() v1.ClusterIssuerLister { + return v1.NewClusterIssuerLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/certmanager/informers/externalversions/certmanager/v1/interface.go b/pkg/client/certmanager/informers/externalversions/certmanager/v1/interface.go new file mode 100644 index 00000000000..3d108f4c602 --- /dev/null +++ b/pkg/client/certmanager/informers/externalversions/certmanager/v1/interface.go @@ -0,0 +1,66 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance 
with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Certificates returns a CertificateInformer. + Certificates() CertificateInformer + // CertificateRequests returns a CertificateRequestInformer. + CertificateRequests() CertificateRequestInformer + // ClusterIssuers returns a ClusterIssuerInformer. + ClusterIssuers() ClusterIssuerInformer + // Issuers returns a IssuerInformer. + Issuers() IssuerInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Certificates returns a CertificateInformer. +func (v *version) Certificates() CertificateInformer { + return &certificateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// CertificateRequests returns a CertificateRequestInformer. +func (v *version) CertificateRequests() CertificateRequestInformer { + return &certificateRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ClusterIssuers returns a ClusterIssuerInformer. +func (v *version) ClusterIssuers() ClusterIssuerInformer { + return &clusterIssuerInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Issuers returns a IssuerInformer. +func (v *version) Issuers() IssuerInformer { + return &issuerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/pkg/client/certmanager/informers/externalversions/certmanager/v1/issuer.go b/pkg/client/certmanager/informers/externalversions/certmanager/v1/issuer.go new file mode 100644 index 00000000000..eb0d57de3dc --- /dev/null +++ b/pkg/client/certmanager/informers/externalversions/certmanager/v1/issuer.go @@ -0,0 +1,90 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
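Among the informers in this group version, ClusterIssuer is the one cluster-scoped type: its constructors above take no namespace, and its lister is flat rather than namespaced. A sketch of reading from the synced cache (the factory parameter is illustrative):

    import (
        "k8s.io/apimachinery/pkg/labels"

        externalversions "knative.dev/eventing/pkg/client/certmanager/informers/externalversions"
    )

    func clusterIssuerNames(factory externalversions.SharedInformerFactory) ([]string, error) {
        lister := factory.Certmanager().V1().ClusterIssuers().Lister()
        all, err := lister.List(labels.Everything()) // no namespace argument: cluster-scoped
        if err != nil {
            return nil, err
        }
        names := make([]string, 0, len(all))
        for _, ci := range all {
            names = append(names, ci.Name)
        }
        return names, nil
    }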
+ +package v1 + +import ( + "context" + time "time" + + certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned" + internalinterfaces "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/internalinterfaces" + v1 "knative.dev/eventing/pkg/client/certmanager/listers/certmanager/v1" +) + +// IssuerInformer provides access to a shared informer and lister for +// Issuers. +type IssuerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.IssuerLister +} + +type issuerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewIssuerInformer constructs a new informer for Issuer type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewIssuerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIssuerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredIssuerInformer constructs a new informer for Issuer type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredIssuerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1().Issuers(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1().Issuers(namespace).Watch(context.TODO(), options) + }, + }, + &certmanagerv1.Issuer{}, + resyncPeriod, + indexers, + ) +} + +func (f *issuerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIssuerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *issuerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&certmanagerv1.Issuer{}, f.defaultInformer) +} + +func (f *issuerInformer) Lister() v1.IssuerLister { + return v1.NewIssuerLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/certmanager/informers/externalversions/factory.go b/pkg/client/certmanager/informers/externalversions/factory.go new file mode 100644 index 00000000000..b2017cf1cc5 --- /dev/null +++ b/pkg/client/certmanager/informers/externalversions/factory.go @@ -0,0 +1,267 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned" + acme "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/acme" + certmanager "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager" + internalinterfaces "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/internalinterfaces" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. 
+func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() + f.startedInformers[informerType] = true + } + } +} + +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. 
+//
+// It is typically used like this:
+//
+// ctx, cancel := context.WithCancel(context.Background())
+// defer cancel()
+// factory := NewSharedInformerFactory(client, resyncPeriod)
+// defer factory.Shutdown() // Returns immediately if nothing was started.
+// genericInformer := factory.ForResource(resource)
+// typedInformer := factory.SomeAPIGroup().V1().SomeType()
+// factory.Start(ctx.Done()) // Start processing these informers.
+// synced := factory.WaitForCacheSync(ctx.Done())
+// for v, ok := range synced {
+// if !ok {
+// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+// return
+// }
+// }
+//
+// // Informers can also be created after Start, but then
+// // Start must be called again:
+// anotherGenericInformer := factory.ForResource(resource)
+// factory.Start(ctx.Done())
+type SharedInformerFactory interface {
+ internalinterfaces.SharedInformerFactory
+
+ // Start initializes all requested informers. They are handled in goroutines
+ // which run until the stop channel gets closed.
+ Start(stopCh <-chan struct{})
+
+ // Shutdown marks a factory as shutting down. At that point no new
+ // informers can be started anymore and Start will return without
+ // doing anything.
+ //
+ // In addition, Shutdown blocks until all goroutines have terminated. For that
+ // to happen, the close channel(s) that they were started with must be closed,
+ // either before Shutdown gets called or while it is waiting.
+ //
+ // Shutdown may be called multiple times, even concurrently. All such calls will
+ // block until all goroutines have terminated.
+ Shutdown()
+
+ // WaitForCacheSync blocks until all started informers' caches were synced
+ // or the stop channel gets closed.
+ WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+ // ForResource gives generic access to a shared informer of the matching type.
+ ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+ // InformerFor returns the SharedIndexInformer for obj using an internal
+ // client.
+ InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
+ Acme() acme.Interface
+ Certmanager() certmanager.Interface
+}
+
+func (f *sharedInformerFactory) Acme() acme.Interface {
+ return acme.New(f, f.namespace, f.tweakListOptions)
+}
+
+func (f *sharedInformerFactory) Certmanager() certmanager.Interface {
+ return certmanager.New(f, f.namespace, f.tweakListOptions)
+}
diff --git a/pkg/client/certmanager/informers/externalversions/generic.go b/pkg/client/certmanager/informers/externalversions/generic.go
new file mode 100644
index 00000000000..cb5a0be6df6
--- /dev/null
+++ b/pkg/client/certmanager/informers/externalversions/generic.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
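Taken together, the factory's lifecycle is: register informers, Start, wait for caches to sync, then Shutdown once the stop channel closes. A sketch using only the methods in the interface above; the resync period and informer choice are illustrative.

    import (
        "context"
        "fmt"
        "time"

        versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned"
        externalversions "knative.dev/eventing/pkg/client/certmanager/informers/externalversions"
    )

    func runInformers(ctx context.Context, cs versioned.Interface) error {
        factory := externalversions.NewSharedInformerFactory(cs, 30*time.Minute)
        factory.Certmanager().V1().Certificates().Informer() // register before Start

        factory.Start(ctx.Done()) // one goroutine per registered informer
        for typ, synced := range factory.WaitForCacheSync(ctx.Done()) {
            if !synced {
                return fmt.Errorf("cache failed to sync: %v", typ)
            }
        }

        <-ctx.Done()       // run until the caller cancels
        factory.Shutdown() // block until all informer goroutines have exited
        return nil
    }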
+
+package externalversions
+
+import (
+    "fmt"
+
+    v1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1"
+    certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1"
+    schema "k8s.io/apimachinery/pkg/runtime/schema"
+    cache "k8s.io/client-go/tools/cache"
+)
+
+// GenericInformer is a type of SharedIndexInformer that locates and delegates to other
+// sharedInformers based on type.
+type GenericInformer interface {
+    Informer() cache.SharedIndexInformer
+    Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+    informer cache.SharedIndexInformer
+    resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+    return f.informer
+}
+
+// Lister returns the GenericLister.
+func (f *genericInformer) Lister() cache.GenericLister {
+    return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
+}
+
+// ForResource gives generic access to a shared informer of the matching type.
+// TODO extend this to unknown resources with a client pool
+func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
+    switch resource {
+    // Group=acme.cert-manager.io, Version=v1
+    case v1.SchemeGroupVersion.WithResource("challenges"):
+        return &genericInformer{resource: resource.GroupResource(), informer: f.Acme().V1().Challenges().Informer()}, nil
+    case v1.SchemeGroupVersion.WithResource("orders"):
+        return &genericInformer{resource: resource.GroupResource(), informer: f.Acme().V1().Orders().Informer()}, nil
+
+    // Group=cert-manager.io, Version=v1
+    case certmanagerv1.SchemeGroupVersion.WithResource("certificates"):
+        return &genericInformer{resource: resource.GroupResource(), informer: f.Certmanager().V1().Certificates().Informer()}, nil
+    case certmanagerv1.SchemeGroupVersion.WithResource("certificaterequests"):
+        return &genericInformer{resource: resource.GroupResource(), informer: f.Certmanager().V1().CertificateRequests().Informer()}, nil
+    case certmanagerv1.SchemeGroupVersion.WithResource("clusterissuers"):
+        return &genericInformer{resource: resource.GroupResource(), informer: f.Certmanager().V1().ClusterIssuers().Informer()}, nil
+    case certmanagerv1.SchemeGroupVersion.WithResource("issuers"):
+        return &genericInformer{resource: resource.GroupResource(), informer: f.Certmanager().V1().Issuers().Informer()}, nil
+
+    }
+
+    return nil, fmt.Errorf("no informer found for %v", resource)
+}
diff --git a/pkg/client/certmanager/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/certmanager/informers/externalversions/internalinterfaces/factory_interfaces.go
new file mode 100644
index 00000000000..79c136d94f4
--- /dev/null
+++ b/pkg/client/certmanager/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
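The internalinterfaces file that follows declares TweakListOptionsFunc, the hook behind the factory's WithTweakListOptions option. A sketch of scoping a factory by namespace and label; the selector and namespace values are illustrative, not part of this patch.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned"
	externalversions "knative.dev/eventing/pkg/client/certmanager/informers/externalversions"
)

// newScopedFactory narrows every list/watch the factory issues: WithNamespace
// restricts it to one namespace, and the tweak adds a label selector.
func newScopedFactory(client versioned.Interface) externalversions.SharedInformerFactory {
	tweak := func(opts *metav1.ListOptions) {
		opts.LabelSelector = "app.kubernetes.io/managed-by=cert-manager" // illustrative
	}
	return externalversions.NewSharedInformerFactoryWithOptions(
		client, 10*time.Hour,
		externalversions.WithNamespace("cert-manager"), // illustrative
		externalversions.WithTweakListOptions(tweak),
	)
}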
+
+package internalinterfaces
+
+import (
+    time "time"
+
+    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    runtime "k8s.io/apimachinery/pkg/runtime"
+    cache "k8s.io/client-go/tools/cache"
+    versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned"
+)
+
+// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
+
+// SharedInformerFactory is a small interface that allows adding an informer without an import cycle.
+type SharedInformerFactory interface {
+    Start(stopCh <-chan struct{})
+    InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
+}
+
+// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
+type TweakListOptionsFunc func(*v1.ListOptions)
diff --git a/pkg/client/certmanager/injection/client/client.go b/pkg/client/certmanager/injection/client/client.go
new file mode 100644
index 00000000000..a9cf608ae05
--- /dev/null
+++ b/pkg/client/certmanager/injection/client/client.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package client
+
+import (
+    context "context"
+
+    rest "k8s.io/client-go/rest"
+    versioned "knative.dev/eventing/pkg/client/certmanager/clientset/versioned"
+    injection "knative.dev/pkg/injection"
+    logging "knative.dev/pkg/logging"
+)
+
+func init() {
+    injection.Default.RegisterClient(withClientFromConfig)
+    injection.Default.RegisterClientFetcher(func(ctx context.Context) interface{} {
+        return Get(ctx)
+    })
+}
+
+// Key is used as the key for associating information with a context.Context.
+type Key struct{}
+
+func withClientFromConfig(ctx context.Context, cfg *rest.Config) context.Context {
+    return context.WithValue(ctx, Key{}, versioned.NewForConfigOrDie(cfg))
+}
+
+// Get extracts the versioned.Interface client from the context.
+func Get(ctx context.Context) versioned.Interface {
+    untyped := ctx.Value(Key{})
+    if untyped == nil {
+        if injection.GetConfig(ctx) == nil {
+            logging.FromContext(ctx).Panic(
+                "Unable to fetch knative.dev/eventing/pkg/client/certmanager/clientset/versioned.Interface from context. This context is not the application context (which is typically given to constructors via sharedmain).")
+        } else {
+            logging.FromContext(ctx).Panic(
+                "Unable to fetch knative.dev/eventing/pkg/client/certmanager/clientset/versioned.Interface from context.")
+        }
+    }
+    return untyped.(versioned.Interface)
+}
diff --git a/pkg/client/certmanager/injection/client/fake/fake.go b/pkg/client/certmanager/injection/client/fake/fake.go
new file mode 100644
index 00000000000..43e4a7239f8
--- /dev/null
+++ b/pkg/client/certmanager/injection/client/fake/fake.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package fake
+
+import (
+    context "context"
+
+    runtime "k8s.io/apimachinery/pkg/runtime"
+    rest "k8s.io/client-go/rest"
+    fake "knative.dev/eventing/pkg/client/certmanager/clientset/versioned/fake"
+    client "knative.dev/eventing/pkg/client/certmanager/injection/client"
+    injection "knative.dev/pkg/injection"
+    logging "knative.dev/pkg/logging"
+)
+
+func init() {
+    injection.Fake.RegisterClient(withClient)
+    injection.Fake.RegisterClientFetcher(func(ctx context.Context) interface{} {
+        return Get(ctx)
+    })
+}
+
+func withClient(ctx context.Context, cfg *rest.Config) context.Context {
+    ctx, _ = With(ctx)
+    return ctx
+}
+
+func With(ctx context.Context, objects ...runtime.Object) (context.Context, *fake.Clientset) {
+    cs := fake.NewSimpleClientset(objects...)
+    return context.WithValue(ctx, client.Key{}, cs), cs
+}
+
+// Get extracts the fake Clientset from the context.
+func Get(ctx context.Context) *fake.Clientset {
+    untyped := ctx.Value(client.Key{})
+    if untyped == nil {
+        logging.FromContext(ctx).Panic(
+            "Unable to fetch knative.dev/eventing/pkg/client/certmanager/clientset/versioned/fake.Clientset from context.")
+    }
+    return untyped.(*fake.Clientset)
+}
diff --git a/pkg/client/certmanager/injection/informers/acme/v1/challenge/challenge.go b/pkg/client/certmanager/injection/informers/acme/v1/challenge/challenge.go
new file mode 100644
index 00000000000..22969ecc776
--- /dev/null
+++ b/pkg/client/certmanager/injection/informers/acme/v1/challenge/challenge.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package challenge
+
+import (
+    context "context"
+
+    v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/acme/v1"
+    factory "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory"
+    controller "knative.dev/pkg/controller"
+    logging "knative.dev/pkg/logging"
+)
+
+// Key is used for associating the Informer inside the context.Context.
+type Key struct{}
+
+func WithInformer(ctx context.Context) (context.Context, controller.Informer) {
+    f := factory.Get(ctx)
+    inf := f.Acme().V1().Challenges()
+    return context.WithValue(ctx, Key{}, inf), inf.Informer()
+}
+
+// Get extracts the typed informer from the context.
+func Get(ctx context.Context) v1.ChallengeInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/eventing/pkg/client/certmanager/informers/externalversions/acme/v1.ChallengeInformer from context.") + } + return untyped.(v1.ChallengeInformer) +} diff --git a/pkg/client/certmanager/injection/informers/acme/v1/challenge/fake/fake.go b/pkg/client/certmanager/injection/informers/acme/v1/challenge/fake/fake.go new file mode 100644 index 00000000000..7864ec8b2cb --- /dev/null +++ b/pkg/client/certmanager/injection/informers/acme/v1/challenge/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + challenge "knative.dev/eventing/pkg/client/certmanager/injection/informers/acme/v1/challenge" + fake "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/fake" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = challenge.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Acme().V1().Challenges() + return context.WithValue(ctx, challenge.Key{}, inf), inf.Informer() +} diff --git a/pkg/client/certmanager/injection/informers/acme/v1/challenge/filtered/challenge.go b/pkg/client/certmanager/injection/informers/acme/v1/challenge/filtered/challenge.go new file mode 100644 index 00000000000..8b44b4dc597 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/acme/v1/challenge/filtered/challenge.go @@ -0,0 +1,65 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package filtered + +import ( + context "context" + + v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/acme/v1" + filtered "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterFilteredInformers(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. 
+type Key struct { + Selector string +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(filtered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := filtered.Get(ctx, selector) + inf := f.Acme().V1().Challenges() + ctx = context.WithValue(ctx, Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context, selector string) v1.ChallengeInformer { + untyped := ctx.Value(Key{Selector: selector}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch knative.dev/eventing/pkg/client/certmanager/informers/externalversions/acme/v1.ChallengeInformer with selector %s from context.", selector) + } + return untyped.(v1.ChallengeInformer) +} diff --git a/pkg/client/certmanager/injection/informers/acme/v1/challenge/filtered/fake/fake.go b/pkg/client/certmanager/injection/informers/acme/v1/challenge/filtered/fake/fake.go new file mode 100644 index 00000000000..6b56dbb7b6a --- /dev/null +++ b/pkg/client/certmanager/injection/informers/acme/v1/challenge/filtered/fake/fake.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + filtered "knative.dev/eventing/pkg/client/certmanager/injection/informers/acme/v1/challenge/filtered" + factoryfiltered "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +var Get = filtered.Get + +func init() { + injection.Fake.RegisterFilteredInformers(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(factoryfiltered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := factoryfiltered.Get(ctx, selector) + inf := f.Acme().V1().Challenges() + ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} diff --git a/pkg/client/certmanager/injection/informers/acme/v1/order/fake/fake.go b/pkg/client/certmanager/injection/informers/acme/v1/order/fake/fake.go new file mode 100644 index 00000000000..1bc1b0dffb1 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/acme/v1/order/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + order "knative.dev/eventing/pkg/client/certmanager/injection/informers/acme/v1/order" + fake "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/fake" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = order.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Acme().V1().Orders() + return context.WithValue(ctx, order.Key{}, inf), inf.Informer() +} diff --git a/pkg/client/certmanager/injection/informers/acme/v1/order/filtered/fake/fake.go b/pkg/client/certmanager/injection/informers/acme/v1/order/filtered/fake/fake.go new file mode 100644 index 00000000000..10be33fe2c9 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/acme/v1/order/filtered/fake/fake.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
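The fake informer packages in this area exist so tests can run against injection.Fake rather than a real API server. A sketch of exercising them, assuming knative.dev/pkg's reconciler/testing helper SetupFakeContext; package and test names are illustrative.

package example_test

import (
	"testing"

	// Importing the fake packages runs their init(), which registers the
	// fake client and informer with injection.Fake.
	fakecmclient "knative.dev/eventing/pkg/client/certmanager/injection/client/fake"
	fakeorderinformer "knative.dev/eventing/pkg/client/certmanager/injection/informers/acme/v1/order/fake"
	rtesting "knative.dev/pkg/reconciler/testing"
)

func TestOrderInformerInjection(t *testing.T) {
	// SetupFakeContext walks everything registered with injection.Fake
	// and attaches fakes to the returned context.
	ctx, _ := rtesting.SetupFakeContext(t)

	// Both Gets panic if the corresponding init() above did not run,
	// so reaching the end of the test proves the wiring.
	_ = fakecmclient.Get(ctx)
	_ = fakeorderinformer.Get(ctx)
}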
+ +package fake + +import ( + context "context" + + filtered "knative.dev/eventing/pkg/client/certmanager/injection/informers/acme/v1/order/filtered" + factoryfiltered "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +var Get = filtered.Get + +func init() { + injection.Fake.RegisterFilteredInformers(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(factoryfiltered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := factoryfiltered.Get(ctx, selector) + inf := f.Acme().V1().Orders() + ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} diff --git a/pkg/client/certmanager/injection/informers/acme/v1/order/filtered/order.go b/pkg/client/certmanager/injection/informers/acme/v1/order/filtered/order.go new file mode 100644 index 00000000000..e843ce8ff38 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/acme/v1/order/filtered/order.go @@ -0,0 +1,65 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package filtered + +import ( + context "context" + + v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/acme/v1" + filtered "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterFilteredInformers(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct { + Selector string +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(filtered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := filtered.Get(ctx, selector) + inf := f.Acme().V1().Orders() + ctx = context.WithValue(ctx, Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context, selector string) v1.OrderInformer { + untyped := ctx.Value(Key{Selector: selector}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch knative.dev/eventing/pkg/client/certmanager/informers/externalversions/acme/v1.OrderInformer with selector %s from context.", selector) + } + return untyped.(v1.OrderInformer) +} diff --git a/pkg/client/certmanager/injection/informers/acme/v1/order/order.go b/pkg/client/certmanager/injection/informers/acme/v1/order/order.go new file mode 100644 index 00000000000..33c30d2ea46 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/acme/v1/order/order.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package order + +import ( + context "context" + + v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/acme/v1" + factory "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory" + controller "knative.dev/pkg/controller" + logging "knative.dev/pkg/logging" +) + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func WithInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Acme().V1().Orders() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1.OrderInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/eventing/pkg/client/certmanager/informers/externalversions/acme/v1.OrderInformer from context.") + } + return untyped.(v1.OrderInformer) +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/certificate/certificate.go b/pkg/client/certmanager/injection/informers/certmanager/v1/certificate/certificate.go new file mode 100644 index 00000000000..ee96d7d0171 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/certificate/certificate.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package certificate + +import ( + context "context" + + v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1" + factory "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory" + controller "knative.dev/pkg/controller" + logging "knative.dev/pkg/logging" +) + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func WithInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Certmanager().V1().Certificates() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1.CertificateInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1.CertificateInformer from context.") + } + return untyped.(v1.CertificateInformer) +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/certificate/fake/fake.go b/pkg/client/certmanager/injection/informers/certmanager/v1/certificate/fake/fake.go new file mode 100644 index 00000000000..4a1f2987fed --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/certificate/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + certificate "knative.dev/eventing/pkg/client/certmanager/injection/informers/certmanager/v1/certificate" + fake "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/fake" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = certificate.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Certmanager().V1().Certificates() + return context.WithValue(ctx, certificate.Key{}, inf), inf.Informer() +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/certificate/filtered/certificate.go b/pkg/client/certmanager/injection/informers/certmanager/v1/certificate/filtered/certificate.go new file mode 100644 index 00000000000..409f43366fd --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/certificate/filtered/certificate.go @@ -0,0 +1,65 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package filtered + +import ( + context "context" + + v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1" + filtered "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterFilteredInformers(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct { + Selector string +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(filtered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := filtered.Get(ctx, selector) + inf := f.Certmanager().V1().Certificates() + ctx = context.WithValue(ctx, Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context, selector string) v1.CertificateInformer { + untyped := ctx.Value(Key{Selector: selector}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1.CertificateInformer with selector %s from context.", selector) + } + return untyped.(v1.CertificateInformer) +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/certificate/filtered/fake/fake.go b/pkg/client/certmanager/injection/informers/certmanager/v1/certificate/filtered/fake/fake.go new file mode 100644 index 00000000000..f1ee094462a --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/certificate/filtered/fake/fake.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
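The filtered variants are keyed by a label selector rather than a bare Key{}, and withInformer above materializes one informer per selector found under the factory's LabelKey{}. A sketch of the two phases; WithSelectors is an assumption here, namely the helper the generated filtered factory (later in this patch) is expected to provide for stashing the selector list:

package example

import (
	"context"

	v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1"
	filteredcert "knative.dev/eventing/pkg/client/certmanager/injection/informers/certmanager/v1/certificate/filtered"
	filteredfactory "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/filtered"
)

const selector = "app=eventing" // illustrative label selector

// Phase 1, before injection starts: register the selector list so the
// filtered factory and informers get built for it (WithSelectors assumed).
func registerSelectors(ctx context.Context) context.Context {
	return filteredfactory.WithSelectors(ctx, selector)
}

// Phase 2, after startup: the informer is retrieved with the same selector.
func filteredCertificates(ctx context.Context) v1.CertificateInformer {
	return filteredcert.Get(ctx, selector)
}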
+ +package fake + +import ( + context "context" + + filtered "knative.dev/eventing/pkg/client/certmanager/injection/informers/certmanager/v1/certificate/filtered" + factoryfiltered "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +var Get = filtered.Get + +func init() { + injection.Fake.RegisterFilteredInformers(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(factoryfiltered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := factoryfiltered.Get(ctx, selector) + inf := f.Certmanager().V1().Certificates() + ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/certificaterequest.go b/pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/certificaterequest.go new file mode 100644 index 00000000000..b9e39c09731 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/certificaterequest.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package certificaterequest + +import ( + context "context" + + v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1" + factory "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory" + controller "knative.dev/pkg/controller" + logging "knative.dev/pkg/logging" +) + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func WithInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Certmanager().V1().CertificateRequests() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context) v1.CertificateRequestInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1.CertificateRequestInformer from context.") + } + return untyped.(v1.CertificateRequestInformer) +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/fake/fake.go b/pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/fake/fake.go new file mode 100644 index 00000000000..72b86ab276f --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + certificaterequest "knative.dev/eventing/pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest" + fake "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/fake" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = certificaterequest.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Certmanager().V1().CertificateRequests() + return context.WithValue(ctx, certificaterequest.Key{}, inf), inf.Informer() +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/filtered/certificaterequest.go b/pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/filtered/certificaterequest.go new file mode 100644 index 00000000000..573d54c9c75 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/filtered/certificaterequest.go @@ -0,0 +1,65 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package filtered + +import ( + context "context" + + v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1" + filtered "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterFilteredInformers(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct { + Selector string +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(filtered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := filtered.Get(ctx, selector) + inf := f.Certmanager().V1().CertificateRequests() + ctx = context.WithValue(ctx, Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context, selector string) v1.CertificateRequestInformer { + untyped := ctx.Value(Key{Selector: selector}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1.CertificateRequestInformer with selector %s from context.", selector) + } + return untyped.(v1.CertificateRequestInformer) +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/filtered/fake/fake.go b/pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/filtered/fake/fake.go new file mode 100644 index 00000000000..885b4162c51 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/filtered/fake/fake.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + context "context" + + filtered "knative.dev/eventing/pkg/client/certmanager/injection/informers/certmanager/v1/certificaterequest/filtered" + factoryfiltered "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +var Get = filtered.Get + +func init() { + injection.Fake.RegisterFilteredInformers(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(factoryfiltered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := factoryfiltered.Get(ctx, selector) + inf := f.Certmanager().V1().CertificateRequests() + ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/clusterissuer.go b/pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/clusterissuer.go new file mode 100644 index 00000000000..2e3299608db --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/clusterissuer.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package clusterissuer + +import ( + context "context" + + v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1" + factory "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory" + controller "knative.dev/pkg/controller" + logging "knative.dev/pkg/logging" +) + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func WithInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Certmanager().V1().ClusterIssuers() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context) v1.ClusterIssuerInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1.ClusterIssuerInformer from context.") + } + return untyped.(v1.ClusterIssuerInformer) +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/fake/fake.go b/pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/fake/fake.go new file mode 100644 index 00000000000..a73e1c5b76d --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + clusterissuer "knative.dev/eventing/pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer" + fake "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/fake" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = clusterissuer.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Certmanager().V1().ClusterIssuers() + return context.WithValue(ctx, clusterissuer.Key{}, inf), inf.Informer() +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/filtered/clusterissuer.go b/pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/filtered/clusterissuer.go new file mode 100644 index 00000000000..cd0d6027523 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/filtered/clusterissuer.go @@ -0,0 +1,65 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package filtered + +import ( + context "context" + + v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1" + filtered "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterFilteredInformers(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. 
+type Key struct { + Selector string +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(filtered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := filtered.Get(ctx, selector) + inf := f.Certmanager().V1().ClusterIssuers() + ctx = context.WithValue(ctx, Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context, selector string) v1.ClusterIssuerInformer { + untyped := ctx.Value(Key{Selector: selector}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1.ClusterIssuerInformer with selector %s from context.", selector) + } + return untyped.(v1.ClusterIssuerInformer) +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/filtered/fake/fake.go b/pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/filtered/fake/fake.go new file mode 100644 index 00000000000..804dc43a79f --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/filtered/fake/fake.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + context "context" + + filtered "knative.dev/eventing/pkg/client/certmanager/injection/informers/certmanager/v1/clusterissuer/filtered" + factoryfiltered "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +var Get = filtered.Get + +func init() { + injection.Fake.RegisterFilteredInformers(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(factoryfiltered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := factoryfiltered.Get(ctx, selector) + inf := f.Certmanager().V1().ClusterIssuers() + ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/issuer/fake/fake.go b/pkg/client/certmanager/injection/informers/certmanager/v1/issuer/fake/fake.go new file mode 100644 index 00000000000..8cae3f8d208 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/issuer/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + issuer "knative.dev/eventing/pkg/client/certmanager/injection/informers/certmanager/v1/issuer" + fake "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/fake" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = issuer.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Certmanager().V1().Issuers() + return context.WithValue(ctx, issuer.Key{}, inf), inf.Informer() +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/issuer/filtered/fake/fake.go b/pkg/client/certmanager/injection/informers/certmanager/v1/issuer/filtered/fake/fake.go new file mode 100644 index 00000000000..d2e8f958fb7 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/issuer/filtered/fake/fake.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + filtered "knative.dev/eventing/pkg/client/certmanager/injection/informers/certmanager/v1/issuer/filtered" + factoryfiltered "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +var Get = filtered.Get + +func init() { + injection.Fake.RegisterFilteredInformers(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(factoryfiltered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := factoryfiltered.Get(ctx, selector) + inf := f.Certmanager().V1().Issuers() + ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/issuer/filtered/issuer.go b/pkg/client/certmanager/injection/informers/certmanager/v1/issuer/filtered/issuer.go new file mode 100644 index 00000000000..c09145efe4a --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/issuer/filtered/issuer.go @@ -0,0 +1,65 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package filtered + +import ( + context "context" + + v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1" + filtered "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterFilteredInformers(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct { + Selector string +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(filtered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := filtered.Get(ctx, selector) + inf := f.Certmanager().V1().Issuers() + ctx = context.WithValue(ctx, Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context, selector string) v1.IssuerInformer { + untyped := ctx.Value(Key{Selector: selector}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1.IssuerInformer with selector %s from context.", selector) + } + return untyped.(v1.IssuerInformer) +} diff --git a/pkg/client/certmanager/injection/informers/certmanager/v1/issuer/issuer.go b/pkg/client/certmanager/injection/informers/certmanager/v1/issuer/issuer.go new file mode 100644 index 00000000000..ea3ce85479c --- /dev/null +++ b/pkg/client/certmanager/injection/informers/certmanager/v1/issuer/issuer.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package issuer + +import ( + context "context" + + v1 "knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1" + factory "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory" + controller "knative.dev/pkg/controller" + logging "knative.dev/pkg/logging" +) + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func WithInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Certmanager().V1().Issuers() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1.IssuerInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/eventing/pkg/client/certmanager/informers/externalversions/certmanager/v1.IssuerInformer from context.") + } + return untyped.(v1.IssuerInformer) +} diff --git a/pkg/client/certmanager/injection/informers/factory/factory.go b/pkg/client/certmanager/injection/informers/factory/factory.go new file mode 100644 index 00000000000..feeac1a93e6 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/factory/factory.go @@ -0,0 +1,56 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
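The factory injection below honors any namespace scope already on the context: withInformerFactory passes WithNamespace through when injection.HasNamespaceScope reports one. A sketch, with "cert-manager" as an illustrative namespace:

package example

import (
	"context"

	factory "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory"
	"knative.dev/pkg/injection"
)

// scopeFactory must run before the injection framework constructs the
// factory; every informer handed out afterwards then lists and watches
// only the scoped namespace.
func scopeFactory(ctx context.Context) context.Context {
	return injection.WithNamespaceScope(ctx, "cert-manager")
}

// Later, anything holding the prepared context can grab the shared factory.
func sharedFactory(ctx context.Context) {
	_ = factory.Get(ctx) // panics if injection never populated the context
}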
+ +package factory + +import ( + context "context" + + externalversions "knative.dev/eventing/pkg/client/certmanager/informers/externalversions" + client "knative.dev/eventing/pkg/client/certmanager/injection/client" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformerFactory(withInformerFactory) +} + +// Key is used as the key for associating information with a context.Context. +type Key struct{} + +func withInformerFactory(ctx context.Context) context.Context { + c := client.Get(ctx) + opts := make([]externalversions.SharedInformerOption, 0, 1) + if injection.HasNamespaceScope(ctx) { + opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) + } + return context.WithValue(ctx, Key{}, + externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)) +} + +// Get extracts the InformerFactory from the context. +func Get(ctx context.Context) externalversions.SharedInformerFactory { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/eventing/pkg/client/certmanager/informers/externalversions.SharedInformerFactory from context.") + } + return untyped.(externalversions.SharedInformerFactory) +} diff --git a/pkg/client/certmanager/injection/informers/factory/fake/fake.go b/pkg/client/certmanager/injection/informers/factory/fake/fake.go new file mode 100644 index 00000000000..c608b86107a --- /dev/null +++ b/pkg/client/certmanager/injection/informers/factory/fake/fake.go @@ -0,0 +1,45 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
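+
+// Importing this package for its side effect swaps in the fake variant: init
+// registers withInformerFactory on injection.Fake, so the factory is built
+// from the fake clientset rather than a live API connection.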
+ +package fake + +import ( + context "context" + + externalversions "knative.dev/eventing/pkg/client/certmanager/informers/externalversions" + fake "knative.dev/eventing/pkg/client/certmanager/injection/client/fake" + factory "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = factory.Get + +func init() { + injection.Fake.RegisterInformerFactory(withInformerFactory) +} + +func withInformerFactory(ctx context.Context) context.Context { + c := fake.Get(ctx) + opts := make([]externalversions.SharedInformerOption, 0, 1) + if injection.HasNamespaceScope(ctx) { + opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) + } + return context.WithValue(ctx, factory.Key{}, + externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)) +} diff --git a/pkg/client/certmanager/injection/informers/factory/filtered/fake/fake_filtered_factory.go b/pkg/client/certmanager/injection/informers/factory/filtered/fake/fake_filtered_factory.go new file mode 100644 index 00000000000..2ce583dd428 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/factory/filtered/fake/fake_filtered_factory.go @@ -0,0 +1,60 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
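+
+// Fake counterpart of the filtered factory: one factory is materialized per
+// label selector. Callers must seed the context with filtered.WithSelectors
+// before injection runs; otherwise withInformerFactory panics on the missing
+// LabelKey value.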
+ +package fakeFilteredFactory + +import ( + context "context" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + externalversions "knative.dev/eventing/pkg/client/certmanager/informers/externalversions" + fake "knative.dev/eventing/pkg/client/certmanager/injection/client/fake" + filtered "knative.dev/eventing/pkg/client/certmanager/injection/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +var Get = filtered.Get + +func init() { + injection.Fake.RegisterInformerFactory(withInformerFactory) +} + +func withInformerFactory(ctx context.Context) context.Context { + c := fake.Get(ctx) + untyped := ctx.Value(filtered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + for _, selector := range labelSelectors { + selectorVal := selector + opts := []externalversions.SharedInformerOption{} + if injection.HasNamespaceScope(ctx) { + opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) + } + opts = append(opts, externalversions.WithTweakListOptions(func(l *v1.ListOptions) { + l.LabelSelector = selectorVal + })) + ctx = context.WithValue(ctx, filtered.Key{Selector: selectorVal}, + externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)) + } + return ctx +} diff --git a/pkg/client/certmanager/injection/informers/factory/filtered/filtered_factory.go b/pkg/client/certmanager/injection/informers/factory/filtered/filtered_factory.go new file mode 100644 index 00000000000..81197395057 --- /dev/null +++ b/pkg/client/certmanager/injection/informers/factory/filtered/filtered_factory.go @@ -0,0 +1,78 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package filteredFactory + +import ( + context "context" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + externalversions "knative.dev/eventing/pkg/client/certmanager/informers/externalversions" + client "knative.dev/eventing/pkg/client/certmanager/injection/client" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformerFactory(withInformerFactory) +} + +// Key is used as the key for associating information with a context.Context. 
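+// Each selector registered through WithSelectors yields its own
+// SharedInformerFactory, stored in the context under Key{Selector: selector}.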
+type Key struct { + Selector string +} + +type LabelKey struct{} + +func WithSelectors(ctx context.Context, selector ...string) context.Context { + return context.WithValue(ctx, LabelKey{}, selector) +} + +func withInformerFactory(ctx context.Context) context.Context { + c := client.Get(ctx) + untyped := ctx.Value(LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + for _, selector := range labelSelectors { + selectorVal := selector + opts := []externalversions.SharedInformerOption{} + if injection.HasNamespaceScope(ctx) { + opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) + } + opts = append(opts, externalversions.WithTweakListOptions(func(l *v1.ListOptions) { + l.LabelSelector = selectorVal + })) + ctx = context.WithValue(ctx, Key{Selector: selectorVal}, + externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)) + } + return ctx +} + +// Get extracts the InformerFactory from the context. +func Get(ctx context.Context, selector string) externalversions.SharedInformerFactory { + untyped := ctx.Value(Key{Selector: selector}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch knative.dev/eventing/pkg/client/certmanager/informers/externalversions.SharedInformerFactory with selector %s from context.", selector) + } + return untyped.(externalversions.SharedInformerFactory) +} diff --git a/pkg/client/certmanager/listers/acme/v1/challenge.go b/pkg/client/certmanager/listers/acme/v1/challenge.go new file mode 100644 index 00000000000..33ba82b624e --- /dev/null +++ b/pkg/client/certmanager/listers/acme/v1/challenge.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ChallengeLister helps list Challenges. +// All objects returned here must be treated as read-only. +type ChallengeLister interface { + // List lists all Challenges in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Challenge, err error) + // Challenges returns an object that can list and get Challenges. + Challenges(namespace string) ChallengeNamespaceLister + ChallengeListerExpansion +} + +// challengeLister implements the ChallengeLister interface. +type challengeLister struct { + indexer cache.Indexer +} + +// NewChallengeLister returns a new ChallengeLister. +func NewChallengeLister(indexer cache.Indexer) ChallengeLister { + return &challengeLister{indexer: indexer} +} + +// List lists all Challenges in the indexer. 
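+// The selector is evaluated against the shared cache only; no API call is made.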
+func (s *challengeLister) List(selector labels.Selector) (ret []*v1.Challenge, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Challenge)) + }) + return ret, err +} + +// Challenges returns an object that can list and get Challenges. +func (s *challengeLister) Challenges(namespace string) ChallengeNamespaceLister { + return challengeNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ChallengeNamespaceLister helps list and get Challenges. +// All objects returned here must be treated as read-only. +type ChallengeNamespaceLister interface { + // List lists all Challenges in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Challenge, err error) + // Get retrieves the Challenge from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Challenge, error) + ChallengeNamespaceListerExpansion +} + +// challengeNamespaceLister implements the ChallengeNamespaceLister +// interface. +type challengeNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Challenges in the indexer for a given namespace. +func (s challengeNamespaceLister) List(selector labels.Selector) (ret []*v1.Challenge, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Challenge)) + }) + return ret, err +} + +// Get retrieves the Challenge from the indexer for a given namespace and name. +func (s challengeNamespaceLister) Get(name string) (*v1.Challenge, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("challenge"), name) + } + return obj.(*v1.Challenge), nil +} diff --git a/pkg/client/certmanager/listers/acme/v1/expansion_generated.go b/pkg/client/certmanager/listers/acme/v1/expansion_generated.go new file mode 100644 index 00000000000..3ac442b5f7c --- /dev/null +++ b/pkg/client/certmanager/listers/acme/v1/expansion_generated.go @@ -0,0 +1,35 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// ChallengeListerExpansion allows custom methods to be added to +// ChallengeLister. +type ChallengeListerExpansion interface{} + +// ChallengeNamespaceListerExpansion allows custom methods to be added to +// ChallengeNamespaceLister. +type ChallengeNamespaceListerExpansion interface{} + +// OrderListerExpansion allows custom methods to be added to +// OrderLister. +type OrderListerExpansion interface{} + +// OrderNamespaceListerExpansion allows custom methods to be added to +// OrderNamespaceLister. 
+type OrderNamespaceListerExpansion interface{} diff --git a/pkg/client/certmanager/listers/acme/v1/order.go b/pkg/client/certmanager/listers/acme/v1/order.go new file mode 100644 index 00000000000..7c6d360f6d9 --- /dev/null +++ b/pkg/client/certmanager/listers/acme/v1/order.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// OrderLister helps list Orders. +// All objects returned here must be treated as read-only. +type OrderLister interface { + // List lists all Orders in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Order, err error) + // Orders returns an object that can list and get Orders. + Orders(namespace string) OrderNamespaceLister + OrderListerExpansion +} + +// orderLister implements the OrderLister interface. +type orderLister struct { + indexer cache.Indexer +} + +// NewOrderLister returns a new OrderLister. +func NewOrderLister(indexer cache.Indexer) OrderLister { + return &orderLister{indexer: indexer} +} + +// List lists all Orders in the indexer. +func (s *orderLister) List(selector labels.Selector) (ret []*v1.Order, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Order)) + }) + return ret, err +} + +// Orders returns an object that can list and get Orders. +func (s *orderLister) Orders(namespace string) OrderNamespaceLister { + return orderNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// OrderNamespaceLister helps list and get Orders. +// All objects returned here must be treated as read-only. +type OrderNamespaceLister interface { + // List lists all Orders in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Order, err error) + // Get retrieves the Order from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Order, error) + OrderNamespaceListerExpansion +} + +// orderNamespaceLister implements the OrderNamespaceLister +// interface. +type orderNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Orders in the indexer for a given namespace. +func (s orderNamespaceLister) List(selector labels.Selector) (ret []*v1.Order, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Order)) + }) + return ret, err +} + +// Get retrieves the Order from the indexer for a given namespace and name. 
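+// A cache miss is returned as a standard NotFound API error, so callers can
+// detect it with IsNotFound from k8s.io/apimachinery/pkg/api/errors.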
+func (s orderNamespaceLister) Get(name string) (*v1.Order, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("order"), name) + } + return obj.(*v1.Order), nil +} diff --git a/pkg/client/certmanager/listers/certmanager/v1/certificate.go b/pkg/client/certmanager/listers/certmanager/v1/certificate.go new file mode 100644 index 00000000000..7787987e7a5 --- /dev/null +++ b/pkg/client/certmanager/listers/certmanager/v1/certificate.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CertificateLister helps list Certificates. +// All objects returned here must be treated as read-only. +type CertificateLister interface { + // List lists all Certificates in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Certificate, err error) + // Certificates returns an object that can list and get Certificates. + Certificates(namespace string) CertificateNamespaceLister + CertificateListerExpansion +} + +// certificateLister implements the CertificateLister interface. +type certificateLister struct { + indexer cache.Indexer +} + +// NewCertificateLister returns a new CertificateLister. +func NewCertificateLister(indexer cache.Indexer) CertificateLister { + return &certificateLister{indexer: indexer} +} + +// List lists all Certificates in the indexer. +func (s *certificateLister) List(selector labels.Selector) (ret []*v1.Certificate, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Certificate)) + }) + return ret, err +} + +// Certificates returns an object that can list and get Certificates. +func (s *certificateLister) Certificates(namespace string) CertificateNamespaceLister { + return certificateNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CertificateNamespaceLister helps list and get Certificates. +// All objects returned here must be treated as read-only. +type CertificateNamespaceLister interface { + // List lists all Certificates in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Certificate, err error) + // Get retrieves the Certificate from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Certificate, error) + CertificateNamespaceListerExpansion +} + +// certificateNamespaceLister implements the CertificateNamespaceLister +// interface. 
+type certificateNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Certificates in the indexer for a given namespace. +func (s certificateNamespaceLister) List(selector labels.Selector) (ret []*v1.Certificate, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Certificate)) + }) + return ret, err +} + +// Get retrieves the Certificate from the indexer for a given namespace and name. +func (s certificateNamespaceLister) Get(name string) (*v1.Certificate, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("certificate"), name) + } + return obj.(*v1.Certificate), nil +} diff --git a/pkg/client/certmanager/listers/certmanager/v1/certificaterequest.go b/pkg/client/certmanager/listers/certmanager/v1/certificaterequest.go new file mode 100644 index 00000000000..bbb6860864b --- /dev/null +++ b/pkg/client/certmanager/listers/certmanager/v1/certificaterequest.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CertificateRequestLister helps list CertificateRequests. +// All objects returned here must be treated as read-only. +type CertificateRequestLister interface { + // List lists all CertificateRequests in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.CertificateRequest, err error) + // CertificateRequests returns an object that can list and get CertificateRequests. + CertificateRequests(namespace string) CertificateRequestNamespaceLister + CertificateRequestListerExpansion +} + +// certificateRequestLister implements the CertificateRequestLister interface. +type certificateRequestLister struct { + indexer cache.Indexer +} + +// NewCertificateRequestLister returns a new CertificateRequestLister. +func NewCertificateRequestLister(indexer cache.Indexer) CertificateRequestLister { + return &certificateRequestLister{indexer: indexer} +} + +// List lists all CertificateRequests in the indexer. +func (s *certificateRequestLister) List(selector labels.Selector) (ret []*v1.CertificateRequest, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.CertificateRequest)) + }) + return ret, err +} + +// CertificateRequests returns an object that can list and get CertificateRequests. +func (s *certificateRequestLister) CertificateRequests(namespace string) CertificateRequestNamespaceLister { + return certificateRequestNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CertificateRequestNamespaceLister helps list and get CertificateRequests. 
+// All objects returned here must be treated as read-only. +type CertificateRequestNamespaceLister interface { + // List lists all CertificateRequests in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.CertificateRequest, err error) + // Get retrieves the CertificateRequest from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.CertificateRequest, error) + CertificateRequestNamespaceListerExpansion +} + +// certificateRequestNamespaceLister implements the CertificateRequestNamespaceLister +// interface. +type certificateRequestNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all CertificateRequests in the indexer for a given namespace. +func (s certificateRequestNamespaceLister) List(selector labels.Selector) (ret []*v1.CertificateRequest, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.CertificateRequest)) + }) + return ret, err +} + +// Get retrieves the CertificateRequest from the indexer for a given namespace and name. +func (s certificateRequestNamespaceLister) Get(name string) (*v1.CertificateRequest, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("certificaterequest"), name) + } + return obj.(*v1.CertificateRequest), nil +} diff --git a/pkg/client/certmanager/listers/certmanager/v1/clusterissuer.go b/pkg/client/certmanager/listers/certmanager/v1/clusterissuer.go new file mode 100644 index 00000000000..0a59a5b8fb4 --- /dev/null +++ b/pkg/client/certmanager/listers/certmanager/v1/clusterissuer.go @@ -0,0 +1,68 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterIssuerLister helps list ClusterIssuers. +// All objects returned here must be treated as read-only. +type ClusterIssuerLister interface { + // List lists all ClusterIssuers in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ClusterIssuer, err error) + // Get retrieves the ClusterIssuer from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ClusterIssuer, error) + ClusterIssuerListerExpansion +} + +// clusterIssuerLister implements the ClusterIssuerLister interface. +type clusterIssuerLister struct { + indexer cache.Indexer +} + +// NewClusterIssuerLister returns a new ClusterIssuerLister. 
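+// ClusterIssuer is cluster-scoped, so indexer keys are plain object names
+// with no namespace prefix.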
+func NewClusterIssuerLister(indexer cache.Indexer) ClusterIssuerLister { + return &clusterIssuerLister{indexer: indexer} +} + +// List lists all ClusterIssuers in the indexer. +func (s *clusterIssuerLister) List(selector labels.Selector) (ret []*v1.ClusterIssuer, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterIssuer)) + }) + return ret, err +} + +// Get retrieves the ClusterIssuer from the index for a given name. +func (s *clusterIssuerLister) Get(name string) (*v1.ClusterIssuer, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("clusterissuer"), name) + } + return obj.(*v1.ClusterIssuer), nil +} diff --git a/pkg/client/certmanager/listers/certmanager/v1/expansion_generated.go b/pkg/client/certmanager/listers/certmanager/v1/expansion_generated.go new file mode 100644 index 00000000000..d8a20ac3c8b --- /dev/null +++ b/pkg/client/certmanager/listers/certmanager/v1/expansion_generated.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// CertificateListerExpansion allows custom methods to be added to +// CertificateLister. +type CertificateListerExpansion interface{} + +// CertificateNamespaceListerExpansion allows custom methods to be added to +// CertificateNamespaceLister. +type CertificateNamespaceListerExpansion interface{} + +// CertificateRequestListerExpansion allows custom methods to be added to +// CertificateRequestLister. +type CertificateRequestListerExpansion interface{} + +// CertificateRequestNamespaceListerExpansion allows custom methods to be added to +// CertificateRequestNamespaceLister. +type CertificateRequestNamespaceListerExpansion interface{} + +// ClusterIssuerListerExpansion allows custom methods to be added to +// ClusterIssuerLister. +type ClusterIssuerListerExpansion interface{} + +// IssuerListerExpansion allows custom methods to be added to +// IssuerLister. +type IssuerListerExpansion interface{} + +// IssuerNamespaceListerExpansion allows custom methods to be added to +// IssuerNamespaceLister. +type IssuerNamespaceListerExpansion interface{} diff --git a/pkg/client/certmanager/listers/certmanager/v1/issuer.go b/pkg/client/certmanager/listers/certmanager/v1/issuer.go new file mode 100644 index 00000000000..4e91d70d91b --- /dev/null +++ b/pkg/client/certmanager/listers/certmanager/v1/issuer.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// IssuerLister helps list Issuers. +// All objects returned here must be treated as read-only. +type IssuerLister interface { + // List lists all Issuers in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Issuer, err error) + // Issuers returns an object that can list and get Issuers. + Issuers(namespace string) IssuerNamespaceLister + IssuerListerExpansion +} + +// issuerLister implements the IssuerLister interface. +type issuerLister struct { + indexer cache.Indexer +} + +// NewIssuerLister returns a new IssuerLister. +func NewIssuerLister(indexer cache.Indexer) IssuerLister { + return &issuerLister{indexer: indexer} +} + +// List lists all Issuers in the indexer. +func (s *issuerLister) List(selector labels.Selector) (ret []*v1.Issuer, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Issuer)) + }) + return ret, err +} + +// Issuers returns an object that can list and get Issuers. +func (s *issuerLister) Issuers(namespace string) IssuerNamespaceLister { + return issuerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// IssuerNamespaceLister helps list and get Issuers. +// All objects returned here must be treated as read-only. +type IssuerNamespaceLister interface { + // List lists all Issuers in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Issuer, err error) + // Get retrieves the Issuer from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Issuer, error) + IssuerNamespaceListerExpansion +} + +// issuerNamespaceLister implements the IssuerNamespaceLister +// interface. +type issuerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Issuers in the indexer for a given namespace. +func (s issuerNamespaceLister) List(selector labels.Selector) (ret []*v1.Issuer, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Issuer)) + }) + return ret, err +} + +// Get retrieves the Issuer from the indexer for a given namespace and name. +func (s issuerNamespaceLister) Get(name string) (*v1.Issuer, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("issuer"), name) + } + return obj.(*v1.Issuer), nil +} diff --git a/vendor/github.com/cert-manager/cert-manager/LICENSE b/vendor/github.com/cert-manager/cert-manager/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/cert-manager/cert-manager/LICENSES b/vendor/github.com/cert-manager/cert-manager/LICENSES new file mode 100644 index 00000000000..22764fe9def --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/LICENSES @@ -0,0 +1,169 @@ +cloud.google.com/go/compute/metadata,https://github.com/googleapis/google-cloud-go/blob/compute/metadata/v0.2.3/compute/metadata/LICENSE,Apache-2.0 +github.com/Azure/azure-sdk-for-go,https://github.com/Azure/azure-sdk-for-go/blob/v68.0.0/LICENSE.txt,MIT +github.com/Azure/go-autorest/autorest,https://github.com/Azure/go-autorest/blob/autorest/v0.11.29/autorest/LICENSE,Apache-2.0 +github.com/Azure/go-autorest/autorest/adal,https://github.com/Azure/go-autorest/blob/autorest/adal/v0.9.23/autorest/adal/LICENSE,Apache-2.0 +github.com/Azure/go-autorest/autorest/date,https://github.com/Azure/go-autorest/blob/autorest/date/v0.3.0/autorest/date/LICENSE,Apache-2.0 +github.com/Azure/go-autorest/autorest/to,https://github.com/Azure/go-autorest/blob/autorest/to/v0.4.0/autorest/to/LICENSE,Apache-2.0 +github.com/Azure/go-autorest/autorest/validation,https://github.com/Azure/go-autorest/blob/autorest/validation/v0.3.1/autorest/validation/LICENSE,Apache-2.0 +github.com/Azure/go-autorest/logger,https://github.com/Azure/go-autorest/blob/logger/v0.2.1/logger/LICENSE,Apache-2.0 +github.com/Azure/go-autorest/tracing,https://github.com/Azure/go-autorest/blob/tracing/v0.6.0/tracing/LICENSE,Apache-2.0 +github.com/Azure/go-ntlmssp,https://github.com/Azure/go-ntlmssp/blob/754e69321358/LICENSE,MIT +github.com/NYTimes/gziphandler,https://github.com/NYTimes/gziphandler/blob/v1.1.1/LICENSE,Apache-2.0 +github.com/Venafi/vcert/v4,https://github.com/Venafi/vcert/blob/69f417ae176d/LICENSE,Apache-2.0 +github.com/akamai/AkamaiOPEN-edgegrid-golang,https://github.com/akamai/AkamaiOPEN-edgegrid-golang/blob/v1.2.2/LICENSE,Apache-2.0 +github.com/antlr/antlr4/runtime/Go/antlr/v4,https://github.com/antlr/antlr4/blob/8188dc5388df/runtime/Go/antlr/v4/LICENSE,BSD-3-Clause +github.com/asaskevich/govalidator,https://github.com/asaskevich/govalidator/blob/21a406dcc535/LICENSE,MIT +github.com/aws/aws-sdk-go,https://github.com/aws/aws-sdk-go/blob/v1.45.7/LICENSE.txt,Apache-2.0 +github.com/aws/aws-sdk-go/internal/sync/singleflight,https://github.com/aws/aws-sdk-go/blob/v1.45.7/internal/sync/singleflight/LICENSE,BSD-3-Clause +github.com/beorn7/perks/quantile,https://github.com/beorn7/perks/blob/v1.0.1/LICENSE,MIT +github.com/blang/semver/v4,https://github.com/blang/semver/blob/v4.0.0/v4/LICENSE,MIT +github.com/cenkalti/backoff/v3,https://github.com/cenkalti/backoff/blob/v3.2.2/LICENSE,MIT +github.com/cenkalti/backoff/v4,https://github.com/cenkalti/backoff/blob/v4.2.1/LICENSE,MIT +github.com/cert-manager/cert-manager,https://github.com/cert-manager/cert-manager/blob/HEAD/LICENSE,Apache-2.0 +github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/azuredns,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/azuredns/LICENSE,MIT +github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/clouddns,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/clouddns/LICENSE,MIT +github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/cloudflare,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/cloudflare/LICENSE,MIT +github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/route53,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/route53/LICENSE,MIT 
+github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/util,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/util/LICENSE,MIT +github.com/cespare/xxhash/v2,https://github.com/cespare/xxhash/blob/v2.2.0/LICENSE.txt,MIT +github.com/coreos/go-semver/semver,https://github.com/coreos/go-semver/blob/v0.3.1/LICENSE,Apache-2.0 +github.com/coreos/go-systemd/v22,https://github.com/coreos/go-systemd/blob/v22.5.0/LICENSE,Apache-2.0 +github.com/cpu/goacmedns,https://github.com/cpu/goacmedns/blob/v0.1.1/LICENSE,MIT +github.com/davecgh/go-spew/spew,https://github.com/davecgh/go-spew/blob/d8f796af33cc/LICENSE,ISC +github.com/digitalocean/godo,https://github.com/digitalocean/godo/blob/v1.102.1/LICENSE.txt,MIT +github.com/digitalocean/godo,https://github.com/digitalocean/godo/blob/v1.102.1/LICENSE.txt,BSD-3-Clause +github.com/emicklei/go-restful/v3,https://github.com/emicklei/go-restful/blob/v3.11.0/LICENSE,MIT +github.com/evanphx/json-patch,https://github.com/evanphx/json-patch/blob/v5.6.0/LICENSE,BSD-3-Clause +github.com/evanphx/json-patch/v5,https://github.com/evanphx/json-patch/blob/v5.6.0/v5/LICENSE,BSD-3-Clause +github.com/felixge/httpsnoop,https://github.com/felixge/httpsnoop/blob/v1.0.4/LICENSE.txt,MIT +github.com/fsnotify/fsnotify,https://github.com/fsnotify/fsnotify/blob/v1.6.0/LICENSE,BSD-3-Clause +github.com/go-asn1-ber/asn1-ber,https://github.com/go-asn1-ber/asn1-ber/blob/v1.5.4/LICENSE,MIT +github.com/go-jose/go-jose/v3,https://github.com/go-jose/go-jose/blob/v3.0.1/LICENSE,Apache-2.0 +github.com/go-jose/go-jose/v3/json,https://github.com/go-jose/go-jose/blob/v3.0.1/json/LICENSE,BSD-3-Clause +github.com/go-ldap/ldap/v3,https://github.com/go-ldap/ldap/blob/v3.4.5/v3/LICENSE,MIT +github.com/go-logr/logr,https://github.com/go-logr/logr/blob/v1.3.0/LICENSE,Apache-2.0 +github.com/go-logr/stdr,https://github.com/go-logr/stdr/blob/v1.2.2/LICENSE,Apache-2.0 +github.com/go-logr/zapr,https://github.com/go-logr/zapr/blob/v1.2.4/LICENSE,Apache-2.0 +github.com/go-openapi/jsonpointer,https://github.com/go-openapi/jsonpointer/blob/v0.19.6/LICENSE,Apache-2.0 +github.com/go-openapi/jsonreference,https://github.com/go-openapi/jsonreference/blob/v0.20.2/LICENSE,Apache-2.0 +github.com/go-openapi/swag,https://github.com/go-openapi/swag/blob/v0.22.3/LICENSE,Apache-2.0 +github.com/gogo/protobuf,https://github.com/gogo/protobuf/blob/v1.3.2/LICENSE,BSD-3-Clause +github.com/golang-jwt/jwt/v4,https://github.com/golang-jwt/jwt/blob/v4.5.0/LICENSE,MIT +github.com/golang/groupcache/lru,https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE,Apache-2.0 +github.com/golang/protobuf,https://github.com/golang/protobuf/blob/v1.5.3/LICENSE,BSD-3-Clause +github.com/golang/snappy,https://github.com/golang/snappy/blob/v0.0.4/LICENSE,BSD-3-Clause +github.com/google/cel-go,https://github.com/google/cel-go/blob/v0.16.0/LICENSE,Apache-2.0 +github.com/google/cel-go,https://github.com/google/cel-go/blob/v0.16.0/LICENSE,BSD-3-Clause +github.com/google/gnostic-models,https://github.com/google/gnostic-models/blob/v0.6.8/LICENSE,Apache-2.0 +github.com/google/go-cmp/cmp,https://github.com/google/go-cmp/blob/v0.6.0/LICENSE,BSD-3-Clause +github.com/google/go-querystring/query,https://github.com/google/go-querystring/blob/v1.1.0/LICENSE,BSD-3-Clause +github.com/google/gofuzz,https://github.com/google/gofuzz/blob/v1.2.0/LICENSE,Apache-2.0 +github.com/google/s2a-go,https://github.com/google/s2a-go/blob/v0.1.7/LICENSE.md,Apache-2.0 
+github.com/google/uuid,https://github.com/google/uuid/blob/v1.3.1/LICENSE,BSD-3-Clause +github.com/googleapis/enterprise-certificate-proxy/client,https://github.com/googleapis/enterprise-certificate-proxy/blob/v0.2.5/LICENSE,Apache-2.0 +github.com/googleapis/gax-go/v2,https://github.com/googleapis/gax-go/blob/v2.12.0/v2/LICENSE,BSD-3-Clause +github.com/grpc-ecosystem/go-grpc-prometheus,https://github.com/grpc-ecosystem/go-grpc-prometheus/blob/v1.2.0/LICENSE,Apache-2.0 +github.com/grpc-ecosystem/grpc-gateway/v2,https://github.com/grpc-ecosystem/grpc-gateway/blob/v2.16.0/LICENSE.txt,BSD-3-Clause +github.com/hashicorp/errwrap,https://github.com/hashicorp/errwrap/blob/v1.1.0/LICENSE,MPL-2.0 +github.com/hashicorp/go-cleanhttp,https://github.com/hashicorp/go-cleanhttp/blob/v0.5.2/LICENSE,MPL-2.0 +github.com/hashicorp/go-multierror,https://github.com/hashicorp/go-multierror/blob/v1.1.1/LICENSE,MPL-2.0 +github.com/hashicorp/go-retryablehttp,https://github.com/hashicorp/go-retryablehttp/blob/v0.7.4/LICENSE,MPL-2.0 +github.com/hashicorp/go-rootcerts,https://github.com/hashicorp/go-rootcerts/blob/v1.0.2/LICENSE,MPL-2.0 +github.com/hashicorp/go-secure-stdlib/parseutil,https://github.com/hashicorp/go-secure-stdlib/blob/parseutil/v0.1.7/parseutil/LICENSE,MPL-2.0 +github.com/hashicorp/go-secure-stdlib/strutil,https://github.com/hashicorp/go-secure-stdlib/blob/strutil/v0.1.2/strutil/LICENSE,MPL-2.0 +github.com/hashicorp/go-sockaddr,https://github.com/hashicorp/go-sockaddr/blob/v1.0.2/LICENSE,MPL-2.0 +github.com/hashicorp/hcl,https://github.com/hashicorp/hcl/blob/v1.0.1-vault-5/LICENSE,MPL-2.0 +github.com/hashicorp/vault/api,https://github.com/hashicorp/vault/blob/api/v1.10.0/api/LICENSE,MPL-2.0 +github.com/hashicorp/vault/sdk/helper,https://github.com/hashicorp/vault/blob/sdk/v0.10.0/sdk/LICENSE,MPL-2.0 +github.com/imdario/mergo,https://github.com/imdario/mergo/blob/v0.3.13/LICENSE,BSD-3-Clause +github.com/jmespath/go-jmespath,https://github.com/jmespath/go-jmespath/blob/b0104c826a24/LICENSE,Apache-2.0 +github.com/josharian/intern,https://github.com/josharian/intern/blob/v1.0.0/license.md,MIT +github.com/json-iterator/go,https://github.com/json-iterator/go/blob/v1.1.12/LICENSE,MIT +github.com/kr/pretty,https://github.com/kr/pretty/blob/v0.3.1/License,MIT +github.com/kr/text,https://github.com/kr/text/blob/v0.2.0/License,MIT +github.com/mailru/easyjson,https://github.com/mailru/easyjson/blob/v0.7.7/LICENSE,MIT +github.com/matttproud/golang_protobuf_extensions/pbutil,https://github.com/matttproud/golang_protobuf_extensions/blob/v1.0.4/LICENSE,Apache-2.0 +github.com/miekg/dns,https://github.com/miekg/dns/blob/v1.1.55/LICENSE,BSD-3-Clause +github.com/mitchellh/go-homedir,https://github.com/mitchellh/go-homedir/blob/v1.1.0/LICENSE,MIT +github.com/mitchellh/mapstructure,https://github.com/mitchellh/mapstructure/blob/v1.5.0/LICENSE,MIT +github.com/modern-go/concurrent,https://github.com/modern-go/concurrent/blob/bacd9c7ef1dd/LICENSE,Apache-2.0 +github.com/modern-go/reflect2,https://github.com/modern-go/reflect2/blob/v1.0.2/LICENSE,Apache-2.0 +github.com/munnerz/goautoneg,https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE,BSD-3-Clause +github.com/patrickmn/go-cache,https://github.com/patrickmn/go-cache/blob/v2.1.0/LICENSE,MIT +github.com/pavlo-v-chernykh/keystore-go/v4,https://github.com/pavlo-v-chernykh/keystore-go/blob/v4.5.0/LICENSE,MIT +github.com/pierrec/lz4,https://github.com/pierrec/lz4/blob/v2.6.1/LICENSE,BSD-3-Clause 
+github.com/pkg/errors,https://github.com/pkg/errors/blob/v0.9.1/LICENSE,BSD-2-Clause +github.com/prometheus/client_golang/prometheus,https://github.com/prometheus/client_golang/blob/v1.16.0/LICENSE,Apache-2.0 +github.com/prometheus/client_model/go,https://github.com/prometheus/client_model/blob/v0.4.0/LICENSE,Apache-2.0 +github.com/prometheus/common,https://github.com/prometheus/common/blob/v0.44.0/LICENSE,Apache-2.0 +github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg,https://github.com/prometheus/common/blob/v0.44.0/internal/bitbucket.org/ww/goautoneg/README.txt,BSD-3-Clause +github.com/prometheus/procfs,https://github.com/prometheus/procfs/blob/v0.10.1/LICENSE,Apache-2.0 +github.com/rogpeppe/go-internal/fmtsort,https://github.com/rogpeppe/go-internal/blob/v1.11.0/LICENSE,BSD-3-Clause +github.com/ryanuber/go-glob,https://github.com/ryanuber/go-glob/blob/v1.0.0/LICENSE,MIT +github.com/sirupsen/logrus,https://github.com/sirupsen/logrus/blob/v1.9.3/LICENSE,MIT +github.com/spf13/cobra,https://github.com/spf13/cobra/blob/v1.7.0/LICENSE.txt,Apache-2.0 +github.com/spf13/pflag,https://github.com/spf13/pflag/blob/v1.0.5/LICENSE,BSD-3-Clause +github.com/stoewer/go-strcase,https://github.com/stoewer/go-strcase/blob/v1.2.0/LICENSE,MIT +github.com/youmark/pkcs8,https://github.com/youmark/pkcs8/blob/1326539a0a0a/LICENSE,MIT +go.etcd.io/etcd/api/v3,https://github.com/etcd-io/etcd/blob/api/v3.5.9/api/LICENSE,Apache-2.0 +go.etcd.io/etcd/client/pkg/v3,https://github.com/etcd-io/etcd/blob/client/pkg/v3.5.9/client/pkg/LICENSE,Apache-2.0 +go.etcd.io/etcd/client/v3,https://github.com/etcd-io/etcd/blob/client/v3.5.9/client/v3/LICENSE,Apache-2.0 +go.opencensus.io,https://github.com/census-instrumentation/opencensus-go/blob/v0.24.0/LICENSE,Apache-2.0 +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc,https://github.com/open-telemetry/opentelemetry-go-contrib/blob/instrumentation/google.golang.org/grpc/otelgrpc/v0.46.0/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE,Apache-2.0 +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp,https://github.com/open-telemetry/opentelemetry-go-contrib/blob/instrumentation/net/http/otelhttp/v0.46.0/instrumentation/net/http/otelhttp/LICENSE,Apache-2.0 +go.opentelemetry.io/otel,https://github.com/open-telemetry/opentelemetry-go/blob/v1.20.0/LICENSE,Apache-2.0 +go.opentelemetry.io/otel/exporters/otlp/otlptrace,https://github.com/open-telemetry/opentelemetry-go/blob/exporters/otlp/otlptrace/v1.20.0/exporters/otlp/otlptrace/LICENSE,Apache-2.0 +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc,https://github.com/open-telemetry/opentelemetry-go/blob/exporters/otlp/otlptrace/otlptracegrpc/v1.20.0/exporters/otlp/otlptrace/otlptracegrpc/LICENSE,Apache-2.0 +go.opentelemetry.io/otel/metric,https://github.com/open-telemetry/opentelemetry-go/blob/metric/v1.20.0/metric/LICENSE,Apache-2.0 +go.opentelemetry.io/otel/sdk,https://github.com/open-telemetry/opentelemetry-go/blob/sdk/v1.20.0/sdk/LICENSE,Apache-2.0 +go.opentelemetry.io/otel/trace,https://github.com/open-telemetry/opentelemetry-go/blob/trace/v1.20.0/trace/LICENSE,Apache-2.0 +go.opentelemetry.io/proto/otlp,https://github.com/open-telemetry/opentelemetry-proto-go/blob/otlp/v1.0.0/otlp/LICENSE,Apache-2.0 +go.uber.org/multierr,https://github.com/uber-go/multierr/blob/v1.11.0/LICENSE.txt,MIT +go.uber.org/zap,https://github.com/uber-go/zap/blob/v1.25.0/LICENSE.txt,MIT +golang.org/x/crypto,https://cs.opensource.google/go/x/crypto/+/v0.14.0:LICENSE,BSD-3-Clause 
+golang.org/x/exp,https://cs.opensource.google/go/x/exp/+/92128663:LICENSE,BSD-3-Clause +golang.org/x/net,https://cs.opensource.google/go/x/net/+/v0.17.0:LICENSE,BSD-3-Clause +golang.org/x/oauth2,https://cs.opensource.google/go/x/oauth2/+/v0.12.0:LICENSE,BSD-3-Clause +golang.org/x/sync,https://cs.opensource.google/go/x/sync/+/v0.3.0:LICENSE,BSD-3-Clause +golang.org/x/sys,https://cs.opensource.google/go/x/sys/+/v0.14.0:LICENSE,BSD-3-Clause +golang.org/x/term,https://cs.opensource.google/go/x/term/+/v0.13.0:LICENSE,BSD-3-Clause +golang.org/x/text,https://cs.opensource.google/go/x/text/+/v0.13.0:LICENSE,BSD-3-Clause +golang.org/x/time/rate,https://cs.opensource.google/go/x/time/+/v0.3.0:LICENSE,BSD-3-Clause +gomodules.xyz/jsonpatch/v2,https://github.com/gomodules/jsonpatch/blob/v2.4.0/v2/LICENSE,Apache-2.0 +google.golang.org/api,https://github.com/googleapis/google-api-go-client/blob/v0.140.0/LICENSE,BSD-3-Clause +google.golang.org/api/internal/third_party/uritemplates,https://github.com/googleapis/google-api-go-client/blob/v0.140.0/internal/third_party/uritemplates/LICENSE,BSD-3-Clause +google.golang.org/genproto/googleapis/api,https://github.com/googleapis/go-genproto/blob/b8732ec3820d/googleapis/api/LICENSE,Apache-2.0 +google.golang.org/genproto/googleapis/rpc,https://github.com/googleapis/go-genproto/blob/2d3300fd4832/googleapis/rpc/LICENSE,Apache-2.0 +google.golang.org/grpc,https://github.com/grpc/grpc-go/blob/v1.59.0/LICENSE,Apache-2.0 +google.golang.org/protobuf,https://github.com/protocolbuffers/protobuf-go/blob/v1.31.0/LICENSE,BSD-3-Clause +gopkg.in/inf.v0,https://github.com/go-inf/inf/blob/v0.9.1/LICENSE,BSD-3-Clause +gopkg.in/ini.v1,https://github.com/go-ini/ini/blob/v1.62.0/LICENSE,Apache-2.0 +gopkg.in/natefinch/lumberjack.v2,https://github.com/natefinch/lumberjack/blob/v2.2.1/LICENSE,MIT +gopkg.in/yaml.v2,https://github.com/go-yaml/yaml/blob/v2.4.0/LICENSE,Apache-2.0 +gopkg.in/yaml.v3,https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE,MIT +k8s.io/api,https://github.com/kubernetes/api/blob/v0.28.1/LICENSE,Apache-2.0 +k8s.io/apiextensions-apiserver/pkg,https://github.com/kubernetes/apiextensions-apiserver/blob/v0.28.1/LICENSE,Apache-2.0 +k8s.io/apimachinery/pkg,https://github.com/kubernetes/apimachinery/blob/v0.28.1/LICENSE,Apache-2.0 +k8s.io/apimachinery/third_party/forked/golang,https://github.com/kubernetes/apimachinery/blob/v0.28.1/third_party/forked/golang/LICENSE,BSD-3-Clause +k8s.io/apiserver,https://github.com/kubernetes/apiserver/blob/v0.28.1/LICENSE,Apache-2.0 +k8s.io/client-go,https://github.com/kubernetes/client-go/blob/v0.28.1/LICENSE,Apache-2.0 +k8s.io/component-base,https://github.com/kubernetes/component-base/blob/v0.28.1/LICENSE,Apache-2.0 +k8s.io/klog/v2,https://github.com/kubernetes/klog/blob/v2.100.1/LICENSE,Apache-2.0 +k8s.io/kms,https://github.com/kubernetes/kms/blob/v0.28.1/LICENSE,Apache-2.0 +k8s.io/kube-aggregator/pkg/apis/apiregistration,https://github.com/kubernetes/kube-aggregator/blob/v0.28.1/LICENSE,Apache-2.0 +k8s.io/kube-openapi/pkg,https://github.com/kubernetes/kube-openapi/blob/d090da108d2f/LICENSE,Apache-2.0 +k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json,https://github.com/kubernetes/kube-openapi/blob/d090da108d2f/pkg/internal/third_party/go-json-experiment/json/LICENSE,BSD-3-Clause +k8s.io/kube-openapi/pkg/validation/errors,https://github.com/kubernetes/kube-openapi/blob/d090da108d2f/pkg/validation/errors/LICENSE,Apache-2.0 
+k8s.io/kube-openapi/pkg/validation/spec,https://github.com/kubernetes/kube-openapi/blob/d090da108d2f/pkg/validation/spec/LICENSE,Apache-2.0 +k8s.io/kube-openapi/pkg/validation/strfmt,https://github.com/kubernetes/kube-openapi/blob/d090da108d2f/pkg/validation/strfmt/LICENSE,Apache-2.0 +k8s.io/utils,https://github.com/kubernetes/utils/blob/3b25d923346b/LICENSE,Apache-2.0 +k8s.io/utils/internal/third_party/forked/golang,https://github.com/kubernetes/utils/blob/3b25d923346b/internal/third_party/forked/golang/LICENSE,BSD-3-Clause +sigs.k8s.io/apiserver-network-proxy/konnectivity-client,https://github.com/kubernetes-sigs/apiserver-network-proxy/blob/konnectivity-client/v0.1.2/konnectivity-client/LICENSE,Apache-2.0 +sigs.k8s.io/controller-runtime,https://github.com/kubernetes-sigs/controller-runtime/blob/v0.16.1/LICENSE,Apache-2.0 +sigs.k8s.io/gateway-api,https://github.com/kubernetes-sigs/gateway-api/blob/v0.8.0/LICENSE,Apache-2.0 +sigs.k8s.io/json,https://github.com/kubernetes-sigs/json/blob/bc3834ca7abd/LICENSE,Apache-2.0 +sigs.k8s.io/json,https://github.com/kubernetes-sigs/json/blob/bc3834ca7abd/LICENSE,BSD-3-Clause +sigs.k8s.io/structured-merge-diff/v4,https://github.com/kubernetes-sigs/structured-merge-diff/blob/v4.3.0/LICENSE,Apache-2.0 +sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.3.0/LICENSE,MIT +sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.3.0/LICENSE,BSD-3-Clause +software.sslmate.com/src/go-pkcs12,https://github.com/SSLMate/go-pkcs12/blob/v0.2.1/LICENSE,BSD-3-Clause diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/doc.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/doc.go new file mode 100644 index 00000000000..745de66dc2e --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=acme.cert-manager.io + +// Package acme contains types in the acme cert-manager API group +package acme + +const GroupName = "acme.cert-manager.io" diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/const.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/const.go new file mode 100644 index 00000000000..287ed2f6d7d --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/const.go @@ -0,0 +1,21 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +const ( + ACMEFinalizer = "finalizer.acme.cert-manager.io" +) diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/doc.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/doc.go new file mode 100644 index 00000000000..92b6583d629 --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 is the v1 version of the API. +// +k8s:deepcopy-gen=package,register +// +groupName=acme.cert-manager.io +package v1 diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/register.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/register.go new file mode 100644 index 00000000000..e36ef7cf216 --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/cert-manager/cert-manager/pkg/apis/acme" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: acme.GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Order{}, + &OrderList{}, + &Challenge{}, + &ChallengeList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types.go new file mode 100644 index 00000000000..edfc16f1e65 --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types.go @@ -0,0 +1,57 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // ACMECertificateHTTP01IngressNameOverride is an annotation used to override the ingress name. + // If this annotation is specified on a Certificate or Order resource when + // using the HTTP01 solver type, the ingress.name field of the HTTP01 + // solver's configuration will be set to the value given here. + // This is especially useful for users of Ingress controllers that maintain + // a 1:1 mapping between endpoint IP and Ingress resource. + ACMECertificateHTTP01IngressNameOverride = "acme.cert-manager.io/http01-override-ingress-name" + + // ACMECertificateHTTP01IngressClassOverride is an annotation used to override the ingress class. + // If this annotation is specified on a Certificate or Order resource when + // using the HTTP01 solver type, the ingress.class field of the HTTP01 + // solver's configuration will be set to the value given here. + // This is especially useful for users deploying many different ingress + // classes into a single cluster that want to be able to re-use a single + // solver for each ingress class. + ACMECertificateHTTP01IngressClassOverride = "acme.cert-manager.io/http01-override-ingress-class" + + // IngressEditInPlaceAnnotationKey is used to toggle the use of ingressClass instead + // of ingress on the created Certificate resource + IngressEditInPlaceAnnotationKey = "acme.cert-manager.io/http01-edit-in-place" + + // DomainLabelKey is added to the labels of a Pod serving an ACME challenge. + // Its value will be the hash of the domain name that is being verified. + DomainLabelKey = "acme.cert-manager.io/http-domain" + + // TokenLabelKey is added to the labels of a Pod serving an ACME challenge. + // Its value will be the hash of the challenge token that is being served by the pod. + TokenLabelKey = "acme.cert-manager.io/http-token" + + // SolverIdentificationLabelKey is added to the labels of a Pod serving an ACME challenge. + // Its value will be "true" if the Pod is an HTTP-01 solver.
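+ // An illustrative (non-upstream) label set as it might appear on a solver + // Pod; the hash values below are made-up placeholders: + // + // acme.cert-manager.io/http-domain: "2530887507" + // acme.cert-manager.io/http-token: "1044447910" + // acme.cert-manager.io/http01-solver: "true"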
+ SolverIdentificationLabelKey = "acme.cert-manager.io/http01-solver" +) + +const ( + OrderKind = "Order" + ChallengeKind = "Challenge" +) diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go new file mode 100644 index 00000000000..cfc4f241429 --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go @@ -0,0 +1,146 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + cmmeta "github.com/cert-manager/cert-manager/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:storageversion + +// Challenge is a type to represent a Challenge request with an ACME server +// +k8s:openapi-gen=true +// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state" +// +kubebuilder:printcolumn:name="Domain",type="string",JSONPath=".spec.dnsName" +// +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.reason",description="",priority=1 +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=challenges +type Challenge struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec ChallengeSpec `json:"spec"` + // +optional + Status ChallengeStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ChallengeList is a list of Challenges +type ChallengeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Challenge `json:"items"` +} + +type ChallengeSpec struct { + // The URL of the ACME Challenge resource for this challenge. + // This can be used to look up details about the status of this challenge. + URL string `json:"url"` + + // The URL to the ACME Authorization resource that this + // challenge is a part of. + AuthorizationURL string `json:"authorizationURL"` + + // dnsName is the identifier that this challenge is for, e.g. example.com. + // If the requested DNSName is a 'wildcard', this field MUST be set to the + // non-wildcard domain, e.g. for `*.example.com`, it must be `example.com`. + DNSName string `json:"dnsName"` + + // wildcard will be true if this challenge is for a wildcard identifier, + // for example '*.example.com'. + // +optional + Wildcard bool `json:"wildcard"` + + // The type of ACME challenge this resource represents. + // One of "HTTP-01" or "DNS-01". + Type ACMEChallengeType `json:"type"` + + // The ACME challenge token for this challenge.
+ // This is the raw value returned from the ACME server. + Token string `json:"token"` + + // The ACME challenge key for this challenge + // For HTTP01 challenges, this is the value that must be responded with to + // complete the HTTP01 challenge in the format: + // `<private key JWK thumbprint>.<challenge token>`. + // For DNS01 challenges, this is the base64 encoded SHA256 sum of the + // `<private key JWK thumbprint>.<challenge token>` + // text that must be set as the TXT record content. + Key string `json:"key"` + + // Contains the domain solving configuration that should be used to + // solve this challenge resource. + Solver ACMEChallengeSolver `json:"solver"` + + // References a properly configured ACME-type Issuer which should + // be used to create this Challenge. + // If the Issuer does not exist, processing will be retried. + // If the Issuer is not an 'ACME' Issuer, an error will be returned and the + // Challenge will be marked as failed. + IssuerRef cmmeta.ObjectReference `json:"issuerRef"` +} + +// The type of ACME challenge. Only HTTP-01 and DNS-01 are supported. +// +kubebuilder:validation:Enum=HTTP-01;DNS-01 +type ACMEChallengeType string + +const ( + // ACMEChallengeTypeHTTP01 denotes a Challenge is of type http-01 + // More info: https://letsencrypt.org/docs/challenge-types/#http-01-challenge + ACMEChallengeTypeHTTP01 ACMEChallengeType = "HTTP-01" + + // ACMEChallengeTypeDNS01 denotes a Challenge is of type dns-01 + // More info: https://letsencrypt.org/docs/challenge-types/#dns-01-challenge + ACMEChallengeTypeDNS01 ACMEChallengeType = "DNS-01" +) + +type ChallengeStatus struct { + // Used to denote whether this challenge should be processed or not. + // This field will only be set to true by the 'scheduling' component. + // It will only be set to false by the 'challenges' controller, after the + // challenge has reached a final state or timed out. + // If this field is set to false, the challenge controller will not take + // any more action. + // +optional + Processing bool `json:"processing"` + + // presented will be set to true if the challenge values for this challenge + // are currently 'presented'. + // This *does not* imply the self check is passing. Only that the values + // have been 'submitted' for the appropriate challenge mechanism (i.e. the + // DNS01 TXT record has been presented, or the HTTP01 configuration has been + // configured). + // +optional + Presented bool `json:"presented"` + + // Contains human readable information on why the Challenge is in the + // current state. + // +optional + Reason string `json:"reason,omitempty"` + + // Contains the current 'state' of the challenge. + // If not set, the state of the challenge is unknown. + // +optional + State State `json:"state,omitempty"` +} diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go new file mode 100644 index 00000000000..9f663280cbc --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go @@ -0,0 +1,650 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + gwapi "sigs.k8s.io/gateway-api/apis/v1beta1" + + cmmeta "github.com/cert-manager/cert-manager/pkg/apis/meta/v1" +) + +// ACMEIssuer contains the specification for an ACME issuer. +// This uses the RFC8555 specification to obtain certificates by completing +// 'challenges' to prove ownership of domain identifiers. +// Earlier draft versions of the ACME specification are not supported. +type ACMEIssuer struct { + // Email is the email address to be associated with the ACME account. + // This field is optional, but it is strongly recommended to be set. + // It will be used to contact you in case of issues with your account or + // certificates, including expiry notification emails. + // This field may be updated after the account is initially registered. + // +optional + Email string `json:"email,omitempty"` + + // Server is the URL used to access the ACME server's 'directory' endpoint. + // For example, for Let's Encrypt's staging endpoint, you would use: + // "https://acme-staging-v02.api.letsencrypt.org/directory". + // Only ACME v2 endpoints (i.e. RFC 8555) are supported. + Server string `json:"server"` + + // PreferredChain is the chain to use if the ACME server outputs multiple. + // PreferredChain does not guarantee that this chain is the one the ACME + // endpoint delivers. + // For example, for Let's Encrypt's DST cross-sign you would use: + // "DST Root CA X3" or "ISRG Root X1" for the newer Let's Encrypt root CA. + // This value picks the first certificate bundle in the ACME alternative + // chains that has a certificate with this value as its issuer's CN. + // +optional + // +kubebuilder:validation:MaxLength=64 + PreferredChain string `json:"preferredChain,omitempty"` + + // Base64-encoded bundle of PEM CAs which can be used to validate the certificate + // chain presented by the ACME server. + // Mutually exclusive with SkipTLSVerify; prefer using CABundle to prevent various + // kinds of security vulnerabilities. + // If CABundle and SkipTLSVerify are unset, the system certificate bundle inside + // the container is used to validate the TLS connection. + // +optional + CABundle []byte `json:"caBundle,omitempty"` + + // INSECURE: Enables or disables validation of the ACME server TLS certificate. + // If true, requests to the ACME server will not have the TLS certificate chain + // validated. + // Mutually exclusive with CABundle; prefer using CABundle to prevent various + // kinds of security vulnerabilities. + // Only enable this option in development environments. + // If CABundle and SkipTLSVerify are unset, the system certificate bundle inside + // the container is used to validate the TLS connection. + // Defaults to false. + // +optional + SkipTLSVerify bool `json:"skipTLSVerify,omitempty"` + + // ExternalAccountBinding is a reference to a CA external account of the ACME + // server. + // If set, upon registration cert-manager will attempt to associate the given + // external account credentials with the registered ACME account. + // +optional + ExternalAccountBinding *ACMEExternalAccountBinding `json:"externalAccountBinding,omitempty"` + + // PrivateKey is the name of a Kubernetes Secret resource that will be used to + // store the automatically generated ACME account private key.
+ // Optionally, a `key` may be specified to select a specific entry within + // the named Secret resource. + // If `key` is not specified, a default of `tls.key` will be used. + PrivateKey cmmeta.SecretKeySelector `json:"privateKeySecretRef"` + + // Solvers is a list of challenge solvers that will be used to solve + // ACME challenges for the matching domains. + // Solver configurations must be provided in order to obtain certificates + // from an ACME server. + // For more information, see: https://cert-manager.io/docs/configuration/acme/ + // +optional + Solvers []ACMEChallengeSolver `json:"solvers,omitempty"` + + // Enables or disables generating a new ACME account key. + // If true, the Issuer resource will *not* request a new account but will expect + // the account key to be supplied via an existing secret. + // If false, the cert-manager system will generate a new ACME account key + // for the Issuer. + // Defaults to false. + // +optional + DisableAccountKeyGeneration bool `json:"disableAccountKeyGeneration,omitempty"` + + // Enables requesting a Not After date on certificates that matches the + // duration of the certificate. This is not supported by all ACME servers + // (Let's Encrypt, for example, does not support it). If set to true when + // the ACME server does not support it, an error will be created on the Order. + // Defaults to false. + // +optional + EnableDurationFeature bool `json:"enableDurationFeature,omitempty"` +} + +// ACMEExternalAccountBinding is a reference to a CA external account of the ACME +// server. +type ACMEExternalAccountBinding struct { + // keyID is the ID of the CA key that the External Account is bound to. + KeyID string `json:"keyID"` + + // keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes + // Secret which holds the symmetric MAC key of the External Account Binding. + // The `key` is the index string that is paired with the key data in the + // Secret and should not be confused with the key data itself, or indeed with + // the External Account Binding keyID above. + // The secret key stored in the Secret **must** be un-padded, base64 URL + // encoded data. + Key cmmeta.SecretKeySelector `json:"keySecretRef"` + + // Deprecated: keyAlgorithm field exists for historical compatibility + // reasons and should not be used. The algorithm is now hardcoded to HS256 + // in golang/x/crypto/acme. + // +optional + KeyAlgorithm HMACKeyAlgorithm `json:"keyAlgorithm,omitempty"` +} + +// HMACKeyAlgorithm is the name of a key algorithm used for HMAC encryption +// +kubebuilder:validation:Enum=HS256;HS384;HS512 +type HMACKeyAlgorithm string + +const ( + HS256 HMACKeyAlgorithm = "HS256" + HS384 HMACKeyAlgorithm = "HS384" + HS512 HMACKeyAlgorithm = "HS512" +) + +// An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. +// A selector may be provided to use different solving strategies for different DNS names. +// Only one of HTTP01 or DNS01 must be provided. +type ACMEChallengeSolver struct { + // Selector selects a set of DNSNames on the Certificate resource that + // should be solved using this challenge solver. + // If not specified, the solver will be treated as the 'default' solver + // with the lowest priority, i.e. if any other solver has a more specific + // match, it will be used instead. + // +optional + Selector *CertificateDNSNameSelector `json:"selector,omitempty"` + + // Configures cert-manager to attempt to complete authorizations by + // performing the HTTP01 challenge flow.
+ // It is not possible to obtain certificates for wildcard domain names + // (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + // +optional + HTTP01 *ACMEChallengeSolverHTTP01 `json:"http01,omitempty"` + + // Configures cert-manager to attempt to complete authorizations by + // performing the DNS01 challenge flow. + // +optional + DNS01 *ACMEChallengeSolverDNS01 `json:"dns01,omitempty"` +} + +// CertificateDNSNameSelector selects certificates using a label selector, and +// can optionally select individual DNS names within those certificates. +// If both MatchLabels and DNSNames are empty, this selector will match all +// certificates and DNS names within them. +type CertificateDNSNameSelector struct { + // A label selector that is used to refine the set of certificates that + // this challenge solver will apply to. + // +optional + MatchLabels map[string]string `json:"matchLabels,omitempty"` + + // List of DNSNames that this solver will be used to solve. + // If specified and a match is found, a dnsNames selector will take + // precedence over a dnsZones selector. + // If multiple solvers match with the same dnsNames value, the solver + // with the most matching labels in matchLabels will be selected. + // If neither has more matches, the solver defined earlier in the list + // will be selected. + // +optional + DNSNames []string `json:"dnsNames,omitempty"` + + // List of DNSZones that this solver will be used to solve. + // The most specific DNS zone match specified here will take precedence + // over other DNS zone matches, so a solver specifying sys.example.com + // will be selected over one specifying example.com for the domain + // www.sys.example.com. + // If multiple solvers match with the same dnsZones value, the solver + // with the most matching labels in matchLabels will be selected. + // If neither has more matches, the solver defined earlier in the list + // will be selected. + // +optional + DNSZones []string `json:"dnsZones,omitempty"` +} + +// ACMEChallengeSolverHTTP01 contains configuration detailing how to solve +// HTTP01 challenges within a Kubernetes cluster. +// Typically this is accomplished through creating 'routes' of some description +// that configure ingress controllers to direct traffic to 'solver pods', which +// are responsible for responding to the ACME server's HTTP requests. +// Only one of Ingress / Gateway can be specified. +type ACMEChallengeSolverHTTP01 struct { + // The ingress based HTTP01 challenge solver will solve challenges by + // creating or modifying Ingress resources in order to route requests for + // '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are + // provisioned by cert-manager for each Challenge to be completed. + // +optional + Ingress *ACMEChallengeSolverHTTP01Ingress `json:"ingress,omitempty"` + + // The Gateway API is a sig-network community API that models service networking + // in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will + // create HTTPRoutes with the specified labels in the same namespace as the challenge. + // This solver is experimental, and fields / behaviour may change in the future. + // +optional + GatewayHTTPRoute *ACMEChallengeSolverHTTP01GatewayHTTPRoute `json:"gatewayHTTPRoute,omitempty"` +} + +type ACMEChallengeSolverHTTP01Ingress struct { + // Optional service type for Kubernetes solver service. Supported values + // are NodePort or ClusterIP. If unset, defaults to NodePort.
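+ // An illustrative http01 ingress solver stanza with assumed values (the + // class name below is a placeholder, not defined by this file): + // + // http01: + // ingress: + // ingressClassName: nginx + // serviceType: ClusterIP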
+ // +optional + ServiceType corev1.ServiceType `json:"serviceType,omitempty"` + + // This field configures the field `ingressClassName` on the created Ingress + // resources used to solve ACME challenges that use this challenge solver. + // This is the recommended way of configuring the ingress class. Only one of + // `class`, `name` or `ingressClassName` may be specified. + // +optional + IngressClassName *string `json:"ingressClassName,omitempty"` + + // This field configures the annotation `kubernetes.io/ingress.class` when + // creating Ingress resources to solve ACME challenges that use this + // challenge solver. Only one of `class`, `name` or `ingressClassName` may + // be specified. + // +optional + Class *string `json:"class,omitempty"` + + // The name of the ingress resource that should have ACME challenge solving + // routes inserted into it in order to solve HTTP01 challenges. + // This is typically used in conjunction with ingress controllers like + // ingress-gce, which maintains a 1:1 mapping between external IPs and + // ingress resources. Only one of `class`, `name` or `ingressClassName` may + // be specified. + // +optional + Name string `json:"name,omitempty"` + + // Optional pod template used to configure the ACME challenge solver pods + // used for HTTP01 challenges. + // +optional + PodTemplate *ACMEChallengeSolverHTTP01IngressPodTemplate `json:"podTemplate,omitempty"` + + // Optional ingress template used to configure the ACME challenge solver + // ingress used for HTTP01 challenges. + // +optional + IngressTemplate *ACMEChallengeSolverHTTP01IngressTemplate `json:"ingressTemplate,omitempty"` +} + +// The ACMEChallengeSolverHTTP01GatewayHTTPRoute solver will create HTTPRoute objects for a Gateway class +// routing to an ACME challenge solver pod. +type ACMEChallengeSolverHTTP01GatewayHTTPRoute struct { + // Optional service type for Kubernetes solver service. Supported values + // are NodePort or ClusterIP. If unset, defaults to NodePort. + // +optional + ServiceType corev1.ServiceType `json:"serviceType,omitempty"` + + // Custom labels that will be applied to HTTPRoutes created by cert-manager + // while solving HTTP-01 challenges. + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. + // cert-manager needs to know which parentRefs should be used when creating + // the HTTPRoute. Usually, the parentRef references a Gateway. See: + // https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways + ParentRefs []gwapi.ParentReference `json:"parentRefs,omitempty"` +} + +type ACMEChallengeSolverHTTP01IngressPodTemplate struct { + // ObjectMeta overrides for the pod used to solve HTTP01 challenges. + // Only the 'labels' and 'annotations' fields may be set. + // If labels or annotations overlap with in-built values, the values here + // will override the in-built values. + // +optional + ACMEChallengeSolverHTTP01IngressPodObjectMeta `json:"metadata"` + + // PodSpec defines overrides for the HTTP01 challenge solver pod. + // Check ACMEChallengeSolverHTTP01IngressPodSpec to find out currently supported fields. + // All other fields will be ignored. + // +optional + Spec ACMEChallengeSolverHTTP01IngressPodSpec `json:"spec"` +} + +type ACMEChallengeSolverHTTP01IngressPodObjectMeta struct { + // Annotations that should be added to the created ACME HTTP01 solver pods.
+ // +optional + Annotations map[string]string `json:"annotations,omitempty"` + + // Labels that should be added to the created ACME HTTP01 solver pods. + // +optional + Labels map[string]string `json:"labels,omitempty"` +} + +type ACMEChallengeSolverHTTP01IngressPodSpec struct { + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // If specified, the pod's scheduling constraints + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // If specified, the pod's tolerations. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // If specified, the pod's priorityClassName. + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` + + // If specified, the pod's service account + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // If specified, the pod's imagePullSecrets + // +optional + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +type ACMEChallengeSolverHTTP01IngressTemplate struct { + // ObjectMeta overrides for the ingress used to solve HTTP01 challenges. + // Only the 'labels' and 'annotations' fields may be set. + // If labels or annotations overlap with in-built values, the values here + // will override the in-built values. + // +optional + ACMEChallengeSolverHTTP01IngressObjectMeta `json:"metadata"` +} + +type ACMEChallengeSolverHTTP01IngressObjectMeta struct { + // Annotations that should be added to the created ACME HTTP01 solver ingress. + // +optional + Annotations map[string]string `json:"annotations,omitempty"` + + // Labels that should be added to the created ACME HTTP01 solver ingress. + // +optional + Labels map[string]string `json:"labels,omitempty"` +} + +// Used to configure a DNS01 challenge provider to be used when solving DNS01 +// challenges. +// Only one DNS provider may be configured per solver. +type ACMEChallengeSolverDNS01 struct { + // CNAMEStrategy configures how the DNS01 provider should handle CNAME + // records when found in DNS zones. + // +optional + CNAMEStrategy CNAMEStrategy `json:"cnameStrategy,omitempty"` + + // Use the Akamai DNS zone management API to manage DNS01 challenge records. + // +optional + Akamai *ACMEIssuerDNS01ProviderAkamai `json:"akamai,omitempty"` + + // Use the Google Cloud DNS API to manage DNS01 challenge records. + // +optional + CloudDNS *ACMEIssuerDNS01ProviderCloudDNS `json:"cloudDNS,omitempty"` + + // Use the Cloudflare API to manage DNS01 challenge records. + // +optional + Cloudflare *ACMEIssuerDNS01ProviderCloudflare `json:"cloudflare,omitempty"` + + // Use the AWS Route53 API to manage DNS01 challenge records. + // +optional + Route53 *ACMEIssuerDNS01ProviderRoute53 `json:"route53,omitempty"` + + // Use the Microsoft Azure DNS API to manage DNS01 challenge records. + // +optional + AzureDNS *ACMEIssuerDNS01ProviderAzureDNS `json:"azureDNS,omitempty"` + + // Use the DigitalOcean DNS API to manage DNS01 challenge records. + // +optional + DigitalOcean *ACMEIssuerDNS01ProviderDigitalOcean `json:"digitalocean,omitempty"` + + // Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage + // DNS01 challenge records. 
+ // +optional + AcmeDNS *ACMEIssuerDNS01ProviderAcmeDNS `json:"acmeDNS,omitempty"` + + // Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) + // to manage DNS01 challenge records. + // +optional + RFC2136 *ACMEIssuerDNS01ProviderRFC2136 `json:"rfc2136,omitempty"` + + // Configure an external webhook based DNS01 challenge solver to manage + // DNS01 challenge records. + // +optional + Webhook *ACMEIssuerDNS01ProviderWebhook `json:"webhook,omitempty"` +} + +// CNAMEStrategy configures how the DNS01 provider should handle CNAME records +// when found in DNS zones. +// By default, the None strategy will be applied (i.e. do not follow CNAMEs). +// +kubebuilder:validation:Enum=None;Follow +type CNAMEStrategy string + +const ( + // NoneStrategy indicates that no CNAME resolution strategy should be used + // when determining which DNS zone to update during DNS01 challenges. + NoneStrategy = "None" + + // FollowStrategy will cause cert-manager to recurse through CNAMEs in + // order to determine which DNS zone to update during DNS01 challenges. + // This is useful if you do not want to grant cert-manager access to your + // root DNS zone, and instead delegate the _acme-challenge.example.com + // subdomain to some other, less privileged domain. + FollowStrategy = "Follow" +) + +// ACMEIssuerDNS01ProviderAkamai is a structure containing the DNS +// configuration for Akamai DNS—Zone Record Management API +type ACMEIssuerDNS01ProviderAkamai struct { + ServiceConsumerDomain string `json:"serviceConsumerDomain"` + ClientToken cmmeta.SecretKeySelector `json:"clientTokenSecretRef"` + ClientSecret cmmeta.SecretKeySelector `json:"clientSecretSecretRef"` + AccessToken cmmeta.SecretKeySelector `json:"accessTokenSecretRef"` +} + +// ACMEIssuerDNS01ProviderCloudDNS is a structure containing the DNS +// configuration for Google Cloud DNS +type ACMEIssuerDNS01ProviderCloudDNS struct { + // +optional + ServiceAccount *cmmeta.SecretKeySelector `json:"serviceAccountSecretRef,omitempty"` + Project string `json:"project"` + + // HostedZoneName is an optional field that tells cert-manager in which + // Cloud DNS zone the challenge record has to be created. + // If left empty cert-manager will automatically choose a zone. + // +optional + HostedZoneName string `json:"hostedZoneName,omitempty"` +} + +// ACMEIssuerDNS01ProviderCloudflare is a structure containing the DNS +// configuration for Cloudflare. +// One of `apiKeySecretRef` or `apiTokenSecretRef` must be provided. +type ACMEIssuerDNS01ProviderCloudflare struct { + // Email of the account, only required when using API key based authentication. + // +optional + Email string `json:"email,omitempty"` + + // API key to use to authenticate with Cloudflare. + // Note: using an API token to authenticate is now the recommended method + // as it allows greater control of permissions. + // +optional + APIKey *cmmeta.SecretKeySelector `json:"apiKeySecretRef,omitempty"` + + // API token used to authenticate with Cloudflare. 
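+ // An illustrative secret reference (names are placeholders, not defined by + // this file): + // + // apiTokenSecretRef: + // name: cloudflare-api-token + // key: api-token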
+ // +optional + APIToken *cmmeta.SecretKeySelector `json:"apiTokenSecretRef,omitempty"` +} + +// ACMEIssuerDNS01ProviderDigitalOcean is a structure containing the DNS +// configuration for DigitalOcean Domains +type ACMEIssuerDNS01ProviderDigitalOcean struct { + Token cmmeta.SecretKeySelector `json:"tokenSecretRef"` +} + +// ACMEIssuerDNS01ProviderRoute53 is a structure containing the Route 53 +// configuration for AWS +type ACMEIssuerDNS01ProviderRoute53 struct { + // The AccessKeyID is used for authentication. + // Cannot be set when SecretAccessKeyID is set. + // If neither the Access Key nor Key ID are set, we fall back to using env + // vars, shared credentials file or AWS Instance metadata, + // see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + // +optional + AccessKeyID string `json:"accessKeyID,omitempty"` + + // The SecretAccessKeyID is used for authentication. If set, the AWS + // access key ID is pulled from a key within a Kubernetes Secret. + // Cannot be set when AccessKeyID is set. + // If neither the Access Key nor Key ID are set, we fall back to using env + // vars, shared credentials file or AWS Instance metadata, + // see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + // +optional + SecretAccessKeyID *cmmeta.SecretKeySelector `json:"accessKeyIDSecretRef,omitempty"` + + // The SecretAccessKey is used for authentication. + // If neither the Access Key nor Key ID are set, we fall back to using env + // vars, shared credentials file or AWS Instance metadata, + // see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + // +optional + SecretAccessKey cmmeta.SecretKeySelector `json:"secretAccessKeySecretRef"` + + // Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey + // or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + // +optional + Role string `json:"role,omitempty"` + + // If set, the provider will manage only this zone in Route53 and will not do a lookup using the route53:ListHostedZonesByName API call.
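+ // An illustrative route53 stanza relying on a Role ARN with ambient + // credentials (all values below are placeholders): + // + // route53: + // region: eu-central-1 + // role: arn:aws:iam::111111111111:role/cert-manager-dns01 + // hostedZoneID: Z0ABCD1234EXAMPLE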
+ // +optional + HostedZoneID string `json:"hostedZoneID,omitempty"` + + // Always set the region when using AccessKeyID and SecretAccessKey + Region string `json:"region"` +} + +// ACMEIssuerDNS01ProviderAzureDNS is a structure containing the +// configuration for Azure DNS +type ACMEIssuerDNS01ProviderAzureDNS struct { + // if both this and ClientSecret are left unset, MSI will be used + // +optional + ClientID string `json:"clientID,omitempty"` + + // if both this and ClientID are left unset, MSI will be used + // +optional + ClientSecret *cmmeta.SecretKeySelector `json:"clientSecretSecretRef,omitempty"` + + // ID of the Azure subscription + SubscriptionID string `json:"subscriptionID"` + + // when specifying ClientID and ClientSecret, this field is also needed + // +optional + TenantID string `json:"tenantID,omitempty"` + + // resource group the DNS zone is located in + ResourceGroupName string `json:"resourceGroupName"` + + // name of the DNS zone that should be used + // +optional + HostedZoneName string `json:"hostedZoneName,omitempty"` + + // name of the Azure environment (default AzurePublicCloud) + // +optional + Environment AzureDNSEnvironment `json:"environment,omitempty"` + + // managed identity configuration, cannot be used at the same time as clientID, clientSecretSecretRef or tenantID + // +optional + ManagedIdentity *AzureManagedIdentity `json:"managedIdentity,omitempty"` +} + +type AzureManagedIdentity struct { + // client ID of the managed identity, cannot be used at the same time as resourceID + // +optional + ClientID string `json:"clientID,omitempty"` + + // resource ID of the managed identity, cannot be used at the same time as clientID + // +optional + ResourceID string `json:"resourceID,omitempty"` +} + +// +kubebuilder:validation:Enum=AzurePublicCloud;AzureChinaCloud;AzureGermanCloud;AzureUSGovernmentCloud +type AzureDNSEnvironment string + +const ( + AzurePublicCloud AzureDNSEnvironment = "AzurePublicCloud" + AzureChinaCloud AzureDNSEnvironment = "AzureChinaCloud" + AzureGermanCloud AzureDNSEnvironment = "AzureGermanCloud" + AzureUSGovernmentCloud AzureDNSEnvironment = "AzureUSGovernmentCloud" +) + +// ACMEIssuerDNS01ProviderAcmeDNS is a structure containing the +// configuration for ACME-DNS servers +type ACMEIssuerDNS01ProviderAcmeDNS struct { + Host string `json:"host"` + + AccountSecret cmmeta.SecretKeySelector `json:"accountSecretRef"` +} + +// ACMEIssuerDNS01ProviderRFC2136 is a structure containing the +// configuration for RFC2136 DNS +type ACMEIssuerDNS01ProviderRFC2136 struct { + // The IP address or hostname of an authoritative DNS server supporting + // RFC2136 in the form host:port. If the host is an IPv6 address it must be + // enclosed in square brackets (e.g. [2001:db8::1]); port is optional. + // This field is required. + Nameserver string `json:"nameserver"` + + // The name of the secret containing the TSIG value. + // If ``tsigKeyName`` is defined, this field is required. + // +optional + TSIGSecret cmmeta.SecretKeySelector `json:"tsigSecretSecretRef,omitempty"` + + // The TSIG Key name configured in the DNS. + // If ``tsigSecretSecretRef`` is defined, this field is required. + // +optional + TSIGKeyName string `json:"tsigKeyName,omitempty"` + + // The TSIG Algorithm configured in the DNS supporting RFC2136. Used only + // when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. + // Supported values are (case-insensitive): ``HMACMD5`` (default), + // ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.
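+ // An illustrative rfc2136 stanza (all values below are placeholders): + // + // rfc2136: + // nameserver: "10.0.0.10:53" + // tsigKeyName: example-com-key + // tsigAlgorithm: HMACSHA512 + // tsigSecretSecretRef: + // name: tsig-secret + // key: tsig-secret-key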
+ // +optional + TSIGAlgorithm string `json:"tsigAlgorithm,omitempty"` +} + +// ACMEIssuerDNS01ProviderWebhook specifies configuration for a webhook DNS01 +// provider, including where to POST ChallengePayload resources. +type ACMEIssuerDNS01ProviderWebhook struct { + // The API group name that should be used when POSTing ChallengePayload + // resources to the webhook apiserver. + // This should be the same as the GroupName specified in the webhook + // provider implementation. + GroupName string `json:"groupName"` + + // The name of the solver to use, as defined in the webhook provider + // implementation. + // This will typically be the name of the provider, e.g. 'cloudflare'. + SolverName string `json:"solverName"` + + // Additional configuration that should be passed to the webhook apiserver + // when challenges are processed. + // This can contain arbitrary JSON data. + // Secret values should not be specified in this stanza. + // If secret values are needed (e.g. credentials for a DNS service), you + // should use a SecretKeySelector to reference a Secret resource. + // For details on the schema of this field, consult the webhook provider + // implementation's documentation. + // +optional + Config *apiextensionsv1.JSON `json:"config,omitempty"` +} + +type ACMEIssuerStatus struct { + // URI is the unique account identifier, which can also be used to retrieve + // account details from the CA + // +optional + URI string `json:"uri,omitempty"` + + // LastRegisteredEmail is the email associated with the latest registered + // ACME account, in order to track changes made to the registered account + // associated with the Issuer + // +optional + LastRegisteredEmail string `json:"lastRegisteredEmail,omitempty"` + + // LastPrivateKeyHash is a hash of the private key associated with the latest + // registered ACME account, in order to track changes made to the registered account + // associated with the Issuer + // +optional + LastPrivateKeyHash string `json:"lastPrivateKeyHash,omitempty"` +} diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go new file mode 100644 index 00000000000..e9a50a30134 --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go @@ -0,0 +1,240 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + cmmeta "github.com/cert-manager/cert-manager/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:storageversion + +// Order is a type to represent an Order with an ACME server +// +k8s:openapi-gen=true +type Order struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec OrderSpec `json:"spec"` + // +optional + Status OrderStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OrderList is a list of Orders +type OrderList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Order `json:"items"` +} + +type OrderSpec struct { + // Certificate signing request bytes in DER encoding. + // This will be used when finalizing the order. + // This field must be set on the order. + Request []byte `json:"request"` + + // IssuerRef references a properly configured ACME-type Issuer which should + // be used to create this Order. + // If the Issuer does not exist, processing will be retried. + // If the Issuer is not an 'ACME' Issuer, an error will be returned and the + // Order will be marked as failed. + IssuerRef cmmeta.ObjectReference `json:"issuerRef"` + + // CommonName is the common name as specified on the DER encoded CSR. + // If specified, this value must also be present in `dnsNames` or `ipAddresses`. + // This field must match the corresponding field on the DER encoded CSR. + // +optional + CommonName string `json:"commonName,omitempty"` + + // DNSNames is a list of DNS names that should be included as part of the Order + // validation process. + // This field must match the corresponding field on the DER encoded CSR. + // +optional + DNSNames []string `json:"dnsNames,omitempty"` + + // IPAddresses is a list of IP addresses that should be included as part of the Order + // validation process. + // This field must match the corresponding field on the DER encoded CSR. + // +optional + IPAddresses []string `json:"ipAddresses,omitempty"` + + // Duration is the duration for the not after date for the requested certificate. + // This is set on order creation as per the ACME spec. + // +optional + Duration *metav1.Duration `json:"duration,omitempty"` +} + +type OrderStatus struct { + // URL of the Order. + // This will initially be empty when the resource is first created. + // The Order controller will populate this field when the Order is first processed. + // This field will be immutable after it is initially set. + // +optional + URL string `json:"url,omitempty"` + + // FinalizeURL of the Order. + // This is used to obtain certificates for this order once it has been completed. + // +optional + FinalizeURL string `json:"finalizeURL,omitempty"` + + // Authorizations contains data returned from the ACME server on what + // authorizations must be completed in order to validate the DNS names + // specified on the Order. + // +optional + Authorizations []ACMEAuthorization `json:"authorizations,omitempty"` + + // Certificate is a copy of the PEM encoded certificate for this Order. + // This field will be populated after the order has been successfully + // finalized with the ACME server, and the order has transitioned to the + // 'valid' state. + // +optional + Certificate []byte `json:"certificate,omitempty"` + + // State contains the current state of this Order resource.
+ // States 'valid' and 'expired' are 'final'. + // +optional + State State `json:"state,omitempty"` + + // Reason optionally provides more information about why the order is in + // the current state. + // +optional + Reason string `json:"reason,omitempty"` + + // FailureTime stores the time that this order failed. + // This is used to influence garbage collection and back-off. + // +optional + FailureTime *metav1.Time `json:"failureTime,omitempty"` +} + +// ACMEAuthorization contains data returned from the ACME server on an +// authorization that must be completed in order to validate a DNS name on an ACME +// Order resource. +type ACMEAuthorization struct { + // URL is the URL of the Authorization that must be completed + URL string `json:"url"` + + // Identifier is the DNS name to be validated as part of this authorization + // +optional + Identifier string `json:"identifier,omitempty"` + + // Wildcard will be true if this authorization is for a wildcard DNS name. + // If this is true, the identifier will be the *non-wildcard* version of + // the DNS name. + // For example, if '*.example.com' is the DNS name being validated, this + // field will be 'true' and the 'identifier' field will be 'example.com'. + // +optional + Wildcard *bool `json:"wildcard,omitempty"` + + // InitialState is the initial state of the ACME authorization when first + // fetched from the ACME server. + // If an Authorization is already 'valid', the Order controller will not + // create a Challenge resource for the authorization. This will occur when + // working with an ACME server that enables 'authz reuse' (such as Let's + // Encrypt's production endpoint). + // If not set and 'identifier' is set, the state is assumed to be pending + // and a Challenge will be created. + // +optional + InitialState State `json:"initialState,omitempty"` + + // Challenges specifies the challenge types offered by the ACME server. + // One of these challenge types will be selected when validating the DNS + // name and an appropriate Challenge resource will be created to perform + // the ACME challenge process. + // +optional + Challenges []ACMEChallenge `json:"challenges,omitempty"` +} + +// Challenge specifies a challenge offered by the ACME server for an Order. +// An appropriate Challenge resource can be created to perform the ACME +// challenge process. +type ACMEChallenge struct { + // URL is the URL of this challenge. It can be used to retrieve additional + // metadata about the Challenge from the ACME server. + URL string `json:"url"` + + // Token is the token that must be presented for this challenge. + // This is used to compute the 'key' that must also be presented. + Token string `json:"token"` + + // Type is the type of challenge being offered, e.g. 'http-01', 'dns-01', + // 'tls-sni-01', etc. + // This is the raw value retrieved from the ACME server. + // Only 'http-01' and 'dns-01' are supported by cert-manager, other values + // will be ignored. + Type string `json:"type"` +} + +// State represents the state of an ACME resource, such as an Order. +// The possible options here map to the corresponding values in the +// ACME specification. +// Full details of these values can be found here: https://tools.ietf.org/html/draft-ietf-acme-acme-15#section-7.1.6 +// Clients utilising this type must also gracefully handle unknown +// values, as the contents of this enumeration may be added to over time.
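+// A defensive classification sketch a client might use (illustrative, not +// part of the upstream API): +// +// switch state { +// case Valid, Invalid, Expired, Errored: +// // final states +// case Ready, Pending, Processing: +// // transient states +// default: +// // unrecognised value; treat as Unknown +// }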
+// +kubebuilder:validation:Enum=valid;ready;pending;processing;invalid;expired;errored +type State string + +const ( + // Unknown is not a real state as part of the ACME spec. + // It is used to represent an unrecognised value. + Unknown State = "" + + // Valid signifies that an ACME resource is in a valid state. + // If an order is 'valid', it has been finalized with the ACME server and + // the certificate can be retrieved from the ACME server using the + // certificate URL stored in the Order's status subresource. + // This is a final state. + Valid State = "valid" + + // Ready signifies that an ACME resource is in a ready state. + // If an order is 'ready', all of its challenges have been completed + // successfully and the order is ready to be finalized. + // Once finalized, it will transition to the Valid state. + // This is a transient state. + Ready State = "ready" + + // Pending signifies that an ACME resource is still pending and is not yet ready. + // If an Order is marked 'Pending', the validations for that Order are still in progress. + // This is a transient state. + Pending State = "pending" + + // Processing signifies that an ACME resource is being processed by the server. + // If an Order is marked 'Processing', the validations for that Order are currently being processed. + // This is a transient state. + Processing State = "processing" + + // Invalid signifies that an ACME resource is invalid for some reason. + // If an Order is marked 'invalid', one of its validations may have been invalid for some reason. + // This is a final state. + Invalid State = "invalid" + + // Expired signifies that an ACME resource has expired. + // If an Order is marked 'Expired', one of its validations may have expired, or the Order itself may have. + // This is a final state. + Expired State = "expired" + + // Errored signifies that the ACME resource has errored for some reason. + // This is a catch-all state, and is used for marking internal cert-manager + // errors such as validation failures. + // This is a final state. + Errored State = "errored" +) diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..b5472216ccd --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go @@ -0,0 +1,919 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT.
+ +package v1 + +import ( + metav1 "github.com/cert-manager/cert-manager/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEAuthorization) DeepCopyInto(out *ACMEAuthorization) { + *out = *in + if in.Wildcard != nil { + in, out := &in.Wildcard, &out.Wildcard + *out = new(bool) + **out = **in + } + if in.Challenges != nil { + in, out := &in.Challenges, &out.Challenges + *out = make([]ACMEChallenge, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEAuthorization. +func (in *ACMEAuthorization) DeepCopy() *ACMEAuthorization { + if in == nil { + return nil + } + out := new(ACMEAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEChallenge) DeepCopyInto(out *ACMEChallenge) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEChallenge. +func (in *ACMEChallenge) DeepCopy() *ACMEChallenge { + if in == nil { + return nil + } + out := new(ACMEChallenge) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEChallengeSolver) DeepCopyInto(out *ACMEChallengeSolver) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(CertificateDNSNameSelector) + (*in).DeepCopyInto(*out) + } + if in.HTTP01 != nil { + in, out := &in.HTTP01, &out.HTTP01 + *out = new(ACMEChallengeSolverHTTP01) + (*in).DeepCopyInto(*out) + } + if in.DNS01 != nil { + in, out := &in.DNS01, &out.DNS01 + *out = new(ACMEChallengeSolverDNS01) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEChallengeSolver. +func (in *ACMEChallengeSolver) DeepCopy() *ACMEChallengeSolver { + if in == nil { + return nil + } + out := new(ACMEChallengeSolver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ACMEChallengeSolverDNS01) DeepCopyInto(out *ACMEChallengeSolverDNS01) { + *out = *in + if in.Akamai != nil { + in, out := &in.Akamai, &out.Akamai + *out = new(ACMEIssuerDNS01ProviderAkamai) + **out = **in + } + if in.CloudDNS != nil { + in, out := &in.CloudDNS, &out.CloudDNS + *out = new(ACMEIssuerDNS01ProviderCloudDNS) + (*in).DeepCopyInto(*out) + } + if in.Cloudflare != nil { + in, out := &in.Cloudflare, &out.Cloudflare + *out = new(ACMEIssuerDNS01ProviderCloudflare) + (*in).DeepCopyInto(*out) + } + if in.Route53 != nil { + in, out := &in.Route53, &out.Route53 + *out = new(ACMEIssuerDNS01ProviderRoute53) + (*in).DeepCopyInto(*out) + } + if in.AzureDNS != nil { + in, out := &in.AzureDNS, &out.AzureDNS + *out = new(ACMEIssuerDNS01ProviderAzureDNS) + (*in).DeepCopyInto(*out) + } + if in.DigitalOcean != nil { + in, out := &in.DigitalOcean, &out.DigitalOcean + *out = new(ACMEIssuerDNS01ProviderDigitalOcean) + **out = **in + } + if in.AcmeDNS != nil { + in, out := &in.AcmeDNS, &out.AcmeDNS + *out = new(ACMEIssuerDNS01ProviderAcmeDNS) + **out = **in + } + if in.RFC2136 != nil { + in, out := &in.RFC2136, &out.RFC2136 + *out = new(ACMEIssuerDNS01ProviderRFC2136) + **out = **in + } + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = new(ACMEIssuerDNS01ProviderWebhook) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEChallengeSolverDNS01. +func (in *ACMEChallengeSolverDNS01) DeepCopy() *ACMEChallengeSolverDNS01 { + if in == nil { + return nil + } + out := new(ACMEChallengeSolverDNS01) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEChallengeSolverHTTP01) DeepCopyInto(out *ACMEChallengeSolverHTTP01) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(ACMEChallengeSolverHTTP01Ingress) + (*in).DeepCopyInto(*out) + } + if in.GatewayHTTPRoute != nil { + in, out := &in.GatewayHTTPRoute, &out.GatewayHTTPRoute + *out = new(ACMEChallengeSolverHTTP01GatewayHTTPRoute) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEChallengeSolverHTTP01. +func (in *ACMEChallengeSolverHTTP01) DeepCopy() *ACMEChallengeSolverHTTP01 { + if in == nil { + return nil + } + out := new(ACMEChallengeSolverHTTP01) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEChallengeSolverHTTP01GatewayHTTPRoute) DeepCopyInto(out *ACMEChallengeSolverHTTP01GatewayHTTPRoute) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ParentRefs != nil { + in, out := &in.ParentRefs, &out.ParentRefs + *out = make([]v1beta1.ParentReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEChallengeSolverHTTP01GatewayHTTPRoute. 
+func (in *ACMEChallengeSolverHTTP01GatewayHTTPRoute) DeepCopy() *ACMEChallengeSolverHTTP01GatewayHTTPRoute { + if in == nil { + return nil + } + out := new(ACMEChallengeSolverHTTP01GatewayHTTPRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEChallengeSolverHTTP01Ingress) DeepCopyInto(out *ACMEChallengeSolverHTTP01Ingress) { + *out = *in + if in.IngressClassName != nil { + in, out := &in.IngressClassName, &out.IngressClassName + *out = new(string) + **out = **in + } + if in.Class != nil { + in, out := &in.Class, &out.Class + *out = new(string) + **out = **in + } + if in.PodTemplate != nil { + in, out := &in.PodTemplate, &out.PodTemplate + *out = new(ACMEChallengeSolverHTTP01IngressPodTemplate) + (*in).DeepCopyInto(*out) + } + if in.IngressTemplate != nil { + in, out := &in.IngressTemplate, &out.IngressTemplate + *out = new(ACMEChallengeSolverHTTP01IngressTemplate) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEChallengeSolverHTTP01Ingress. +func (in *ACMEChallengeSolverHTTP01Ingress) DeepCopy() *ACMEChallengeSolverHTTP01Ingress { + if in == nil { + return nil + } + out := new(ACMEChallengeSolverHTTP01Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEChallengeSolverHTTP01IngressObjectMeta) DeepCopyInto(out *ACMEChallengeSolverHTTP01IngressObjectMeta) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEChallengeSolverHTTP01IngressObjectMeta. +func (in *ACMEChallengeSolverHTTP01IngressObjectMeta) DeepCopy() *ACMEChallengeSolverHTTP01IngressObjectMeta { + if in == nil { + return nil + } + out := new(ACMEChallengeSolverHTTP01IngressObjectMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEChallengeSolverHTTP01IngressPodObjectMeta) DeepCopyInto(out *ACMEChallengeSolverHTTP01IngressPodObjectMeta) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEChallengeSolverHTTP01IngressPodObjectMeta. +func (in *ACMEChallengeSolverHTTP01IngressPodObjectMeta) DeepCopy() *ACMEChallengeSolverHTTP01IngressPodObjectMeta { + if in == nil { + return nil + } + out := new(ACMEChallengeSolverHTTP01IngressPodObjectMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ACMEChallengeSolverHTTP01IngressPodSpec) DeepCopyInto(out *ACMEChallengeSolverHTTP01IngressPodSpec) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEChallengeSolverHTTP01IngressPodSpec. +func (in *ACMEChallengeSolverHTTP01IngressPodSpec) DeepCopy() *ACMEChallengeSolverHTTP01IngressPodSpec { + if in == nil { + return nil + } + out := new(ACMEChallengeSolverHTTP01IngressPodSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEChallengeSolverHTTP01IngressPodTemplate) DeepCopyInto(out *ACMEChallengeSolverHTTP01IngressPodTemplate) { + *out = *in + in.ACMEChallengeSolverHTTP01IngressPodObjectMeta.DeepCopyInto(&out.ACMEChallengeSolverHTTP01IngressPodObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEChallengeSolverHTTP01IngressPodTemplate. +func (in *ACMEChallengeSolverHTTP01IngressPodTemplate) DeepCopy() *ACMEChallengeSolverHTTP01IngressPodTemplate { + if in == nil { + return nil + } + out := new(ACMEChallengeSolverHTTP01IngressPodTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEChallengeSolverHTTP01IngressTemplate) DeepCopyInto(out *ACMEChallengeSolverHTTP01IngressTemplate) { + *out = *in + in.ACMEChallengeSolverHTTP01IngressObjectMeta.DeepCopyInto(&out.ACMEChallengeSolverHTTP01IngressObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEChallengeSolverHTTP01IngressTemplate. +func (in *ACMEChallengeSolverHTTP01IngressTemplate) DeepCopy() *ACMEChallengeSolverHTTP01IngressTemplate { + if in == nil { + return nil + } + out := new(ACMEChallengeSolverHTTP01IngressTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEExternalAccountBinding) DeepCopyInto(out *ACMEExternalAccountBinding) { + *out = *in + out.Key = in.Key + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEExternalAccountBinding. +func (in *ACMEExternalAccountBinding) DeepCopy() *ACMEExternalAccountBinding { + if in == nil { + return nil + } + out := new(ACMEExternalAccountBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ACMEIssuer) DeepCopyInto(out *ACMEIssuer) { + *out = *in + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.ExternalAccountBinding != nil { + in, out := &in.ExternalAccountBinding, &out.ExternalAccountBinding + *out = new(ACMEExternalAccountBinding) + **out = **in + } + out.PrivateKey = in.PrivateKey + if in.Solvers != nil { + in, out := &in.Solvers, &out.Solvers + *out = make([]ACMEChallengeSolver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEIssuer. +func (in *ACMEIssuer) DeepCopy() *ACMEIssuer { + if in == nil { + return nil + } + out := new(ACMEIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEIssuerDNS01ProviderAcmeDNS) DeepCopyInto(out *ACMEIssuerDNS01ProviderAcmeDNS) { + *out = *in + out.AccountSecret = in.AccountSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEIssuerDNS01ProviderAcmeDNS. +func (in *ACMEIssuerDNS01ProviderAcmeDNS) DeepCopy() *ACMEIssuerDNS01ProviderAcmeDNS { + if in == nil { + return nil + } + out := new(ACMEIssuerDNS01ProviderAcmeDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEIssuerDNS01ProviderAkamai) DeepCopyInto(out *ACMEIssuerDNS01ProviderAkamai) { + *out = *in + out.ClientToken = in.ClientToken + out.ClientSecret = in.ClientSecret + out.AccessToken = in.AccessToken + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEIssuerDNS01ProviderAkamai. +func (in *ACMEIssuerDNS01ProviderAkamai) DeepCopy() *ACMEIssuerDNS01ProviderAkamai { + if in == nil { + return nil + } + out := new(ACMEIssuerDNS01ProviderAkamai) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEIssuerDNS01ProviderAzureDNS) DeepCopyInto(out *ACMEIssuerDNS01ProviderAzureDNS) { + *out = *in + if in.ClientSecret != nil { + in, out := &in.ClientSecret, &out.ClientSecret + *out = new(metav1.SecretKeySelector) + **out = **in + } + if in.ManagedIdentity != nil { + in, out := &in.ManagedIdentity, &out.ManagedIdentity + *out = new(AzureManagedIdentity) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEIssuerDNS01ProviderAzureDNS. +func (in *ACMEIssuerDNS01ProviderAzureDNS) DeepCopy() *ACMEIssuerDNS01ProviderAzureDNS { + if in == nil { + return nil + } + out := new(ACMEIssuerDNS01ProviderAzureDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEIssuerDNS01ProviderCloudDNS) DeepCopyInto(out *ACMEIssuerDNS01ProviderCloudDNS) { + *out = *in + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(metav1.SecretKeySelector) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEIssuerDNS01ProviderCloudDNS. 
+func (in *ACMEIssuerDNS01ProviderCloudDNS) DeepCopy() *ACMEIssuerDNS01ProviderCloudDNS { + if in == nil { + return nil + } + out := new(ACMEIssuerDNS01ProviderCloudDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEIssuerDNS01ProviderCloudflare) DeepCopyInto(out *ACMEIssuerDNS01ProviderCloudflare) { + *out = *in + if in.APIKey != nil { + in, out := &in.APIKey, &out.APIKey + *out = new(metav1.SecretKeySelector) + **out = **in + } + if in.APIToken != nil { + in, out := &in.APIToken, &out.APIToken + *out = new(metav1.SecretKeySelector) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEIssuerDNS01ProviderCloudflare. +func (in *ACMEIssuerDNS01ProviderCloudflare) DeepCopy() *ACMEIssuerDNS01ProviderCloudflare { + if in == nil { + return nil + } + out := new(ACMEIssuerDNS01ProviderCloudflare) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEIssuerDNS01ProviderDigitalOcean) DeepCopyInto(out *ACMEIssuerDNS01ProviderDigitalOcean) { + *out = *in + out.Token = in.Token + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEIssuerDNS01ProviderDigitalOcean. +func (in *ACMEIssuerDNS01ProviderDigitalOcean) DeepCopy() *ACMEIssuerDNS01ProviderDigitalOcean { + if in == nil { + return nil + } + out := new(ACMEIssuerDNS01ProviderDigitalOcean) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEIssuerDNS01ProviderRFC2136) DeepCopyInto(out *ACMEIssuerDNS01ProviderRFC2136) { + *out = *in + out.TSIGSecret = in.TSIGSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEIssuerDNS01ProviderRFC2136. +func (in *ACMEIssuerDNS01ProviderRFC2136) DeepCopy() *ACMEIssuerDNS01ProviderRFC2136 { + if in == nil { + return nil + } + out := new(ACMEIssuerDNS01ProviderRFC2136) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEIssuerDNS01ProviderRoute53) DeepCopyInto(out *ACMEIssuerDNS01ProviderRoute53) { + *out = *in + if in.SecretAccessKeyID != nil { + in, out := &in.SecretAccessKeyID, &out.SecretAccessKeyID + *out = new(metav1.SecretKeySelector) + **out = **in + } + out.SecretAccessKey = in.SecretAccessKey + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEIssuerDNS01ProviderRoute53. +func (in *ACMEIssuerDNS01ProviderRoute53) DeepCopy() *ACMEIssuerDNS01ProviderRoute53 { + if in == nil { + return nil + } + out := new(ACMEIssuerDNS01ProviderRoute53) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEIssuerDNS01ProviderWebhook) DeepCopyInto(out *ACMEIssuerDNS01ProviderWebhook) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEIssuerDNS01ProviderWebhook. 
+func (in *ACMEIssuerDNS01ProviderWebhook) DeepCopy() *ACMEIssuerDNS01ProviderWebhook { + if in == nil { + return nil + } + out := new(ACMEIssuerDNS01ProviderWebhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEIssuerStatus) DeepCopyInto(out *ACMEIssuerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEIssuerStatus. +func (in *ACMEIssuerStatus) DeepCopy() *ACMEIssuerStatus { + if in == nil { + return nil + } + out := new(ACMEIssuerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedIdentity) DeepCopyInto(out *AzureManagedIdentity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedIdentity. +func (in *AzureManagedIdentity) DeepCopy() *AzureManagedIdentity { + if in == nil { + return nil + } + out := new(AzureManagedIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateDNSNameSelector) DeepCopyInto(out *CertificateDNSNameSelector) { + *out = *in + if in.MatchLabels != nil { + in, out := &in.MatchLabels, &out.MatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.DNSNames != nil { + in, out := &in.DNSNames, &out.DNSNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DNSZones != nil { + in, out := &in.DNSZones, &out.DNSZones + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateDNSNameSelector. +func (in *CertificateDNSNameSelector) DeepCopy() *CertificateDNSNameSelector { + if in == nil { + return nil + } + out := new(CertificateDNSNameSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Challenge) DeepCopyInto(out *Challenge) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Challenge. +func (in *Challenge) DeepCopy() *Challenge { + if in == nil { + return nil + } + out := new(Challenge) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Challenge) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChallengeList) DeepCopyInto(out *ChallengeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Challenge, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChallengeList. 
+func (in *ChallengeList) DeepCopy() *ChallengeList { + if in == nil { + return nil + } + out := new(ChallengeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ChallengeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChallengeSpec) DeepCopyInto(out *ChallengeSpec) { + *out = *in + in.Solver.DeepCopyInto(&out.Solver) + out.IssuerRef = in.IssuerRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChallengeSpec. +func (in *ChallengeSpec) DeepCopy() *ChallengeSpec { + if in == nil { + return nil + } + out := new(ChallengeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChallengeStatus) DeepCopyInto(out *ChallengeStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChallengeStatus. +func (in *ChallengeStatus) DeepCopy() *ChallengeStatus { + if in == nil { + return nil + } + out := new(ChallengeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Order) DeepCopyInto(out *Order) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Order. +func (in *Order) DeepCopy() *Order { + if in == nil { + return nil + } + out := new(Order) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Order) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderList) DeepCopyInto(out *OrderList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Order, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderList. +func (in *OrderList) DeepCopy() *OrderList { + if in == nil { + return nil + } + out := new(OrderList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OrderList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrderSpec) DeepCopyInto(out *OrderSpec) { + *out = *in + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.IssuerRef = in.IssuerRef + if in.DNSNames != nil { + in, out := &in.DNSNames, &out.DNSNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(apismetav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderSpec. +func (in *OrderSpec) DeepCopy() *OrderSpec { + if in == nil { + return nil + } + out := new(OrderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderStatus) DeepCopyInto(out *OrderStatus) { + *out = *in + if in.Authorizations != nil { + in, out := &in.Authorizations, &out.Authorizations + *out = make([]ACMEAuthorization, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.FailureTime != nil { + in, out := &in.FailureTime, &out.FailureTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderStatus. +func (in *OrderStatus) DeepCopy() *OrderStatus { + if in == nil { + return nil + } + out := new(OrderStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/doc.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/doc.go new file mode 100644 index 00000000000..16c92a1d987 --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=cert-manager.io +// +groupGoName=Certmanager + +// Package certmanager is the internal version of the API. +package certmanager + +const GroupName = "cert-manager.io" diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/const.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/const.go new file mode 100644 index 00000000000..7b8a8b0b678 --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/const.go @@ -0,0 +1,43 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import "time"
+
+const (
+	// minimum permitted certificate duration by cert-manager
+	MinimumCertificateDuration = time.Hour
+
+	// default certificate duration if Issuer.spec.duration is not set
+	DefaultCertificateDuration = time.Hour * 24 * 90
+
+	// minimum permitted 'renewBefore' duration before certificate expiration
+	MinimumRenewBefore = time.Minute * 5
+
+	// Deprecated: the default is now 2/3 of Certificate's duration
+	DefaultRenewBefore = time.Hour * 24 * 30
+)
+
+const (
+	// Default index key for the Secret reference for Token authentication
+	DefaultVaultTokenAuthSecretKey = "token"
+
+	// Default mount path location for Kubernetes ServiceAccount authentication
+	// (/v1/auth/kubernetes). The endpoint will then be called at `/login`, so
+	// if left as the default, `/v1/auth/kubernetes/login` will be called.
+	DefaultVaultKubernetesAuthMountPath = "/v1/auth/kubernetes"
+)
diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/doc.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/doc.go
new file mode 100644
index 00000000000..348211c6859
--- /dev/null
+++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2020 The cert-manager Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1 is the v1 version of the API.
+// +k8s:deepcopy-gen=package,register
+// +groupName=cert-manager.io
+// +groupGoName=Certmanager
+package v1
diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/generic_issuer.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/generic_issuer.go
new file mode 100644
index 00000000000..d757978fe0c
--- /dev/null
+++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/generic_issuer.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2020 The cert-manager Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + cmacme "github.com/cert-manager/cert-manager/pkg/apis/acme/v1" +) + +type GenericIssuer interface { + runtime.Object + metav1.Object + + GetObjectMeta() *metav1.ObjectMeta + GetSpec() *IssuerSpec + GetStatus() *IssuerStatus +} + +var _ GenericIssuer = &Issuer{} +var _ GenericIssuer = &ClusterIssuer{} + +func (c *ClusterIssuer) GetObjectMeta() *metav1.ObjectMeta { + return &c.ObjectMeta +} +func (c *ClusterIssuer) GetSpec() *IssuerSpec { + return &c.Spec +} +func (c *ClusterIssuer) GetStatus() *IssuerStatus { + return &c.Status +} +func (c *ClusterIssuer) SetSpec(spec IssuerSpec) { + c.Spec = spec +} +func (c *ClusterIssuer) SetStatus(status IssuerStatus) { + c.Status = status +} +func (c *ClusterIssuer) Copy() GenericIssuer { + return c.DeepCopy() +} +func (c *Issuer) GetObjectMeta() *metav1.ObjectMeta { + return &c.ObjectMeta +} +func (c *Issuer) GetSpec() *IssuerSpec { + return &c.Spec +} +func (c *Issuer) GetStatus() *IssuerStatus { + return &c.Status +} +func (c *Issuer) SetSpec(spec IssuerSpec) { + c.Spec = spec +} +func (c *Issuer) SetStatus(status IssuerStatus) { + c.Status = status +} +func (c *Issuer) Copy() GenericIssuer { + return c.DeepCopy() +} + +// TODO: refactor these functions away +func (i *IssuerStatus) ACMEStatus() *cmacme.ACMEIssuerStatus { + // this is an edge case, but this will prevent panics + if i == nil { + return &cmacme.ACMEIssuerStatus{} + } + if i.ACME == nil { + i.ACME = &cmacme.ACMEIssuerStatus{} + } + return i.ACME +} diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/register.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/register.go new file mode 100644 index 00000000000..fb745dbb642 --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/register.go @@ -0,0 +1,62 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/cert-manager/cert-manager/pkg/apis/certmanager" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: certmanager.GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. 
+	localSchemeBuilder.Register(addKnownTypes)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Certificate{},
+		&CertificateList{},
+		&Issuer{},
+		&IssuerList{},
+		&ClusterIssuer{},
+		&ClusterIssuerList{},
+		&CertificateRequest{},
+		&CertificateRequestList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types.go
new file mode 100644
index 00000000000..276722793e9
--- /dev/null
+++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types.go
@@ -0,0 +1,300 @@
+/*
+Copyright 2020 The cert-manager Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+const (
+
+	// Common label keys added to resources
+	// Label key that indicates that a resource is of interest to the
+	// cert-manager controller. By default this is set on the
+	// certificate.spec.secretName secret as well as on the temporary
+	// private key Secret. If using the SecretsFilteredCaching feature, you
+	// might want to set this (with a value of 'true') on any other Secrets
+	// that the cert-manager controller needs to read, such as issuer
+	// credentials Secrets.
+	// fao = 'for attention of'
+	// See https://github.com/cert-manager/cert-manager/blob/master/design/20221205-memory-management.md#risks-and-mitigations
+	PartOfCertManagerControllerLabelKey = "controller.cert-manager.io/fao"
+
+	// Common annotation keys added to resources
+
+	// Annotation key for DNS subjectAltNames.
+	AltNamesAnnotationKey = "cert-manager.io/alt-names"
+
+	// Annotation key for IP subjectAltNames.
+	IPSANAnnotationKey = "cert-manager.io/ip-sans"
+
+	// Annotation key for URI subjectAltNames.
+	URISANAnnotationKey = "cert-manager.io/uri-sans"
+
+	// Annotation key for certificate common name.
+	CommonNameAnnotationKey = "cert-manager.io/common-name"
+
+	// Annotation key for certificate duration.
+	DurationAnnotationKey = "cert-manager.io/duration"
+
+	// Annotation key for certificate renewBefore.
+	RenewBeforeAnnotationKey = "cert-manager.io/renew-before"
+
+	// Annotation key for emails subjectAltNames.
+	EmailsAnnotationKey = "cert-manager.io/email-sans"
+
+	// Annotation key for subject organizations.
+	SubjectOrganizationsAnnotationKey = "cert-manager.io/subject-organizations"
+
+	// Annotation key for subject organizational units.
+	SubjectOrganizationalUnitsAnnotationKey = "cert-manager.io/subject-organizationalunits"
+
+	// Annotation key for subject countries.
+	SubjectCountriesAnnotationKey = "cert-manager.io/subject-countries"
+
+	// Annotation key for subject provinces.
+	SubjectProvincesAnnotationKey = "cert-manager.io/subject-provinces"
+
+	// Annotation key for subject localities.
+	SubjectLocalitiesAnnotationKey = "cert-manager.io/subject-localities"
+
+	// Annotation key for subject street addresses.
+	SubjectStreetAddressesAnnotationKey = "cert-manager.io/subject-streetaddresses"
+
+	// Annotation key for subject postal codes.
+	SubjectPostalCodesAnnotationKey = "cert-manager.io/subject-postalcodes"
+
+	// Annotation key for subject serial number.
+	SubjectSerialNumberAnnotationKey = "cert-manager.io/subject-serialnumber"
+
+	// Annotation key for certificate key usages.
+	UsagesAnnotationKey = "cert-manager.io/usages"
+
+	// Annotation key for the 'name' of the Issuer resource.
+	IssuerNameAnnotationKey = "cert-manager.io/issuer-name"
+
+	// Annotation key for the 'kind' of the Issuer resource.
+	IssuerKindAnnotationKey = "cert-manager.io/issuer-kind"
+
+	// Annotation key for the 'group' of the Issuer resource.
+	IssuerGroupAnnotationKey = "cert-manager.io/issuer-group"
+
+	// Annotation key for the name of the certificate that a resource is related to.
+	CertificateNameKey = "cert-manager.io/certificate-name"
+
+	// Label key used to denote whether a Secret is named on a Certificate
+	// as a 'next private key' Secret resource.
+	IsNextPrivateKeySecretLabelKey = "cert-manager.io/next-private-key"
+
+	// Annotation key used to limit the number of CertificateRequests to be kept for a Certificate.
+	// Minimum value is 1.
+	// If unset, all CertificateRequests will be kept.
+	RevisionHistoryLimitAnnotationKey = "cert-manager.io/revision-history-limit"
+
+	// Annotation key used to set the PrivateKeyAlgorithm for a Certificate.
+	// If PrivateKeyAlgorithm is specified and `size` is not provided,
+	// a key size of 256 will be used for the `ECDSA` key algorithm and
+	// a key size of 2048 will be used for the `RSA` key algorithm.
+	// Key size is ignored when using the `Ed25519` key algorithm.
+	// If unset, an algorithm of `RSA` will be used.
+	PrivateKeyAlgorithmAnnotationKey = "cert-manager.io/private-key-algorithm"
+
+	// Annotation key used to set the PrivateKeyEncoding for a Certificate.
+	// If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1
+	// and PKCS#8, respectively.
+	// If unset, an encoding of `PKCS1` will be used.
+	PrivateKeyEncodingAnnotationKey = "cert-manager.io/private-key-encoding"
+
+	// Annotation key used to set the size of the private key for a Certificate.
+	// If PrivateKeyAlgorithm is set to `RSA`, valid values are `2048`, `4096` or `8192`,
+	// and will default to `2048` if not specified.
+	// If PrivateKeyAlgorithm is set to `ECDSA`, valid values are `256`, `384` or `521`,
+	// and will default to `256` if not specified.
+	// If PrivateKeyAlgorithm is set to `Ed25519`, Size is ignored.
+	// No other values are allowed.
+	PrivateKeySizeAnnotationKey = "cert-manager.io/private-key-size"
+
+	// Annotation key used to set the PrivateKeyRotationPolicy for a Certificate.
+	// If unset, a policy of `Never` will be used.
+	PrivateKeyRotationPolicyAnnotationKey = "cert-manager.io/private-key-rotation-policy"
+)
+
+const (
+	// IngressIssuerNameAnnotationKey holds the issuerNameAnnotation value which can be
+	// used to override the issuer specified on the created Certificate resource.
+	IngressIssuerNameAnnotationKey = "cert-manager.io/issuer"
+	// IngressClusterIssuerNameAnnotationKey holds the clusterIssuerNameAnnotation value which
+	// can be used to override the issuer specified on the created Certificate resource. The Certificate
+	// will reference the specified *ClusterIssuer* instead of a normal issuer.
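+	// For example (illustrative only; 'letsencrypt-prod' is a placeholder
+	// name), an Ingress would select a ClusterIssuer with:
+	//
+	//	metadata:
+	//	  annotations:
+	//	    cert-manager.io/cluster-issuer: letsencrypt-prod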
+	IngressClusterIssuerNameAnnotationKey = "cert-manager.io/cluster-issuer"
+	// IngressACMEIssuerHTTP01IngressClassAnnotationKey holds the acmeIssuerHTTP01IngressClassAnnotation value
+	// which can be used to override the http01 ingressClass if the challenge type is set to http01.
+	IngressACMEIssuerHTTP01IngressClassAnnotationKey = "acme.cert-manager.io/http01-ingress-class"
+
+	// IngressClassAnnotationKey picks a specific "class" for the Ingress. The
+	// controller only processes Ingresses with this annotation either unset, or
+	// set to either the configured value or the empty string.
+	IngressClassAnnotationKey = "kubernetes.io/ingress.class"
+)
+
+// Annotation names for CertificateRequests
+const (
+	// Annotation added to CertificateRequest resources to denote the name of
+	// a Secret resource containing the private key used to sign the CSR stored
+	// on the resource.
+	// This annotation *may* not be present, and is used by the 'self signing'
+	// issuer type to self-sign certificates.
+	CertificateRequestPrivateKeyAnnotationKey = "cert-manager.io/private-key-secret-name"
+
+	// Annotation to declare the CertificateRequest "revision", belonging to a Certificate resource
+	CertificateRequestRevisionAnnotationKey = "cert-manager.io/certificate-revision"
+)
+
+const (
+	// IssueTemporaryCertificateAnnotation is an annotation that can be added to
+	// Certificate resources.
+	// If it is present, a temporary internally signed certificate will be
+	// stored in the target Secret resource whilst the real Issuer is processing
+	// the certificate request.
+	IssueTemporaryCertificateAnnotation = "cert-manager.io/issue-temporary-certificate"
+)
+
+// Common/known resource kinds.
+const (
+	ClusterIssuerKind      = "ClusterIssuer"
+	IssuerKind             = "Issuer"
+	CertificateKind        = "Certificate"
+	CertificateRequestKind = "CertificateRequest"
+)
+
+const (
+	// WantInjectAnnotation is the annotation that specifies that a particular
+	// object wants injection of CAs. It takes the form of a reference to a certificate
+	// as namespace/name. The certificate is expected to have the is-serving-for annotations.
+	WantInjectAnnotation = "cert-manager.io/inject-ca-from"
+
+	// WantInjectAPIServerCAAnnotation will - if set to "true" - make the cainjector
+	// inject the CA certificate for the Kubernetes apiserver into the resource.
+	// It discovers the apiserver's CA by inspecting the service account credentials
+	// mounted into the cainjector pod.
+	WantInjectAPIServerCAAnnotation = "cert-manager.io/inject-apiserver-ca"
+
+	// WantInjectFromSecretAnnotation is the annotation that specifies that a particular
+	// object wants injection of CAs. It takes the form of a reference to a Secret
+	// as namespace/name.
+	WantInjectFromSecretAnnotation = "cert-manager.io/inject-ca-from-secret"
+
+	// AllowsInjectionFromSecretAnnotation is an annotation that must be added
+	// to Secret resources that want to denote that they can be directly
+	// injected into injectables that have an `inject-ca-from-secret` annotation.
+	// If an injectable references a Secret that does NOT have this annotation,
+	// the cainjector will refuse to inject the secret.
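+	// For example (an illustrative sketch; the "true" value is an assumption
+	// based on the boolean convention used by the annotations above), a Secret
+	// opting in to direct injection would be annotated with:
+	//
+	//	cert-manager.io/allow-direct-injection: "true"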
+ AllowsInjectionFromSecretAnnotation = "cert-manager.io/allow-direct-injection" +) + +// Issuer specific Annotations +const ( + // VenafiCustomFieldsAnnotationKey is the annotation that passes on JSON encoded custom fields to the Venafi issuer + // This will only work with Venafi TPP v19.3 and higher + // The value is an array with objects containing the name and value keys + // for example: `[{"name": "custom-field", "value": "custom-value"}]` + VenafiCustomFieldsAnnotationKey = "venafi.cert-manager.io/custom-fields" + + // VenafiPickupIDAnnotationKey is the annotation key used to record the + // Venafi Pickup ID of a certificate signing request that has been submitted + // to the Venafi API for collection later. + VenafiPickupIDAnnotationKey = "venafi.cert-manager.io/pickup-id" +) + +// KeyUsage specifies valid usage contexts for keys. +// See: +// https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +// +// Valid KeyUsage values are as follows: +// "signing", +// "digital signature", +// "content commitment", +// "key encipherment", +// "key agreement", +// "data encipherment", +// "cert sign", +// "crl sign", +// "encipher only", +// "decipher only", +// "any", +// "server auth", +// "client auth", +// "code signing", +// "email protection", +// "s/mime", +// "ipsec end system", +// "ipsec tunnel", +// "ipsec user", +// "timestamping", +// "ocsp signing", +// "microsoft sgc", +// "netscape sgc" +// +kubebuilder:validation:Enum="signing";"digital signature";"content commitment";"key encipherment";"key agreement";"data encipherment";"cert sign";"crl sign";"encipher only";"decipher only";"any";"server auth";"client auth";"code signing";"email protection";"s/mime";"ipsec end system";"ipsec tunnel";"ipsec user";"timestamping";"ocsp signing";"microsoft sgc";"netscape sgc" +type KeyUsage string + +const ( + UsageSigning KeyUsage = "signing" + UsageDigitalSignature KeyUsage = "digital signature" + UsageContentCommitment KeyUsage = "content commitment" + UsageKeyEncipherment KeyUsage = "key encipherment" + UsageKeyAgreement KeyUsage = "key agreement" + UsageDataEncipherment KeyUsage = "data encipherment" + UsageCertSign KeyUsage = "cert sign" + UsageCRLSign KeyUsage = "crl sign" + UsageEncipherOnly KeyUsage = "encipher only" + UsageDecipherOnly KeyUsage = "decipher only" + UsageAny KeyUsage = "any" + UsageServerAuth KeyUsage = "server auth" + UsageClientAuth KeyUsage = "client auth" + UsageCodeSigning KeyUsage = "code signing" + UsageEmailProtection KeyUsage = "email protection" + UsageSMIME KeyUsage = "s/mime" + UsageIPsecEndSystem KeyUsage = "ipsec end system" + UsageIPsecTunnel KeyUsage = "ipsec tunnel" + UsageIPsecUser KeyUsage = "ipsec user" + UsageTimestamping KeyUsage = "timestamping" + UsageOCSPSigning KeyUsage = "ocsp signing" + UsageMicrosoftSGC KeyUsage = "microsoft sgc" + UsageNetscapeSGC KeyUsage = "netscape sgc" +) + +// Keystore specific secret keys +const ( + // PKCS12SecretKey is the name of the data entry in the Secret resource + // used to store the p12 file. + PKCS12SecretKey = "keystore.p12" + // Data Entry Name in the Secret resource for PKCS12 containing Certificate Authority + PKCS12TruststoreKey = "truststore.p12" + + // JKSSecretKey is the name of the data entry in the Secret resource + // used to store the jks file. 
+ JKSSecretKey = "keystore.jks" + // Data Entry Name in the Secret resource for JKS containing Certificate Authority + JKSTruststoreKey = "truststore.jks" +) + +// DefaultKeyUsages contains the default list of key usages +func DefaultKeyUsages() []KeyUsage { + // The serverAuth EKU is required as of Mac OS Catalina: https://support.apple.com/en-us/HT210176 + // Without this usage, certificates will _always_ flag a warning in newer Mac OS browsers. + // We don't explicitly add it here as it leads to strange behaviour when a user sets isCA: true + // (in which case, 'serverAuth' on the CA can break a lot of clients). + // CAs can (and often do) opt to automatically add usages. + return []KeyUsage{UsageDigitalSignature, UsageKeyEncipherment} +} diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go new file mode 100644 index 00000000000..64e789443bb --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go @@ -0,0 +1,598 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + cmmeta "github.com/cert-manager/cert-manager/pkg/apis/meta/v1" +) + +// NOTE: Be mindful of adding OpenAPI validation- see https://github.com/cert-manager/cert-manager/issues/3644 + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:storageversion + +// A Certificate resource should be created to ensure an up to date and signed +// X.509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. +// +// The stored certificate will be renewed before it expires (as configured by `spec.renewBefore`). +type Certificate struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Specification of the desired state of the Certificate resource. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec CertificateSpec `json:"spec"` + + // Status of the Certificate. + // This is set and managed automatically. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status CertificateStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CertificateList is a list of Certificates. +type CertificateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	// List of Certificates
+	Items []Certificate `json:"items"`
+}
+
+// +kubebuilder:validation:Enum=RSA;ECDSA;Ed25519
+type PrivateKeyAlgorithm string
+
+const (
+	// RSA private key algorithm.
+	RSAKeyAlgorithm PrivateKeyAlgorithm = "RSA"
+
+	// ECDSA private key algorithm.
+	ECDSAKeyAlgorithm PrivateKeyAlgorithm = "ECDSA"
+
+	// Ed25519 private key algorithm.
+	Ed25519KeyAlgorithm PrivateKeyAlgorithm = "Ed25519"
+)
+
+// +kubebuilder:validation:Enum=PKCS1;PKCS8
+type PrivateKeyEncoding string
+
+const (
+	// PKCS1 private key encoding.
+	// PKCS1 produces a PEM block that contains the private key algorithm
+	// in the header and the private key in the body. A key that uses this
+	// encoding can be recognised by its `BEGIN RSA PRIVATE KEY` or `BEGIN EC PRIVATE KEY` header.
+	// NOTE: This encoding is not supported for Ed25519 keys. Attempts to use
+	// this encoding with an Ed25519 key will be ignored, and PKCS8 will be used instead.
+	PKCS1 PrivateKeyEncoding = "PKCS1"
+
+	// PKCS8 private key encoding.
+	// PKCS8 produces a PEM block with a static header and both the private
+	// key algorithm and the private key in the body. A key that uses this
+	// encoding can be recognised by its `BEGIN PRIVATE KEY` header.
+	PKCS8 PrivateKeyEncoding = "PKCS8"
+)
+
+// CertificateSpec defines the desired state of Certificate.
+//
+// NOTE: The specification contains a lot of "requested" certificate attributes; it is
+// important to note that the issuer can choose to ignore or change any of
+// these requested attributes. How the issuer maps a certificate request to a
+// signed certificate is the full responsibility of the issuer itself. For example,
+// as an edge case, an issuer that inverts the isCA value is free to do so.
+//
+// A valid Certificate requires at least one of a CommonName, LiteralSubject, DNSName, or
+// URI to be set.
+type CertificateSpec struct {
+	// Requested set of X509 certificate subject attributes.
+	// More info: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+	//
+	// The common name attribute is specified separately in the `commonName` field.
+	// Cannot be set if the `literalSubject` field is set.
+	// +optional
+	Subject *X509Subject `json:"subject,omitempty"`
+
+	// Requested X.509 certificate subject, represented using the LDAP "String
+	// Representation of a Distinguished Name" [1].
+	// Important: the LDAP string format also specifies the order of the attributes
+	// in the subject, which is important when issuing certs for LDAP authentication.
+	// Example: `CN=foo,DC=corp,DC=example,DC=com`
+	// More info [1]: https://datatracker.ietf.org/doc/html/rfc4514
+	// More info: https://github.com/cert-manager/cert-manager/issues/3203
+	// More info: https://github.com/cert-manager/cert-manager/issues/4424
+	//
+	// Cannot be set if the `subject` or `commonName` field is set.
+	// This is an Alpha Feature and is only enabled with the
+	// `--feature-gates=LiteralCertificateSubject=true` option set on both
+	// the controller and webhook components.
+	// +optional
+	LiteralSubject string `json:"literalSubject,omitempty"`
+
+	// Requested common name X509 certificate subject attribute.
+ // More info: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + // NOTE: TLS clients will ignore this value when any subject alternative name is + // set (see https://tools.ietf.org/html/rfc6125#section-6.4.4). + // + // Should have a length of 64 characters or fewer to avoid generating invalid CSRs. + // Cannot be set if the `literalSubject` field is set. + // +optional + CommonName string `json:"commonName,omitempty"` + + // Requested 'duration' (i.e. lifetime) of the Certificate. Note that the + // issuer may choose to ignore the requested duration, just like any other + // requested attribute. + // + // If unset, this defaults to 90 days. + // Minimum accepted duration is 1 hour. + // Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration. + // +optional + Duration *metav1.Duration `json:"duration,omitempty"` + + // How long before the currently issued certificate's expiry cert-manager should + // renew the certificate. For example, if a certificate is valid for 60 minutes, + // and `renewBefore=10m`, cert-manager will begin to attempt to renew the certificate + // 50 minutes after it was issued (i.e. when there are 10 minutes remaining until + // the certificate is no longer valid). + // + // NOTE: The actual lifetime of the issued certificate is used to determine the + // renewal time. If an issuer returns a certificate with a different lifetime than + // the one requested, cert-manager will use the lifetime of the issued certificate. + // + // If unset, this defaults to 1/3 of the issued certificate's lifetime. + // Minimum accepted value is 5 minutes. + // Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration. + // +optional + RenewBefore *metav1.Duration `json:"renewBefore,omitempty"` + + // Requested DNS subject alternative names. + // +optional + DNSNames []string `json:"dnsNames,omitempty"` + + // Requested IP address subject alternative names. + // +optional + IPAddresses []string `json:"ipAddresses,omitempty"` + + // Requested URI subject alternative names. + // +optional + URIs []string `json:"uris,omitempty"` + + // Requested email subject alternative names. + // +optional + EmailAddresses []string `json:"emailAddresses,omitempty"` + + // Name of the Secret resource that will be automatically created and + // managed by this Certificate resource. It will be populated with a + // private key and certificate, signed by the denoted issuer. The Secret + // resource lives in the same namespace as the Certificate resource. + SecretName string `json:"secretName"` + + // Defines annotations and labels to be copied to the Certificate's Secret. + // Labels and annotations on the Secret will be changed as they appear on the + // SecretTemplate when added or removed. SecretTemplate annotations are added + // in conjunction with, and cannot overwrite, the base set of annotations + // cert-manager sets on the Certificate's Secret. + // +optional + SecretTemplate *CertificateSecretTemplate `json:"secretTemplate,omitempty"` + + // Additional keystore output formats to be stored in the Certificate's Secret. + // +optional + Keystores *CertificateKeystores `json:"keystores,omitempty"` + + // Reference to the issuer responsible for issuing the certificate. + // If the issuer is namespace-scoped, it must be in the same namespace + // as the Certificate. If the issuer is cluster-scoped, it can be used + // from any namespace. 
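+	//
+	// For example (an illustrative sketch; 'my-issuer' is a placeholder name,
+	// and the field names follow the issuer-name/kind/group annotations above):
+	//
+	//	issuerRef:
+	//	  name: my-issuer
+	//	  kind: ClusterIssuer
+	//	  group: cert-manager.io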
+ //
+ // The `name` field of the reference must always be specified.
+ IssuerRef cmmeta.ObjectReference `json:"issuerRef"`
+
+ // Requested basic constraints isCA value.
+ // The isCA value is used to set the `isCA` field on the created CertificateRequest
+ // resources. Note that the issuer may choose to ignore the requested isCA value, just
+ // like any other requested attribute.
+ //
+ // If true, this will automatically add the `cert sign` usage to the list
+ // of requested `usages`.
+ // +optional
+ IsCA bool `json:"isCA,omitempty"`
+
+ // Requested key usages and extended key usages.
+ // These usages are used to set the `usages` field on the created CertificateRequest
+ // resources. If `encodeUsagesInRequest` is unset or set to `true`, the usages
+ // will additionally be encoded in the `request` field which contains the CSR blob.
+ //
+ // If unset, defaults to `digital signature` and `key encipherment`.
+ // +optional
+ Usages []KeyUsage `json:"usages,omitempty"`
+
+ // Private key options. These include the key algorithm and size, the used
+ // encoding and the rotation policy.
+ // +optional
+ PrivateKey *CertificatePrivateKey `json:"privateKey,omitempty"`
+
+ // Whether the KeyUsage and ExtKeyUsage extensions should be set in the encoded CSR.
+ //
+ // This option defaults to true, and should only be disabled if the target
+ // issuer does not support CSRs with these X509 KeyUsage/ExtKeyUsage extensions.
+ // +optional
+ EncodeUsagesInRequest *bool `json:"encodeUsagesInRequest,omitempty"`
+
+ // The maximum number of CertificateRequest revisions that are maintained in
+ // the Certificate's history. Each revision represents a single `CertificateRequest`
+ // created by this Certificate, either when it was created, renewed, or Spec
+ // was changed. Revisions will be removed, oldest first, if the number of
+ // revisions exceeds this number.
+ //
+ // If set, revisionHistoryLimit must be a value of `1` or greater.
+ // If unset (`nil`), revisions will not be garbage collected.
+ // Default value is `nil`.
+ // +optional
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+
+ // Defines extra output formats of the private key and signed certificate chain
+ // to be written to this Certificate's target Secret.
+ //
+ // This is an Alpha Feature and is only enabled with the
+ // `--feature-gates=AdditionalCertificateOutputFormats=true` option set on both
+ // the controller and webhook components.
+ // +optional
+ AdditionalOutputFormats []CertificateAdditionalOutputFormat `json:"additionalOutputFormats,omitempty"`
+}
+
+// CertificatePrivateKey contains configuration options for private keys
+// used by the Certificate controller.
+// These include the key algorithm and size, the used encoding and the
+// rotation policy.
+type CertificatePrivateKey struct {
+ // RotationPolicy controls how private keys should be regenerated when a
+ // re-issuance is being processed.
+ //
+ // If set to `Never`, a private key will only be generated if one does not
+ // already exist in the target `spec.secretName`. If one does exist but it
+ // does not have the correct algorithm or size, a warning will be raised
+ // to await user intervention.
+ // If set to `Always`, a private key matching the specified requirements
+ // will be generated whenever a re-issuance occurs.
+ // Default is `Never` for backward compatibility.
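+ //
+ // A minimal sketch (hypothetical values, not part of the upstream docs)
+ // opting in to rotation on every re-issuance:
+ //
+ //	PrivateKey: &CertificatePrivateKey{
+ //		RotationPolicy: RotationPolicyAlways,
+ //	}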
+ // +optional
+ RotationPolicy PrivateKeyRotationPolicy `json:"rotationPolicy,omitempty"`
+
+ // The private key cryptography standards (PKCS) encoding in which this
+ // certificate's private key should be encoded.
+ //
+ // If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1
+ // and PKCS#8, respectively.
+ // Defaults to `PKCS1` if not specified.
+ // +optional
+ Encoding PrivateKeyEncoding `json:"encoding,omitempty"`
+
+ // Algorithm is the private key algorithm of the corresponding private key
+ // for this certificate.
+ //
+ // If provided, allowed values are `RSA`, `ECDSA` or `Ed25519`.
+ // If `algorithm` is specified and `size` is not provided,
+ // a key size of 2048 will be used for the `RSA` key algorithm and
+ // a key size of 256 will be used for the `ECDSA` key algorithm.
+ // Key size is ignored when using the `Ed25519` key algorithm.
+ // +optional
+ Algorithm PrivateKeyAlgorithm `json:"algorithm,omitempty"`
+
+ // Size is the key bit size of the corresponding private key for this certificate.
+ //
+ // If `algorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`,
+ // and will default to `2048` if not specified.
+ // If `algorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`,
+ // and will default to `256` if not specified.
+ // If `algorithm` is set to `Ed25519`, Size is ignored.
+ // No other values are allowed.
+ // +optional
+ Size int `json:"size,omitempty"`
+}
+
+// Denotes how private keys should be generated or sourced when a Certificate
+// is being issued.
+// +kubebuilder:validation:Enum=Never;Always
+type PrivateKeyRotationPolicy string
+
+var (
+ // RotationPolicyNever means a private key will only be generated if one
+ // does not already exist in the target `spec.secretName`.
+ // If one does exist but it does not have the correct algorithm or size,
+ // a warning will be raised to await user intervention.
+ RotationPolicyNever PrivateKeyRotationPolicy = "Never"
+
+ // RotationPolicyAlways means a private key matching the specified
+ // requirements will be generated whenever a re-issuance occurs.
+ RotationPolicyAlways PrivateKeyRotationPolicy = "Always"
+)
+
+// CertificateOutputFormatType specifies which additional output formats should
+// be written to the Certificate's target Secret.
+// Allowed values are `DER` or `CombinedPEM`.
+// When Type is set to `DER` an additional entry `key.der` will be written to
+// the Secret, containing the binary format of the private key.
+// When Type is set to `CombinedPEM` an additional entry `tls-combined.pem`
+// will be written to the Secret, containing the PEM formatted private key and
+// signed certificate chain (tls.key + tls.crt concatenated).
+// +kubebuilder:validation:Enum=DER;CombinedPEM
+type CertificateOutputFormatType string
+
+const (
+ // CertificateOutputFormatDERKey is the name of the data entry in the Secret
+ // resource used to store the DER formatted private key.
+ CertificateOutputFormatDERKey string = "key.der"
+
+ // CertificateOutputFormatDER writes the Certificate's private key in DER
+ // binary format to the `key.der` target Secret Data key.
+ CertificateOutputFormatDER CertificateOutputFormatType = "DER"
+
+ // CertificateOutputFormatCombinedPEMKey is the name of the data entry in the Secret
+ // resource used to store the combined PEM (key + signed certificate).
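+ //
+ // For illustration only (a sketch assuming the
+ // AdditionalCertificateOutputFormats feature gate is enabled), a spec
+ // requesting both extra formats:
+ //
+ //	AdditionalOutputFormats: []CertificateAdditionalOutputFormat{
+ //		{Type: CertificateOutputFormatDER},
+ //		{Type: CertificateOutputFormatCombinedPEM},
+ //	}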
+ CertificateOutputFormatCombinedPEMKey string = "tls-combined.pem"
+
+ // CertificateOutputFormatCombinedPEM writes the Certificate's signed
+ // certificate chain and private key, in PEM format, to the
+ // `tls-combined.pem` target Secret Data key. The value at this key will
+ // include the private key PEM document, followed by at least one new line
+ // character, followed by the chain of signed certificate PEM documents
+ // (`<private key> + \n + <signed certificate chain>`).
+ CertificateOutputFormatCombinedPEM CertificateOutputFormatType = "CombinedPEM"
+)
+
+// CertificateAdditionalOutputFormat defines an additional output format of a
+// Certificate resource. These contain supplementary data formats of the signed
+// certificate chain and paired private key.
+type CertificateAdditionalOutputFormat struct {
+ // Type is the name of the format type that should be written to the
+ // Certificate's target Secret.
+ Type CertificateOutputFormatType `json:"type"`
+}
+
+// X509Subject is the full X509 name specification.
+type X509Subject struct {
+ // Organizations to be used on the Certificate.
+ // +optional
+ Organizations []string `json:"organizations,omitempty"`
+ // Countries to be used on the Certificate.
+ // +optional
+ Countries []string `json:"countries,omitempty"`
+ // Organizational Units to be used on the Certificate.
+ // +optional
+ OrganizationalUnits []string `json:"organizationalUnits,omitempty"`
+ // Cities to be used on the Certificate.
+ // +optional
+ Localities []string `json:"localities,omitempty"`
+ // State/Provinces to be used on the Certificate.
+ // +optional
+ Provinces []string `json:"provinces,omitempty"`
+ // Street addresses to be used on the Certificate.
+ // +optional
+ StreetAddresses []string `json:"streetAddresses,omitempty"`
+ // Postal codes to be used on the Certificate.
+ // +optional
+ PostalCodes []string `json:"postalCodes,omitempty"`
+ // Serial number to be used on the Certificate.
+ // +optional
+ SerialNumber string `json:"serialNumber,omitempty"`
+}
+
+// CertificateKeystores configures additional keystore output formats to be
+// created in the Certificate's output Secret.
+type CertificateKeystores struct {
+ // JKS configures options for storing a JKS keystore in the
+ // `spec.secretName` Secret resource.
+ // +optional
+ JKS *JKSKeystore `json:"jks,omitempty"`
+
+ // PKCS12 configures options for storing a PKCS12 keystore in the
+ // `spec.secretName` Secret resource.
+ // +optional
+ PKCS12 *PKCS12Keystore `json:"pkcs12,omitempty"`
+}
+
+// JKSKeystore configures options for storing a JKS keystore in the `spec.secretName`
+// Secret resource.
+type JKSKeystore struct {
+ // Create enables JKS keystore creation for the Certificate.
+ // If true, a file named `keystore.jks` will be created in the target
+ // Secret resource, encrypted using the password stored in
+ // `passwordSecretRef`.
+ // The keystore file will be updated immediately.
+ // If the issuer provided a CA certificate, a file named `truststore.jks`
+ // will also be created in the target Secret resource, encrypted using the
+ // password stored in `passwordSecretRef`,
+ // containing the issuing Certificate Authority.
+ Create bool `json:"create"`
+
+ // PasswordSecretRef is a reference to a key in a Secret resource
+ // containing the password used to encrypt the JKS keystore.
+ PasswordSecretRef cmmeta.SecretKeySelector `json:"passwordSecretRef"`
+}
+
+// PKCS12Keystore configures options for storing a PKCS12 keystore in the
+// `spec.secretName` Secret resource.
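+//
+// An illustrative sketch (hypothetical Secret name; assumes the meta/v1
+// SecretKeySelector shape vendored in this patch) enabling a PKCS12
+// keystore alongside the default outputs:
+//
+//	Keystores: &CertificateKeystores{
+//		PKCS12: &PKCS12Keystore{
+//			Create: true,
+//			PasswordSecretRef: cmmeta.SecretKeySelector{
+//				LocalObjectReference: cmmeta.LocalObjectReference{Name: "keystore-password"},
+//				Key: "password",
+//			},
+//		},
+//	}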
+type PKCS12Keystore struct {
+ // Create enables PKCS12 keystore creation for the Certificate.
+ // If true, a file named `keystore.p12` will be created in the target
+ // Secret resource, encrypted using the password stored in
+ // `passwordSecretRef`.
+ // The keystore file will be updated immediately.
+ // If the issuer provided a CA certificate, a file named `truststore.p12` will
+ // also be created in the target Secret resource, encrypted using the
+ // password stored in `passwordSecretRef`, containing the issuing Certificate
+ // Authority.
+ Create bool `json:"create"`
+
+ // PasswordSecretRef is a reference to a key in a Secret resource
+ // containing the password used to encrypt the PKCS12 keystore.
+ PasswordSecretRef cmmeta.SecretKeySelector `json:"passwordSecretRef"`
+}
+
+// CertificateStatus defines the observed state of Certificate.
+type CertificateStatus struct {
+ // List of status conditions to indicate the status of certificates.
+ // Known condition types are `Ready` and `Issuing`.
+ // +listType=map
+ // +listMapKey=type
+ // +optional
+ Conditions []CertificateCondition `json:"conditions,omitempty"`
+
+ // LastFailureTime is set only if the latest issuance for this
+ // Certificate failed and contains the time of the failure. If an
+ // issuance has failed, the delay till the next issuance will be
+ // calculated using the formula time.Hour * 2 ^ (failedIssuanceAttempts -
+ // 1). If the latest issuance has succeeded, this field will be unset.
+ // +optional
+ LastFailureTime *metav1.Time `json:"lastFailureTime,omitempty"`
+
+ // The time after which the certificate stored in the secret named
+ // by this resource in `spec.secretName` is valid.
+ // +optional
+ NotBefore *metav1.Time `json:"notBefore,omitempty"`
+
+ // The expiration time of the certificate stored in the secret named
+ // by this resource in `spec.secretName`.
+ // +optional
+ NotAfter *metav1.Time `json:"notAfter,omitempty"`
+
+ // RenewalTime is the time at which the certificate will be next
+ // renewed.
+ // If not set, no upcoming renewal is scheduled.
+ // +optional
+ RenewalTime *metav1.Time `json:"renewalTime,omitempty"`
+
+ // The current 'revision' of the certificate as issued.
+ //
+ // When a CertificateRequest resource is created, it will have the
+ // `cert-manager.io/certificate-revision` annotation set to one greater than the
+ // current value of this field.
+ //
+ // Upon issuance, this field will be set to the value of the annotation
+ // on the CertificateRequest resource used to issue the certificate.
+ //
+ // Persisting the value on the CertificateRequest resource allows the
+ // certificates controller to know whether a request is part of an old
+ // issuance or if it is part of the ongoing revision's issuance by
+ // checking if the revision value in the annotation is greater than this
+ // field.
+ // +optional
+ Revision *int `json:"revision,omitempty"`
+
+ // The name of the Secret resource containing the private key to be used
+ // for the next certificate iteration.
+ // The keymanager controller will automatically set this field if the
+ // `Issuing` condition is set to `True`.
+ // It will automatically unset this field when the Issuing condition is
+ // not set or False.
+ // +optional
+ NextPrivateKeySecretName *string `json:"nextPrivateKeySecretName,omitempty"`
+
+ // The number of continuous failed issuance attempts up till now. This
+ // field gets removed (if set) on a successful issuance and gets set to
+ // 1 if unset and an issuance has failed. If an issuance has failed, the
+ // delay till the next issuance will be calculated using the formula
+ // time.Hour * 2 ^ (failedIssuanceAttempts - 1).
+ // +optional
+ FailedIssuanceAttempts *int `json:"failedIssuanceAttempts,omitempty"`
+}
+
+// CertificateCondition contains condition information for a Certificate.
+type CertificateCondition struct {
+ // Type of the condition, known values are (`Ready`, `Issuing`).
+ Type CertificateConditionType `json:"type"`
+
+ // Status of the condition, one of (`True`, `False`, `Unknown`).
+ Status cmmeta.ConditionStatus `json:"status"`
+
+ // LastTransitionTime is the timestamp corresponding to the last status
+ // change of this condition.
+ // +optional
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+
+ // Reason is a brief machine-readable explanation for the condition's last
+ // transition.
+ // +optional
+ Reason string `json:"reason,omitempty"`
+
+ // Message is a human-readable description of the details of the last
+ // transition, complementing reason.
+ // +optional
+ Message string `json:"message,omitempty"`
+
+ // If set, this represents the .metadata.generation that the condition was
+ // set based upon.
+ // For instance, if .metadata.generation is currently 12, but the
+ // .status.condition[x].observedGeneration is 9, the condition is out of date
+ // with respect to the current state of the Certificate.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+}
+
+// CertificateConditionType represents a Certificate condition value.
+type CertificateConditionType string
+
+const (
+ // CertificateConditionReady indicates that a certificate is ready for use.
+ // This is defined as:
+ // - The target secret exists
+ // - The target secret contains a certificate that has not expired
+ // - The target secret contains a private key valid for the certificate
+ // - The commonName and dnsNames attributes match those specified on the Certificate
+ CertificateConditionReady CertificateConditionType = "Ready"
+
+ // A condition added to Certificate resources when an issuance is required.
+ // This condition will be automatically added and set to true if:
+ // * No keypair data exists in the target Secret
+ // * The data stored in the Secret cannot be decoded
+ // * The private key and certificate do not have matching public keys
+ // * If a CertificateRequest for the current revision exists and the
+ // certificate data stored in the Secret does not match the
+ // `status.certificate` on the CertificateRequest.
+ // * If no CertificateRequest resource exists for the current revision,
+ // the options on the Certificate resource are compared against the
+ // X.509 data in the Secret, similar to what's done in earlier versions.
+ // If there is a mismatch, an issuance is triggered.
+ // This condition may also be added by external API consumers to trigger
+ // a re-issuance manually for any other reason.
+ //
+ // It will be removed by the 'issuing' controller upon completing issuance.
+ CertificateConditionIssuing CertificateConditionType = "Issuing"
+)
+
+// CertificateSecretTemplate defines the default labels and annotations
+// to be copied to the Kubernetes Secret resource named in `CertificateSpec.secretName`.
+type CertificateSecretTemplate struct {
+ // Annotations is a key-value map to be copied to the target Kubernetes Secret.
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty"`
+
+ // Labels is a key-value map to be copied to the target Kubernetes Secret.
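+ //
+ // An illustrative template sketch (hypothetical values, not part of the
+ // upstream docs):
+ //
+ //	SecretTemplate: &CertificateSecretTemplate{
+ //		Annotations: map[string]string{"example.com/team": "platform"},
+ //		Labels:      map[string]string{"app": "frontend"},
+ //	}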
+ // +optional + Labels map[string]string `json:"labels,omitempty"` +} diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go new file mode 100644 index 00000000000..59797c76c75 --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go @@ -0,0 +1,249 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + cmmeta "github.com/cert-manager/cert-manager/pkg/apis/meta/v1" +) + +const ( + // Pending indicates that a CertificateRequest is still in progress. + CertificateRequestReasonPending = "Pending" + + // Failed indicates that a CertificateRequest has failed permanently, + // either due to timing out or some other critical failure. + // The `status.failureTime` field should be set in this case. + CertificateRequestReasonFailed = "Failed" + + // Issued indicates that a CertificateRequest has been completed, and that + // the `status.certificate` field is set. + CertificateRequestReasonIssued = "Issued" + + // Denied is a Ready condition reason that indicates that a + // CertificateRequest has been denied, and the CertificateRequest will never + // be issued. + // The `status.failureTime` field should be set in this case. + CertificateRequestReasonDenied = "Denied" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:storageversion + +// A CertificateRequest is used to request a signed certificate from one of the +// configured issuers. +// +// All fields within the CertificateRequest's `spec` are immutable after creation. +// A CertificateRequest will either succeed or fail, as denoted by its `Ready` status +// condition and its `status.failureTime` field. +// +// A CertificateRequest is a one-shot resource, meaning it represents a single +// point in time request for a certificate and cannot be re-used. +// +k8s:openapi-gen=true +type CertificateRequest struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Specification of the desired state of the CertificateRequest resource. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec CertificateRequestSpec `json:"spec"` + + // Status of the CertificateRequest. + // This is set and managed automatically. + // Read-only. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Status CertificateRequestStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CertificateRequestList is a list of CertificateRequests.
+type CertificateRequestList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ // List of CertificateRequests
+ Items []CertificateRequest `json:"items"`
+}
+
+// CertificateRequestSpec defines the desired state of CertificateRequest.
+//
+// NOTE: The issuer can choose to ignore or change
+// any of the requested attributes. How the issuer maps a certificate request
+// to a signed certificate is the full responsibility of the issuer itself.
+// For example, as an edge case, an issuer that inverts the isCA value is
+// free to do so.
+type CertificateRequestSpec struct {
+ // Requested 'duration' (i.e. lifetime) of the Certificate. Note that the
+ // issuer may choose to ignore the requested duration, just like any other
+ // requested attribute.
+ // +optional
+ Duration *metav1.Duration `json:"duration,omitempty"`
+
+ // Reference to the issuer responsible for issuing the certificate.
+ // If the issuer is namespace-scoped, it must be in the same namespace
+ // as the Certificate. If the issuer is cluster-scoped, it can be used
+ // from any namespace.
+ //
+ // The `name` field of the reference must always be specified.
+ IssuerRef cmmeta.ObjectReference `json:"issuerRef"`
+
+ // The PEM-encoded X.509 certificate signing request to be submitted to the
+ // issuer for signing.
+ //
+ // If the CSR has a BasicConstraints extension, its isCA attribute must
+ // match the `isCA` value of this CertificateRequest.
+ // If the CSR has a KeyUsage extension, its key usages must match the
+ // key usages in the `usages` field of this CertificateRequest.
+ // If the CSR has an ExtKeyUsage extension, its extended key usages
+ // must match the extended key usages in the `usages` field of this
+ // CertificateRequest.
+ Request []byte `json:"request"`
+
+ // Requested basic constraints isCA value. Note that the issuer may choose
+ // to ignore the requested isCA value, just like any other requested attribute.
+ //
+ // NOTE: If the CSR in the `Request` field has a BasicConstraints extension,
+ // it must have the same isCA value as specified here.
+ //
+ // If true, this will automatically add the `cert sign` usage to the list
+ // of requested `usages`.
+ // +optional
+ IsCA bool `json:"isCA,omitempty"`
+
+ // Requested key usages and extended key usages.
+ //
+ // NOTE: If the CSR in the `Request` field uses the KeyUsage or
+ // ExtKeyUsage extension, these extensions must have the same values
+ // as specified here without any additional values.
+ //
+ // If unset, defaults to `digital signature` and `key encipherment`.
+ // +optional
+ Usages []KeyUsage `json:"usages,omitempty"`
+
+ // Username contains the name of the user that created the CertificateRequest.
+ // Populated by the cert-manager webhook on creation and immutable.
+ // +optional
+ Username string `json:"username,omitempty"`
+ // UID contains the uid of the user that created the CertificateRequest.
+ // Populated by the cert-manager webhook on creation and immutable.
+ // +optional
+ UID string `json:"uid,omitempty"`
+ // Groups contains group membership of the user that created the CertificateRequest.
+ // Populated by the cert-manager webhook on creation and immutable.
+ // +listType=atomic
+ // +optional
+ Groups []string `json:"groups,omitempty"`
+ // Extra contains extra attributes of the user that created the CertificateRequest.
+ // Populated by the cert-manager webhook on creation and immutable.
+ // +optional
+ Extra map[string][]string `json:"extra,omitempty"`
+}
+
+// CertificateRequestStatus defines the observed state of CertificateRequest and
+// resulting signed certificate.
+type CertificateRequestStatus struct {
+ // List of status conditions to indicate the status of a CertificateRequest.
+ // Known condition types are `Ready`, `InvalidRequest`, `Approved` and `Denied`.
+ // +listType=map
+ // +listMapKey=type
+ // +optional
+ Conditions []CertificateRequestCondition `json:"conditions,omitempty"`
+
+ // The PEM-encoded X.509 certificate resulting from the certificate
+ // signing request.
+ // If not set, the CertificateRequest has either not been completed or has
+ // failed. More information on failure can be found by checking the
+ // `conditions` field.
+ // +optional
+ Certificate []byte `json:"certificate,omitempty"`
+
+ // The PEM-encoded X.509 certificate of the signer, also known as the CA
+ // (Certificate Authority).
+ // This is set on a best-effort basis by different issuers.
+ // If not set, the CA is assumed to be unknown/not available.
+ // +optional
+ CA []byte `json:"ca,omitempty"`
+
+ // FailureTime stores the time that this CertificateRequest failed. This is
+ // used to influence garbage collection and back-off.
+ // +optional
+ FailureTime *metav1.Time `json:"failureTime,omitempty"`
+}
+
+// CertificateRequestCondition contains condition information for a CertificateRequest.
+type CertificateRequestCondition struct {
+ // Type of the condition, known values are (`Ready`, `InvalidRequest`,
+ // `Approved`, `Denied`).
+ Type CertificateRequestConditionType `json:"type"`
+
+ // Status of the condition, one of (`True`, `False`, `Unknown`).
+ Status cmmeta.ConditionStatus `json:"status"`
+
+ // LastTransitionTime is the timestamp corresponding to the last status
+ // change of this condition.
+ // +optional
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+
+ // Reason is a brief machine-readable explanation for the condition's last
+ // transition.
+ // +optional
+ Reason string `json:"reason,omitempty"`
+
+ // Message is a human-readable description of the details of the last
+ // transition, complementing reason.
+ // +optional
+ Message string `json:"message,omitempty"`
+}
+
+// CertificateRequestConditionType represents a CertificateRequest condition value.
+type CertificateRequestConditionType string
+
+const (
+ // CertificateRequestConditionReady indicates that a certificate is ready for use.
+ // This is defined as:
+ // - The target certificate exists in CertificateRequest.Status
+ CertificateRequestConditionReady CertificateRequestConditionType = "Ready"
+
+ // CertificateRequestConditionInvalidRequest indicates that a certificate
+ // signer has refused to sign the request due to at least one of the input
+ // parameters being invalid. Additional information about why the request
+ // was rejected can be found in the `reason` and `message` fields.
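+ //
+ // An illustrative status fragment (hypothetical reason and message,
+ // assuming the meta/v1 condition constants vendored in this patch) as a
+ // consumer might observe it:
+ //
+ //	CertificateRequestCondition{
+ //		Type:    CertificateRequestConditionInvalidRequest,
+ //		Status:  cmmeta.ConditionTrue,
+ //		Reason:  "InvalidRequest",
+ //		Message: "certificate signing request is invalid",
+ //	}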
+ CertificateRequestConditionInvalidRequest CertificateRequestConditionType = "InvalidRequest"
+
+ // CertificateRequestConditionApproved indicates that a certificate request
+ // is approved and ready for signing. Condition must never have a status of
+ // `False`, and cannot be modified once set. Cannot be set alongside
+ // `Denied`.
+ CertificateRequestConditionApproved CertificateRequestConditionType = "Approved"
+
+ // CertificateRequestConditionDenied indicates that a certificate request is
+ // denied, and must never be signed. Condition must never have a status of
+ // `False`, and cannot be modified once set. Cannot be set alongside
+ // `Approved`.
+ CertificateRequestConditionDenied CertificateRequestConditionType = "Denied"
+)
diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go
new file mode 100644
index 00000000000..c901d9e7a40
--- /dev/null
+++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go
@@ -0,0 +1,376 @@
+/*
+Copyright 2020 The cert-manager Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ cmacme "github.com/cert-manager/cert-manager/pkg/apis/acme/v1"
+ cmmeta "github.com/cert-manager/cert-manager/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:storageversion
+
+// A ClusterIssuer represents a certificate issuing authority which can be
+// referenced as part of `issuerRef` fields.
+// It is similar to an Issuer; however, it is cluster-scoped and therefore can
+// be referenced by resources that exist in *any* namespace, not just the same
+// namespace as the referent.
+type ClusterIssuer struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Desired state of the ClusterIssuer resource.
+ Spec IssuerSpec `json:"spec"`
+
+ // Status of the ClusterIssuer. This is set and managed automatically.
+ // +optional
+ Status IssuerStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterIssuerList is a list of ClusterIssuers.
+type ClusterIssuerList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ClusterIssuer `json:"items"`
+}
+
+// +genclient
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:storageversion
+
+// An Issuer represents a certificate issuing authority which can be
+// referenced as part of `issuerRef` fields.
+// It is scoped to a single namespace and can therefore only be referenced by
+// resources within the same namespace.
+type Issuer struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Desired state of the Issuer resource.
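+ //
+ // A minimal sketch (illustrative only): a self-signed issuer spec is
+ // simply
+ //
+ //	Spec: IssuerSpec{
+ //		IssuerConfig: IssuerConfig{SelfSigned: &SelfSignedIssuer{}},
+ //	}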
+ Spec IssuerSpec `json:"spec"`
+
+ // Status of the Issuer. This is set and managed automatically.
+ // +optional
+ Status IssuerStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IssuerList is a list of Issuers.
+type IssuerList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Issuer `json:"items"`
+}
+
+// IssuerSpec is the specification of an Issuer. This includes any
+// configuration required for the issuer.
+type IssuerSpec struct {
+ IssuerConfig `json:",inline"`
+}
+
+// The configuration for the issuer.
+// Only one of these can be set.
+type IssuerConfig struct {
+ // ACME configures this issuer to communicate with an RFC8555 (ACME) server
+ // to obtain signed X.509 certificates.
+ // +optional
+ ACME *cmacme.ACMEIssuer `json:"acme,omitempty"`
+
+ // CA configures this issuer to sign certificates using a signing CA keypair
+ // stored in a Secret resource.
+ // This is used to build internal PKIs that are managed by cert-manager.
+ // +optional
+ CA *CAIssuer `json:"ca,omitempty"`
+
+ // Vault configures this issuer to sign certificates using a HashiCorp Vault
+ // PKI backend.
+ // +optional
+ Vault *VaultIssuer `json:"vault,omitempty"`
+
+ // SelfSigned configures this issuer to 'self sign' certificates using the
+ // private key used to create the CertificateRequest object.
+ // +optional
+ SelfSigned *SelfSignedIssuer `json:"selfSigned,omitempty"`
+
+ // Venafi configures this issuer to sign certificates using a Venafi TPP
+ // or Venafi Cloud policy zone.
+ // +optional
+ Venafi *VenafiIssuer `json:"venafi,omitempty"`
+}
+
+// Configures an issuer to sign certificates using a Venafi TPP
+// or Cloud policy zone.
+type VenafiIssuer struct {
+ // Zone is the Venafi Policy Zone to use for this issuer.
+ // All requests made to the Venafi platform will be restricted by the named
+ // zone policy.
+ // This field is required.
+ Zone string `json:"zone"`
+
+ // TPP specifies Trust Protection Platform configuration settings.
+ // Only one of TPP or Cloud may be specified.
+ // +optional
+ TPP *VenafiTPP `json:"tpp,omitempty"`
+
+ // Cloud specifies the Venafi cloud configuration settings.
+ // Only one of TPP or Cloud may be specified.
+ // +optional
+ Cloud *VenafiCloud `json:"cloud,omitempty"`
+}
+
+// VenafiTPP defines connection configuration details for a Venafi TPP instance.
+type VenafiTPP struct {
+ // URL is the base URL for the vedsdk endpoint of the Venafi TPP instance,
+ // for example: "https://tpp.example.com/vedsdk".
+ URL string `json:"url"`
+
+ // CredentialsRef is a reference to a Secret containing the username and
+ // password for the TPP server.
+ // The secret must contain two keys, 'username' and 'password'.
+ CredentialsRef cmmeta.LocalObjectReference `json:"credentialsRef"`
+
+ // Base64-encoded bundle of PEM CAs which will be used to validate the certificate
+ // chain presented by the TPP server. Only used if using HTTPS; ignored for HTTP.
+ // If undefined, the certificate bundle in the cert-manager controller container
+ // is used to validate the chain.
+ // +optional
+ CABundle []byte `json:"caBundle,omitempty"`
+}
+
+// VenafiCloud defines connection configuration details for Venafi Cloud.
+type VenafiCloud struct {
+ // URL is the base URL for Venafi Cloud.
+ // Defaults to "https://api.venafi.cloud/v1".
+ // +optional
+ URL string `json:"url,omitempty"`
+
+ // APITokenSecretRef is a secret key selector for the Venafi Cloud API token.
+ APITokenSecretRef cmmeta.SecretKeySelector `json:"apiTokenSecretRef"`
+}
+
+// Configures an issuer to 'self sign' certificates using the
+// private key used to create the CertificateRequest object.
+type SelfSignedIssuer struct {
+ // The CRL distribution points field is an X.509 v3 certificate extension which identifies
+ // the location of the CRL from which the revocation of this certificate can be checked.
+ // If not set, certificates will be issued without CDP. Values are strings.
+ // +optional
+ CRLDistributionPoints []string `json:"crlDistributionPoints,omitempty"`
+}
+
+// Configures an issuer to sign certificates using a HashiCorp Vault
+// PKI backend.
+type VaultIssuer struct {
+ // Auth configures how cert-manager authenticates with the Vault server.
+ Auth VaultAuth `json:"auth"`
+
+ // Server is the connection address for the Vault server, e.g.: "https://vault.example.com:8200".
+ Server string `json:"server"`
+
+ // Path is the mount path of the Vault PKI backend's `sign` endpoint, e.g.:
+ // "my_pki_mount/sign/my-role-name".
+ Path string `json:"path"`
+
+ // Name of the Vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy, e.g.: "ns1".
+ // More about namespaces can be found here: https://www.vaultproject.io/docs/enterprise/namespaces
+ // +optional
+ Namespace string `json:"namespace,omitempty"`
+
+ // Base64-encoded bundle of PEM CAs which will be used to validate the certificate
+ // chain presented by Vault. Only used if using HTTPS to connect to Vault and
+ // ignored for HTTP connections.
+ // Mutually exclusive with CABundleSecretRef.
+ // If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in
+ // the cert-manager controller container is used to validate the TLS connection.
+ // +optional
+ CABundle []byte `json:"caBundle,omitempty"`
+
+ // Reference to a Secret containing a bundle of PEM-encoded CAs to use when
+ // verifying the certificate chain presented by Vault when using HTTPS.
+ // Mutually exclusive with CABundle.
+ // If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in
+ // the cert-manager controller container is used to validate the TLS connection.
+ // If no key for the Secret is specified, cert-manager will default to 'ca.crt'.
+ // +optional
+ CABundleSecretRef *cmmeta.SecretKeySelector `json:"caBundleSecretRef,omitempty"`
+}
+
+// VaultAuth is the configuration used to authenticate with a Vault server. The
+// order of precedence is [`tokenSecretRef`, `appRole` or `kubernetes`].
+type VaultAuth struct {
+ // TokenSecretRef authenticates with Vault by presenting a token.
+ // +optional
+ TokenSecretRef *cmmeta.SecretKeySelector `json:"tokenSecretRef,omitempty"`
+
+ // AppRole authenticates with Vault using the App Role auth mechanism,
+ // with the role and secret stored in a Kubernetes Secret resource.
+ // +optional
+ AppRole *VaultAppRole `json:"appRole,omitempty"`
+
+ // Kubernetes authenticates with Vault by passing the ServiceAccount
+ // token stored in the named Secret resource to the Vault server.
+ // +optional
+ Kubernetes *VaultKubernetesAuth `json:"kubernetes,omitempty"`
+}
+
+// VaultAppRole authenticates with Vault using the App Role auth mechanism,
+// with the role and secret stored in a Kubernetes Secret resource.
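+//
+// An illustrative sketch (hypothetical mount path and Secret name; assumes
+// the meta/v1 selector shape vendored in this patch):
+//
+//	AppRole: &VaultAppRole{
+//		Path:   "approle",
+//		RoleId: "example-role-id",
+//		SecretRef: cmmeta.SecretKeySelector{
+//			LocalObjectReference: cmmeta.LocalObjectReference{Name: "cert-manager-vault-approle"},
+//			Key: "secretId",
+//		},
+//	}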
+type VaultAppRole struct {
+ // Path where the App Role authentication backend is mounted in Vault, e.g.:
+ // "approle".
+ Path string `json:"path"`
+
+ // RoleID configured in the App Role authentication backend when setting
+ // up the authentication backend in Vault.
+ RoleId string `json:"roleId"`
+
+ // Reference to a key in a Secret that contains the App Role secret used
+ // to authenticate with Vault.
+ // The `key` field must be specified and denotes which entry within the Secret
+ // resource is used as the app role secret.
+ SecretRef cmmeta.SecretKeySelector `json:"secretRef"`
+}
+
+// Authenticate against Vault using a Kubernetes ServiceAccount token stored in
+// a Secret.
+type VaultKubernetesAuth struct {
+ // The Vault mountPath here is the mount path to use when authenticating with
+ // Vault. For example, setting a value of `/v1/auth/foo` will use the path
+ // `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the
+ // default value "/v1/auth/kubernetes" will be used.
+ // +optional
+ Path string `json:"mountPath,omitempty"`
+
+ // The required Secret field containing a Kubernetes ServiceAccount JWT used
+ // for authenticating with Vault. Use of 'ambient credentials' is not
+ // supported.
+ // +optional
+ SecretRef cmmeta.SecretKeySelector `json:"secretRef,omitempty"`
+ // Note: we don't use a pointer here for backwards compatibility.
+
+ // A reference to a service account that will be used to request a bound
+ // token (also known as "projected token"). Compared to using "secretRef",
+ // using this field means that you don't rely on statically bound tokens. To
+ // use this field, you must configure an RBAC rule to let cert-manager
+ // request a token.
+ // +optional
+ ServiceAccountRef *ServiceAccountRef `json:"serviceAccountRef,omitempty"`
+
+ // A required field containing the Vault Role to assume. A Role binds a
+ // Kubernetes ServiceAccount with a set of Vault policies.
+ Role string `json:"role"`
+}
+
+// ServiceAccountRef is a service account used by cert-manager to request a
+// token. The audience cannot be configured. The audience is generated by
+// cert-manager and takes the form `vault://namespace-name/issuer-name` for an
+// Issuer and `vault://issuer-name` for a ClusterIssuer. The expiration of the
+// token is also set by cert-manager to 10 minutes.
+type ServiceAccountRef struct {
+ // Name of the ServiceAccount used to request a token.
+ Name string `json:"name"`
+}
+
+type CAIssuer struct {
+ // SecretName is the name of the secret used to sign Certificates issued
+ // by this Issuer.
+ SecretName string `json:"secretName"`
+
+ // The CRL distribution points field is an X.509 v3 certificate extension which identifies
+ // the location of the CRL from which the revocation of this certificate can be checked.
+ // If not set, certificates will be issued without distribution points set.
+ // +optional
+ CRLDistributionPoints []string `json:"crlDistributionPoints,omitempty"`
+
+ // The OCSP server list is an X.509 v3 extension that defines a list of
+ // URLs of OCSP responders. The OCSP responders can be queried for the
+ // revocation status of an issued certificate. If not set, the
+ // certificate will be issued with no OCSP servers set. For example, an
+ // OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org".
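+ //
+ // An illustrative CA issuer sketch (hypothetical names) combining the
+ // fields above:
+ //
+ //	CA: &CAIssuer{
+ //		SecretName:            "ca-key-pair",
+ //		CRLDistributionPoints: []string{"http://crl.example.com/ca.crl"},
+ //		OCSPServers:           []string{"http://ocsp.example.com"},
+ //	}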
+ // +optional
+ OCSPServers []string `json:"ocspServers,omitempty"`
+}
+
+// IssuerStatus contains status information about an Issuer.
+type IssuerStatus struct {
+ // List of status conditions to indicate the status of an Issuer.
+ // Known condition types are `Ready`.
+ // +listType=map
+ // +listMapKey=type
+ // +optional
+ Conditions []IssuerCondition `json:"conditions,omitempty"`
+
+ // ACME-specific status options.
+ // This field should only be set if the Issuer is configured to use an ACME
+ // server to issue certificates.
+ // +optional
+ ACME *cmacme.ACMEIssuerStatus `json:"acme,omitempty"`
+}
+
+// IssuerCondition contains condition information for an Issuer.
+type IssuerCondition struct {
+ // Type of the condition, known values are (`Ready`).
+ Type IssuerConditionType `json:"type"`
+
+ // Status of the condition, one of (`True`, `False`, `Unknown`).
+ Status cmmeta.ConditionStatus `json:"status"`
+
+ // LastTransitionTime is the timestamp corresponding to the last status
+ // change of this condition.
+ // +optional
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+
+ // Reason is a brief machine-readable explanation for the condition's last
+ // transition.
+ // +optional
+ Reason string `json:"reason,omitempty"`
+
+ // Message is a human-readable description of the details of the last
+ // transition, complementing reason.
+ // +optional
+ Message string `json:"message,omitempty"`
+
+ // If set, this represents the .metadata.generation that the condition was
+ // set based upon.
+ // For instance, if .metadata.generation is currently 12, but the
+ // .status.condition[x].observedGeneration is 9, the condition is out of date
+ // with respect to the current state of the Issuer.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+}
+
+// IssuerConditionType represents an Issuer condition value.
+type IssuerConditionType string
+
+const (
+ // IssuerConditionReady represents the fact that a given Issuer is in
+ // a ready state and able to issue certificates.
+ // If the `status` of this condition is `False`, CertificateRequest controllers
+ // should prevent attempts to sign certificates.
+ IssuerConditionReady IssuerConditionType = "Ready"
+)
diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/zz_generated.deepcopy.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..8ba5ea3aaf6
--- /dev/null
+++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/zz_generated.deepcopy.go
@@ -0,0 +1,1047 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The cert-manager Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+ +package v1 + +import ( + acmev1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1" + apismetav1 "github.com/cert-manager/cert-manager/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CAIssuer) DeepCopyInto(out *CAIssuer) { + *out = *in + if in.CRLDistributionPoints != nil { + in, out := &in.CRLDistributionPoints, &out.CRLDistributionPoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OCSPServers != nil { + in, out := &in.OCSPServers, &out.OCSPServers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CAIssuer. +func (in *CAIssuer) DeepCopy() *CAIssuer { + if in == nil { + return nil + } + out := new(CAIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Certificate) DeepCopyInto(out *Certificate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Certificate. +func (in *Certificate) DeepCopy() *Certificate { + if in == nil { + return nil + } + out := new(Certificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Certificate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateAdditionalOutputFormat) DeepCopyInto(out *CertificateAdditionalOutputFormat) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAdditionalOutputFormat. +func (in *CertificateAdditionalOutputFormat) DeepCopy() *CertificateAdditionalOutputFormat { + if in == nil { + return nil + } + out := new(CertificateAdditionalOutputFormat) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateCondition) DeepCopyInto(out *CertificateCondition) { + *out = *in + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateCondition. +func (in *CertificateCondition) DeepCopy() *CertificateCondition { + if in == nil { + return nil + } + out := new(CertificateCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateKeystores) DeepCopyInto(out *CertificateKeystores) { + *out = *in + if in.JKS != nil { + in, out := &in.JKS, &out.JKS + *out = new(JKSKeystore) + **out = **in + } + if in.PKCS12 != nil { + in, out := &in.PKCS12, &out.PKCS12 + *out = new(PKCS12Keystore) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateKeystores. +func (in *CertificateKeystores) DeepCopy() *CertificateKeystores { + if in == nil { + return nil + } + out := new(CertificateKeystores) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateList) DeepCopyInto(out *CertificateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Certificate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateList. +func (in *CertificateList) DeepCopy() *CertificateList { + if in == nil { + return nil + } + out := new(CertificateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificatePrivateKey) DeepCopyInto(out *CertificatePrivateKey) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificatePrivateKey. +func (in *CertificatePrivateKey) DeepCopy() *CertificatePrivateKey { + if in == nil { + return nil + } + out := new(CertificatePrivateKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateRequest) DeepCopyInto(out *CertificateRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateRequest. +func (in *CertificateRequest) DeepCopy() *CertificateRequest { + if in == nil { + return nil + } + out := new(CertificateRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateRequestCondition) DeepCopyInto(out *CertificateRequestCondition) { + *out = *in + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateRequestCondition. 
+func (in *CertificateRequestCondition) DeepCopy() *CertificateRequestCondition { + if in == nil { + return nil + } + out := new(CertificateRequestCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateRequestList) DeepCopyInto(out *CertificateRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CertificateRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateRequestList. +func (in *CertificateRequestList) DeepCopy() *CertificateRequestList { + if in == nil { + return nil + } + out := new(CertificateRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateRequestSpec) DeepCopyInto(out *CertificateRequestSpec) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(metav1.Duration) + **out = **in + } + out.IssuerRef = in.IssuerRef + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]KeyUsage, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateRequestSpec. +func (in *CertificateRequestSpec) DeepCopy() *CertificateRequestSpec { + if in == nil { + return nil + } + out := new(CertificateRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateRequestStatus) DeepCopyInto(out *CertificateRequestStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CertificateRequestCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CA != nil { + in, out := &in.CA, &out.CA + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.FailureTime != nil { + in, out := &in.FailureTime, &out.FailureTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateRequestStatus. 
+func (in *CertificateRequestStatus) DeepCopy() *CertificateRequestStatus { + if in == nil { + return nil + } + out := new(CertificateRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSecretTemplate) DeepCopyInto(out *CertificateSecretTemplate) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSecretTemplate. +func (in *CertificateSecretTemplate) DeepCopy() *CertificateSecretTemplate { + if in == nil { + return nil + } + out := new(CertificateSecretTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSpec) DeepCopyInto(out *CertificateSpec) { + *out = *in + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(X509Subject) + (*in).DeepCopyInto(*out) + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(metav1.Duration) + **out = **in + } + if in.RenewBefore != nil { + in, out := &in.RenewBefore, &out.RenewBefore + *out = new(metav1.Duration) + **out = **in + } + if in.DNSNames != nil { + in, out := &in.DNSNames, &out.DNSNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.URIs != nil { + in, out := &in.URIs, &out.URIs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EmailAddresses != nil { + in, out := &in.EmailAddresses, &out.EmailAddresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretTemplate != nil { + in, out := &in.SecretTemplate, &out.SecretTemplate + *out = new(CertificateSecretTemplate) + (*in).DeepCopyInto(*out) + } + if in.Keystores != nil { + in, out := &in.Keystores, &out.Keystores + *out = new(CertificateKeystores) + (*in).DeepCopyInto(*out) + } + out.IssuerRef = in.IssuerRef + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]KeyUsage, len(*in)) + copy(*out, *in) + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(CertificatePrivateKey) + **out = **in + } + if in.EncodeUsagesInRequest != nil { + in, out := &in.EncodeUsagesInRequest, &out.EncodeUsagesInRequest + *out = new(bool) + **out = **in + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.AdditionalOutputFormats != nil { + in, out := &in.AdditionalOutputFormats, &out.AdditionalOutputFormats + *out = make([]CertificateAdditionalOutputFormat, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSpec. 
+func (in *CertificateSpec) DeepCopy() *CertificateSpec { + if in == nil { + return nil + } + out := new(CertificateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateStatus) DeepCopyInto(out *CertificateStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CertificateCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LastFailureTime != nil { + in, out := &in.LastFailureTime, &out.LastFailureTime + *out = (*in).DeepCopy() + } + if in.NotBefore != nil { + in, out := &in.NotBefore, &out.NotBefore + *out = (*in).DeepCopy() + } + if in.NotAfter != nil { + in, out := &in.NotAfter, &out.NotAfter + *out = (*in).DeepCopy() + } + if in.RenewalTime != nil { + in, out := &in.RenewalTime, &out.RenewalTime + *out = (*in).DeepCopy() + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(int) + **out = **in + } + if in.NextPrivateKeySecretName != nil { + in, out := &in.NextPrivateKeySecretName, &out.NextPrivateKeySecretName + *out = new(string) + **out = **in + } + if in.FailedIssuanceAttempts != nil { + in, out := &in.FailedIssuanceAttempts, &out.FailedIssuanceAttempts + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateStatus. +func (in *CertificateStatus) DeepCopy() *CertificateStatus { + if in == nil { + return nil + } + out := new(CertificateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterIssuer) DeepCopyInto(out *ClusterIssuer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterIssuer. +func (in *ClusterIssuer) DeepCopy() *ClusterIssuer { + if in == nil { + return nil + } + out := new(ClusterIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterIssuer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterIssuerList) DeepCopyInto(out *ClusterIssuerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterIssuer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterIssuerList. +func (in *ClusterIssuerList) DeepCopy() *ClusterIssuerList { + if in == nil { + return nil + } + out := new(ClusterIssuerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterIssuerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Issuer) DeepCopyInto(out *Issuer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Issuer. +func (in *Issuer) DeepCopy() *Issuer { + if in == nil { + return nil + } + out := new(Issuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Issuer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IssuerCondition) DeepCopyInto(out *IssuerCondition) { + *out = *in + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IssuerCondition. +func (in *IssuerCondition) DeepCopy() *IssuerCondition { + if in == nil { + return nil + } + out := new(IssuerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IssuerConfig) DeepCopyInto(out *IssuerConfig) { + *out = *in + if in.ACME != nil { + in, out := &in.ACME, &out.ACME + *out = new(acmev1.ACMEIssuer) + (*in).DeepCopyInto(*out) + } + if in.CA != nil { + in, out := &in.CA, &out.CA + *out = new(CAIssuer) + (*in).DeepCopyInto(*out) + } + if in.Vault != nil { + in, out := &in.Vault, &out.Vault + *out = new(VaultIssuer) + (*in).DeepCopyInto(*out) + } + if in.SelfSigned != nil { + in, out := &in.SelfSigned, &out.SelfSigned + *out = new(SelfSignedIssuer) + (*in).DeepCopyInto(*out) + } + if in.Venafi != nil { + in, out := &in.Venafi, &out.Venafi + *out = new(VenafiIssuer) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IssuerConfig. +func (in *IssuerConfig) DeepCopy() *IssuerConfig { + if in == nil { + return nil + } + out := new(IssuerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IssuerList) DeepCopyInto(out *IssuerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Issuer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IssuerList. +func (in *IssuerList) DeepCopy() *IssuerList { + if in == nil { + return nil + } + out := new(IssuerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *IssuerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IssuerSpec) DeepCopyInto(out *IssuerSpec) { + *out = *in + in.IssuerConfig.DeepCopyInto(&out.IssuerConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IssuerSpec. +func (in *IssuerSpec) DeepCopy() *IssuerSpec { + if in == nil { + return nil + } + out := new(IssuerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IssuerStatus) DeepCopyInto(out *IssuerStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]IssuerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ACME != nil { + in, out := &in.ACME, &out.ACME + *out = new(acmev1.ACMEIssuerStatus) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IssuerStatus. +func (in *IssuerStatus) DeepCopy() *IssuerStatus { + if in == nil { + return nil + } + out := new(IssuerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JKSKeystore) DeepCopyInto(out *JKSKeystore) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JKSKeystore. +func (in *JKSKeystore) DeepCopy() *JKSKeystore { + if in == nil { + return nil + } + out := new(JKSKeystore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PKCS12Keystore) DeepCopyInto(out *PKCS12Keystore) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PKCS12Keystore. +func (in *PKCS12Keystore) DeepCopy() *PKCS12Keystore { + if in == nil { + return nil + } + out := new(PKCS12Keystore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSignedIssuer) DeepCopyInto(out *SelfSignedIssuer) { + *out = *in + if in.CRLDistributionPoints != nil { + in, out := &in.CRLDistributionPoints, &out.CRLDistributionPoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSignedIssuer. +func (in *SelfSignedIssuer) DeepCopy() *SelfSignedIssuer { + if in == nil { + return nil + } + out := new(SelfSignedIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountRef) DeepCopyInto(out *ServiceAccountRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountRef. 
+func (in *ServiceAccountRef) DeepCopy() *ServiceAccountRef { + if in == nil { + return nil + } + out := new(ServiceAccountRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultAppRole) DeepCopyInto(out *VaultAppRole) { + *out = *in + out.SecretRef = in.SecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultAppRole. +func (in *VaultAppRole) DeepCopy() *VaultAppRole { + if in == nil { + return nil + } + out := new(VaultAppRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultAuth) DeepCopyInto(out *VaultAuth) { + *out = *in + if in.TokenSecretRef != nil { + in, out := &in.TokenSecretRef, &out.TokenSecretRef + *out = new(apismetav1.SecretKeySelector) + **out = **in + } + if in.AppRole != nil { + in, out := &in.AppRole, &out.AppRole + *out = new(VaultAppRole) + **out = **in + } + if in.Kubernetes != nil { + in, out := &in.Kubernetes, &out.Kubernetes + *out = new(VaultKubernetesAuth) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultAuth. +func (in *VaultAuth) DeepCopy() *VaultAuth { + if in == nil { + return nil + } + out := new(VaultAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultIssuer) DeepCopyInto(out *VaultIssuer) { + *out = *in + in.Auth.DeepCopyInto(&out.Auth) + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CABundleSecretRef != nil { + in, out := &in.CABundleSecretRef, &out.CABundleSecretRef + *out = new(apismetav1.SecretKeySelector) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultIssuer. +func (in *VaultIssuer) DeepCopy() *VaultIssuer { + if in == nil { + return nil + } + out := new(VaultIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultKubernetesAuth) DeepCopyInto(out *VaultKubernetesAuth) { + *out = *in + out.SecretRef = in.SecretRef + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = new(ServiceAccountRef) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultKubernetesAuth. +func (in *VaultKubernetesAuth) DeepCopy() *VaultKubernetesAuth { + if in == nil { + return nil + } + out := new(VaultKubernetesAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VenafiCloud) DeepCopyInto(out *VenafiCloud) { + *out = *in + out.APITokenSecretRef = in.APITokenSecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VenafiCloud. +func (in *VenafiCloud) DeepCopy() *VenafiCloud { + if in == nil { + return nil + } + out := new(VenafiCloud) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
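+// For illustration only (a sketch, not generated code): callers normally use
+// the DeepCopy wrapper rather than calling DeepCopyInto directly, e.g.
+//
+//	orig := &VenafiIssuer{Zone: "example-zone"} // placeholder zone value
+//	clone := orig.DeepCopy()                    // clone shares no memory with orig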
+func (in *VenafiIssuer) DeepCopyInto(out *VenafiIssuer) { + *out = *in + if in.TPP != nil { + in, out := &in.TPP, &out.TPP + *out = new(VenafiTPP) + (*in).DeepCopyInto(*out) + } + if in.Cloud != nil { + in, out := &in.Cloud, &out.Cloud + *out = new(VenafiCloud) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VenafiIssuer. +func (in *VenafiIssuer) DeepCopy() *VenafiIssuer { + if in == nil { + return nil + } + out := new(VenafiIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VenafiTPP) DeepCopyInto(out *VenafiTPP) { + *out = *in + out.CredentialsRef = in.CredentialsRef + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VenafiTPP. +func (in *VenafiTPP) DeepCopy() *VenafiTPP { + if in == nil { + return nil + } + out := new(VenafiTPP) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *X509Subject) DeepCopyInto(out *X509Subject) { + *out = *in + if in.Organizations != nil { + in, out := &in.Organizations, &out.Organizations + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Countries != nil { + in, out := &in.Countries, &out.Countries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OrganizationalUnits != nil { + in, out := &in.OrganizationalUnits, &out.OrganizationalUnits + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Localities != nil { + in, out := &in.Localities, &out.Localities + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Provinces != nil { + in, out := &in.Provinces, &out.Provinces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.StreetAddresses != nil { + in, out := &in.StreetAddresses, &out.StreetAddresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PostalCodes != nil { + in, out := &in.PostalCodes, &out.PostalCodes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new X509Subject. +func (in *X509Subject) DeepCopy() *X509Subject { + if in == nil { + return nil + } + out := new(X509Subject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/doc.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/doc.go new file mode 100644 index 00000000000..f391663af4e --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +groupName=meta.cert-manager.io + +// Package meta contains meta types for cert-manager APIs +package meta + +const GroupName = "meta.cert-manager.io" diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/doc.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/doc.go new file mode 100644 index 00000000000..9a673685d6e --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains meta types for cert-manager APIs +// +k8s:deepcopy-gen=package +// +gencrdrefdocs:force +// +groupName=meta.cert-manager.io +package v1 diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/register.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/register.go new file mode 100644 index 00000000000..d3c19886e40 --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + cmmeta "github.com/cert-manager/cert-manager/pkg/apis/meta" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: cmmeta.GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. 
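+// For illustration only (a sketch, not part of the upstream file): consumers
+// typically aggregate this group into their own Scheme via the exported
+// AddToScheme, e.g.
+//
+//	scheme := runtime.NewScheme()
+//	if err := AddToScheme(scheme); err != nil {
+//		panic(err) // illustrative error handling
+//	}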
+func addKnownTypes(scheme *runtime.Scheme) error { + // No types to register in the meta group + return nil +} diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go new file mode 100644 index 00000000000..24e72d15ffd --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go @@ -0,0 +1,79 @@ +/* +Copyright 2020 The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// ConditionStatus represents a condition's status. +// +kubebuilder:validation:Enum=True;False;Unknown +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in +// the condition; "ConditionFalse" means a resource is not in the condition; +// "ConditionUnknown" means kubernetes can't decide if a resource is in the +// condition or not. In the future, we could add other intermediate +// conditions, e.g. ConditionDegraded. +const ( + // ConditionTrue represents the fact that a given condition is true + ConditionTrue ConditionStatus = "True" + + // ConditionFalse represents the fact that a given condition is false + ConditionFalse ConditionStatus = "False" + + // ConditionUnknown represents the fact that a given condition is unknown + ConditionUnknown ConditionStatus = "Unknown" +) + +// A reference to an object in the same namespace as the referent. +// If the referent is a cluster-scoped resource (e.g. a ClusterIssuer), +// the reference instead refers to the resource with the given name in the +// configured 'cluster resource namespace', which is set as a flag on the +// controller component (and defaults to the namespace that cert-manager +// runs in). +type LocalObjectReference struct { + // Name of the resource being referred to. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name string `json:"name"` +} + +// ObjectReference is a reference to an object with a given name, kind and group. +type ObjectReference struct { + // Name of the resource being referred to. + Name string `json:"name"` + // Kind of the resource being referred to. + // +optional + Kind string `json:"kind,omitempty"` + // Group of the resource being referred to. + // +optional + Group string `json:"group,omitempty"` +} + +// A reference to a specific 'key' within a Secret resource. +// In some instances, `key` is a required field. +type SecretKeySelector struct { + // The name of the Secret resource being referred to. + LocalObjectReference `json:",inline"` + + // The key of the entry in the Secret resource's `data` field to be used. + // Some instances of this field may be defaulted, in others it may be + // required. + // +optional + Key string `json:"key,omitempty"` +} + +const ( + // Used as a data key in Secret resources to store a CA certificate. 
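+	// For example (illustrative), a CA Secret would carry its PEM bundle
+	// under Secret.Data[TLSCAKey].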
+ TLSCAKey = "ca.crt" +) diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..9fa10e5e665 --- /dev/null +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -0,0 +1,71 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The cert-manager Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. +func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { + if in == nil { + return nil + } + out := new(LocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector. 
+func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { + if in == nil { + return nil + } + out := new(SecretKeySelector) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 038dfbdeda6..fd1d0448440 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -31,6 +31,14 @@ github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1 github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1 github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1 github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1 +# github.com/cert-manager/cert-manager v1.13.3 +## explicit; go 1.20 +github.com/cert-manager/cert-manager/pkg/apis/acme +github.com/cert-manager/cert-manager/pkg/apis/acme/v1 +github.com/cert-manager/cert-manager/pkg/apis/certmanager +github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1 +github.com/cert-manager/cert-manager/pkg/apis/meta +github.com/cert-manager/cert-manager/pkg/apis/meta/v1 # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 @@ -1304,6 +1312,9 @@ knative.dev/reconciler-test/pkg/resources/serviceaccount knative.dev/reconciler-test/pkg/state knative.dev/reconciler-test/pkg/tracing knative.dev/reconciler-test/resources/certificate +# sigs.k8s.io/gateway-api v0.8.0 +## explicit; go 1.20 +sigs.k8s.io/gateway-api/apis/v1beta1 # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd ## explicit; go 1.18 sigs.k8s.io/json diff --git a/vendor/sigs.k8s.io/gateway-api/LICENSE b/vendor/sigs.k8s.io/gateway-api/LICENSE new file mode 100644 index 00000000000..a5949bd7a2f --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 The Kubernetes Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/doc.go b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/doc.go new file mode 100644 index 00000000000..328100aee81 --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta1 contains API Schema definitions for the +// gateway.networking.k8s.io API group. 
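+//
+// For illustration only (the import alias is an assumption): consumers
+// typically import this package and construct its types directly, e.g.
+//
+//	import gatewayv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1"
+//
+//	gw := &gatewayv1beta1.Gateway{}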
+// +// +kubebuilder:object:generate=true +// +groupName=gateway.networking.k8s.io +package v1beta1 diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/gateway_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/gateway_types.go new file mode 100644 index 00000000000..033c1c2a16a --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/gateway_types.go @@ -0,0 +1,946 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories=gateway-api,shortName=gtw +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Class",type=string,JSONPath=`.spec.gatewayClassName` +// +kubebuilder:printcolumn:name="Address",type=string,JSONPath=`.status.addresses[*].value` +// +kubebuilder:printcolumn:name="Programmed",type=string,JSONPath=`.status.conditions[?(@.type=="Programmed")].status` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// Gateway represents an instance of a service-traffic handling infrastructure +// by binding Listeners to a set of IP addresses. +type Gateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of Gateway. + Spec GatewaySpec `json:"spec"` + + // Status defines the current state of Gateway. + // + // +kubebuilder:default={conditions: {{type: "Accepted", status: "Unknown", reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"},{type: "Programmed", status: "Unknown", reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"}}} + Status GatewayStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GatewayList contains a list of Gateways. +type GatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Gateway `json:"items"` +} + +// GatewaySpec defines the desired state of Gateway. +// +// Not all possible combinations of options specified in the Spec are +// valid. Some invalid configurations can be caught synchronously via a +// webhook, but there are many cases that will require asynchronous +// signaling via the GatewayStatus block. +type GatewaySpec struct { + // GatewayClassName used for this Gateway. This is the name of a + // GatewayClass resource. + GatewayClassName ObjectName `json:"gatewayClassName"` + + // Listeners associated with this Gateway. Listeners define + // logical endpoints that are bound on this Gateway's addresses. + // At least one Listener MUST be specified. + // + // Each listener in a Gateway must have a unique combination of Hostname, + // Port, and Protocol. + // + // Within the HTTP Conformance Profile, the below combinations of port and + // protocol are considered Core and MUST be supported: + // + // 1. 
Port: 80, Protocol: HTTP + // 2. Port: 443, Protocol: HTTPS + // + // Within the TLS Conformance Profile, the below combinations of port and + // protocol are considered Core and MUST be supported: + // + // 1. Port: 443, Protocol: TLS + // + // Port and protocol combinations not listed above are considered Extended. + // + // An implementation MAY group Listeners by Port and then collapse each + // group of Listeners into a single Listener if the implementation + // determines that the Listeners in the group are "compatible". An + // implementation MAY also group together and collapse compatible + // Listeners belonging to different Gateways. + // + // For example, an implementation might consider Listeners to be + // compatible with each other if all of the following conditions are + // met: + // + // 1. Either each Listener within the group specifies the "HTTP" + // Protocol or each Listener within the group specifies either + // the "HTTPS" or "TLS" Protocol. + // + // 2. Each Listener within the group specifies a Hostname that is unique + // within the group. + // + // 3. As a special case, one Listener within a group may omit Hostname, + // in which case this Listener matches when no other Listener + // matches. + // + // If the implementation does collapse compatible Listeners, the + // hostname provided in the incoming client request MUST be + // matched to a Listener to find the correct set of Routes. + // The incoming hostname MUST be matched using the Hostname + // field for each Listener in order of most to least specific. + // That is, exact matches must be processed before wildcard + // matches. + // + // If this field specifies multiple Listeners that have the same + // Port value but are not compatible, the implementation must raise + // a "Conflicted" condition in the Listener status. + // + // Support: Core + // + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=64 + // +kubebuilder:validation:XValidation:message="tls must be specified for protocols ['HTTPS', 'TLS']",rule="self.all(l, l.protocol in ['HTTPS', 'TLS'] ? has(l.tls) : true)" + // +kubebuilder:validation:XValidation:message="tls must not be specified for protocols ['HTTP', 'TCP', 'UDP']",rule="self.all(l, l.protocol in ['HTTP', 'TCP', 'UDP'] ? !has(l.tls) : true)" + // +kubebuilder:validation:XValidation:message="hostname must not be specified for protocols ['TCP', 'UDP']",rule="self.all(l, l.protocol in ['TCP', 'UDP'] ? (!has(l.hostname) || l.hostname == '') : true)" + // +kubebuilder:validation:XValidation:message="Listener name must be unique within the Gateway",rule="self.all(l1, self.exists_one(l2, l1.name == l2.name))" + // +kubebuilder:validation:XValidation:message="Combination of port, protocol and hostname must be unique for each listener",rule="self.all(l1, self.exists_one(l2, l1.port == l2.port && l1.protocol == l2.protocol && (has(l1.hostname) && has(l2.hostname) ? l1.hostname == l2.hostname : true)))" + Listeners []Listener `json:"listeners"` + + // Addresses requested for this Gateway. This is optional and behavior can + // depend on the implementation. If a value is set in the spec and the + // requested address is invalid or unavailable, the implementation MUST + // indicate this in the associated entry in GatewayStatus.Addresses. + // + // The Addresses field represents a request for the address(es) on the + // "outside of the Gateway", that traffic bound for this Gateway will use. 
+ // This could be the IP address or hostname of an external load balancer or + // other networking infrastructure, or some other address that traffic will + // be sent to. + // + // The .listener.hostname field is used to route traffic that has already + // arrived at the Gateway to the correct in-cluster destination. + // + // If no Addresses are specified, the implementation MAY schedule the + // Gateway in an implementation-specific manner, assigning an appropriate + // set of Addresses. + // + // The implementation MUST bind all Listeners to every GatewayAddress that + // it assigns to the Gateway and add a corresponding entry in + // GatewayStatus.Addresses. + // + // Support: Extended + // + // +optional + // + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:message="IPAddress values must be unique",rule="self.all(a1, a1.type == 'IPAddress' ? self.exists_one(a2, a2.type == a1.type && a2.value == a1.value) : true )" + // +kubebuilder:validation:XValidation:message="Hostname values must be unique",rule="self.all(a1, a1.type == 'Hostname' ? self.exists_one(a2, a2.type == a1.type && a2.value == a1.value) : true )" + Addresses []GatewayAddress `json:"addresses,omitempty"` +} + +// Listener embodies the concept of a logical endpoint where a Gateway accepts +// network connections. +type Listener struct { + // Name is the name of the Listener. This name MUST be unique within a + // Gateway. + // + // Support: Core + Name SectionName `json:"name"` + + // Hostname specifies the virtual hostname to match for protocol types that + // define this concept. When unspecified, all hostnames are matched. This + // field is ignored for protocols that don't require hostname based + // matching. + // + // Implementations MUST apply Hostname matching appropriately for each of + // the following protocols: + // + // * TLS: The Listener Hostname MUST match the SNI. + // * HTTP: The Listener Hostname MUST match the Host header of the request. + // * HTTPS: The Listener Hostname SHOULD match at both the TLS and HTTP + // protocol layers as described above. If an implementation does not + // ensure that both the SNI and Host header match the Listener hostname, + // it MUST clearly document that. + // + // For HTTPRoute and TLSRoute resources, there is an interaction with the + // `spec.hostnames` array. When both listener and route specify hostnames, + // there MUST be an intersection between the values for a Route to be + // accepted. For more information, refer to the Route specific Hostnames + // documentation. + // + // Hostnames that are prefixed with a wildcard label (`*.`) are interpreted + // as a suffix match. That means that a match for `*.example.com` would match + // both `test.example.com`, and `foo.test.example.com`, but not `example.com`. + // + // Support: Core + // + // +optional + Hostname *Hostname `json:"hostname,omitempty"` + + // Port is the network port. Multiple listeners may use the + // same port, subject to the Listener compatibility rules. + // + // Support: Core + Port PortNumber `json:"port"` + + // Protocol specifies the network protocol this listener expects to receive. + // + // Support: Core + Protocol ProtocolType `json:"protocol"` + + // TLS is the TLS configuration for the Listener. This field is required if + // the Protocol field is "HTTPS" or "TLS". It is invalid to set this field + // if the Protocol field is "HTTP", "TCP", or "UDP". 
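+	//
+	// For illustration only (the Secret name is a placeholder): an HTTPS
+	// Listener terminating TLS with a single certificate reference could be
+	// constructed as
+	//
+	//	Listener{
+	//		Name:     "https",
+	//		Port:     443,
+	//		Protocol: HTTPSProtocolType,
+	//		TLS: &GatewayTLSConfig{
+	//			CertificateRefs: []SecretObjectReference{{Name: "example-com-tls"}},
+	//		},
+	//	}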
+ // + // The association of SNIs to Certificate defined in GatewayTLSConfig is + // defined based on the Hostname field for this listener. + // + // The GatewayClass MUST use the longest matching SNI out of all + // available certificates for any TLS handshake. + // + // Support: Core + // + // +optional + TLS *GatewayTLSConfig `json:"tls,omitempty"` + + // AllowedRoutes defines the types of routes that MAY be attached to a + // Listener and the trusted namespaces where those Route resources MAY be + // present. + // + // Although a client request may match multiple route rules, only one rule + // may ultimately receive the request. Matching precedence MUST be + // determined in order of the following criteria: + // + // * The most specific match as defined by the Route type. + // * The oldest Route based on creation timestamp. For example, a Route with + // a creation timestamp of "2020-09-08 01:02:03" is given precedence over + // a Route with a creation timestamp of "2020-09-08 01:02:04". + // * If everything else is equivalent, the Route appearing first in + // alphabetical order (namespace/name) should be given precedence. For + // example, foo/bar is given precedence over foo/baz. + // + // All valid rules within a Route attached to this Listener should be + // implemented. Invalid Route rules can be ignored (sometimes that will mean + // the full Route). If a Route rule transitions from valid to invalid, + // support for that Route rule should be dropped to ensure consistency. For + // example, even if a filter specified by a Route rule is invalid, the rest + // of the rules within that Route should still be supported. + // + // Support: Core + // +kubebuilder:default={namespaces:{from: Same}} + // +optional + AllowedRoutes *AllowedRoutes `json:"allowedRoutes,omitempty"` +} + +// ProtocolType defines the application protocol accepted by a Listener. +// Implementations are not required to accept all the defined protocols. If an +// implementation does not support a specified protocol, it MUST set the +// "Accepted" condition to False for the affected Listener with a reason of +// "UnsupportedProtocol". +// +// Core ProtocolType values are listed in the table below. +// +// Implementations can define their own protocols if a core ProtocolType does not +// exist. Such definitions must use prefixed name, such as +// `mycompany.com/my-custom-protocol`. Un-prefixed names are reserved for core +// protocols. Any protocol defined by implementations will fall under +// Implementation-specific conformance. +// +// Valid values include: +// +// * "HTTP" - Core support +// * "example.com/bar" - Implementation-specific support +// +// Invalid values include: +// +// * "example.com" - must include path if domain is used +// * "foo.example.com" - must include path if domain is used +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=255 +// +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([-a-zSA-Z0-9]*[a-zA-Z0-9])?$|[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9]+$` +type ProtocolType string + +const ( + // Accepts cleartext HTTP/1.1 sessions over TCP. Implementations MAY also + // support HTTP/2 over cleartext. If implementations support HTTP/2 over + // cleartext on "HTTP" listeners, that MUST be clearly documented by the + // implementation. + HTTPProtocolType ProtocolType = "HTTP" + + // Accepts HTTP/1.1 or HTTP/2 sessions over TLS. + HTTPSProtocolType ProtocolType = "HTTPS" + + // Accepts TLS sessions over TCP. 
+ TLSProtocolType ProtocolType = "TLS" + + // Accepts TCP sessions. + TCPProtocolType ProtocolType = "TCP" + + // Accepts UDP packets. + UDPProtocolType ProtocolType = "UDP" +) + +// GatewayTLSConfig describes a TLS configuration. +// +// +kubebuilder:validation:XValidation:message="certificateRefs must be specified when TLSModeType is Terminate",rule="self.mode == 'Terminate' ? size(self.certificateRefs) > 0 : true" +type GatewayTLSConfig struct { + // Mode defines the TLS behavior for the TLS session initiated by the client. + // There are two possible modes: + // + // - Terminate: The TLS session between the downstream client + // and the Gateway is terminated at the Gateway. This mode requires + // certificateRefs to be set and contain at least one element. + // - Passthrough: The TLS session is NOT terminated by the Gateway. This + // implies that the Gateway can't decipher the TLS stream except for + // the ClientHello message of the TLS protocol. + // CertificateRefs field is ignored in this mode. + // + // Support: Core + // + // +optional + // +kubebuilder:default=Terminate + Mode *TLSModeType `json:"mode,omitempty"` + + // CertificateRefs contains a series of references to Kubernetes objects that + // contains TLS certificates and private keys. These certificates are used to + // establish a TLS handshake for requests that match the hostname of the + // associated listener. + // + // A single CertificateRef to a Kubernetes Secret has "Core" support. + // Implementations MAY choose to support attaching multiple certificates to + // a Listener, but this behavior is implementation-specific. + // + // References to a resource in different namespace are invalid UNLESS there + // is a ReferenceGrant in the target namespace that allows the certificate + // to be attached. If a ReferenceGrant does not allow this reference, the + // "ResolvedRefs" condition MUST be set to False for this listener with the + // "RefNotPermitted" reason. + // + // This field is required to have at least one element when the mode is set + // to "Terminate" (default) and is optional otherwise. + // + // CertificateRefs can reference to standard Kubernetes resources, i.e. + // Secret, or implementation-specific custom resources. + // + // Support: Core - A single reference to a Kubernetes Secret of type kubernetes.io/tls + // + // Support: Implementation-specific (More than one reference or other resource types) + // + // +optional + // +kubebuilder:validation:MaxItems=64 + CertificateRefs []SecretObjectReference `json:"certificateRefs,omitempty"` + + // Options are a list of key/value pairs to enable extended TLS + // configuration for each implementation. For example, configuring the + // minimum TLS version or supported cipher suites. + // + // A set of common keys MAY be defined by the API in the future. To avoid + // any ambiguity, implementation-specific definitions MUST use + // domain-prefixed names, such as `example.com/my-custom-option`. + // Un-prefixed names are reserved for key names defined by Gateway API. + // + // Support: Implementation-specific + // + // +optional + // +kubebuilder:validation:MaxProperties=16 + Options map[AnnotationKey]AnnotationValue `json:"options,omitempty"` +} + +// TLSModeType type defines how a Gateway handles TLS sessions. +// +// +kubebuilder:validation:Enum=Terminate;Passthrough +type TLSModeType string + +const ( + // In this mode, TLS session between the downstream client + // and the Gateway is terminated at the Gateway. 
+ TLSModeTerminate TLSModeType = "Terminate" + + // In this mode, the TLS session is NOT terminated by the Gateway. This + // implies that the Gateway can't decipher the TLS stream except for + // the ClientHello message of the TLS protocol. + // + // Note that SSL passthrough is only supported by TLSRoute. + TLSModePassthrough TLSModeType = "Passthrough" +) + +// AllowedRoutes defines which Routes may be attached to this Listener. +type AllowedRoutes struct { + // Namespaces indicates namespaces from which Routes may be attached to this + // Listener. This is restricted to the namespace of this Gateway by default. + // + // Support: Core + // + // +optional + // +kubebuilder:default={from: Same} + Namespaces *RouteNamespaces `json:"namespaces,omitempty"` + + // Kinds specifies the groups and kinds of Routes that are allowed to bind + // to this Gateway Listener. When unspecified or empty, the kinds of Routes + // selected are determined using the Listener protocol. + // + // A RouteGroupKind MUST correspond to kinds of Routes that are compatible + // with the application protocol specified in the Listener's Protocol field. + // If an implementation does not support or recognize this resource type, it + // MUST set the "ResolvedRefs" condition to False for this Listener with the + // "InvalidRouteKinds" reason. + // + // Support: Core + // + // +optional + // +kubebuilder:validation:MaxItems=8 + Kinds []RouteGroupKind `json:"kinds,omitempty"` +} + +// FromNamespaces specifies namespace from which Routes may be attached to a +// Gateway. +// +// +kubebuilder:validation:Enum=All;Selector;Same +type FromNamespaces string + +const ( + // Routes in all namespaces may be attached to this Gateway. + NamespacesFromAll FromNamespaces = "All" + // Only Routes in namespaces selected by the selector may be attached to + // this Gateway. + NamespacesFromSelector FromNamespaces = "Selector" + // Only Routes in the same namespace as the Gateway may be attached to this + // Gateway. + NamespacesFromSame FromNamespaces = "Same" +) + +// RouteNamespaces indicate which namespaces Routes should be selected from. +type RouteNamespaces struct { + // From indicates where Routes will be selected for this Gateway. Possible + // values are: + // + // * All: Routes in all namespaces may be used by this Gateway. + // * Selector: Routes in namespaces selected by the selector may be used by + // this Gateway. + // * Same: Only Routes in the same namespace may be used by this Gateway. + // + // Support: Core + // + // +optional + // +kubebuilder:default=Same + From *FromNamespaces `json:"from,omitempty"` + + // Selector must be specified when From is set to "Selector". In that case, + // only Routes in Namespaces matching this Selector will be selected by this + // Gateway. This field is ignored for other values of "From". + // + // Support: Core + // + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty"` +} + +// RouteGroupKind indicates the group and kind of a Route resource. +type RouteGroupKind struct { + // Group is the group of the Route. + // + // +optional + // +kubebuilder:default=gateway.networking.k8s.io + Group *Group `json:"group,omitempty"` + + // Kind is the kind of the Route. + Kind Kind `json:"kind"` +} + +// GatewayAddress describes an address that can be bound to a Gateway. 
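+//
+// For illustration only, a single address entry on a Gateway might look
+// like this (the value shown is a hypothetical documentation address):
+//
+// ```
+// addresses:
+// - type: IPAddress
+//   value: 203.0.113.8
+// ```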
+// +// +kubebuilder:validation:XValidation:message="Hostname value must only contain valid characters (matching ^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$)",rule="self.type == 'Hostname' ? self.value.matches(r\"\"\"^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\"\"\"): true" +type GatewayAddress struct { + // Type of the address. + // + // +optional + // +kubebuilder:default=IPAddress + Type *AddressType `json:"type,omitempty"` + + // Value of the address. The validity of the values will depend + // on the type and support by the controller. + // + // Examples: `1.2.3.4`, `128::1`, `my-ip-address`. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + Value string `json:"value"` +} + +// GatewayStatusAddress describes an address that is bound to a Gateway. +// +// +kubebuilder:validation:XValidation:message="Hostname value must only contain valid characters (matching ^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$)",rule="self.type == 'Hostname' ? self.value.matches(r\"\"\"^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\"\"\"): true" +type GatewayStatusAddress struct { + // Type of the address. + // + // +optional + // +kubebuilder:default=IPAddress + Type *AddressType `json:"type,omitempty"` + + // Value of the address. The validity of the values will depend + // on the type and support by the controller. + // + // Examples: `1.2.3.4`, `128::1`, `my-ip-address`. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + Value string `json:"value"` +} + +// GatewayStatus defines the observed state of Gateway. +type GatewayStatus struct { + // Addresses lists the IP addresses that have actually been + // bound to the Gateway. These addresses may differ from the + // addresses in the Spec, e.g. if the Gateway automatically + // assigns an address from a reserved pool. + // + // +optional + // + // +kubebuilder:validation:MaxItems=16 + Addresses []GatewayStatusAddress `json:"addresses,omitempty"` + + // Conditions describe the current conditions of the Gateway. + // + // Implementations should prefer to express Gateway conditions + // using the `GatewayConditionType` and `GatewayConditionReason` + // constants so that operators and tools can converge on a common + // vocabulary to describe Gateway state. + // + // Known condition types are: + // + // * "Accepted" + // * "Programmed" + // * "Ready" + // + // +optional + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:default={{type: "Accepted", status: "Unknown", reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"},{type: "Programmed", status: "Unknown", reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"}} + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // Listeners provide status for each unique listener port defined in the Spec. + // + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=64 + Listeners []ListenerStatus `json:"listeners,omitempty"` +} + +// GatewayConditionType is a type of condition associated with a +// Gateway. This type should be used with the GatewayStatus.Conditions +// field. +type GatewayConditionType string + +// GatewayConditionReason defines the set of reasons that explain why a +// particular Gateway condition type has been raised. 
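+//
+// For illustration only, a Gateway that has been accepted but is not yet
+// programmed might report conditions like the following (a hypothetical
+// status snippet using the condition types and reasons defined below):
+//
+// ```
+// conditions:
+// - type: Accepted
+//   status: "True"
+//   reason: Accepted
+// - type: Programmed
+//   status: "False"
+//   reason: Pending
+// ```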
+type GatewayConditionReason string
+
+const (
+ // This condition indicates whether a Gateway has generated some
+ // configuration that is assumed to be ready soon in the underlying data
+ // plane.
+ //
+ // It is a positive-polarity summary condition, and so should always be
+ // present on the resource with ObservedGeneration set.
+ //
+ // It should be set to Unknown if the controller performs updates to the
+ // status before it has all the information it needs to be able to determine
+ // if the condition is true.
+ //
+ // Possible reasons for this condition to be True are:
+ //
+ // * "Programmed"
+ //
+ // Possible reasons for this condition to be False are:
+ //
+ // * "Invalid"
+ // * "Pending"
+ // * "NoResources"
+ // * "AddressNotAssigned"
+ //
+ // Possible reasons for this condition to be Unknown are:
+ //
+ // * "Pending"
+ //
+ // Controllers may raise this condition with other reasons,
+ // but should prefer to use the reasons listed above to improve
+ // interoperability.
+ GatewayConditionProgrammed GatewayConditionType = "Programmed"
+
+ // This reason is used with the "Programmed" condition when the condition is
+ // true.
+ GatewayReasonProgrammed GatewayConditionReason = "Programmed"
+
+ // This reason is used with the "Programmed" and "Accepted" conditions when the Gateway is
+ // syntactically or semantically invalid.
+ GatewayReasonInvalid GatewayConditionReason = "Invalid"
+
+ // This reason is used with the "Programmed" condition when the
+ // Gateway is not scheduled because insufficient infrastructure
+ // resources are available.
+ GatewayReasonNoResources GatewayConditionReason = "NoResources"
+
+ // This reason is used with the "Programmed" condition when none of the requested
+ // addresses have been assigned to the Gateway. This reason can be used to
+ // express a range of circumstances, including (but not limited to) IPAM
+ // address exhaustion, address not yet allocated, or a named address not being found.
+ GatewayReasonAddressNotAssigned GatewayConditionReason = "AddressNotAssigned"
+)
+
+const (
+ // This condition is true when the Gateway is syntactically and semantically
+ // valid enough for the controller managing it to produce some configuration
+ // in the underlying data plane. This does not indicate whether or not the
+ // configuration has been propagated to the data plane.
+ //
+ // Possible reasons for this condition to be True are:
+ //
+ // * "Accepted"
+ // * "ListenersNotValid"
+ //
+ // Possible reasons for this condition to be False are:
+ //
+ // * "Invalid"
+ // * "NotReconciled"
+ // * "UnsupportedAddress"
+ // * "ListenersNotValid"
+ //
+ // Possible reasons for this condition to be Unknown are:
+ //
+ // * "Pending"
+ //
+ // Controllers may raise this condition with other reasons,
+ // but should prefer to use the reasons listed above to improve
+ // interoperability.
+ GatewayConditionAccepted GatewayConditionType = "Accepted"
+
+ // This reason is used with the "Accepted" condition when the condition is
+ // True.
+ GatewayReasonAccepted GatewayConditionReason = "Accepted"
+
+ // This reason is used with the "Accepted" condition when one or
+ // more Listeners have an invalid or unsupported configuration
+ // and cannot be configured on the Gateway.
+ // This can be the reason when "Accepted" is "True" or "False", depending on whether
+ // the listener being invalid causes the entire Gateway to not be accepted.
+ GatewayReasonListenersNotValid GatewayConditionReason = "ListenersNotValid" + + // This reason is used with the "Accepted" and "Programmed" + // conditions when the status is "Unknown" and no controller has reconciled + // the Gateway. + GatewayReasonPending GatewayConditionReason = "Pending" + + // This reason is used with the "Accepted" condition when the Gateway could not be configured + // because the requested address is not supported. This reason could be used in a number of + // instances, including: + // + // * The address is already in use. + // * The type of address is not supported by the implementation. + GatewayReasonUnsupportedAddress GatewayConditionReason = "UnsupportedAddress" +) + +const ( + // Deprecated: use "Accepted" instead. + GatewayConditionScheduled GatewayConditionType = "Scheduled" + + // This reason is used with the "Scheduled" condition when the condition is + // True. + // + // Deprecated: use the "Accepted" condition with reason "Accepted" instead. + GatewayReasonScheduled GatewayConditionReason = "Scheduled" + + // Deprecated: Use "Pending" instead. + GatewayReasonNotReconciled GatewayConditionReason = "NotReconciled" +) + +const ( + // "Ready" is a condition type reserved for future use. It should not be used by implementations. + // + // If used in the future, "Ready" will represent the final state where all configuration is confirmed good + // _and has completely propagated to the data plane_. That is, it is a _guarantee_ that, as soon as something + // sees the Condition as `true`, then connections will be correctly routed _immediately_. + // + // This is a very strong guarantee, and to date no implementation has satisfied it enough to implement it. + // This reservation can be discussed in the future if necessary. + // + // Note: This condition is not really "deprecated", but rather "reserved"; however, deprecated triggers Go linters + // to alert about usage. + // Deprecated: Ready is reserved for future use + GatewayConditionReady GatewayConditionType = "Ready" + + // Deprecated: Ready is reserved for future use + GatewayReasonReady GatewayConditionReason = "Ready" + + // Deprecated: Ready is reserved for future use + GatewayReasonListenersNotReady GatewayConditionReason = "ListenersNotReady" +) + +// ListenerStatus is the status associated with a Listener. +type ListenerStatus struct { + // Name is the name of the Listener that this status corresponds to. + Name SectionName `json:"name"` + + // SupportedKinds is the list indicating the Kinds supported by this + // listener. This MUST represent the kinds an implementation supports for + // that Listener configuration. + // + // If kinds are specified in Spec that are not supported, they MUST NOT + // appear in this list and an implementation MUST set the "ResolvedRefs" + // condition to "False" with the "InvalidRouteKinds" reason. If both valid + // and invalid Route kinds are specified, the implementation MUST + // reference the valid Route kinds that have been specified. + // + // +kubebuilder:validation:MaxItems=8 + SupportedKinds []RouteGroupKind `json:"supportedKinds"` + + // AttachedRoutes represents the total number of accepted Routes that have been + // successfully attached to this Listener. + AttachedRoutes int32 `json:"attachedRoutes"` + + // Conditions describe the current condition of this listener. 
+ // + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=8 + Conditions []metav1.Condition `json:"conditions"` +} + +// ListenerConditionType is a type of condition associated with the +// listener. This type should be used with the ListenerStatus.Conditions +// field. +type ListenerConditionType string + +// ListenerConditionReason defines the set of reasons that explain +// why a particular Listener condition type has been raised. +type ListenerConditionReason string + +const ( + // This condition indicates that the controller was unable to resolve + // conflicting specification requirements for this Listener. If a + // Listener is conflicted, its network port should not be configured + // on any network elements. + // + // Possible reasons for this condition to be true are: + // + // * "HostnameConflict" + // * "ProtocolConflict" + // + // Possible reasons for this condition to be False are: + // + // * "NoConflicts" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + ListenerConditionConflicted ListenerConditionType = "Conflicted" + + // This reason is used with the "Conflicted" condition when + // the Listener conflicts with hostnames in other Listeners. For + // example, this reason would be used when multiple Listeners on + // the same port use `example.com` in the hostname field. + ListenerReasonHostnameConflict ListenerConditionReason = "HostnameConflict" + + // This reason is used with the "Conflicted" condition when + // multiple Listeners are specified with the same Listener port + // number, but have conflicting protocol specifications. + ListenerReasonProtocolConflict ListenerConditionReason = "ProtocolConflict" + + // This reason is used with the "Conflicted" condition when the condition + // is False. + ListenerReasonNoConflicts ListenerConditionReason = "NoConflicts" +) + +const ( + // This condition indicates that the listener is syntactically and + // semantically valid, and that all features used in the listener's spec are + // supported. + // + // In general, a Listener will be marked as Accepted when the supplied + // configuration will generate at least some data plane configuration. + // + // For example, a Listener with an unsupported protocol will never generate + // any data plane config, and so will have Accepted set to `false.` + // Conversely, a Listener that does not have any Routes will be able to + // generate data plane config, and so will have Accepted set to `true`. + // + // Possible reasons for this condition to be True are: + // + // * "Accepted" + // + // Possible reasons for this condition to be False are: + // + // * "PortUnavailable" + // * "UnsupportedProtocol" + // + // Possible reasons for this condition to be Unknown are: + // + // * "Pending" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + ListenerConditionAccepted ListenerConditionType = "Accepted" + + // Deprecated: use "Accepted" instead. + ListenerConditionDetached ListenerConditionType = "Detached" + + // This reason is used with the "Accepted" condition when the condition is + // True. + ListenerReasonAccepted ListenerConditionReason = "Accepted" + + // This reason is used with the "Detached" condition when the condition is + // False. + // + // Deprecated: use the "Accepted" condition with reason "Accepted" instead. 
+ ListenerReasonAttached ListenerConditionReason = "Attached"
+
+ // This reason is used with the "Accepted" condition when the Listener
+ // requests a port that cannot be used on the Gateway. This reason could be
+ // used in a number of instances, including:
+ //
+ // * The port is already in use.
+ // * The port is not supported by the implementation.
+ ListenerReasonPortUnavailable ListenerConditionReason = "PortUnavailable"
+
+ // This reason is used with the "Accepted" condition when the
+ // Listener could not be attached to the Gateway because its
+ // protocol type is not supported.
+ ListenerReasonUnsupportedProtocol ListenerConditionReason = "UnsupportedProtocol"
+)
+
+const (
+ // This condition indicates whether the controller was able to
+ // resolve all the object references for the Listener.
+ //
+ // Possible reasons for this condition to be true are:
+ //
+ // * "ResolvedRefs"
+ //
+ // Possible reasons for this condition to be False are:
+ //
+ // * "InvalidCertificateRef"
+ // * "InvalidRouteKinds"
+ // * "RefNotPermitted"
+ //
+ // Controllers may raise this condition with other reasons,
+ // but should prefer to use the reasons listed above to improve
+ // interoperability.
+ ListenerConditionResolvedRefs ListenerConditionType = "ResolvedRefs"
+
+ // This reason is used with the "ResolvedRefs" condition when the condition
+ // is true.
+ ListenerReasonResolvedRefs ListenerConditionReason = "ResolvedRefs"
+
+ // This reason is used with the "ResolvedRefs" condition when the
+ // Listener has a TLS configuration with at least one TLS CertificateRef
+ // that is invalid or does not exist.
+ // A CertificateRef is considered invalid when it refers to a nonexistent
+ // or unsupported resource or kind, or when the data within that resource
+ // is malformed.
+ // This reason must be used only when the reference is allowed, either by
+ // referencing an object in the same namespace as the Gateway, or when
+ // a cross-namespace reference has been explicitly allowed by a ReferenceGrant.
+ // If the reference is not allowed, the reason RefNotPermitted must be used
+ // instead.
+ ListenerReasonInvalidCertificateRef ListenerConditionReason = "InvalidCertificateRef"
+
+ // This reason is used with the "ResolvedRefs" condition when an invalid or
+ // unsupported Route kind is specified by the Listener.
+ ListenerReasonInvalidRouteKinds ListenerConditionReason = "InvalidRouteKinds"
+
+ // This reason is used with the "ResolvedRefs" condition when the
+ // Listener has a TLS configuration that references an object in another
+ // namespace, where the object in the other namespace does not have a
+ // ReferenceGrant explicitly allowing the reference.
+ ListenerReasonRefNotPermitted ListenerConditionReason = "RefNotPermitted"
+)
+
+const (
+ // This condition indicates whether a Listener has generated some
+ // configuration that will soon be ready in the underlying data plane.
+ //
+ // It is a positive-polarity summary condition, and so should always be
+ // present on the resource with ObservedGeneration set.
+ //
+ // It should be set to Unknown if the controller performs updates to the
+ // status before it has all the information it needs to be able to determine
+ // if the condition is true.
+ //
+ // Possible reasons for this condition to be True are:
+ //
+ // * "Programmed"
+ //
+ // Possible reasons for this condition to be False are:
+ //
+ // * "Invalid"
+ // * "Pending"
+ //
+ // Possible reasons for this condition to be Unknown are:
+ //
+ // * "Pending"
+ //
+ // Controllers may raise this condition with other reasons,
+ // but should prefer to use the reasons listed above to improve
+ // interoperability.
+ ListenerConditionProgrammed ListenerConditionType = "Programmed"
+
+ // This reason is used with the "Programmed" condition when the condition is
+ // true.
+ ListenerReasonProgrammed ListenerConditionReason = "Programmed"
+
+ // This reason is used with the "Ready" and "Programmed" conditions when the
+ // Listener is syntactically or semantically invalid.
+ ListenerReasonInvalid ListenerConditionReason = "Invalid"
+
+ // This reason is used with the "Accepted", "Ready" and "Programmed"
+ // conditions when the Listener is either not yet reconciled or not yet
+ // online and ready to accept client traffic.
+ ListenerReasonPending ListenerConditionReason = "Pending"
+)
+
+const (
+ // "Ready" is a condition type reserved for future use. It should not be used by implementations.
+ // Note: This condition is not really "deprecated", but rather "reserved"; however, deprecated triggers Go linters
+ // to alert about usage.
+ //
+ // If used in the future, "Ready" will represent the final state where all configuration is confirmed good
+ // _and has completely propagated to the data plane_. That is, it is a _guarantee_ that, as soon as something
+ // sees the Condition as `true`, then connections will be correctly routed _immediately_.
+ //
+ // This is a very strong guarantee, and to date no implementation has satisfied it enough to implement it.
+ // This reservation can be discussed in the future if necessary.
+ //
+ // Deprecated: Ready is reserved for future use
+ ListenerConditionReady ListenerConditionType = "Ready"
+
+ // Deprecated: Ready is reserved for future use
+ ListenerReasonReady ListenerConditionReason = "Ready"
+)
diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/gatewayclass_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/gatewayclass_types.go
new file mode 100644
index 00000000000..9d166700d02
--- /dev/null
+++ b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/gatewayclass_types.go
@@ -0,0 +1,212 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories=gateway-api,scope=Cluster,shortName=gc +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Controller",type=string,JSONPath=`.spec.controllerName` +// +kubebuilder:printcolumn:name="Accepted",type=string,JSONPath=`.status.conditions[?(@.type=="Accepted")].status` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:printcolumn:name="Description",type=string,JSONPath=`.spec.description`,priority=1 + +// GatewayClass describes a class of Gateways available to the user for creating +// Gateway resources. +// +// It is recommended that this resource be used as a template for Gateways. This +// means that a Gateway is based on the state of the GatewayClass at the time it +// was created and changes to the GatewayClass or associated parameters are not +// propagated down to existing Gateways. This recommendation is intended to +// limit the blast radius of changes to GatewayClass or associated parameters. +// If implementations choose to propagate GatewayClass changes to existing +// Gateways, that MUST be clearly documented by the implementation. +// +// Whenever one or more Gateways are using a GatewayClass, implementations SHOULD +// add the `gateway-exists-finalizer.gateway.networking.k8s.io` finalizer on the +// associated GatewayClass. This ensures that a GatewayClass associated with a +// Gateway is not deleted while in use. +// +// GatewayClass is a Cluster level resource. +type GatewayClass struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of GatewayClass. + Spec GatewayClassSpec `json:"spec"` + + // Status defines the current state of GatewayClass. + // + // Implementations MUST populate status on all GatewayClass resources which + // specify their controller name. + // + // +kubebuilder:default={conditions: {{type: "Accepted", status: "Unknown", message: "Waiting for controller", reason: "Waiting", lastTransitionTime: "1970-01-01T00:00:00Z"}}} + Status GatewayClassStatus `json:"status,omitempty"` +} + +const ( + // GatewayClassFinalizerGatewaysExist should be added as a finalizer to the + // GatewayClass whenever there are provisioned Gateways using a + // GatewayClass. + GatewayClassFinalizerGatewaysExist = "gateway-exists-finalizer.gateway.networking.k8s.io" +) + +// GatewayClassSpec reflects the configuration of a class of Gateways. +type GatewayClassSpec struct { + // ControllerName is the name of the controller that is managing Gateways of + // this class. The value of this field MUST be a domain prefixed path. + // + // Example: "example.net/gateway-controller". + // + // This field is not mutable and cannot be empty. + // + // Support: Core + // + // +kubebuilder:validation:XValidation:message="Value is immutable",rule="self == oldSelf" + ControllerName GatewayController `json:"controllerName"` + + // ParametersRef is a reference to a resource that contains the configuration + // parameters corresponding to the GatewayClass. This is optional if the + // controller does not require any additional configuration. + // + // ParametersRef can reference a standard Kubernetes resource, i.e. ConfigMap, + // or an implementation-specific custom resource. 
The resource can be
+ // cluster-scoped or namespace-scoped.
+ //
+ // If the referent cannot be found, the GatewayClass's "InvalidParameters"
+ // status condition will be true.
+ //
+ // Support: Implementation-specific
+ //
+ // +optional
+ ParametersRef *ParametersReference `json:"parametersRef,omitempty"`
+
+ // Description helps describe a GatewayClass with more details.
+ //
+ // +kubebuilder:validation:MaxLength=64
+ // +optional
+ Description *string `json:"description,omitempty"`
+}
+
+// ParametersReference identifies an API object containing a controller-specific
+// configuration resource within the cluster.
+type ParametersReference struct {
+ // Group is the group of the referent.
+ Group Group `json:"group"`
+
+ // Kind is the kind of the referent.
+ Kind Kind `json:"kind"`
+
+ // Name is the name of the referent.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=253
+ Name string `json:"name"`
+
+ // Namespace is the namespace of the referent.
+ // This field is required when referring to a Namespace-scoped resource and
+ // MUST be unset when referring to a Cluster-scoped resource.
+ //
+ // +optional
+ Namespace *Namespace `json:"namespace,omitempty"`
+}
+
+// GatewayClassConditionType is the type for status conditions on
+// GatewayClass resources. This type should be used with the
+// GatewayClassStatus.Conditions field.
+type GatewayClassConditionType string
+
+// GatewayClassConditionReason defines the set of reasons that explain why a
+// particular GatewayClass condition type has been raised.
+type GatewayClassConditionReason string
+
+const (
+ // This condition indicates whether the GatewayClass has been accepted by
+ // the controller requested in the `spec.controller` field.
+ //
+ // This condition defaults to Unknown, and MUST be set by a controller when
+ // it sees a GatewayClass using its controller string. The status of this
+ // condition MUST be set to True if the controller will support provisioning
+ // Gateways using this class. Otherwise, this status MUST be set to False.
+ // If the status is set to False, the controller SHOULD set a Message and
+ // Reason as an explanation.
+ //
+ // Possible reasons for this condition to be true are:
+ //
+ // * "Accepted"
+ //
+ // Possible reasons for this condition to be False are:
+ //
+ // * "InvalidParameters"
+ //
+ // Possible reasons for this condition to be Unknown are:
+ //
+ // * "Pending"
+ //
+ // Controllers should prefer to use the values of GatewayClassConditionReason
+ // for the corresponding Reason, where appropriate.
+ GatewayClassConditionStatusAccepted GatewayClassConditionType = "Accepted"
+
+ // This reason is used with the "Accepted" condition when the condition is
+ // true.
+ GatewayClassReasonAccepted GatewayClassConditionReason = "Accepted"
+
+ // This reason is used with the "Accepted" condition when the
+ // GatewayClass was not accepted because the parametersRef field
+ // was invalid, with more detail in the message.
+ GatewayClassReasonInvalidParameters GatewayClassConditionReason = "InvalidParameters"
+
+ // This reason is used with the "Accepted" condition when the
+ // requested controller has not yet made a decision about whether
+ // to admit the GatewayClass. It is the default Reason on a new
+ // GatewayClass.
+ GatewayClassReasonPending GatewayClassConditionReason = "Pending"
+
+ // Deprecated: Use "Pending" instead.
+ GatewayClassReasonWaiting GatewayClassConditionReason = "Waiting" +) + +// GatewayClassStatus is the current status for the GatewayClass. +type GatewayClassStatus struct { + // Conditions is the current status from the controller for + // this GatewayClass. + // + // Controllers should prefer to publish conditions using values + // of GatewayClassConditionType for the type of each Condition. + // + // +optional + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:default={{type: "Accepted", status: "Unknown", message: "Waiting for controller", reason: "Pending", lastTransitionTime: "1970-01-01T00:00:00Z"}} + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true + +// GatewayClassList contains a list of GatewayClass +type GatewayClassList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GatewayClass `json:"items"` +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/httproute_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/httproute_types.go new file mode 100644 index 00000000000..e8216ed33cb --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/httproute_types.go @@ -0,0 +1,1115 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories=gateway-api +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Hostnames",type=string,JSONPath=`.spec.hostnames` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// HTTPRoute provides a way to route HTTP requests. This includes the capability +// to match requests by hostname, path, header, or query param. Filters can be +// used to specify additional processing steps. Backends specify where matching +// requests should be routed. +type HTTPRoute struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of HTTPRoute. + Spec HTTPRouteSpec `json:"spec"` + + // Status defines the current state of HTTPRoute. + Status HTTPRouteStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HTTPRouteList contains a list of HTTPRoute. +type HTTPRouteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HTTPRoute `json:"items"` +} + +// HTTPRouteSpec defines the desired state of HTTPRoute +type HTTPRouteSpec struct { + CommonRouteSpec `json:",inline"` + + // Hostnames defines a set of hostnames that should match against the HTTP Host + // header to select a HTTPRoute used to process the request. 
Implementations
+ // MUST ignore any port value specified in the HTTP Host header while
+ // performing a match and (absent of any applicable header modification
+ // configuration) MUST forward this header unmodified to the backend.
+ //
+ // Valid values for Hostnames are determined by the RFC 1123 definition of a
+ // hostname with 2 notable exceptions:
+ //
+ // 1. IPs are not allowed.
+ // 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard
+ //    label must appear by itself as the first label.
+ //
+ // If a hostname is specified by both the Listener and HTTPRoute, there
+ // must be at least one intersecting hostname for the HTTPRoute to be
+ // attached to the Listener. For example:
+ //
+ // * A Listener with `test.example.com` as the hostname matches HTTPRoutes
+ //   that have either not specified any hostnames, or have specified at
+ //   least one of `test.example.com` or `*.example.com`.
+ // * A Listener with `*.example.com` as the hostname matches HTTPRoutes
+ //   that have either not specified any hostnames or have specified at least
+ //   one hostname that matches the Listener hostname. For example,
+ //   `*.example.com`, `test.example.com`, and `foo.test.example.com` would
+ //   all match. On the other hand, `example.com` and `test.example.net` would
+ //   not match.
+ //
+ // Hostnames that are prefixed with a wildcard label (`*.`) are interpreted
+ // as a suffix match. That means that a match for `*.example.com` would match
+ // both `test.example.com`, and `foo.test.example.com`, but not `example.com`.
+ //
+ // If both the Listener and HTTPRoute have specified hostnames, any
+ // HTTPRoute hostnames that do not match the Listener hostname MUST be
+ // ignored. For example, if a Listener specified `*.example.com`, and the
+ // HTTPRoute specified `test.example.com` and `test.example.net`,
+ // `test.example.net` must not be considered for a match.
+ //
+ // If both the Listener and HTTPRoute have specified hostnames, and none
+ // match with the criteria above, then the HTTPRoute is not accepted. The
+ // implementation must raise an 'Accepted' Condition with a status of
+ // `False` in the corresponding RouteParentStatus.
+ //
+ // In the event that multiple HTTPRoutes specify intersecting hostnames (e.g.
+ // overlapping wildcard matching and exact matching hostnames), precedence must
+ // be given to rules from the HTTPRoute with the largest number of:
+ //
+ // * Characters in a matching non-wildcard hostname.
+ // * Characters in a matching hostname.
+ //
+ // If ties exist across multiple Routes, the matching precedence rules for
+ // HTTPRouteMatches take over.
+ //
+ // Support: Core
+ //
+ // +optional
+ // +kubebuilder:validation:MaxItems=16
+ Hostnames []Hostname `json:"hostnames,omitempty"`
+
+ // Rules are a list of HTTP matchers, filters and actions.
+ //
+ // +optional
+ // +kubebuilder:validation:MaxItems=16
+ // +kubebuilder:default={{matches: {{path: {type: "PathPrefix", value: "/"}}}}}
+ Rules []HTTPRouteRule `json:"rules,omitempty"`
+}
+
+// HTTPRouteRule defines semantics for matching an HTTP request based on
+// conditions (matches), processing it (filters), and forwarding the request to
+// an API object (backendRefs).
+//
+// +kubebuilder:validation:XValidation:message="RequestRedirect filter must not be used together with backendRefs",rule="(has(self.backendRefs) && size(self.backendRefs) > 0) ?
(!has(self.filters) || self.filters.all(f, !has(f.requestRedirect))): true" +// +kubebuilder:validation:XValidation:message="When using RequestRedirect filter with path.replacePrefixMatch, exactly one PathPrefix match must be specified",rule="(has(self.filters) && self.filters.exists_one(f, has(f.requestRedirect) && has(f.requestRedirect.path) && f.requestRedirect.path.type == 'ReplacePrefixMatch' && has(f.requestRedirect.path.replacePrefixMatch))) ? ((size(self.matches) != 1 || !has(self.matches[0].path) || self.matches[0].path.type != 'PathPrefix') ? false : true) : true" +// +kubebuilder:validation:XValidation:message="When using URLRewrite filter with path.replacePrefixMatch, exactly one PathPrefix match must be specified",rule="(has(self.filters) && self.filters.exists_one(f, has(f.urlRewrite) && has(f.urlRewrite.path) && f.urlRewrite.path.type == 'ReplacePrefixMatch' && has(f.urlRewrite.path.replacePrefixMatch))) ? ((size(self.matches) != 1 || !has(self.matches[0].path) || self.matches[0].path.type != 'PathPrefix') ? false : true) : true" +// +kubebuilder:validation:XValidation:message="Within backendRefs, when using RequestRedirect filter with path.replacePrefixMatch, exactly one PathPrefix match must be specified",rule="(has(self.backendRefs) && self.backendRefs.exists_one(b, (has(b.filters) && b.filters.exists_one(f, has(f.requestRedirect) && has(f.requestRedirect.path) && f.requestRedirect.path.type == 'ReplacePrefixMatch' && has(f.requestRedirect.path.replacePrefixMatch))) )) ? ((size(self.matches) != 1 || !has(self.matches[0].path) || self.matches[0].path.type != 'PathPrefix') ? false : true) : true" +// +kubebuilder:validation:XValidation:message="Within backendRefs, When using URLRewrite filter with path.replacePrefixMatch, exactly one PathPrefix match must be specified",rule="(has(self.backendRefs) && self.backendRefs.exists_one(b, (has(b.filters) && b.filters.exists_one(f, has(f.urlRewrite) && has(f.urlRewrite.path) && f.urlRewrite.path.type == 'ReplacePrefixMatch' && has(f.urlRewrite.path.replacePrefixMatch))) )) ? ((size(self.matches) != 1 || !has(self.matches[0].path) || self.matches[0].path.type != 'PathPrefix') ? false : true) : true" +type HTTPRouteRule struct { + // Matches define conditions used for matching the rule against incoming + // HTTP requests. Each match is independent, i.e. this rule will be matched + // if **any** one of the matches is satisfied. + // + // For example, take the following matches configuration: + // + // ``` + // matches: + // - path: + // value: "/foo" + // headers: + // - name: "version" + // value: "v2" + // - path: + // value: "/v2/foo" + // ``` + // + // For a request to match against this rule, a request must satisfy + // EITHER of the two conditions: + // + // - path prefixed with `/foo` AND contains the header `version: v2` + // - path prefix of `/v2/foo` + // + // See the documentation for HTTPRouteMatch on how to specify multiple + // match conditions that should be ANDed together. + // + // If no matches are specified, the default is a prefix + // path match on "/", which has the effect of matching every + // HTTP request. + // + // Proxy or Load Balancer routing configuration generated from HTTPRoutes + // MUST prioritize matches based on the following criteria, continuing on + // ties. Across all rules specified on applicable Routes, precedence must be + // given to the match having: + // + // * "Exact" path match. + // * "Prefix" path match with largest number of characters. + // * Method match. 
+ // * Largest number of header matches.
+ // * Largest number of query param matches.
+ //
+ // Note: The precedence of RegularExpression path matches is implementation-specific.
+ //
+ // If ties still exist across multiple Routes, matching precedence MUST be
+ // determined in order of the following criteria, continuing on ties:
+ //
+ // * The oldest Route based on creation timestamp.
+ // * The Route appearing first in alphabetical order by
+ //   "{namespace}/{name}".
+ //
+ // If ties still exist within an HTTPRoute, matching precedence MUST be granted
+ // to the FIRST matching rule (in list order) with a match meeting the above
+ // criteria.
+ //
+ // When no rules matching a request have been successfully attached to the
+ // parent the request is coming from, an HTTP 404 status code MUST be returned.
+ //
+ // +optional
+ // +kubebuilder:validation:MaxItems=8
+ // +kubebuilder:default={{path:{ type: "PathPrefix", value: "/"}}}
+ Matches []HTTPRouteMatch `json:"matches,omitempty"`
+
+ // Filters define the filters that are applied to requests that match
+ // this rule.
+ //
+ // The effects of ordering of multiple behaviors are currently unspecified.
+ // This can change in the future based on feedback during the alpha stage.
+ //
+ // Conformance-levels at this level are defined based on the type of filter:
+ //
+ // - ALL core filters MUST be supported by all implementations.
+ // - Implementers are encouraged to support extended filters.
+ // - Implementation-specific custom filters have no API guarantees across
+ //   implementations.
+ //
+ // Specifying the same filter multiple times is not supported unless explicitly
+ // indicated in the filter.
+ //
+ // All filters are expected to be compatible with each other except for the
+ // URLRewrite and RequestRedirect filters, which may not be combined. If an
+ // implementation cannot support other combinations of filters, it must clearly
+ // document that limitation. In cases where incompatible or unsupported
+ // filters are specified and cause the `Accepted` condition to be set to status
+ // `False`, implementations may use the `IncompatibleFilters` reason to specify
+ // this configuration error.
+ //
+ // Support: Core
+ //
+ // +optional
+ // +kubebuilder:validation:MaxItems=16
+ // +kubebuilder:validation:XValidation:message="May specify either httpRouteFilterRequestRedirect or httpRouteFilterRequestRewrite, but not both",rule="!(self.exists(f, f.type == 'RequestRedirect') && self.exists(f, f.type == 'URLRewrite'))"
+ // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1"
+ // +kubebuilder:validation:XValidation:message="ResponseHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1"
+ // +kubebuilder:validation:XValidation:message="RequestRedirect filter cannot be repeated",rule="self.filter(f, f.type == 'RequestRedirect').size() <= 1"
+ // +kubebuilder:validation:XValidation:message="URLRewrite filter cannot be repeated",rule="self.filter(f, f.type == 'URLRewrite').size() <= 1"
+ Filters []HTTPRouteFilter `json:"filters,omitempty"`
+
+ // BackendRefs defines the backend(s) where matching requests should be
+ // sent.
+ //
+ // Failure behavior here depends on how many BackendRefs are specified and
+ // how many are invalid.
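+ //
+ // As an illustrative sketch (service names, port, and weights are
+ // hypothetical), a rule splitting traffic across two weighted backends:
+ //
+ // ```
+ // backendRefs:
+ // - name: service-v1
+ //   port: 8080
+ //   weight: 90
+ // - name: service-v2
+ //   port: 8080
+ //   weight: 10
+ // ```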
+ // + // If *all* entries in BackendRefs are invalid, and there are also no filters + // specified in this route rule, *all* traffic which matches this rule MUST + // receive a 500 status code. + // + // See the HTTPBackendRef definition for the rules about what makes a single + // HTTPBackendRef invalid. + // + // When a HTTPBackendRef is invalid, 500 status codes MUST be returned for + // requests that would have otherwise been routed to an invalid backend. If + // multiple backends are specified, and some are invalid, the proportion of + // requests that would otherwise have been routed to an invalid backend + // MUST receive a 500 status code. + // + // For example, if two backends are specified with equal weights, and one is + // invalid, 50 percent of traffic must receive a 500. Implementations may + // choose how that 50 percent is determined. + // + // Support: Core for Kubernetes Service + // + // Support: Extended for Kubernetes ServiceImport + // + // Support: Implementation-specific for any other resource + // + // Support for weight: Core + // + // +optional + // +kubebuilder:validation:MaxItems=16 + BackendRefs []HTTPBackendRef `json:"backendRefs,omitempty"` +} + +// PathMatchType specifies the semantics of how HTTP paths should be compared. +// Valid PathMatchType values, along with their support levels, are: +// +// * "Exact" - Core +// * "PathPrefix" - Core +// * "RegularExpression" - Implementation Specific +// +// PathPrefix and Exact paths must be syntactically valid: +// +// - Must begin with the `/` character +// - Must not contain consecutive `/` characters (e.g. `/foo///`, `//`). +// +// Note that values may be added to this enum, implementations +// must ensure that unknown values will not cause a crash. +// +// Unknown values here must result in the implementation setting the +// Accepted Condition for the Route to `status: False`, with a +// Reason of `UnsupportedValue`. +// +// +kubebuilder:validation:Enum=Exact;PathPrefix;RegularExpression +type PathMatchType string + +const ( + // Matches the URL path exactly and with case sensitivity. This means that + // an exact path match on `/abc` will only match requests to `/abc`, NOT + // `/abc/`, `/Abc`, or `/abcd`. + PathMatchExact PathMatchType = "Exact" + + // Matches based on a URL path prefix split by `/`. Matching is + // case sensitive and done on a path element by element basis. A + // path element refers to the list of labels in the path split by + // the `/` separator. When specified, a trailing `/` is ignored. + // + // For example, the paths `/abc`, `/abc/`, and `/abc/def` would all match + // the prefix `/abc`, but the path `/abcd` would not. + // + // "PathPrefix" is semantically equivalent to the "Prefix" path type in the + // Kubernetes Ingress API. + PathMatchPathPrefix PathMatchType = "PathPrefix" + + // Matches if the URL path matches the given regular expression with + // case sensitivity. + // + // Since `"RegularExpression"` has implementation-specific conformance, + // implementations can support POSIX, PCRE, RE2 or any other regular expression + // dialect. + // Please read the implementation's documentation to determine the supported + // dialect. + PathMatchRegularExpression PathMatchType = "RegularExpression" +) + +// HTTPPathMatch describes how to select a HTTP route by matching the HTTP request path. +// +// +kubebuilder:validation:XValidation:message="value must be an absolute path and start with '/' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? 
self.value.startsWith('/') : true" +// +kubebuilder:validation:XValidation:message="must not contain '//' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.contains('//') : true" +// +kubebuilder:validation:XValidation:message="must not contain '/./' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.contains('/./') : true" +// +kubebuilder:validation:XValidation:message="must not contain '/../' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.contains('/../') : true" +// +kubebuilder:validation:XValidation:message="must not contain '%2f' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.contains('%2f') : true" +// +kubebuilder:validation:XValidation:message="must not contain '%2F' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.contains('%2F') : true" +// +kubebuilder:validation:XValidation:message="must not contain '#' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.contains('#') : true" +// +kubebuilder:validation:XValidation:message="must not end with '/..' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.endsWith('/..') : true" +// +kubebuilder:validation:XValidation:message="must not end with '/.' when type one of ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? !self.value.endsWith('/.') : true" +// +kubebuilder:validation:XValidation:message="type must be one of ['Exact', 'PathPrefix', 'RegularExpression']",rule="self.type in ['Exact','PathPrefix'] || self.type == 'RegularExpression'" +// +kubebuilder:validation:XValidation:message="must only contain valid characters (matching ^(?:[-A-Za-z0-9/._~!$&'()*+,;=:@]|[%][0-9a-fA-F]{2})+$) for types ['Exact', 'PathPrefix']",rule="(self.type in ['Exact','PathPrefix']) ? self.value.matches(r\"\"\"^(?:[-A-Za-z0-9/._~!$&'()*+,;=:@]|[%][0-9a-fA-F]{2})+$\"\"\") : true" +type HTTPPathMatch struct { + // Type specifies how to match against the path Value. + // + // Support: Core (Exact, PathPrefix) + // + // Support: Implementation-specific (RegularExpression) + // + // +optional + // +kubebuilder:default=PathPrefix + Type *PathMatchType `json:"type,omitempty"` + + // Value of the HTTP path to match against. + // + // +optional + // +kubebuilder:default="/" + // +kubebuilder:validation:MaxLength=1024 + Value *string `json:"value,omitempty"` +} + +// HeaderMatchType specifies the semantics of how HTTP header values should be +// compared. Valid HeaderMatchType values, along with their conformance levels, are: +// +// * "Exact" - Core +// * "RegularExpression" - Implementation Specific +// +// Note that values may be added to this enum, implementations +// must ensure that unknown values will not cause a crash. +// +// Unknown values here must result in the implementation setting the +// Accepted Condition for the Route to `status: False`, with a +// Reason of `UnsupportedValue`. +// +// +kubebuilder:validation:Enum=Exact;RegularExpression +type HeaderMatchType string + +// HeaderMatchType constants. +const ( + HeaderMatchExact HeaderMatchType = "Exact" + HeaderMatchRegularExpression HeaderMatchType = "RegularExpression" +) + +// HTTPHeaderName is the name of an HTTP header. 
+// +// Valid values include: +// +// * "Authorization" +// * "Set-Cookie" +// +// Invalid values include: +// +// - ":method" - ":" is an invalid character. This means that HTTP/2 pseudo +// headers are not currently supported by this type. +// - "/invalid" - "/ " is an invalid character +type HTTPHeaderName HeaderName + +// HTTPHeaderMatch describes how to select a HTTP route by matching HTTP request +// headers. +type HTTPHeaderMatch struct { + // Type specifies how to match against the value of the header. + // + // Support: Core (Exact) + // + // Support: Implementation-specific (RegularExpression) + // + // Since RegularExpression HeaderMatchType has implementation-specific + // conformance, implementations can support POSIX, PCRE or any other dialects + // of regular expressions. Please read the implementation's documentation to + // determine the supported dialect. + // + // +optional + // +kubebuilder:default=Exact + Type *HeaderMatchType `json:"type,omitempty"` + + // Name is the name of the HTTP Header to be matched. Name matching MUST be + // case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + // + // If multiple entries specify equivalent header names, only the first + // entry with an equivalent name MUST be considered for a match. Subsequent + // entries with an equivalent header name MUST be ignored. Due to the + // case-insensitivity of header names, "foo" and "Foo" are considered + // equivalent. + // + // When a header is repeated in an HTTP request, it is + // implementation-specific behavior as to how this is represented. + // Generally, proxies should follow the guidance from the RFC: + // https://www.rfc-editor.org/rfc/rfc7230.html#section-3.2.2 regarding + // processing a repeated header, with special handling for "Set-Cookie". + Name HTTPHeaderName `json:"name"` + + // Value is the value of HTTP Header to be matched. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=4096 + Value string `json:"value"` +} + +// QueryParamMatchType specifies the semantics of how HTTP query parameter +// values should be compared. Valid QueryParamMatchType values, along with their +// conformance levels, are: +// +// * "Exact" - Core +// * "RegularExpression" - Implementation Specific +// +// Note that values may be added to this enum, implementations +// must ensure that unknown values will not cause a crash. +// +// Unknown values here must result in the implementation setting the +// Accepted Condition for the Route to `status: False`, with a +// Reason of `UnsupportedValue`. +// +// +kubebuilder:validation:Enum=Exact;RegularExpression +type QueryParamMatchType string + +// QueryParamMatchType constants. +const ( + QueryParamMatchExact QueryParamMatchType = "Exact" + QueryParamMatchRegularExpression QueryParamMatchType = "RegularExpression" +) + +// HTTPQueryParamMatch describes how to select a HTTP route by matching HTTP +// query parameters. +type HTTPQueryParamMatch struct { + // Type specifies how to match against the value of the query parameter. + // + // Support: Extended (Exact) + // + // Support: Implementation-specific (RegularExpression) + // + // Since RegularExpression QueryParamMatchType has Implementation-specific + // conformance, implementations can support POSIX, PCRE or any other + // dialects of regular expressions. Please read the implementation's + // documentation to determine the supported dialect. 
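+ //
+ // For illustration (the parameter name and value are hypothetical), an
+ // exact match on a single query parameter might look like:
+ //
+ // ```
+ // queryParams:
+ // - type: Exact
+ //   name: version
+ //   value: v1
+ // ```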
+ //
+ // +optional
+ // +kubebuilder:default=Exact
+ Type *QueryParamMatchType `json:"type,omitempty"`
+
+ // Name is the name of the HTTP query param to be matched. This must be an
+ // exact string match. (See
+ // https://tools.ietf.org/html/rfc7230#section-2.7.3).
+ //
+ // If multiple entries specify equivalent query param names, only the first
+ // entry with an equivalent name MUST be considered for a match. Subsequent
+ // entries with an equivalent query param name MUST be ignored.
+ //
+ // If a query param is repeated in an HTTP request, the behavior is
+ // purposely left undefined, since different data planes have different
+ // capabilities. However, it is *recommended* that implementations should
+ // match against the first value of the param if the data plane supports it,
+ // as this behavior is expected in other load balancing contexts outside of
+ // the Gateway API.
+ //
+ // Users SHOULD NOT route traffic based on repeated query params to guard
+ // themselves against potential differences in the implementations.
+ Name HTTPHeaderName `json:"name"`
+
+ // Value is the value of HTTP query param to be matched.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=1024
+ Value string `json:"value"`
+}
+
+// HTTPMethod describes how to select a HTTP route by matching the HTTP
+// method as defined by
+// [RFC 7231](https://datatracker.ietf.org/doc/html/rfc7231#section-4) and
+// [RFC 5789](https://datatracker.ietf.org/doc/html/rfc5789#section-2).
+// The value is expected in upper case.
+//
+// Note that values may be added to this enum, implementations
+// must ensure that unknown values will not cause a crash.
+//
+// Unknown values here must result in the implementation setting the
+// Accepted Condition for the Route to `status: False`, with a
+// Reason of `UnsupportedValue`.
+//
+// +kubebuilder:validation:Enum=GET;HEAD;POST;PUT;DELETE;CONNECT;OPTIONS;TRACE;PATCH
+type HTTPMethod string
+
+const (
+ HTTPMethodGet HTTPMethod = "GET"
+ HTTPMethodHead HTTPMethod = "HEAD"
+ HTTPMethodPost HTTPMethod = "POST"
+ HTTPMethodPut HTTPMethod = "PUT"
+ HTTPMethodDelete HTTPMethod = "DELETE"
+ HTTPMethodConnect HTTPMethod = "CONNECT"
+ HTTPMethodOptions HTTPMethod = "OPTIONS"
+ HTTPMethodTrace HTTPMethod = "TRACE"
+ HTTPMethodPatch HTTPMethod = "PATCH"
+)
+
+// HTTPRouteMatch defines the predicate used to match requests to a given
+// action. Multiple match types are ANDed together, i.e. the match will
+// evaluate to true only if all conditions are satisfied.
+//
+// For example, the match below will match a HTTP request only if its path
+// starts with `/foo` AND it contains the `version: v1` header:
+//
+// ```
+// match:
+//
+//   path:
+//     value: "/foo"
+//   headers:
+//   - name: "version"
+//     value: "v1"
+//
+// ```
+type HTTPRouteMatch struct {
+ // Path specifies a HTTP request path matcher. If this field is not
+ // specified, a default prefix match on the "/" path is provided.
+ //
+ // +optional
+ // +kubebuilder:default={type: "PathPrefix", value: "/"}
+ Path *HTTPPathMatch `json:"path,omitempty"`
+
+ // Headers specifies HTTP request header matchers. Multiple match values are
+ // ANDed together, meaning, a request must match all the specified headers
+ // to select the route.
+ //
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ // +kubebuilder:validation:MaxItems=16
+ Headers []HTTPHeaderMatch `json:"headers,omitempty"`
+
+ // QueryParams specifies HTTP query parameter matchers.
Multiple match + // values are ANDed together, meaning, a request must match all the + // specified query parameters to select the route. + // + // Support: Extended + // + // +listType=map + // +listMapKey=name + // +optional + // +kubebuilder:validation:MaxItems=16 + QueryParams []HTTPQueryParamMatch `json:"queryParams,omitempty"` + + // Method specifies HTTP method matcher. + // When specified, this route will be matched only if the request has the + // specified method. + // + // Support: Extended + // + // +optional + Method *HTTPMethod `json:"method,omitempty"` +} + +// HTTPRouteFilter defines processing steps that must be completed during the +// request or response lifecycle. HTTPRouteFilters are meant as an extension +// point to express processing that may be done in Gateway implementations. Some +// examples include request or response modification, implementing +// authentication strategies, rate-limiting, and traffic shaping. API +// guarantee/conformance is defined based on the type of the filter. +// +// +kubebuilder:validation:XValidation:message="filter.requestHeaderModifier must be nil if the filter.type is not RequestHeaderModifier",rule="!(has(self.requestHeaderModifier) && self.type != 'RequestHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.requestHeaderModifier must be specified for RequestHeaderModifier filter.type",rule="!(!has(self.requestHeaderModifier) && self.type == 'RequestHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.responseHeaderModifier must be nil if the filter.type is not ResponseHeaderModifier",rule="!(has(self.responseHeaderModifier) && self.type != 'ResponseHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.responseHeaderModifier must be specified for ResponseHeaderModifier filter.type",rule="!(!has(self.responseHeaderModifier) && self.type == 'ResponseHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.requestMirror must be nil if the filter.type is not RequestMirror",rule="!(has(self.requestMirror) && self.type != 'RequestMirror')" +// +kubebuilder:validation:XValidation:message="filter.requestMirror must be specified for RequestMirror filter.type",rule="!(!has(self.requestMirror) && self.type == 'RequestMirror')" +// +kubebuilder:validation:XValidation:message="filter.requestRedirect must be nil if the filter.type is not RequestRedirect",rule="!(has(self.requestRedirect) && self.type != 'RequestRedirect')" +// +kubebuilder:validation:XValidation:message="filter.requestRedirect must be specified for RequestRedirect filter.type",rule="!(!has(self.requestRedirect) && self.type == 'RequestRedirect')" +// +kubebuilder:validation:XValidation:message="filter.urlRewrite must be nil if the filter.type is not URLRewrite",rule="!(has(self.urlRewrite) && self.type != 'URLRewrite')" +// +kubebuilder:validation:XValidation:message="filter.urlRewrite must be specified for URLRewrite filter.type",rule="!(!has(self.urlRewrite) && self.type == 'URLRewrite')" +// +kubebuilder:validation:XValidation:message="filter.extensionRef must be nil if the filter.type is not ExtensionRef",rule="!(has(self.extensionRef) && self.type != 'ExtensionRef')" +// +kubebuilder:validation:XValidation:message="filter.extensionRef must be specified for ExtensionRef filter.type",rule="!(!has(self.extensionRef) && self.type == 'ExtensionRef')" +type HTTPRouteFilter struct { + // Type identifies the type of filter to apply. 
As with other API fields,
+ // types are classified into three conformance levels:
+ //
+ // - Core: Filter types and their corresponding configuration defined by
+ // "Support: Core" in this package, e.g. "RequestHeaderModifier". All
+ // implementations must support core filters.
+ //
+ // - Extended: Filter types and their corresponding configuration defined by
+ // "Support: Extended" in this package, e.g. "RequestMirror". Implementers
+ // are encouraged to support extended filters.
+ //
+ // - Implementation-specific: Filters that are defined and supported by
+ // specific vendors.
+ // In the future, filters showing convergence in behavior across multiple
+ // implementations will be considered for inclusion in extended or core
+ // conformance levels. Filter-specific configuration for such filters
+ // is specified using the ExtensionRef field. `Type` should be set to
+ // "ExtensionRef" for custom filters.
+ //
+ // Implementers are encouraged to define custom implementation types to
+ // extend the core API with implementation-specific behavior.
+ //
+ // If a reference to a custom filter type cannot be resolved, the filter
+ // MUST NOT be skipped. Instead, requests that would have been processed by
+ // that filter MUST receive an HTTP error response.
+ //
+ // Note that values may be added to this enum, implementations
+ // must ensure that unknown values will not cause a crash.
+ //
+ // Unknown values here must result in the implementation setting the
+ // Accepted Condition for the Route to `status: False`, with a
+ // Reason of `UnsupportedValue`.
+ //
+ // +unionDiscriminator
+ // +kubebuilder:validation:Enum=RequestHeaderModifier;ResponseHeaderModifier;RequestMirror;RequestRedirect;URLRewrite;ExtensionRef
+ Type HTTPRouteFilterType `json:"type"`
+
+ // RequestHeaderModifier defines a schema for a filter that modifies request
+ // headers.
+ //
+ // Support: Core
+ //
+ // +optional
+ RequestHeaderModifier *HTTPHeaderFilter `json:"requestHeaderModifier,omitempty"`
+
+ // ResponseHeaderModifier defines a schema for a filter that modifies response
+ // headers.
+ //
+ // Support: Extended
+ //
+ // +optional
+ ResponseHeaderModifier *HTTPHeaderFilter `json:"responseHeaderModifier,omitempty"`
+
+ // RequestMirror defines a schema for a filter that mirrors requests.
+ // Requests are sent to the specified destination, but responses from
+ // that destination are ignored.
+ //
+ // This filter can be used multiple times within the same rule. Note that
+ // not all implementations will be able to support mirroring to multiple
+ // backends.
+ //
+ // Support: Extended
+ //
+ // +optional
+ RequestMirror *HTTPRequestMirrorFilter `json:"requestMirror,omitempty"`
+
+ // RequestRedirect defines a schema for a filter that responds to the
+ // request with an HTTP redirection.
+ //
+ // Support: Core
+ //
+ // +optional
+ RequestRedirect *HTTPRequestRedirectFilter `json:"requestRedirect,omitempty"`
+
+ // URLRewrite defines a schema for a filter that modifies a request during forwarding.
+ //
+ // Support: Extended
+ //
+ // +optional
+ URLRewrite *HTTPURLRewriteFilter `json:"urlRewrite,omitempty"`
+
+ // ExtensionRef is an optional, implementation-specific extension to the
+ // "filter" behavior. For example, resource "myroutefilter" in group
+ // "networking.example.net". ExtensionRef MUST NOT be used for core and
+ // extended filters.
+ //
+ // This filter can be used multiple times within the same rule.
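+ //
+ // For illustration only, a non-normative sketch of an ExtensionRef
+ // filter (reusing the hypothetical group and resource named above; the
+ // kind is likewise made up):
+ //
+ //   filters:
+ //   - type: ExtensionRef
+ //     extensionRef:
+ //       group: "networking.example.net"
+ //       kind: "MyRouteFilter"
+ //       name: "myroutefilter"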
+ // + // Support: Implementation-specific + // + // +optional + ExtensionRef *LocalObjectReference `json:"extensionRef,omitempty"` +} + +// HTTPRouteFilterType identifies a type of HTTPRoute filter. +type HTTPRouteFilterType string + +const ( + // HTTPRouteFilterRequestHeaderModifier can be used to add or remove an HTTP + // header from an HTTP request before it is sent to the upstream target. + // + // Support in HTTPRouteRule: Core + // + // Support in HTTPBackendRef: Extended + HTTPRouteFilterRequestHeaderModifier HTTPRouteFilterType = "RequestHeaderModifier" + + // HTTPRouteFilterResponseHeaderModifier can be used to add or remove an HTTP + // header from an HTTP response before it is sent to the client. + // + // Support in HTTPRouteRule: Extended + // + // Support in HTTPBackendRef: Extended + HTTPRouteFilterResponseHeaderModifier HTTPRouteFilterType = "ResponseHeaderModifier" + + // HTTPRouteFilterRequestRedirect can be used to redirect a request to + // another location. This filter can also be used for HTTP to HTTPS + // redirects. This may not be used on the same Route rule or BackendRef as a + // URLRewrite filter. + // + // Support in HTTPRouteRule: Core + // + // Support in HTTPBackendRef: Extended + HTTPRouteFilterRequestRedirect HTTPRouteFilterType = "RequestRedirect" + + // HTTPRouteFilterURLRewrite can be used to modify a request during + // forwarding. At most one of these filters may be used on a Route rule. + // This may not be used on the same Route rule or BackendRef as a + // RequestRedirect filter. + // + // Support in HTTPRouteRule: Extended + // + // Support in HTTPBackendRef: Extended + HTTPRouteFilterURLRewrite HTTPRouteFilterType = "URLRewrite" + + // HTTPRouteFilterRequestMirror can be used to mirror HTTP requests to a + // different backend. The responses from this backend MUST be ignored by + // the Gateway. + // + // Support in HTTPRouteRule: Extended + // + // Support in HTTPBackendRef: Extended + HTTPRouteFilterRequestMirror HTTPRouteFilterType = "RequestMirror" + + // HTTPRouteFilterExtensionRef should be used for configuring custom + // HTTP filters. + // + // Support in HTTPRouteRule: Implementation-specific + // + // Support in HTTPBackendRef: Implementation-specific + HTTPRouteFilterExtensionRef HTTPRouteFilterType = "ExtensionRef" +) + +// HTTPHeader represents an HTTP Header name and value as defined by RFC 7230. +type HTTPHeader struct { + // Name is the name of the HTTP Header to be matched. Name matching MUST be + // case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + // + // If multiple entries specify equivalent header names, the first entry with + // an equivalent name MUST be considered for a match. Subsequent entries + // with an equivalent header name MUST be ignored. Due to the + // case-insensitivity of header names, "foo" and "Foo" are considered + // equivalent. + Name HTTPHeaderName `json:"name"` + + // Value is the value of HTTP Header to be matched. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=4096 + Value string `json:"value"` +} + +// HTTPHeaderFilter defines a filter that modifies the headers of an HTTP +// request or response. Only one action for a given header name is permitted. +// Filters specifying multiple actions of the same or different type for any one +// header name are invalid and will be rejected by the webhook if installed. 
+// Configuration to set or add multiple values for a header must use RFC 7230 +// header value formatting, separating each value with a comma. +type HTTPHeaderFilter struct { + // Set overwrites the request with the given header (name, value) + // before the action. + // + // Input: + // GET /foo HTTP/1.1 + // my-header: foo + // + // Config: + // set: + // - name: "my-header" + // value: "bar" + // + // Output: + // GET /foo HTTP/1.1 + // my-header: bar + // + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=16 + Set []HTTPHeader `json:"set,omitempty"` + + // Add adds the given header(s) (name, value) to the request + // before the action. It appends to any existing values associated + // with the header name. + // + // Input: + // GET /foo HTTP/1.1 + // my-header: foo + // + // Config: + // add: + // - name: "my-header" + // value: "bar,baz" + // + // Output: + // GET /foo HTTP/1.1 + // my-header: foo,bar,baz + // + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=16 + Add []HTTPHeader `json:"add,omitempty"` + + // Remove the given header(s) from the HTTP request before the action. The + // value of Remove is a list of HTTP header names. Note that the header + // names are case-insensitive (see + // https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + // + // Input: + // GET /foo HTTP/1.1 + // my-header1: foo + // my-header2: bar + // my-header3: baz + // + // Config: + // remove: ["my-header1", "my-header3"] + // + // Output: + // GET /foo HTTP/1.1 + // my-header2: bar + // + // +optional + // +listType=set + // +kubebuilder:validation:MaxItems=16 + Remove []string `json:"remove,omitempty"` +} + +// HTTPPathModifierType defines the type of path redirect or rewrite. +type HTTPPathModifierType string + +const ( + // This type of modifier indicates that the full path will be replaced + // by the specified value. + FullPathHTTPPathModifier HTTPPathModifierType = "ReplaceFullPath" + + // This type of modifier indicates that any prefix path matches will be + // replaced by the substitution value. For example, a path with a prefix + // match of "/foo" and a ReplacePrefixMatch substitution of "/bar" will have + // the "/foo" prefix replaced with "/bar" in matching requests. + // + // Note that this matches the behavior of the PathPrefix match type. This + // matches full path elements. A path element refers to the list of labels + // in the path split by the `/` separator. When specified, a trailing `/` is + // ignored. For example, the paths `/abc`, `/abc/`, and `/abc/def` would all + // match the prefix `/abc`, but the path `/abcd` would not. + PrefixMatchHTTPPathModifier HTTPPathModifierType = "ReplacePrefixMatch" +) + +// HTTPPathModifier defines configuration for path modifiers. +// +// +kubebuilder:validation:XValidation:message="replaceFullPath must be specified when type is set to 'ReplaceFullPath'",rule="self.type == 'ReplaceFullPath' ? has(self.replaceFullPath) : true" +// +kubebuilder:validation:XValidation:message="type must be 'ReplaceFullPath' when replaceFullPath is set",rule="has(self.replaceFullPath) ? self.type == 'ReplaceFullPath' : true" +// +kubebuilder:validation:XValidation:message="replacePrefixMatch must be specified when type is set to 'ReplacePrefixMatch'",rule="self.type == 'ReplacePrefixMatch' ? 
has(self.replacePrefixMatch) : true"
+// +kubebuilder:validation:XValidation:message="type must be 'ReplacePrefixMatch' when replacePrefixMatch is set",rule="has(self.replacePrefixMatch) ? self.type == 'ReplacePrefixMatch' : true"
+type HTTPPathModifier struct {
+ // Type defines the type of path modifier. Additional types may be
+ // added in a future release of the API.
+ //
+ // Note that values may be added to this enum, implementations
+ // must ensure that unknown values will not cause a crash.
+ //
+ // Unknown values here must result in the implementation setting the
+ // Accepted Condition for the Route to `status: False`, with a
+ // Reason of `UnsupportedValue`.
+ //
+ // +kubebuilder:validation:Enum=ReplaceFullPath;ReplacePrefixMatch
+ Type HTTPPathModifierType `json:"type"`
+
+ // ReplaceFullPath specifies the value with which to replace the full path
+ // of a request during a rewrite or redirect.
+ //
+ // +kubebuilder:validation:MaxLength=1024
+ // +optional
+ ReplaceFullPath *string `json:"replaceFullPath,omitempty"`
+
+ // ReplacePrefixMatch specifies the value with which to replace the prefix
+ // match of a request during a rewrite or redirect. For example, a request
+ // to "/foo/bar" with a prefix match of "/foo" and a ReplacePrefixMatch
+ // of "/xyz" would be modified to "/xyz/bar".
+ //
+ // Note that this matches the behavior of the PathPrefix match type. This
+ // matches full path elements. A path element refers to the list of labels
+ // in the path split by the `/` separator. When specified, a trailing `/` is
+ // ignored. For example, the paths `/abc`, `/abc/`, and `/abc/def` would all
+ // match the prefix `/abc`, but the path `/abcd` would not.
+ //
+ // ReplacePrefixMatch is only compatible with a `PathPrefix` HTTPRouteMatch.
+ // Using any other HTTPRouteMatch type on the same HTTPRouteRule will result in
+ // the implementation setting the Accepted Condition for the Route to `status: False`.
+ //
+ // Request Path | Prefix Match | Replace Prefix | Modified Path
+ // -------------|--------------|----------------|--------------
+ // /foo/bar     | /foo         | /xyz           | /xyz/bar
+ // /foo/bar     | /foo         | /xyz/          | /xyz/bar
+ // /foo/bar     | /foo/        | /xyz           | /xyz/bar
+ // /foo/bar     | /foo/        | /xyz/          | /xyz/bar
+ // /foo         | /foo         | /xyz           | /xyz
+ // /foo/        | /foo         | /xyz           | /xyz/
+ // /foo/bar     | /foo         |                | /bar
+ // /foo/        | /foo         |                | /
+ // /foo         | /foo         |                | /
+ // /foo/        | /foo         | /              | /
+ // /foo         | /foo         | /              | /
+ //
+ // +kubebuilder:validation:MaxLength=1024
+ // +optional
+ ReplacePrefixMatch *string `json:"replacePrefixMatch,omitempty"`
+}
+
+// HTTPRequestRedirectFilter defines a filter that redirects a request. This filter
+// MUST NOT be used on the same Route rule as an HTTPURLRewrite filter.
+type HTTPRequestRedirectFilter struct {
+ // Scheme is the scheme to be used in the value of the `Location` header in
+ // the response. When empty, the scheme of the request is used.
+ //
+ // Scheme redirects can affect the port of the redirect; for more information,
+ // refer to the documentation for the port field of this filter.
+ //
+ // Note that values may be added to this enum, implementations
+ // must ensure that unknown values will not cause a crash.
+ //
+ // Unknown values here must result in the implementation setting the
+ // Accepted Condition for the Route to `status: False`, with a
+ // Reason of `UnsupportedValue`.
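+ //
+ // For illustration only (a non-normative sketch using fields of this
+ // filter), a permanent HTTP-to-HTTPS redirect could be written as:
+ //
+ //   requestRedirect:
+ //     scheme: "https"
+ //     statusCode: 301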
+ //
+ // Support: Extended
+ //
+ // +optional
+ // +kubebuilder:validation:Enum=http;https
+ Scheme *string `json:"scheme,omitempty"`
+
+ // Hostname is the hostname to be used in the value of the `Location`
+ // header in the response.
+ // When empty, the hostname in the `Host` header of the request is used.
+ //
+ // Support: Core
+ //
+ // +optional
+ Hostname *PreciseHostname `json:"hostname,omitempty"`
+
+ // Path defines parameters used to modify the path of the incoming request.
+ // The modified path is then used to construct the `Location` header. When
+ // empty, the request path is used as-is.
+ //
+ // Support: Extended
+ //
+ // +optional
+ Path *HTTPPathModifier `json:"path,omitempty"`
+
+ // Port is the port to be used in the value of the `Location`
+ // header in the response.
+ //
+ // If no port is specified, the redirect port MUST be derived using the
+ // following rules:
+ //
+ // * If redirect scheme is non-empty, the redirect port MUST be the well-known
+ // port associated with the redirect scheme. Specifically "http" to port 80
+ // and "https" to port 443. If the redirect scheme does not have a
+ // well-known port, the listener port of the Gateway SHOULD be used.
+ // * If redirect scheme is empty, the redirect port MUST be the Gateway
+ // Listener port.
+ //
+ // Implementations SHOULD NOT add the port number in the 'Location'
+ // header in the following cases:
+ //
+ // * A Location header that will use HTTP (whether that is determined via
+ // the Listener protocol or the Scheme field) _and_ use port 80.
+ // * A Location header that will use HTTPS (whether that is determined via
+ // the Listener protocol or the Scheme field) _and_ use port 443.
+ //
+ // Support: Extended
+ //
+ // +optional
+ Port *PortNumber `json:"port,omitempty"`
+
+ // StatusCode is the HTTP status code to be used in the response.
+ //
+ // Note that values may be added to this enum, implementations
+ // must ensure that unknown values will not cause a crash.
+ //
+ // Unknown values here must result in the implementation setting the
+ // Accepted Condition for the Route to `status: False`, with a
+ // Reason of `UnsupportedValue`.
+ //
+ // Support: Core
+ //
+ // +optional
+ // +kubebuilder:default=302
+ // +kubebuilder:validation:Enum=301;302
+ StatusCode *int `json:"statusCode,omitempty"`
+}
+
+// HTTPURLRewriteFilter defines a filter that modifies a request during
+// forwarding. At most one of these filters may be used on a Route rule. This
+// MUST NOT be used on the same Route rule as an HTTPRequestRedirect filter.
+//
+// Support: Extended
+type HTTPURLRewriteFilter struct {
+ // Hostname is the value to be used to replace the Host header value during
+ // forwarding.
+ //
+ // Support: Extended
+ //
+ // +optional
+ Hostname *PreciseHostname `json:"hostname,omitempty"`
+
+ // Path defines a path rewrite.
+ //
+ // Support: Extended
+ //
+ // +optional
+ Path *HTTPPathModifier `json:"path,omitempty"`
+}
+
+// HTTPRequestMirrorFilter defines configuration for the RequestMirror filter.
+type HTTPRequestMirrorFilter struct {
+ // BackendRef references a resource where mirrored requests are sent.
+ //
+ // Mirrored requests must be sent only to a single destination endpoint
+ // within this BackendRef, irrespective of how many endpoints are present
+ // within this BackendRef.
+ //
+ // If the referent cannot be found, this BackendRef is invalid and must be
+ // dropped from the Gateway.
The controller must ensure the "ResolvedRefs"
+ // condition on the Route status is set to `status: False` and not configure
+ // this backend in the underlying implementation.
+ //
+ // If there is a cross-namespace reference to an *existing* object
+ // that is not allowed by a ReferenceGrant, the controller must ensure the
+ // "ResolvedRefs" condition on the Route is set to `status: False`,
+ // with the "RefNotPermitted" reason and not configure this backend in the
+ // underlying implementation.
+ //
+ // In either error case, the Message of the `ResolvedRefs` Condition
+ // should be used to provide more detail about the problem.
+ //
+ // Support: Extended for Kubernetes Service
+ //
+ // Support: Implementation-specific for any other resource
+ BackendRef BackendObjectReference `json:"backendRef"`
+}
+
+// HTTPBackendRef defines how an HTTPRoute should forward an HTTP request.
+type HTTPBackendRef struct {
+ // BackendRef is a reference to a backend to forward matched requests to.
+ //
+ // A BackendRef can be invalid for the following reasons. In all cases, the
+ // implementation MUST ensure the `ResolvedRefs` Condition on the Route
+ // is set to `status: False`, with a Reason and Message that indicate the
+ // cause of the error.
+ //
+ // A BackendRef is invalid if:
+ //
+ // * It refers to an unknown or unsupported kind of resource. In this
+ // case, the Reason must be set to `InvalidKind` and Message of the
+ // Condition must explain which kind of resource is unknown or unsupported.
+ //
+ // * It refers to a resource that does not exist. In this case, the Reason must
+ // be set to `BackendNotFound` and the Message of the Condition must explain
+ // which resource does not exist.
+ //
+ // * It refers to a resource in another namespace when the reference has not been
+ // explicitly allowed by a ReferenceGrant (or equivalent concept). In this
+ // case, the Reason must be set to `RefNotPermitted` and the Message of the
+ // Condition must explain which cross-namespace reference is not allowed.
+ //
+ // Support: Core for Kubernetes Service
+ //
+ // Support: Implementation-specific for any other resource
+ //
+ // Support for weight: Core
+ //
+ // +optional
+ BackendRef `json:",inline"`
+
+ // Filters defined at this level should be executed if and only if the
+ // request is being forwarded to the backend defined here.
+ //
+ // Support: Implementation-specific (For broader support of filters, use the
+ // Filters field in HTTPRouteRule.)
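+ //
+ // For illustration only (a non-normative sketch; the backend name,
+ // port, and header are made up), a backend-level filter could be
+ // written as:
+ //
+ //   backendRefs:
+ //   - name: "example-svc"
+ //     port: 8080
+ //     filters:
+ //     - type: RequestHeaderModifier
+ //       requestHeaderModifier:
+ //         set:
+ //         - name: "x-backend"
+ //           value: "example"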
+ //
+ // +optional
+ // +kubebuilder:validation:MaxItems=16
+ // +kubebuilder:validation:XValidation:message="May specify either httpRouteFilterRequestRedirect or httpRouteFilterRequestRewrite, but not both",rule="!(self.exists(f, f.type == 'RequestRedirect') && self.exists(f, f.type == 'URLRewrite'))"
+ // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1"
+ // +kubebuilder:validation:XValidation:message="ResponseHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1"
+ // +kubebuilder:validation:XValidation:message="RequestRedirect filter cannot be repeated",rule="self.filter(f, f.type == 'RequestRedirect').size() <= 1"
+ // +kubebuilder:validation:XValidation:message="URLRewrite filter cannot be repeated",rule="self.filter(f, f.type == 'URLRewrite').size() <= 1"
+ Filters []HTTPRouteFilter `json:"filters,omitempty"`
+}
+
+// HTTPRouteStatus defines the observed state of HTTPRoute.
+type HTTPRouteStatus struct {
+ RouteStatus `json:",inline"`
+}
diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/object_reference_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/object_reference_types.go
new file mode 100644
index 00000000000..4ef1c37891a
--- /dev/null
+++ b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/object_reference_types.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// LocalObjectReference identifies an API object within the namespace of the
+// referrer.
+// The API object must be valid in the cluster; the Group and Kind must
+// be registered in the cluster for this reference to be valid.
+//
+// References to objects with invalid Group and Kind are not valid, and must
+// be rejected by the implementation, with appropriate Conditions set
+// on the containing object.
+type LocalObjectReference struct {
+ // Group is the group of the referent. For example, "gateway.networking.k8s.io".
+ // When unspecified or empty string, core API group is inferred.
+ Group Group `json:"group"`
+
+ // Kind is the kind of the referent. For example, "HTTPRoute" or "Service".
+ Kind Kind `json:"kind"`
+
+ // Name is the name of the referent.
+ Name ObjectName `json:"name"`
+}
+
+// SecretObjectReference identifies an API object including its namespace,
+// defaulting to Secret.
+//
+// The API object must be valid in the cluster; the Group and Kind must
+// be registered in the cluster for this reference to be valid.
+//
+// References to objects with invalid Group and Kind are not valid, and must
+// be rejected by the implementation, with appropriate Conditions set
+// on the containing object.
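+//
+// For illustration only (a non-normative sketch; the Secret name is made
+// up), a field typed as a list of SecretObjectReference, such as a Gateway
+// listener's `certificateRefs`, could be populated as:
+//
+//   certificateRefs:
+//   - kind: Secret
+//     name: example-com-cert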
+type SecretObjectReference struct {
+ // Group is the group of the referent. For example, "gateway.networking.k8s.io".
+ // When unspecified or empty string, core API group is inferred.
+ //
+ // +optional
+ // +kubebuilder:default=""
+ Group *Group `json:"group"`
+
+ // Kind is the kind of the referent. For example, "Secret".
+ //
+ // +optional
+ // +kubebuilder:default=Secret
+ Kind *Kind `json:"kind"`
+
+ // Name is the name of the referent.
+ Name ObjectName `json:"name"`
+
+ // Namespace is the namespace of the referent. When unspecified, the local
+ // namespace is inferred.
+ //
+ // Note that when a namespace different than the local namespace is specified,
+ // a ReferenceGrant object is required in the referent namespace to allow that
+ // namespace's owner to accept the reference. See the ReferenceGrant
+ // documentation for details.
+ //
+ // Support: Core
+ //
+ // +optional
+ Namespace *Namespace `json:"namespace,omitempty"`
+}
+
+// BackendObjectReference defines an ObjectReference that is
+// specific to BackendRef. It includes a few additional fields and features
+// compared to a regular ObjectReference.
+//
+// Note that when a namespace different than the local namespace is specified, a
+// ReferenceGrant object is required in the referent namespace to allow that
+// namespace's owner to accept the reference. See the ReferenceGrant
+// documentation for details.
+//
+// The API object must be valid in the cluster; the Group and Kind must
+// be registered in the cluster for this reference to be valid.
+//
+// References to objects with invalid Group and Kind are not valid, and must
+// be rejected by the implementation, with appropriate Conditions set
+// on the containing object.
+//
+// +kubebuilder:validation:XValidation:message="Must have port for Service reference",rule="(size(self.group) == 0 && self.kind == 'Service') ? has(self.port) : true"
+type BackendObjectReference struct {
+ // Group is the group of the referent. For example, "gateway.networking.k8s.io".
+ // When unspecified or empty string, core API group is inferred.
+ //
+ // +optional
+ // +kubebuilder:default=""
+ Group *Group `json:"group,omitempty"`
+
+ // Kind is the Kubernetes resource kind of the referent. For example
+ // "Service".
+ //
+ // Defaults to "Service" when not specified.
+ //
+ // ExternalName services can refer to CNAME DNS records that may live
+ // outside of the cluster and as such are difficult to reason about in
+ // terms of conformance. They also may not be safe to forward to (see
+ // CVE-2021-25740 for more information). Implementations SHOULD NOT
+ // support ExternalName Services.
+ //
+ // Support: Core (Services with a type other than ExternalName)
+ //
+ // Support: Implementation-specific (Services with type ExternalName)
+ //
+ // +optional
+ // +kubebuilder:default=Service
+ Kind *Kind `json:"kind,omitempty"`
+
+ // Name is the name of the referent.
+ Name ObjectName `json:"name"`
+
+ // Namespace is the namespace of the backend. When unspecified, the local
+ // namespace is inferred.
+ //
+ // Note that when a namespace different than the local namespace is specified,
+ // a ReferenceGrant object is required in the referent namespace to allow that
+ // namespace's owner to accept the reference. See the ReferenceGrant
+ // documentation for details.
+ //
+ // Support: Core
+ //
+ // +optional
+ Namespace *Namespace `json:"namespace,omitempty"`
+
+ // Port specifies the destination port number to use for this resource.
+ // Port is required when the referent is a Kubernetes Service. In this + // case, the port number is the service port number, not the target port. + // For other resources, destination port might be derived from the referent + // resource or this field. + // + // +optional + Port *PortNumber `json:"port,omitempty"` +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/referencegrant_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/referencegrant_types.go new file mode 100644 index 00000000000..0b0caf70882 --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/referencegrant_types.go @@ -0,0 +1,143 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories=gateway-api,shortName=refgrant +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:storageversion + +// ReferenceGrant identifies kinds of resources in other namespaces that are +// trusted to reference the specified kinds of resources in the same namespace +// as the policy. +// +// Each ReferenceGrant can be used to represent a unique trust relationship. +// Additional Reference Grants can be used to add to the set of trusted +// sources of inbound references for the namespace they are defined within. +// +// All cross-namespace references in Gateway API (with the exception of cross-namespace +// Gateway-route attachment) require a ReferenceGrant. +// +// ReferenceGrant is a form of runtime verification allowing users to assert +// which cross-namespace object references are permitted. Implementations that +// support ReferenceGrant MUST NOT permit cross-namespace references which have +// no grant, and MUST respond to the removal of a grant by revoking the access +// that the grant allowed. +type ReferenceGrant struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of ReferenceGrant. + Spec ReferenceGrantSpec `json:"spec,omitempty"` + + // Note that `Status` sub-resource has been excluded at the + // moment as it was difficult to work out the design. + // `Status` sub-resource may be added in future. +} + +// +kubebuilder:object:root=true +// ReferenceGrantList contains a list of ReferenceGrant. +type ReferenceGrantList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ReferenceGrant `json:"items"` +} + +// ReferenceGrantSpec identifies a cross namespace relationship that is trusted +// for Gateway API. +type ReferenceGrantSpec struct { + // From describes the trusted namespaces and kinds that can reference the + // resources described in "To". Each entry in this list MUST be considered + // to be an additional place that references can be valid from, or to put + // this another way, entries MUST be combined using OR. 
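+ //
+ // For illustration only (a non-normative sketch; the names and
+ // namespaces are made up), a ReferenceGrant allowing HTTPRoutes in
+ // namespace "prod-web" to reference Services in the grant's own
+ // namespace could look like:
+ //
+ //   apiVersion: gateway.networking.k8s.io/v1beta1
+ //   kind: ReferenceGrant
+ //   metadata:
+ //     name: allow-prod-web
+ //     namespace: prod-backend
+ //   spec:
+ //     from:
+ //     - group: gateway.networking.k8s.io
+ //       kind: HTTPRoute
+ //       namespace: prod-web
+ //     to:
+ //     - group: ""
+ //       kind: Service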
+ // + // Support: Core + // + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 + From []ReferenceGrantFrom `json:"from"` + + // To describes the resources that may be referenced by the resources + // described in "From". Each entry in this list MUST be considered to be an + // additional place that references can be valid to, or to put this another + // way, entries MUST be combined using OR. + // + // Support: Core + // + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 + To []ReferenceGrantTo `json:"to"` +} + +// ReferenceGrantFrom describes trusted namespaces and kinds. +type ReferenceGrantFrom struct { + // Group is the group of the referent. + // When empty, the Kubernetes core API group is inferred. + // + // Support: Core + Group Group `json:"group"` + + // Kind is the kind of the referent. Although implementations may support + // additional resources, the following types are part of the "Core" + // support level for this field. + // + // When used to permit a SecretObjectReference: + // + // * Gateway + // + // When used to permit a BackendObjectReference: + // + // * GRPCRoute + // * HTTPRoute + // * TCPRoute + // * TLSRoute + // * UDPRoute + Kind Kind `json:"kind"` + + // Namespace is the namespace of the referent. + // + // Support: Core + Namespace Namespace `json:"namespace"` +} + +// ReferenceGrantTo describes what Kinds are allowed as targets of the +// references. +type ReferenceGrantTo struct { + // Group is the group of the referent. + // When empty, the Kubernetes core API group is inferred. + // + // Support: Core + Group Group `json:"group"` + + // Kind is the kind of the referent. Although implementations may support + // additional resources, the following types are part of the "Core" + // support level for this field: + // + // * Secret when used to permit a SecretObjectReference + // * Service when used to permit a BackendObjectReference + Kind Kind `json:"kind"` + + // Name is the name of the referent. When unspecified, this policy + // refers to all resources of the specified Group and Kind in the local + // namespace. + // + // +optional + Name *ObjectName `json:"name,omitempty"` +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/shared_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/shared_types.go new file mode 100644 index 00000000000..b18af1d0e06 --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/shared_types.go @@ -0,0 +1,643 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ParentReference identifies an API object (usually a Gateway) that can be considered +// a parent of this resource (usually a route). 
There are two kinds of parent resources +// with "Core" support: +// +// * Gateway (Gateway conformance profile) +// * Service (Mesh conformance profile, experimental, ClusterIP Services only) +// +// This API may be extended in the future to support additional kinds of parent +// resources. +// +// The API object must be valid in the cluster; the Group and Kind must +// be registered in the cluster for this reference to be valid. +type ParentReference struct { + // Group is the group of the referent. + // When unspecified, "gateway.networking.k8s.io" is inferred. + // To set the core API group (such as for a "Service" kind referent), + // Group must be explicitly set to "" (empty string). + // + // Support: Core + // + // +kubebuilder:default=gateway.networking.k8s.io + // +optional + Group *Group `json:"group,omitempty"` + + // Kind is kind of the referent. + // + // There are two kinds of parent resources with "Core" support: + // + // * Gateway (Gateway conformance profile) + // * Service (Mesh conformance profile, experimental, ClusterIP Services only) + // + // Support for other resources is Implementation-Specific. + // + // +kubebuilder:default=Gateway + // +optional + Kind *Kind `json:"kind,omitempty"` + + // Namespace is the namespace of the referent. When unspecified, this refers + // to the local namespace of the Route. + // + // Note that there are specific rules for ParentRefs which cross namespace + // boundaries. Cross-namespace references are only valid if they are explicitly + // allowed by something in the namespace they are referring to. For example: + // Gateway has the AllowedRoutes field, and ReferenceGrant provides a + // generic way to enable any other kind of cross-namespace reference. + // + // ParentRefs from a Route to a Service in the same namespace are "producer" + // routes, which apply default routing rules to inbound connections from + // any namespace to the Service. + // + // ParentRefs from a Route to a Service in a different namespace are + // "consumer" routes, and these routing rules are only applied to outbound + // connections originating from the same namespace as the Route, for which + // the intended destination of the connections are a Service targeted as a + // ParentRef of the Route. + // + // Support: Core + // + // +optional + Namespace *Namespace `json:"namespace,omitempty"` + + // Name is the name of the referent. + // + // Support: Core + Name ObjectName `json:"name"` + + // SectionName is the name of a section within the target resource. In the + // following resources, SectionName is interpreted as the following: + // + // * Gateway: Listener Name. When both Port (experimental) and SectionName + // are specified, the name and port of the selected listener must match + // both specified values. + // * Service: Port Name. When both Port (experimental) and SectionName + // are specified, the name and port of the selected listener must match + // both specified values. Note that attaching Routes to Services as Parents + // is part of experimental Mesh support and is not supported for any other + // purpose. + // + // Implementations MAY choose to support attaching Routes to other resources. + // If that is the case, they MUST clearly document how SectionName is + // interpreted. + // + // When unspecified (empty string), this will reference the entire resource. + // For the purpose of status, an attachment is considered successful if at + // least one section in the parent resource accepts it. 
For example, Gateway
+ // listeners can restrict which Routes can attach to them by Route kind,
+ // namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from
+ // the referencing Route, the Route MUST be considered successfully
+ // attached. If no Gateway listeners accept attachment from this Route, the
+ // Route MUST be considered detached from the Gateway.
+ //
+ // Support: Core
+ //
+ // +optional
+ SectionName *SectionName `json:"sectionName,omitempty"`
+
+ // Port is the network port this Route targets. It can be interpreted
+ // differently based on the type of parent resource.
+ //
+ // When the parent resource is a Gateway, this targets all listeners
+ // listening on the specified port that also support this kind of Route (and
+ // select this Route). It's not recommended to set `Port` unless the
+ // networking behaviors specified in a Route must apply to a specific port
+ // as opposed to a listener(s) whose port(s) may be changed. When both Port
+ // and SectionName are specified, the name and port of the selected listener
+ // must match both specified values.
+ //
+ // When the parent resource is a Service, this targets a specific port in the
+ // Service spec. When both Port (experimental) and SectionName are specified,
+ // the name and port of the selected port must match both specified values.
+ //
+ // Implementations MAY choose to support other parent resources.
+ // Implementations supporting other types of parent resources MUST clearly
+ // document how/if Port is interpreted.
+ //
+ // For the purpose of status, an attachment is considered successful as
+ // long as the parent resource accepts it partially. For example, Gateway
+ // listeners can restrict which Routes can attach to them by Route kind,
+ // namespace, or hostname. If 1 of 2 Gateway listeners accept attachment
+ // from the referencing Route, the Route MUST be considered successfully
+ // attached. If no Gateway listeners accept attachment from this Route,
+ // the Route MUST be considered detached from the Gateway.
+ //
+ // Support: Extended
+ //
+ // +optional
+ //
+ Port *PortNumber `json:"port,omitempty"`
+}
+
+// CommonRouteSpec defines the common attributes that all Routes MUST include
+// within their spec.
+type CommonRouteSpec struct {
+ // ParentRefs references the resources (usually Gateways) that a Route wants
+ // to be attached to. Note that the referenced parent resource needs to
+ // allow this for the attachment to be complete. For Gateways, that means
+ // the Gateway needs to allow attachment from Routes of this kind and
+ // namespace. For Services, that means the Service must either be in the same
+ // namespace for a "producer" route, or the mesh implementation must support
+ // and allow "consumer" routes for the referenced Service. ReferenceGrant is
+ // not applicable for governing ParentRefs to Services - it is not possible to
+ // create a "producer" route for a Service in a different namespace from the
+ // Route.
+ //
+ // There are two kinds of parent resources with "Core" support:
+ //
+ // * Gateway (Gateway conformance profile)
+ // * Service (Mesh conformance profile, experimental, ClusterIP Services only)
+ //
+ // This API may be extended in the future to support additional kinds of parent
+ // resources.
+ //
+ // It is invalid to reference an identical parent more than once.
It is + // valid to reference multiple distinct sections within the same parent + // resource, such as two separate Listeners on the same Gateway or two separate + // ports on the same Service. + // + // It is possible to separately reference multiple distinct objects that may + // be collapsed by an implementation. For example, some implementations may + // choose to merge compatible Gateway Listeners together. If that is the + // case, the list of routes attached to those resources should also be + // merged. + // + // Note that for ParentRefs that cross namespace boundaries, there are specific + // rules. Cross-namespace references are only valid if they are explicitly + // allowed by something in the namespace they are referring to. For example, + // Gateway has the AllowedRoutes field, and ReferenceGrant provides a + // generic way to enable other kinds of cross-namespace reference. + // + // ParentRefs from a Route to a Service in the same namespace are "producer" + // routes, which apply default routing rules to inbound connections from + // any namespace to the Service. + // + // ParentRefs from a Route to a Service in a different namespace are + // "consumer" routes, and these routing rules are only applied to outbound + // connections originating from the same namespace as the Route, for which + // the intended destination of the connections are a Service targeted as a + // ParentRef of the Route. + // + // +optional + // +kubebuilder:validation:MaxItems=32 + // + // + // + // + ParentRefs []ParentReference `json:"parentRefs,omitempty"` +} + +// PortNumber defines a network port. +// +// +kubebuilder:validation:Minimum=1 +// +kubebuilder:validation:Maximum=65535 +type PortNumber int32 + +// BackendRef defines how a Route should forward a request to a Kubernetes +// resource. +// +// Note that when a namespace different than the local namespace is specified, a +// ReferenceGrant object is required in the referent namespace to allow that +// namespace's owner to accept the reference. See the ReferenceGrant +// documentation for details. +type BackendRef struct { + // BackendObjectReference references a Kubernetes object. + BackendObjectReference `json:",inline"` + + // Weight specifies the proportion of requests forwarded to the referenced + // backend. This is computed as weight/(sum of all weights in this + // BackendRefs list). For non-zero values, there may be some epsilon from + // the exact proportion defined here depending on the precision an + // implementation supports. Weight is not a percentage and the sum of + // weights does not need to equal 100. + // + // If only one backend is specified and it has a weight greater than 0, 100% + // of the traffic is forwarded to that backend. If weight is set to 0, no + // traffic should be forwarded for this entry. If unspecified, weight + // defaults to 1. + // + // Support for this field varies based on the context where used. + // + // +optional + // +kubebuilder:default=1 + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=1000000 + Weight *int32 `json:"weight,omitempty"` +} + +// RouteConditionType is a type of condition for a route. +type RouteConditionType string + +// RouteConditionReason is a reason for a route condition. +type RouteConditionReason string + +const ( + // This condition indicates whether the route has been accepted or rejected + // by a Gateway, and why. 
+ //
+ // Possible reasons for this condition to be True are:
+ //
+ // * "Accepted"
+ //
+ // Possible reasons for this condition to be False are:
+ //
+ // * "NotAllowedByListeners"
+ // * "NoMatchingListenerHostname"
+ // * "NoMatchingParent"
+ // * "UnsupportedValue"
+ //
+ // Possible reasons for this condition to be Unknown are:
+ //
+ // * "Pending"
+ //
+ // Controllers may raise this condition with other reasons,
+ // but should prefer to use the reasons listed above to improve
+ // interoperability.
+ RouteConditionAccepted RouteConditionType = "Accepted"
+
+ // This reason is used with the "Accepted" condition when the Route has been
+ // accepted by the Gateway.
+ RouteReasonAccepted RouteConditionReason = "Accepted"
+
+ // This reason is used with the "Accepted" condition when the route has not
+ // been accepted by a Gateway because the Gateway has no Listener whose
+ // allowedRoutes criteria permit the route.
+ RouteReasonNotAllowedByListeners RouteConditionReason = "NotAllowedByListeners"
+
+ // This reason is used with the "Accepted" condition when the Gateway has no
+ // compatible Listeners whose Hostname matches the route.
+ RouteReasonNoMatchingListenerHostname RouteConditionReason = "NoMatchingListenerHostname"
+
+ // This reason is used with the "Accepted" condition when there are
+ // no matching Parents. In the case of Gateways, this can occur when
+ // a Route ParentRef specifies a Port and/or SectionName that does not
+ // match any Listeners in the Gateway.
+ RouteReasonNoMatchingParent RouteConditionReason = "NoMatchingParent"
+
+ // This reason is used with the "Accepted" condition when a value for an Enum
+ // is not recognized.
+ RouteReasonUnsupportedValue RouteConditionReason = "UnsupportedValue"
+
+ // This reason is used with the "Accepted" condition when a controller has not
+ // yet reconciled the route.
+ RouteReasonPending RouteConditionReason = "Pending"
+
+ // This reason is used with the "Accepted" condition when there
+ // are incompatible filters present on a route rule (for example if
+ // the URLRewrite and RequestRedirect are both present on an HTTPRoute).
+ RouteReasonIncompatibleFilters RouteConditionReason = "IncompatibleFilters"
+
+ // This condition indicates whether the controller was able to resolve all
+ // the object references for the Route.
+ //
+ // Possible reasons for this condition to be True are:
+ //
+ // * "ResolvedRefs"
+ //
+ // Possible reasons for this condition to be False are:
+ //
+ // * "RefNotPermitted"
+ // * "InvalidKind"
+ // * "BackendNotFound"
+ //
+ // Controllers may raise this condition with other reasons,
+ // but should prefer to use the reasons listed above to improve
+ // interoperability.
+ RouteConditionResolvedRefs RouteConditionType = "ResolvedRefs"
+
+ // This reason is used with the "ResolvedRefs" condition when the condition
+ // is true.
+ RouteReasonResolvedRefs RouteConditionReason = "ResolvedRefs"
+
+ // This reason is used with the "ResolvedRefs" condition when
+ // one of the Listener's Routes has a BackendRef to an object in
+ // another namespace, where the object in the other namespace does
+ // not have a ReferenceGrant explicitly allowing the reference.
+ RouteReasonRefNotPermitted RouteConditionReason = "RefNotPermitted"
+
+ // This reason is used with the "ResolvedRefs" condition when
+ // one of the Route's rules has a reference to an unknown or unsupported
+ // Group and/or Kind.
+ RouteReasonInvalidKind RouteConditionReason = "InvalidKind" + + // This reason is used with the "ResolvedRefs" condition when one of the + // Route's rules has a reference to a resource that does not exist. + RouteReasonBackendNotFound RouteConditionReason = "BackendNotFound" +) + +// RouteParentStatus describes the status of a route with respect to an +// associated Parent. +type RouteParentStatus struct { + // ParentRef corresponds with a ParentRef in the spec that this + // RouteParentStatus struct describes the status of. + ParentRef ParentReference `json:"parentRef"` + + // ControllerName is a domain/path string that indicates the name of the + // controller that wrote this status. This corresponds with the + // controllerName field on GatewayClass. + // + // Example: "example.net/gateway-controller". + // + // The format of this field is DOMAIN "/" PATH, where DOMAIN and PATH are + // valid Kubernetes names + // (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + // + // Controllers MUST populate this field when writing status. Controllers should ensure that + // entries to status populated with their ControllerName are cleaned up when they are no + // longer necessary. + ControllerName GatewayController `json:"controllerName"` + + // Conditions describes the status of the route with respect to the Gateway. + // Note that the route's availability is also subject to the Gateway's own + // status conditions and listener status. + // + // If the Route's ParentRef specifies an existing Gateway that supports + // Routes of this kind AND that Gateway's controller has sufficient access, + // then that Gateway's controller MUST set the "Accepted" condition on the + // Route, to indicate whether the route has been accepted or rejected by the + // Gateway, and why. + // + // A Route MUST be considered "Accepted" if at least one of the Route's + // rules is implemented by the Gateway. + // + // There are a number of cases where the "Accepted" condition may not be set + // due to lack of controller visibility, that includes when: + // + // * The Route refers to a non-existent parent. + // * The Route is of a type that the controller does not support. + // * The Route is in a namespace the controller does not have access to. + // + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=8 + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// RouteStatus defines the common attributes that all Routes MUST include within +// their status. +type RouteStatus struct { + // Parents is a list of parent resources (usually Gateways) that are + // associated with the route, and the status of the route with respect to + // each parent. When this route attaches to a parent, the controller that + // manages the parent must add an entry to this list when the controller + // first sees the route and should update the entry as appropriate when the + // route or gateway is modified. + // + // Note that parent references that cannot be resolved by an implementation + // of this API will not be added to this list. Implementations of this API + // can only populate Route status for the Gateways/parent resources they are + // responsible for. + // + // A maximum of 32 Gateways will be represented in this list. An empty list + // means the route has not been attached to any Gateway. 
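+ //
+ // For illustration only (a non-normative sketch; the Gateway name and
+ // controller name are made up, and required Condition fields such as
+ // lastTransitionTime are omitted), a populated status could look like:
+ //
+ //   parents:
+ //   - parentRef:
+ //       name: example-gateway
+ //     controllerName: "example.net/gateway-controller"
+ //     conditions:
+ //     - type: Accepted
+ //       status: "True"
+ //       reason: Accepted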
+ // + // +kubebuilder:validation:MaxItems=32 + Parents []RouteParentStatus `json:"parents"` +} + +// Hostname is the fully qualified domain name of a network host. This matches +// the RFC 1123 definition of a hostname with 2 notable exceptions: +// +// 1. IPs are not allowed. +// 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard +// label must appear by itself as the first label. +// +// Hostname can be "precise" which is a domain name without the terminating +// dot of a network host (e.g. "foo.example.com") or "wildcard", which is a +// domain name prefixed with a single wildcard label (e.g. `*.example.com`). +// +// Note that as per RFC1035 and RFC1123, a *label* must consist of lower case +// alphanumeric characters or '-', and must start and end with an alphanumeric +// character. No other punctuation is allowed. +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` +type Hostname string + +// PreciseHostname is the fully qualified domain name of a network host. This +// matches the RFC 1123 definition of a hostname with 1 notable exception that +// numeric IP addresses are not allowed. +// +// Note that as per RFC1035 and RFC1123, a *label* must consist of lower case +// alphanumeric characters or '-', and must start and end with an alphanumeric +// character. No other punctuation is allowed. +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` +type PreciseHostname string + +// Group refers to a Kubernetes Group. It must either be an empty string or a +// RFC 1123 subdomain. +// +// This validation is based off of the corresponding Kubernetes validation: +// https://github.com/kubernetes/apimachinery/blob/02cfb53916346d085a6c6c7c66f882e3c6b0eca6/pkg/util/validation/validation.go#L208 +// +// Valid values include: +// +// * "" - empty string implies core Kubernetes API group +// * "gateway.networking.k8s.io" +// * "foo.example.com" +// +// Invalid values include: +// +// * "example.com/bar" - "/" is an invalid character +// +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` +type Group string + +// Kind refers to a Kubernetes Kind. +// +// Valid values include: +// +// * "Service" +// * "HTTPRoute" +// +// Invalid values include: +// +// * "invalid/kind" - "/" is an invalid character +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=63 +// +kubebuilder:validation:Pattern=`^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$` +type Kind string + +// ObjectName refers to the name of a Kubernetes object. +// Object names can have a variety of forms, including RFC1123 subdomains, +// RFC 1123 labels, or RFC 1035 labels. +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +type ObjectName string + +// Namespace refers to a Kubernetes namespace. It must be a RFC 1123 label. 
+// +// This validation is based off of the corresponding Kubernetes validation: +// https://github.com/kubernetes/apimachinery/blob/02cfb53916346d085a6c6c7c66f882e3c6b0eca6/pkg/util/validation/validation.go#L187 +// +// This is used for Namespace name validation here: +// https://github.com/kubernetes/apimachinery/blob/02cfb53916346d085a6c6c7c66f882e3c6b0eca6/pkg/api/validation/generic.go#L63 +// +// Valid values include: +// +// * "example" +// +// Invalid values include: +// +// * "example.com" - "." is an invalid character +// +// +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$` +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=63 +type Namespace string + +// SectionName is the name of a section in a Kubernetes resource. +// +// This validation is based off of the corresponding Kubernetes validation: +// https://github.com/kubernetes/apimachinery/blob/02cfb53916346d085a6c6c7c66f882e3c6b0eca6/pkg/util/validation/validation.go#L208 +// +// Valid values include: +// +// * "example.com" +// * "foo.example.com" +// +// Invalid values include: +// +// * "example.com/bar" - "/" is an invalid character +// +// +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +type SectionName string + +// GatewayController is the name of a Gateway API controller. It must be a +// domain prefixed path. +// +// Valid values include: +// +// * "example.com/bar" +// +// Invalid values include: +// +// * "example.com" - must include path +// * "foo.example.com" - must include path +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$` +type GatewayController string + +// AnnotationKey is the key of an annotation in Gateway API. This is used for +// validation of maps such as TLS options. This matches the Kubernetes +// "qualified name" validation that is used for annotations and other common +// values. +// +// Valid values include: +// +// * example +// * example.com +// * example.com/path +// * example.com/path.html +// +// Invalid values include: +// +// * example~ - "~" is an invalid character +// * example.com. - can not start or end with "." +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]/?)*$` +type AnnotationKey string + +// AnnotationValue is the value of an annotation in Gateway API. This is used +// for validation of maps such as TLS options. This roughly matches Kubernetes +// annotation validation, although the length validation in that case is based +// on the entire size of the annotations struct. +// +// +kubebuilder:validation:MinLength=0 +// +kubebuilder:validation:MaxLength=4096 +type AnnotationValue string + +// AddressType defines how a network address is represented as a text string. +// This may take two possible forms: +// +// * A predefined CamelCase string identifier (currently limited to `IPAddress` or `Hostname`) +// * A domain-prefixed string identifier (like `acme.io/CustomAddressType`) +// +// Values `IPAddress` and `Hostname` have Extended support. +// +// The `NamedAddress` value has been deprecated in favor of implementation +// specific domain-prefixed strings. 
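+// Editorial aside, not part of the vendored file: the GatewayController format
+// above is DOMAIN "/" PATH, so the two halves can be separated with the
+// standard library; a minimal sketch where ctrl is a hypothetical
+// GatewayController value (assumes import "strings"):
+//
+//	domain, path, ok := strings.Cut(string(ctrl), "/")
+//	if !ok {
+//		// e.g. "example.com": invalid, the path component is required.
+//	}
+//	_ = domain // e.g. "example.net"
+//	_ = path   // e.g. "gateway-controller"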
+// +// All other values, including domain-prefixed values have Implementation-specific support, +// which are used in implementation-specific behaviors. Support for additional +// predefined CamelCase identifiers may be added in future releases. +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^Hostname|IPAddress|NamedAddress|[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$` +type AddressType string + +// HeaderName is the name of a header or query parameter. +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=256 +// +kubebuilder:validation:Pattern=`^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$` +// +k8s:deepcopy-gen=false +type HeaderName string + +const ( + // A textual representation of a numeric IP address. IPv4 + // addresses must be in dotted-decimal form. IPv6 addresses + // must be in a standard IPv6 text representation + // (see [RFC 5952](https://tools.ietf.org/html/rfc5952)). + // + // This type is intended for specific addresses. Address ranges are not + // supported (e.g. you can not use a CIDR range like 127.0.0.0/24 as an + // IPAddress). + // + // Support: Extended + IPAddressType AddressType = "IPAddress" + + // A Hostname represents a DNS based ingress point. This is similar to the + // corresponding hostname field in Kubernetes load balancer status. For + // example, this concept may be used for cloud load balancers where a DNS + // name is used to expose a load balancer. + // + // Support: Extended + HostnameAddressType AddressType = "Hostname" + + // A NamedAddress provides a way to reference a specific IP address by name. + // For example, this may be a name or other unique identifier that refers + // to a resource on a cloud provider such as a static IP. + // + // The `NamedAddress` type has been deprecated in favor of implementation + // specific domain-prefixed strings. + // + // Support: Implementation-specific + NamedAddressType AddressType = "NamedAddress" +) diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..d1d46269d26 --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,1271 @@ +//go:build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
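+// Editorial aside, not part of the generated file: these helpers exist so that
+// controllers can mutate a copy instead of aliasing objects shared through the
+// informer cache. A minimal usage sketch, where gw is a hypothetical *Gateway
+// taken from a lister and extraListener a hypothetical Listener:
+//
+//	gwCopy := gw.DeepCopy()
+//	gwCopy.Spec.Listeners = append(gwCopy.Spec.Listeners, extraListener)
+//	// gw, and therefore the shared cache, stays untouched.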
+func (in *AllowedRoutes) DeepCopyInto(out *AllowedRoutes) { + *out = *in + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = new(RouteNamespaces) + (*in).DeepCopyInto(*out) + } + if in.Kinds != nil { + in, out := &in.Kinds, &out.Kinds + *out = make([]RouteGroupKind, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedRoutes. +func (in *AllowedRoutes) DeepCopy() *AllowedRoutes { + if in == nil { + return nil + } + out := new(AllowedRoutes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendObjectReference) DeepCopyInto(out *BackendObjectReference) { + *out = *in + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(Group) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(Kind) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(Namespace) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(PortNumber) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendObjectReference. +func (in *BackendObjectReference) DeepCopy() *BackendObjectReference { + if in == nil { + return nil + } + out := new(BackendObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendRef) DeepCopyInto(out *BackendRef) { + *out = *in + in.BackendObjectReference.DeepCopyInto(&out.BackendObjectReference) + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendRef. +func (in *BackendRef) DeepCopy() *BackendRef { + if in == nil { + return nil + } + out := new(BackendRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonRouteSpec) DeepCopyInto(out *CommonRouteSpec) { + *out = *in + if in.ParentRefs != nil { + in, out := &in.ParentRefs, &out.ParentRefs + *out = make([]ParentReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonRouteSpec. +func (in *CommonRouteSpec) DeepCopy() *CommonRouteSpec { + if in == nil { + return nil + } + out := new(CommonRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gateway) DeepCopyInto(out *Gateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gateway. +func (in *Gateway) DeepCopy() *Gateway { + if in == nil { + return nil + } + out := new(Gateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Gateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayAddress) DeepCopyInto(out *GatewayAddress) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(AddressType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayAddress. +func (in *GatewayAddress) DeepCopy() *GatewayAddress { + if in == nil { + return nil + } + out := new(GatewayAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayClass) DeepCopyInto(out *GatewayClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClass. +func (in *GatewayClass) DeepCopy() *GatewayClass { + if in == nil { + return nil + } + out := new(GatewayClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GatewayClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayClassList) DeepCopyInto(out *GatewayClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GatewayClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassList. +func (in *GatewayClassList) DeepCopy() *GatewayClassList { + if in == nil { + return nil + } + out := new(GatewayClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GatewayClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayClassSpec) DeepCopyInto(out *GatewayClassSpec) { + *out = *in + if in.ParametersRef != nil { + in, out := &in.ParametersRef, &out.ParametersRef + *out = new(ParametersReference) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassSpec. +func (in *GatewayClassSpec) DeepCopy() *GatewayClassSpec { + if in == nil { + return nil + } + out := new(GatewayClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayClassStatus) DeepCopyInto(out *GatewayClassStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassStatus. +func (in *GatewayClassStatus) DeepCopy() *GatewayClassStatus { + if in == nil { + return nil + } + out := new(GatewayClassStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayList) DeepCopyInto(out *GatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Gateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayList. +func (in *GatewayList) DeepCopy() *GatewayList { + if in == nil { + return nil + } + out := new(GatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) { + *out = *in + if in.Listeners != nil { + in, out := &in.Listeners, &out.Listeners + *out = make([]Listener, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]GatewayAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec. +func (in *GatewaySpec) DeepCopy() *GatewaySpec { + if in == nil { + return nil + } + out := new(GatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayStatus) DeepCopyInto(out *GatewayStatus) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]GatewayStatusAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Listeners != nil { + in, out := &in.Listeners, &out.Listeners + *out = make([]ListenerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayStatus. +func (in *GatewayStatus) DeepCopy() *GatewayStatus { + if in == nil { + return nil + } + out := new(GatewayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayStatusAddress) DeepCopyInto(out *GatewayStatusAddress) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(AddressType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayStatusAddress. +func (in *GatewayStatusAddress) DeepCopy() *GatewayStatusAddress { + if in == nil { + return nil + } + out := new(GatewayStatusAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayTLSConfig) DeepCopyInto(out *GatewayTLSConfig) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(TLSModeType) + **out = **in + } + if in.CertificateRefs != nil { + in, out := &in.CertificateRefs, &out.CertificateRefs + *out = make([]SecretObjectReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[AnnotationKey]AnnotationValue, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayTLSConfig. +func (in *GatewayTLSConfig) DeepCopy() *GatewayTLSConfig { + if in == nil { + return nil + } + out := new(GatewayTLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPBackendRef) DeepCopyInto(out *HTTPBackendRef) { + *out = *in + in.BackendRef.DeepCopyInto(&out.BackendRef) + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]HTTPRouteFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendRef. +func (in *HTTPBackendRef) DeepCopy() *HTTPBackendRef { + if in == nil { + return nil + } + out := new(HTTPBackendRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. +func (in *HTTPHeader) DeepCopy() *HTTPHeader { + if in == nil { + return nil + } + out := new(HTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHeaderFilter) DeepCopyInto(out *HTTPHeaderFilter) { + *out = *in + if in.Set != nil { + in, out := &in.Set, &out.Set + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + if in.Remove != nil { + in, out := &in.Remove, &out.Remove + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaderFilter. +func (in *HTTPHeaderFilter) DeepCopy() *HTTPHeaderFilter { + if in == nil { + return nil + } + out := new(HTTPHeaderFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPHeaderMatch) DeepCopyInto(out *HTTPHeaderMatch) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(HeaderMatchType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaderMatch. +func (in *HTTPHeaderMatch) DeepCopy() *HTTPHeaderMatch { + if in == nil { + return nil + } + out := new(HTTPHeaderMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPPathMatch) DeepCopyInto(out *HTTPPathMatch) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(PathMatchType) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPPathMatch. +func (in *HTTPPathMatch) DeepCopy() *HTTPPathMatch { + if in == nil { + return nil + } + out := new(HTTPPathMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPPathModifier) DeepCopyInto(out *HTTPPathModifier) { + *out = *in + if in.ReplaceFullPath != nil { + in, out := &in.ReplaceFullPath, &out.ReplaceFullPath + *out = new(string) + **out = **in + } + if in.ReplacePrefixMatch != nil { + in, out := &in.ReplacePrefixMatch, &out.ReplacePrefixMatch + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPPathModifier. +func (in *HTTPPathModifier) DeepCopy() *HTTPPathModifier { + if in == nil { + return nil + } + out := new(HTTPPathModifier) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPQueryParamMatch) DeepCopyInto(out *HTTPQueryParamMatch) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(QueryParamMatchType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPQueryParamMatch. +func (in *HTTPQueryParamMatch) DeepCopy() *HTTPQueryParamMatch { + if in == nil { + return nil + } + out := new(HTTPQueryParamMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRequestMirrorFilter) DeepCopyInto(out *HTTPRequestMirrorFilter) { + *out = *in + in.BackendRef.DeepCopyInto(&out.BackendRef) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRequestMirrorFilter. +func (in *HTTPRequestMirrorFilter) DeepCopy() *HTTPRequestMirrorFilter { + if in == nil { + return nil + } + out := new(HTTPRequestMirrorFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRequestRedirectFilter) DeepCopyInto(out *HTTPRequestRedirectFilter) { + *out = *in + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(PreciseHostname) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(HTTPPathModifier) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(PortNumber) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRequestRedirectFilter. +func (in *HTTPRequestRedirectFilter) DeepCopy() *HTTPRequestRedirectFilter { + if in == nil { + return nil + } + out := new(HTTPRequestRedirectFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRoute) DeepCopyInto(out *HTTPRoute) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRoute. +func (in *HTTPRoute) DeepCopy() *HTTPRoute { + if in == nil { + return nil + } + out := new(HTTPRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HTTPRoute) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteFilter) DeepCopyInto(out *HTTPRouteFilter) { + *out = *in + if in.RequestHeaderModifier != nil { + in, out := &in.RequestHeaderModifier, &out.RequestHeaderModifier + *out = new(HTTPHeaderFilter) + (*in).DeepCopyInto(*out) + } + if in.ResponseHeaderModifier != nil { + in, out := &in.ResponseHeaderModifier, &out.ResponseHeaderModifier + *out = new(HTTPHeaderFilter) + (*in).DeepCopyInto(*out) + } + if in.RequestMirror != nil { + in, out := &in.RequestMirror, &out.RequestMirror + *out = new(HTTPRequestMirrorFilter) + (*in).DeepCopyInto(*out) + } + if in.RequestRedirect != nil { + in, out := &in.RequestRedirect, &out.RequestRedirect + *out = new(HTTPRequestRedirectFilter) + (*in).DeepCopyInto(*out) + } + if in.URLRewrite != nil { + in, out := &in.URLRewrite, &out.URLRewrite + *out = new(HTTPURLRewriteFilter) + (*in).DeepCopyInto(*out) + } + if in.ExtensionRef != nil { + in, out := &in.ExtensionRef, &out.ExtensionRef + *out = new(LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteFilter. +func (in *HTTPRouteFilter) DeepCopy() *HTTPRouteFilter { + if in == nil { + return nil + } + out := new(HTTPRouteFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRouteList) DeepCopyInto(out *HTTPRouteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HTTPRoute, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteList. +func (in *HTTPRouteList) DeepCopy() *HTTPRouteList { + if in == nil { + return nil + } + out := new(HTTPRouteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HTTPRouteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteMatch) DeepCopyInto(out *HTTPRouteMatch) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(HTTPPathMatch) + (*in).DeepCopyInto(*out) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]HTTPHeaderMatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]HTTPQueryParamMatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(HTTPMethod) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatch. +func (in *HTTPRouteMatch) DeepCopy() *HTTPRouteMatch { + if in == nil { + return nil + } + out := new(HTTPRouteMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteRule) DeepCopyInto(out *HTTPRouteRule) { + *out = *in + if in.Matches != nil { + in, out := &in.Matches, &out.Matches + *out = make([]HTTPRouteMatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]HTTPRouteFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendRefs != nil { + in, out := &in.BackendRefs, &out.BackendRefs + *out = make([]HTTPBackendRef, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRule. +func (in *HTTPRouteRule) DeepCopy() *HTTPRouteRule { + if in == nil { + return nil + } + out := new(HTTPRouteRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteSpec) DeepCopyInto(out *HTTPRouteSpec) { + *out = *in + in.CommonRouteSpec.DeepCopyInto(&out.CommonRouteSpec) + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]Hostname, len(*in)) + copy(*out, *in) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]HTTPRouteRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteSpec. 
+func (in *HTTPRouteSpec) DeepCopy() *HTTPRouteSpec { + if in == nil { + return nil + } + out := new(HTTPRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteStatus) DeepCopyInto(out *HTTPRouteStatus) { + *out = *in + in.RouteStatus.DeepCopyInto(&out.RouteStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteStatus. +func (in *HTTPRouteStatus) DeepCopy() *HTTPRouteStatus { + if in == nil { + return nil + } + out := new(HTTPRouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPURLRewriteFilter) DeepCopyInto(out *HTTPURLRewriteFilter) { + *out = *in + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(PreciseHostname) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(HTTPPathModifier) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPURLRewriteFilter. +func (in *HTTPURLRewriteFilter) DeepCopy() *HTTPURLRewriteFilter { + if in == nil { + return nil + } + out := new(HTTPURLRewriteFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Listener) DeepCopyInto(out *Listener) { + *out = *in + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(Hostname) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(GatewayTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.AllowedRoutes != nil { + in, out := &in.AllowedRoutes, &out.AllowedRoutes + *out = new(AllowedRoutes) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Listener. +func (in *Listener) DeepCopy() *Listener { + if in == nil { + return nil + } + out := new(Listener) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerStatus) DeepCopyInto(out *ListenerStatus) { + *out = *in + if in.SupportedKinds != nil { + in, out := &in.SupportedKinds, &out.SupportedKinds + *out = make([]RouteGroupKind, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerStatus. +func (in *ListenerStatus) DeepCopy() *ListenerStatus { + if in == nil { + return nil + } + out := new(ListenerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. 
+func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { + if in == nil { + return nil + } + out := new(LocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametersReference) DeepCopyInto(out *ParametersReference) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(Namespace) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersReference. +func (in *ParametersReference) DeepCopy() *ParametersReference { + if in == nil { + return nil + } + out := new(ParametersReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParentReference) DeepCopyInto(out *ParentReference) { + *out = *in + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(Group) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(Kind) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(Namespace) + **out = **in + } + if in.SectionName != nil { + in, out := &in.SectionName, &out.SectionName + *out = new(SectionName) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(PortNumber) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. +func (in *ParentReference) DeepCopy() *ParentReference { + if in == nil { + return nil + } + out := new(ParentReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceGrant) DeepCopyInto(out *ReferenceGrant) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceGrant. +func (in *ReferenceGrant) DeepCopy() *ReferenceGrant { + if in == nil { + return nil + } + out := new(ReferenceGrant) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReferenceGrant) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceGrantFrom) DeepCopyInto(out *ReferenceGrantFrom) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceGrantFrom. +func (in *ReferenceGrantFrom) DeepCopy() *ReferenceGrantFrom { + if in == nil { + return nil + } + out := new(ReferenceGrantFrom) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReferenceGrantList) DeepCopyInto(out *ReferenceGrantList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReferenceGrant, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceGrantList. +func (in *ReferenceGrantList) DeepCopy() *ReferenceGrantList { + if in == nil { + return nil + } + out := new(ReferenceGrantList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReferenceGrantList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceGrantSpec) DeepCopyInto(out *ReferenceGrantSpec) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]ReferenceGrantFrom, len(*in)) + copy(*out, *in) + } + if in.To != nil { + in, out := &in.To, &out.To + *out = make([]ReferenceGrantTo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceGrantSpec. +func (in *ReferenceGrantSpec) DeepCopy() *ReferenceGrantSpec { + if in == nil { + return nil + } + out := new(ReferenceGrantSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceGrantTo) DeepCopyInto(out *ReferenceGrantTo) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(ObjectName) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceGrantTo. +func (in *ReferenceGrantTo) DeepCopy() *ReferenceGrantTo { + if in == nil { + return nil + } + out := new(ReferenceGrantTo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteGroupKind) DeepCopyInto(out *RouteGroupKind) { + *out = *in + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(Group) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteGroupKind. +func (in *RouteGroupKind) DeepCopy() *RouteGroupKind { + if in == nil { + return nil + } + out := new(RouteGroupKind) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteNamespaces) DeepCopyInto(out *RouteNamespaces) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(FromNamespaces) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteNamespaces. +func (in *RouteNamespaces) DeepCopy() *RouteNamespaces { + if in == nil { + return nil + } + out := new(RouteNamespaces) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RouteParentStatus) DeepCopyInto(out *RouteParentStatus) { + *out = *in + in.ParentRef.DeepCopyInto(&out.ParentRef) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteParentStatus. +func (in *RouteParentStatus) DeepCopy() *RouteParentStatus { + if in == nil { + return nil + } + out := new(RouteParentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteStatus) DeepCopyInto(out *RouteStatus) { + *out = *in + if in.Parents != nil { + in, out := &in.Parents, &out.Parents + *out = make([]RouteParentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteStatus. +func (in *RouteStatus) DeepCopy() *RouteStatus { + if in == nil { + return nil + } + out := new(RouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretObjectReference) DeepCopyInto(out *SecretObjectReference) { + *out = *in + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(Group) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(Kind) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(Namespace) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretObjectReference. +func (in *SecretObjectReference) DeepCopy() *SecretObjectReference { + if in == nil { + return nil + } + out := new(SecretObjectReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/zz_generated.register.go b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/zz_generated.register.go new file mode 100644 index 00000000000..05d7898b385 --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/zz_generated.register.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by register-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName specifies the group name used to register the objects. +const GroupName = "gateway.networking.k8s.io" + +// GroupVersion specifies the group and the version used to register the objects. +var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// SchemeGroupVersion is group version used to register these objects +// Deprecated: use GroupVersion instead. 
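+// Editorial aside, not part of the generated file: a minimal sketch of wiring
+// these types into a scheme via the Install alias defined just below (standard
+// apimachinery usage; runtime is imported above):
+//
+//	scheme := runtime.NewScheme()
+//	if err := Install(scheme); err != nil {
+//		panic(err) // failing scheme registration is a programmer error
+//	}
+//	// scheme can now decode Gateway, GatewayClass, HTTPRoute, ReferenceGrant, ...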
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // Deprecated: use Install instead + AddToScheme = localSchemeBuilder.AddToScheme + Install = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Gateway{}, + &GatewayClass{}, + &GatewayClassList{}, + &GatewayList{}, + &HTTPRoute{}, + &HTTPRouteList{}, + &ReferenceGrant{}, + &ReferenceGrantList{}, + ) + // AddToGroupVersion allows the serialization of client types like ListOptions. + v1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} From faef3d605cfdc1a5f362b906cc56051517ac8eed Mon Sep 17 00:00:00 2001 From: Matthias Wessendorf Date: Mon, 17 Feb 2025 16:42:09 +0100 Subject: [PATCH 2/3] Bump cert-manager and use their clientset Signed-off-by: Matthias Wessendorf --- go.mod | 2 +- hack/update-codegen.sh | 11 ++--------- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index 597f228b97e..ff2b7a3405c 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22.7 require ( github.com/ahmetb/gen-crd-api-reference-docs v0.3.1-0.20210420163308-c1402a70e2f1 - github.com/cert-manager/cert-manager v1.13.3 + github.com/cert-manager/cert-manager v1.16.3 github.com/cloudevents/conformance v0.2.0 github.com/cloudevents/sdk-go/observability/opencensus/v2 v2.15.2 github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20240508060731-1ed9471c98bd diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 5022868f8a4..69dd0a53bbe 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -44,13 +44,6 @@ kube::codegen::gen_client \ --with-watch \ "${REPO_ROOT_DIR}/pkg/apis" -kube::codegen::gen_client \ - --boilerplate "${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt" \ - --output-dir "${REPO_ROOT_DIR}/pkg/client/certmanager" \ - --output-pkg "knative.dev/eventing/pkg/client/certmanager" \ - --with-watch \ - "${REPO_ROOT_DIR}/vendor/github.com/cert-manager/cert-manager/pkg/apis" - group "Knative Codegen" # Knative Injection ${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \ --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt # Knative Injection (for cert-manager) +OUTPUT_PKG="knative.dev/eventing/pkg/client/certmanager/injection" \ ${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \ - knative.dev/eventing/pkg/client/certmanager github.com/cert-manager/cert-manager/pkg/apis \ - "certmanager:v1 acme:v1" \ + github.com/cert-manager/cert-manager/pkg/client github.com/cert-manager/cert-manager/pkg/apis "certmanager:v1 acme:v1" \ --disable-informer-init \ --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt From 252cff664a9bbad58d9eafc10e7b56c074bc3f94 Mon Sep 17 00:00:00 2001 From: Matthias Wessendorf Date: Mon, 17 Feb
2025 16:44:18 +0100 Subject: [PATCH 3/3] Update deps Signed-off-by: Matthias Wessendorf --- go.mod | 13 +- go.sum | 42 +- .../cert-manager/cert-manager/LICENSE | 1 - .../cert-manager/cert-manager/LICENSES | 249 +- .../cert-manager/pkg/apis/acme/v1/const.go | 3 +- .../pkg/apis/acme/v1/types_issuer.go | 179 +- .../pkg/apis/acme/v1/types_order.go | 2 +- .../pkg/apis/acme/v1/zz_generated.deepcopy.go | 143 +- .../pkg/apis/certmanager/v1/const.go | 5 + .../pkg/apis/certmanager/v1/types.go | 7 + .../apis/certmanager/v1/types_certificate.go | 133 +- .../v1/types_certificaterequest.go | 2 +- .../pkg/apis/certmanager/v1/types_issuer.go | 66 +- .../certmanager/v1/zz_generated.deepcopy.go | 148 +- .../klauspost/compress/.gitattributes | 2 + .../github.com/klauspost/compress/.gitignore | 32 + .../klauspost/compress/.goreleaser.yml | 123 + vendor/github.com/klauspost/compress/LICENSE | 304 ++ .../github.com/klauspost/compress/README.md | 700 +++ .../github.com/klauspost/compress/SECURITY.md | 25 + .../klauspost/compress/compressible.go | 85 + .../klauspost/compress/fse/README.md | 79 + .../klauspost/compress/fse/bitreader.go | 122 + .../klauspost/compress/fse/bitwriter.go | 167 + .../klauspost/compress/fse/bytereader.go | 47 + .../klauspost/compress/fse/compress.go | 683 +++ .../klauspost/compress/fse/decompress.go | 376 ++ .../github.com/klauspost/compress/fse/fse.go | 144 + vendor/github.com/klauspost/compress/gen.sh | 4 + .../klauspost/compress/huff0/.gitignore | 1 + .../klauspost/compress/huff0/README.md | 89 + .../klauspost/compress/huff0/bitreader.go | 229 + .../klauspost/compress/huff0/bitwriter.go | 102 + .../klauspost/compress/huff0/compress.go | 742 +++ .../klauspost/compress/huff0/decompress.go | 1167 +++++ .../compress/huff0/decompress_amd64.go | 226 + .../compress/huff0/decompress_amd64.s | 830 ++++ .../compress/huff0/decompress_generic.go | 299 ++ .../klauspost/compress/huff0/huff0.go | 337 ++ .../compress/internal/cpuinfo/cpuinfo.go | 34 + .../internal/cpuinfo/cpuinfo_amd64.go | 11 + .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 + .../compress/internal/snapref/LICENSE | 27 + .../compress/internal/snapref/decode.go | 264 ++ .../compress/internal/snapref/decode_other.go | 113 + .../compress/internal/snapref/encode.go | 289 ++ .../compress/internal/snapref/encode_other.go | 250 + .../compress/internal/snapref/snappy.go | 98 + vendor/github.com/klauspost/compress/s2sx.mod | 4 + vendor/github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/zstd/README.md | 441 ++ .../klauspost/compress/zstd/bitreader.go | 136 + .../klauspost/compress/zstd/bitwriter.go | 112 + .../klauspost/compress/zstd/blockdec.go | 729 +++ .../klauspost/compress/zstd/blockenc.go | 909 ++++ .../compress/zstd/blocktype_string.go | 85 + .../klauspost/compress/zstd/bytebuf.go | 131 + .../klauspost/compress/zstd/bytereader.go | 82 + .../klauspost/compress/zstd/decodeheader.go | 261 ++ .../klauspost/compress/zstd/decoder.go | 948 ++++ .../compress/zstd/decoder_options.go | 169 + .../klauspost/compress/zstd/dict.go | 565 +++ .../klauspost/compress/zstd/enc_base.go | 173 + .../klauspost/compress/zstd/enc_best.go | 560 +++ .../klauspost/compress/zstd/enc_better.go | 1252 +++++ .../klauspost/compress/zstd/enc_dfast.go | 1123 +++++ .../klauspost/compress/zstd/enc_fast.go | 891 ++++ .../klauspost/compress/zstd/encoder.go | 619 +++ .../compress/zstd/encoder_options.go | 339 ++ .../klauspost/compress/zstd/framedec.go | 413 ++ .../klauspost/compress/zstd/frameenc.go | 137 + .../klauspost/compress/zstd/fse_decoder.go | 307 
++
 .../compress/zstd/fse_decoder_amd64.go       |   65 +
 .../compress/zstd/fse_decoder_amd64.s        |  126 +
 .../compress/zstd/fse_decoder_generic.go     |   73 +
 .../klauspost/compress/zstd/fse_encoder.go   |  701 +++
 .../klauspost/compress/zstd/fse_predefined.go |  158 +
 .../klauspost/compress/zstd/hash.go          |   35 +
 .../klauspost/compress/zstd/history.go       |  116 +
 .../compress/zstd/internal/xxhash/LICENSE.txt |   22 +
 .../compress/zstd/internal/xxhash/README.md  |   71 +
 .../compress/zstd/internal/xxhash/xxhash.go  |  230 +
 .../zstd/internal/xxhash/xxhash_amd64.s      |  210 +
 .../zstd/internal/xxhash/xxhash_arm64.s      |  184 +
 .../zstd/internal/xxhash/xxhash_asm.go       |   16 +
 .../zstd/internal/xxhash/xxhash_other.go     |   76 +
 .../zstd/internal/xxhash/xxhash_safe.go      |   11 +
 .../klauspost/compress/zstd/matchlen_amd64.go |   16 +
 .../klauspost/compress/zstd/matchlen_amd64.s |   66 +
 .../compress/zstd/matchlen_generic.go        |   33 +
 .../klauspost/compress/zstd/seqdec.go        |  503 ++
 .../klauspost/compress/zstd/seqdec_amd64.go  |  394 ++
 .../klauspost/compress/zstd/seqdec_amd64.s   | 4151 +++++++++++++++++
 .../klauspost/compress/zstd/seqdec_generic.go |  237 +
 .../klauspost/compress/zstd/seqenc.go        |  114 +
 .../klauspost/compress/zstd/snappy.go        |  434 ++
 .../github.com/klauspost/compress/zstd/zip.go |  141 +
 .../klauspost/compress/zstd/zstd.go          |  121 +
 .../prometheus/client_golang/NOTICE          |    5 -
 .../internal/github.com/golang/gddo/LICENSE  |   27 +
 .../golang/gddo/httputil/header/header.go    |  145 +
 .../golang/gddo/httputil/negotiate.go        |   36 +
 .../client_golang/prometheus/go_collector.go |   55 +-
 .../prometheus/go_collector_latest.go        |   19 +-
 .../client_golang/prometheus/histogram.go    |  268 +-
 .../internal/go_collector_options.go         |    2 +
 .../client_golang/prometheus/metric.go       |    2 +-
 .../prometheus/process_collector.go          |   29 +-
 .../prometheus/process_collector_other.go    |   14 +
 .../prometheus/promhttp/delegator.go         |    6 +
 .../client_golang/prometheus/promhttp/http.go |  113 +-
 .../client_golang/prometheus/registry.go     |   17 +-
 .../client_golang/prometheus/summary.go      |   42 +
 .../client_golang/prometheus/vec.go          |    2 +-
 .../gengo/v2/generator/snippet_writer.go     |   38 +-
 vendor/k8s.io/gengo/v2/namer/namer.go        |   16 +-
 vendor/k8s.io/gengo/v2/parser/parse.go       |   58 +-
 vendor/k8s.io/gengo/v2/types/types.go        |   20 +-
 .../kube-openapi/pkg/generators/openapi.go   |    2 +-
 .../pkg/generators/rules/names_match.go      |   38 +-
 vendor/k8s.io/utils/integer/integer.go       |   18 +-
 vendor/modules.txt                           |   33 +-
 .../gateway-api/apis/{v1beta1 => v1}/doc.go  |    7 +-
 .../apis/{v1beta1 => v1}/gateway_types.go    |  381 +-
 .../{v1beta1 => v1}/gatewayclass_types.go    |   66 +-
 .../gateway-api/apis/v1/grpcroute_types.go   |  627 +++
 .../apis/{v1beta1 => v1}/httproute_types.go  |  128 +-
 .../{v1beta1 => v1}/object_reference_types.go |   37 +-
 .../apis/{v1beta1 => v1}/shared_types.go     |  254 +-
 .../{v1beta1 => v1}/zz_generated.deepcopy.go |  616 ++-
 .../{v1beta1 => v1}/zz_generated.register.go |   15 +-
 .../apis/v1beta1/referencegrant_types.go     |  143 -
 132 files changed, 30279 insertions(+), 720 deletions(-)
 create mode 100644 vendor/github.com/klauspost/compress/.gitattributes
 create mode 100644 vendor/github.com/klauspost/compress/.gitignore
 create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml
 create mode 100644 vendor/github.com/klauspost/compress/LICENSE
 create mode 100644 vendor/github.com/klauspost/compress/README.md
 create mode 100644 vendor/github.com/klauspost/compress/SECURITY.md
 create mode 100644 vendor/github.com/klauspost/compress/compressible.go
 create mode 100644 vendor/github.com/klauspost/compress/fse/README.md
 create mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go
 create mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go
 create mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go
 create mode 100644 vendor/github.com/klauspost/compress/fse/compress.go
 create mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go
 create mode 100644 vendor/github.com/klauspost/compress/fse/fse.go
 create mode 100644 vendor/github.com/klauspost/compress/gen.sh
 create mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore
 create mode 100644 vendor/github.com/klauspost/compress/huff0/README.md
 create mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go
 create mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go
 create mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go
 create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go
 create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go
 create mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/LICENSE
 create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode_other.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/snappy.go
 create mode 100644 vendor/github.com/klauspost/compress/s2sx.mod
 create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum
 create mode 100644 vendor/github.com/klauspost/compress/zstd/README.md
 create mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/history.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
 create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
 create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
 create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/zip.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go
 create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
 create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
 create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
 rename vendor/sigs.k8s.io/gateway-api/apis/{v1beta1 => v1}/doc.go (83%)
 rename vendor/sigs.k8s.io/gateway-api/apis/{v1beta1 => v1}/gateway_types.go (70%)
 rename vendor/sigs.k8s.io/gateway-api/apis/{v1beta1 => v1}/gatewayclass_types.go (74%)
 create mode 100644 vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go
 rename vendor/sigs.k8s.io/gateway-api/apis/{v1beta1 => v1}/httproute_types.go (89%)
 rename vendor/sigs.k8s.io/gateway-api/apis/{v1beta1 => v1}/object_reference_types.go (80%)
 rename vendor/sigs.k8s.io/gateway-api/apis/{v1beta1 => v1}/shared_types.go (73%)
 rename vendor/sigs.k8s.io/gateway-api/apis/{v1beta1 => v1}/zz_generated.deepcopy.go (73%)
 rename vendor/sigs.k8s.io/gateway-api/apis/{v1beta1 => v1}/zz_generated.register.go (93%)
 delete mode 100644 vendor/sigs.k8s.io/gateway-api/apis/v1beta1/referencegrant_types.go
diff --git a/go.mod b/go.mod
index ff2b7a3405c..28eb034e477 100644
--- a/go.mod
+++ b/go.mod
@@ -43,7 +43,7 @@ require (
 	k8s.io/apimachinery v0.31.4
 	k8s.io/apiserver v0.31.4
 	k8s.io/client-go v0.31.4
-	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
+	k8s.io/utils v0.0.0-20240921022957-49e7df575cb6
 	knative.dev/hack v0.0.0-20250128013659-5f7f0f50e9de
 	knative.dev/hack/schema v0.0.0-20250128013659-5f7f0f50e9de
 	knative.dev/pkg v0.0.0-20250211185550-c8bea7c326ff
@@ -81,12 +81,13 @@ require (
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/compress v1.17.9 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/client_golang v1.19.1 // indirect
+	github.com/prometheus/client_golang v1.20.4 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
 	github.com/prometheus/common v0.55.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
@@ -107,7 +108,7 @@ require (
 	golang.org/x/time v0.6.0 // indirect
 	golang.org/x/tools v0.29.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
-	google.golang.org/api v0.183.0 // indirect
+	google.golang.org/api v0.198.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a // indirect
 	google.golang.org/grpc v1.70.0 // indirect
@@ -118,11 +119,11 @@ require (
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/code-generator v0.31.4 // indirect
 	k8s.io/gengo v0.0.0-20240404160639-a0386bf69313 // indirect
-	k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
+	k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 // indirect
 	k8s.io/klog v1.0.0 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8 // indirect
-	sigs.k8s.io/gateway-api v0.8.0 // indirect
+	k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect
+	sigs.k8s.io/gateway-api v1.1.0 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
 )
diff --git a/go.sum b/go.sum
index 0e5c992651d..e8992645573 100644
--- a/go.sum
+++ b/go.sum
@@ -64,8 +64,8 @@ github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
 github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
-github.com/cert-manager/cert-manager v1.13.3 h1:3R4G0RI7K0OkTZhWlVOC5SGZMYa2NwqmQJoyKydrz/M=
-github.com/cert-manager/cert-manager v1.13.3/go.mod h1:BM2+Pt/NmSv1Zr25/MHv6BgIEF9IUxA1xAjp80qkxgc=
+github.com/cert-manager/cert-manager v1.16.3 h1:seEF5eidFaeduaCuM85PFEuzH/1X/HOV5Y8zDQrHgpc=
+github.com/cert-manager/cert-manager v1.16.3/go.mod h1:6JQ/GAZ6dH+erqS1BbaqorPy8idJzCtWFUmJQBTjo6Q=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
@@ -269,6 +269,8 @@ github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dv
 github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -280,6 +282,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac h1:+2b6iGRJe3hvV/yVXrd41yVEjxuFHxasJqDhkIjS4gk=
 github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac/go.mod h1:Frd2bnT3w5FB5q49ENTfVlztJES+1k/7lyWX2+9gq/M=
 github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
@@ -311,13 +315,13 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M=
-github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw=
-github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
-github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
 github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
 github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg=
 github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c=
@@ -344,8 +348,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -482,8 +486,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI=
-golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -720,8 +722,8 @@ google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
 google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
 google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
 google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE=
-google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ=
+google.golang.org/api v0.198.0 h1:OOH5fZatk57iN0A7tjJQzt6aPfYQ1JiWkt1yGseazks=
+google.golang.org/api v0.198.0/go.mod h1:/Lblzl3/Xqqk9hw/yS97TImKTUwnf1bv89v7+OagJzc=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -842,18 +844,18 @@ k8s.io/code-generator v0.31.4/go.mod h1:yMDt13Kn7m4MMZ4LxB1KBzdZjEyxzdT4b4qXq+ln
 k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
 k8s.io/gengo v0.0.0-20240404160639-a0386bf69313 h1:wBIDZID8ju9pwOiLlV22YYKjFGtiNSWgHf5CnKLRUuM=
 k8s.io/gengo v0.0.0-20240404160639-a0386bf69313/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
-k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
+k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 h1:cErOOTkQ3JW19o4lo91fFurouhP8NcoBvb7CkvhZZpk=
+k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
 k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
 k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
 k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8 h1:1Wof1cGQgA5pqgo8MxKPtf+qN6Sh/0JzznmeGPm1HnE=
-k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8/go.mod h1:Os6V6dZwLNii3vxFpxcNaTmH8LJJBkOTg1N0tOA0fvA=
-k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
-k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo=
+k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA=
+k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI=
+k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 knative.dev/hack v0.0.0-20250128013659-5f7f0f50e9de h1:9xWkakMP0rpTpEIi9iRfkDsqhe2XbuyJdBkrlPB2Yjs=
 knative.dev/hack v0.0.0-20250128013659-5f7f0f50e9de/go.mod h1:R0ritgYtjLDO9527h5vb5X6gfvt5LCrJ55BNbVDsWiY=
 knative.dev/hack/schema v0.0.0-20250128013659-5f7f0f50e9de h1:omvHBnvV8dRjWqyB3EDvuv5EcJ09ZUY7xbbYcbMuCkI=
@@ -865,8 +867,8 @@ knative.dev/reconciler-test v0.0.0-20250129131157-3424ad806aa1/go.mod h1:rpvG6Am
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/gateway-api v0.8.0 h1:isQQ3Jx2qFP7vaA3ls0846F0Amp9Eq14P08xbSwVbQg=
-sigs.k8s.io/gateway-api v0.8.0/go.mod h1:okOnjPNBFbIS/Rw9kAhuIUaIkLhTKEu+ARIuXk2dgaM=
+sigs.k8s.io/gateway-api v1.1.0 h1:DsLDXCi6jR+Xz8/xd0Z1PYl2Pn0TyaFMOPPZIj4inDM=
+sigs.k8s.io/gateway-api v1.1.0/go.mod h1:ZH4lHrL2sDi0FHZ9jjneb8kKnGzFWyrTya35sWUTrRs=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
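For reference, a dependency bump of this shape is usually produced with stock Go module tooling before re-vendoring, along the lines of `go get github.com/cert-manager/cert-manager@v1.16.3 sigs.k8s.io/gateway-api@v1.1.0 && go mod tidy && go mod vendor` (illustrative invocation only; the exact update flow for this repository may go through its hack/ scripts instead).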
diff --git a/vendor/github.com/cert-manager/cert-manager/LICENSE b/vendor/github.com/cert-manager/cert-manager/LICENSE
index d6456956733..261eeb9e9f8 100644
--- a/vendor/github.com/cert-manager/cert-manager/LICENSE
+++ b/vendor/github.com/cert-manager/cert-manager/LICENSE
@@ -1,4 +1,3 @@
-
                                  Apache License
                            Version 2.0, January 2004
                         http://www.apache.org/licenses/
diff --git a/vendor/github.com/cert-manager/cert-manager/LICENSES b/vendor/github.com/cert-manager/cert-manager/LICENSES
index 22764fe9def..0c644fe5a34 100644
--- a/vendor/github.com/cert-manager/cert-manager/LICENSES
+++ b/vendor/github.com/cert-manager/cert-manager/LICENSES
@@ -1,89 +1,109 @@
-cloud.google.com/go/compute/metadata,https://github.com/googleapis/google-cloud-go/blob/compute/metadata/v0.2.3/compute/metadata/LICENSE,Apache-2.0
-github.com/Azure/azure-sdk-for-go,https://github.com/Azure/azure-sdk-for-go/blob/v68.0.0/LICENSE.txt,MIT
-github.com/Azure/go-autorest/autorest,https://github.com/Azure/go-autorest/blob/autorest/v0.11.29/autorest/LICENSE,Apache-2.0
-github.com/Azure/go-autorest/autorest/adal,https://github.com/Azure/go-autorest/blob/autorest/adal/v0.9.23/autorest/adal/LICENSE,Apache-2.0
-github.com/Azure/go-autorest/autorest/date,https://github.com/Azure/go-autorest/blob/autorest/date/v0.3.0/autorest/date/LICENSE,Apache-2.0
-github.com/Azure/go-autorest/autorest/to,https://github.com/Azure/go-autorest/blob/autorest/to/v0.4.0/autorest/to/LICENSE,Apache-2.0
-github.com/Azure/go-autorest/autorest/validation,https://github.com/Azure/go-autorest/blob/autorest/validation/v0.3.1/autorest/validation/LICENSE,Apache-2.0
-github.com/Azure/go-autorest/logger,https://github.com/Azure/go-autorest/blob/logger/v0.2.1/logger/LICENSE,Apache-2.0
-github.com/Azure/go-autorest/tracing,https://github.com/Azure/go-autorest/blob/tracing/v0.6.0/tracing/LICENSE,Apache-2.0
+cloud.google.com/go/auth,https://github.com/googleapis/google-cloud-go/blob/auth/v0.9.4/auth/LICENSE,Apache-2.0
+cloud.google.com/go/auth/oauth2adapt,https://github.com/googleapis/google-cloud-go/blob/auth/oauth2adapt/v0.2.4/auth/oauth2adapt/LICENSE,Apache-2.0
+cloud.google.com/go/compute/metadata,https://github.com/googleapis/google-cloud-go/blob/compute/metadata/v0.5.1/compute/metadata/LICENSE,Apache-2.0
+github.com/Azure/azure-sdk-for-go/sdk/azcore,https://github.com/Azure/azure-sdk-for-go/blob/sdk/azcore/v1.14.0/sdk/azcore/LICENSE.txt,MIT
+github.com/Azure/azure-sdk-for-go/sdk/azidentity,https://github.com/Azure/azure-sdk-for-go/blob/sdk/azidentity/v1.7.0/sdk/azidentity/LICENSE.txt,MIT
+github.com/Azure/azure-sdk-for-go/sdk/internal,https://github.com/Azure/azure-sdk-for-go/blob/sdk/internal/v1.10.0/sdk/internal/LICENSE.txt,MIT
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns,https://github.com/Azure/azure-sdk-for-go/blob/sdk/resourcemanager/dns/armdns/v1.2.0/sdk/resourcemanager/dns/armdns/LICENSE.txt,MIT
 github.com/Azure/go-ntlmssp,https://github.com/Azure/go-ntlmssp/blob/754e69321358/LICENSE,MIT
+github.com/AzureAD/microsoft-authentication-library-for-go/apps,https://github.com/AzureAD/microsoft-authentication-library-for-go/blob/v1.2.2/LICENSE,MIT
+github.com/Khan/genqlient/graphql,https://github.com/Khan/genqlient/blob/v0.7.0/LICENSE,MIT
 github.com/NYTimes/gziphandler,https://github.com/NYTimes/gziphandler/blob/v1.1.1/LICENSE,Apache-2.0
-github.com/Venafi/vcert/v4,https://github.com/Venafi/vcert/blob/69f417ae176d/LICENSE,Apache-2.0
+github.com/Venafi/vcert/v5,https://github.com/Venafi/vcert/blob/v5.7.1/LICENSE,Apache-2.0
 github.com/akamai/AkamaiOPEN-edgegrid-golang,https://github.com/akamai/AkamaiOPEN-edgegrid-golang/blob/v1.2.2/LICENSE,Apache-2.0
-github.com/antlr/antlr4/runtime/Go/antlr/v4,https://github.com/antlr/antlr4/blob/8188dc5388df/runtime/Go/antlr/v4/LICENSE,BSD-3-Clause
-github.com/asaskevich/govalidator,https://github.com/asaskevich/govalidator/blob/21a406dcc535/LICENSE,MIT
-github.com/aws/aws-sdk-go,https://github.com/aws/aws-sdk-go/blob/v1.45.7/LICENSE.txt,Apache-2.0
-github.com/aws/aws-sdk-go/internal/sync/singleflight,https://github.com/aws/aws-sdk-go/blob/v1.45.7/internal/sync/singleflight/LICENSE,BSD-3-Clause
+github.com/antlr4-go/antlr/v4,https://github.com/antlr4-go/antlr/blob/v4.13.0/LICENSE,BSD-3-Clause
+github.com/asaskevich/govalidator,https://github.com/asaskevich/govalidator/blob/a9d515a09cc2/LICENSE,MIT
+github.com/aws/aws-sdk-go-v2,https://github.com/aws/aws-sdk-go-v2/blob/v1.31.0/LICENSE.txt,Apache-2.0
+github.com/aws/aws-sdk-go-v2/config,https://github.com/aws/aws-sdk-go-v2/blob/config/v1.27.36/config/LICENSE.txt,Apache-2.0
+github.com/aws/aws-sdk-go-v2/credentials,https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.34/credentials/LICENSE.txt,Apache-2.0
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds,https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.14/feature/ec2/imds/LICENSE.txt,Apache-2.0
+github.com/aws/aws-sdk-go-v2/internal/configsources,https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.18/internal/configsources/LICENSE.txt,Apache-2.0
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2,https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.18/internal/endpoints/v2/LICENSE.txt,Apache-2.0
+github.com/aws/aws-sdk-go-v2/internal/ini,https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.1/internal/ini/LICENSE.txt,Apache-2.0
+github.com/aws/aws-sdk-go-v2/internal/sync/singleflight,https://github.com/aws/aws-sdk-go-v2/blob/v1.31.0/internal/sync/singleflight/LICENSE,BSD-3-Clause
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding,https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.11.5/service/internal/accept-encoding/LICENSE.txt,Apache-2.0
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url,https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.11.20/service/internal/presigned-url/LICENSE.txt,Apache-2.0
+github.com/aws/aws-sdk-go-v2/service/route53,https://github.com/aws/aws-sdk-go-v2/blob/service/route53/v1.44.0/service/route53/LICENSE.txt,Apache-2.0
+github.com/aws/aws-sdk-go-v2/service/sso,https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.23.0/service/sso/LICENSE.txt,Apache-2.0
+github.com/aws/aws-sdk-go-v2/service/ssooidc,https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.27.0/service/ssooidc/LICENSE.txt,Apache-2.0
+github.com/aws/aws-sdk-go-v2/service/sts,https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.31.0/service/sts/LICENSE.txt,Apache-2.0
+github.com/aws/smithy-go,https://github.com/aws/smithy-go/blob/v1.21.0/LICENSE,Apache-2.0
+github.com/aws/smithy-go/internal/sync/singleflight,https://github.com/aws/smithy-go/blob/v1.21.0/internal/sync/singleflight/LICENSE,BSD-3-Clause
 github.com/beorn7/perks/quantile,https://github.com/beorn7/perks/blob/v1.0.1/LICENSE,MIT
 github.com/blang/semver/v4,https://github.com/blang/semver/blob/v4.0.0/v4/LICENSE,MIT
-github.com/cenkalti/backoff/v3,https://github.com/cenkalti/backoff/blob/v3.2.2/LICENSE,MIT
-github.com/cenkalti/backoff/v4,https://github.com/cenkalti/backoff/blob/v4.2.1/LICENSE,MIT
+github.com/cenkalti/backoff/v4,https://github.com/cenkalti/backoff/blob/v4.3.0/LICENSE,MIT
 github.com/cert-manager/cert-manager,https://github.com/cert-manager/cert-manager/blob/HEAD/LICENSE,Apache-2.0
 github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/azuredns,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/azuredns/LICENSE,MIT
 github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/clouddns,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/clouddns/LICENSE,MIT
 github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/cloudflare,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/cloudflare/LICENSE,MIT
 github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/route53,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/route53/LICENSE,MIT
 github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/util,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/util/LICENSE,MIT
-github.com/cespare/xxhash/v2,https://github.com/cespare/xxhash/blob/v2.2.0/LICENSE.txt,MIT
+github.com/cespare/xxhash/v2,https://github.com/cespare/xxhash/blob/v2.3.0/LICENSE.txt,MIT
 github.com/coreos/go-semver/semver,https://github.com/coreos/go-semver/blob/v0.3.1/LICENSE,Apache-2.0
 github.com/coreos/go-systemd/v22,https://github.com/coreos/go-systemd/blob/v22.5.0/LICENSE,Apache-2.0
 github.com/cpu/goacmedns,https://github.com/cpu/goacmedns/blob/v0.1.1/LICENSE,MIT
 github.com/davecgh/go-spew/spew,https://github.com/davecgh/go-spew/blob/d8f796af33cc/LICENSE,ISC
-github.com/digitalocean/godo,https://github.com/digitalocean/godo/blob/v1.102.1/LICENSE.txt,MIT
-github.com/digitalocean/godo,https://github.com/digitalocean/godo/blob/v1.102.1/LICENSE.txt,BSD-3-Clause
-github.com/emicklei/go-restful/v3,https://github.com/emicklei/go-restful/blob/v3.11.0/LICENSE,MIT
-github.com/evanphx/json-patch,https://github.com/evanphx/json-patch/blob/v5.6.0/LICENSE,BSD-3-Clause
-github.com/evanphx/json-patch/v5,https://github.com/evanphx/json-patch/blob/v5.6.0/v5/LICENSE,BSD-3-Clause
+github.com/digitalocean/godo,https://github.com/digitalocean/godo/blob/v1.125.0/LICENSE.txt,MIT
+github.com/digitalocean/godo,https://github.com/digitalocean/godo/blob/v1.125.0/LICENSE.txt,BSD-3-Clause
+github.com/emicklei/go-restful/v3,https://github.com/emicklei/go-restful/blob/v3.12.1/LICENSE,MIT
+github.com/evanphx/json-patch/v5,https://github.com/evanphx/json-patch/blob/v5.9.0/v5/LICENSE,BSD-3-Clause
 github.com/felixge/httpsnoop,https://github.com/felixge/httpsnoop/blob/v1.0.4/LICENSE.txt,MIT
-github.com/fsnotify/fsnotify,https://github.com/fsnotify/fsnotify/blob/v1.6.0/LICENSE,BSD-3-Clause
-github.com/go-asn1-ber/asn1-ber,https://github.com/go-asn1-ber/asn1-ber/blob/v1.5.4/LICENSE,MIT
-github.com/go-jose/go-jose/v3,https://github.com/go-jose/go-jose/blob/v3.0.1/LICENSE,Apache-2.0
-github.com/go-jose/go-jose/v3/json,https://github.com/go-jose/go-jose/blob/v3.0.1/json/LICENSE,BSD-3-Clause
-github.com/go-ldap/ldap/v3,https://github.com/go-ldap/ldap/blob/v3.4.5/v3/LICENSE,MIT
-github.com/go-logr/logr,https://github.com/go-logr/logr/blob/v1.3.0/LICENSE,Apache-2.0
+github.com/fsnotify/fsnotify,https://github.com/fsnotify/fsnotify/blob/v1.7.0/LICENSE,BSD-3-Clause
+github.com/fxamacker/cbor/v2,https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE,MIT
+github.com/go-asn1-ber/asn1-ber,https://github.com/go-asn1-ber/asn1-ber/blob/v1.5.6/LICENSE,MIT
+github.com/go-http-utils/headers,https://github.com/go-http-utils/headers/blob/fed159eddc2a/LICENSE,MIT
+github.com/go-jose/go-jose/v4,https://github.com/go-jose/go-jose/blob/v4.0.2/LICENSE,Apache-2.0
+github.com/go-jose/go-jose/v4/json,https://github.com/go-jose/go-jose/blob/v4.0.2/json/LICENSE,BSD-3-Clause
+github.com/go-ldap/ldap/v3,https://github.com/go-ldap/ldap/blob/v3.4.8/v3/LICENSE,MIT
+github.com/go-logr/logr,https://github.com/go-logr/logr/blob/v1.4.2/LICENSE,Apache-2.0
 github.com/go-logr/stdr,https://github.com/go-logr/stdr/blob/v1.2.2/LICENSE,Apache-2.0
-github.com/go-logr/zapr,https://github.com/go-logr/zapr/blob/v1.2.4/LICENSE,Apache-2.0
-github.com/go-openapi/jsonpointer,https://github.com/go-openapi/jsonpointer/blob/v0.19.6/LICENSE,Apache-2.0
-github.com/go-openapi/jsonreference,https://github.com/go-openapi/jsonreference/blob/v0.20.2/LICENSE,Apache-2.0
-github.com/go-openapi/swag,https://github.com/go-openapi/swag/blob/v0.22.3/LICENSE,Apache-2.0
+github.com/go-logr/zapr,https://github.com/go-logr/zapr/blob/v1.3.0/LICENSE,Apache-2.0
+github.com/go-openapi/jsonpointer,https://github.com/go-openapi/jsonpointer/blob/v0.21.0/LICENSE,Apache-2.0
+github.com/go-openapi/jsonreference,https://github.com/go-openapi/jsonreference/blob/v0.21.0/LICENSE,Apache-2.0
+github.com/go-openapi/swag,https://github.com/go-openapi/swag/blob/v0.23.0/LICENSE,Apache-2.0
 github.com/gogo/protobuf,https://github.com/gogo/protobuf/blob/v1.3.2/LICENSE,BSD-3-Clause
-github.com/golang-jwt/jwt/v4,https://github.com/golang-jwt/jwt/blob/v4.5.0/LICENSE,MIT
+github.com/golang-jwt/jwt/v5,https://github.com/golang-jwt/jwt/blob/v5.2.1/LICENSE,MIT
 github.com/golang/groupcache/lru,https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE,Apache-2.0
-github.com/golang/protobuf,https://github.com/golang/protobuf/blob/v1.5.3/LICENSE,BSD-3-Clause
+github.com/golang/protobuf,https://github.com/golang/protobuf/blob/v1.5.4/LICENSE,BSD-3-Clause
 github.com/golang/snappy,https://github.com/golang/snappy/blob/v0.0.4/LICENSE,BSD-3-Clause
-github.com/google/cel-go,https://github.com/google/cel-go/blob/v0.16.0/LICENSE,Apache-2.0
-github.com/google/cel-go,https://github.com/google/cel-go/blob/v0.16.0/LICENSE,BSD-3-Clause
+github.com/google/cel-go,https://github.com/google/cel-go/blob/v0.20.1/LICENSE,Apache-2.0
+github.com/google/cel-go,https://github.com/google/cel-go/blob/v0.20.1/LICENSE,BSD-3-Clause
 github.com/google/gnostic-models,https://github.com/google/gnostic-models/blob/v0.6.8/LICENSE,Apache-2.0
 github.com/google/go-cmp/cmp,https://github.com/google/go-cmp/blob/v0.6.0/LICENSE,BSD-3-Clause
 github.com/google/go-querystring/query,https://github.com/google/go-querystring/blob/v1.1.0/LICENSE,BSD-3-Clause
 github.com/google/gofuzz,https://github.com/google/gofuzz/blob/v1.2.0/LICENSE,Apache-2.0
-github.com/google/s2a-go,https://github.com/google/s2a-go/blob/v0.1.7/LICENSE.md,Apache-2.0
-github.com/google/uuid,https://github.com/google/uuid/blob/v1.3.1/LICENSE,BSD-3-Clause
-github.com/googleapis/enterprise-certificate-proxy/client,https://github.com/googleapis/enterprise-certificate-proxy/blob/v0.2.5/LICENSE,Apache-2.0
-github.com/googleapis/gax-go/v2,https://github.com/googleapis/gax-go/blob/v2.12.0/v2/LICENSE,BSD-3-Clause
+github.com/google/s2a-go,https://github.com/google/s2a-go/blob/v0.1.8/LICENSE.md,Apache-2.0
+github.com/google/uuid,https://github.com/google/uuid/blob/v1.6.0/LICENSE,BSD-3-Clause
+github.com/googleapis/enterprise-certificate-proxy/client,https://github.com/googleapis/enterprise-certificate-proxy/blob/v0.3.4/LICENSE,Apache-2.0
+github.com/googleapis/gax-go/v2,https://github.com/googleapis/gax-go/blob/v2.13.0/v2/LICENSE,BSD-3-Clause
+github.com/gorilla/websocket,https://github.com/gorilla/websocket/blob/v1.5.1/LICENSE,BSD-3-Clause
 github.com/grpc-ecosystem/go-grpc-prometheus,https://github.com/grpc-ecosystem/go-grpc-prometheus/blob/v1.2.0/LICENSE,Apache-2.0
-github.com/grpc-ecosystem/grpc-gateway/v2,https://github.com/grpc-ecosystem/grpc-gateway/blob/v2.16.0/LICENSE.txt,BSD-3-Clause
+github.com/grpc-ecosystem/grpc-gateway/v2,https://github.com/grpc-ecosystem/grpc-gateway/blob/v2.20.0/LICENSE,BSD-3-Clause
 github.com/hashicorp/errwrap,https://github.com/hashicorp/errwrap/blob/v1.1.0/LICENSE,MPL-2.0
 github.com/hashicorp/go-cleanhttp,https://github.com/hashicorp/go-cleanhttp/blob/v0.5.2/LICENSE,MPL-2.0
 github.com/hashicorp/go-multierror,https://github.com/hashicorp/go-multierror/blob/v1.1.1/LICENSE,MPL-2.0
-github.com/hashicorp/go-retryablehttp,https://github.com/hashicorp/go-retryablehttp/blob/v0.7.4/LICENSE,MPL-2.0
+github.com/hashicorp/go-retryablehttp,https://github.com/hashicorp/go-retryablehttp/blob/v0.7.7/LICENSE,MPL-2.0
 github.com/hashicorp/go-rootcerts,https://github.com/hashicorp/go-rootcerts/blob/v1.0.2/LICENSE,MPL-2.0
-github.com/hashicorp/go-secure-stdlib/parseutil,https://github.com/hashicorp/go-secure-stdlib/blob/parseutil/v0.1.7/parseutil/LICENSE,MPL-2.0
+github.com/hashicorp/go-secure-stdlib/parseutil,https://github.com/hashicorp/go-secure-stdlib/blob/parseutil/v0.1.8/parseutil/LICENSE,MPL-2.0
 github.com/hashicorp/go-secure-stdlib/strutil,https://github.com/hashicorp/go-secure-stdlib/blob/strutil/v0.1.2/strutil/LICENSE,MPL-2.0
-github.com/hashicorp/go-sockaddr,https://github.com/hashicorp/go-sockaddr/blob/v1.0.2/LICENSE,MPL-2.0
+github.com/hashicorp/go-sockaddr,https://github.com/hashicorp/go-sockaddr/blob/v1.0.6/LICENSE,MPL-2.0
 github.com/hashicorp/hcl,https://github.com/hashicorp/hcl/blob/v1.0.1-vault-5/LICENSE,MPL-2.0
-github.com/hashicorp/vault/api,https://github.com/hashicorp/vault/blob/api/v1.10.0/api/LICENSE,MPL-2.0
-github.com/hashicorp/vault/sdk/helper,https://github.com/hashicorp/vault/blob/sdk/v0.10.0/sdk/LICENSE,MPL-2.0
-github.com/imdario/mergo,https://github.com/imdario/mergo/blob/v0.3.13/LICENSE,BSD-3-Clause
+github.com/hashicorp/vault/api,https://github.com/hashicorp/vault/blob/api/v1.15.0/api/LICENSE,MPL-2.0
+github.com/hashicorp/vault/sdk/helper,https://github.com/hashicorp/vault/blob/sdk/v0.14.0/sdk/LICENSE,MPL-2.0
+github.com/imdario/mergo,https://github.com/imdario/mergo/blob/v0.3.16/LICENSE,BSD-3-Clause
 github.com/jmespath/go-jmespath,https://github.com/jmespath/go-jmespath/blob/b0104c826a24/LICENSE,Apache-2.0
 github.com/josharian/intern,https://github.com/josharian/intern/blob/v1.0.0/license.md,MIT
 github.com/json-iterator/go,https://github.com/json-iterator/go/blob/v1.1.12/LICENSE,MIT
+github.com/klauspost/compress,https://github.com/klauspost/compress/blob/v1.17.9/LICENSE,MIT
+github.com/klauspost/compress,https://github.com/klauspost/compress/blob/v1.17.9/LICENSE,Apache-2.0
+github.com/klauspost/compress,https://github.com/klauspost/compress/blob/v1.17.9/LICENSE,BSD-3-Clause
+github.com/klauspost/compress/internal/snapref,https://github.com/klauspost/compress/blob/v1.17.9/internal/snapref/LICENSE,BSD-3-Clause
+github.com/klauspost/compress/zstd/internal/xxhash,https://github.com/klauspost/compress/blob/v1.17.9/zstd/internal/xxhash/LICENSE.txt,MIT
 github.com/kr/pretty,https://github.com/kr/pretty/blob/v0.3.1/License,MIT
 github.com/kr/text,https://github.com/kr/text/blob/v0.2.0/License,MIT
+github.com/kylelemons/godebug,https://github.com/kylelemons/godebug/blob/v1.1.0/LICENSE,Apache-2.0
 github.com/mailru/easyjson,https://github.com/mailru/easyjson/blob/v0.7.7/LICENSE,MIT
-github.com/matttproud/golang_protobuf_extensions/pbutil,https://github.com/matttproud/golang_protobuf_extensions/blob/v1.0.4/LICENSE,Apache-2.0
-github.com/miekg/dns,https://github.com/miekg/dns/blob/v1.1.55/LICENSE,BSD-3-Clause
+github.com/miekg/dns,https://github.com/miekg/dns/blob/v1.1.62/LICENSE,BSD-3-Clause
 github.com/mitchellh/go-homedir,https://github.com/mitchellh/go-homedir/blob/v1.1.0/LICENSE,MIT
 github.com/mitchellh/mapstructure,https://github.com/mitchellh/mapstructure/blob/v1.5.0/LICENSE,MIT
 github.com/modern-go/concurrent,https://github.com/modern-go/concurrent/blob/bacd9c7ef1dd/LICENSE,Apache-2.0
@@ -92,78 +112,85 @@ github.com/munnerz/goautoneg,https://github.com/munnerz/goautoneg/blob/a7dc8b61c
 github.com/patrickmn/go-cache,https://github.com/patrickmn/go-cache/blob/v2.1.0/LICENSE,MIT
 github.com/pavlo-v-chernykh/keystore-go/v4,https://github.com/pavlo-v-chernykh/keystore-go/blob/v4.5.0/LICENSE,MIT
 github.com/pierrec/lz4,https://github.com/pierrec/lz4/blob/v2.6.1/LICENSE,BSD-3-Clause
+github.com/pkg/browser,https://github.com/pkg/browser/blob/5ac0b6a4141c/LICENSE,BSD-2-Clause
 github.com/pkg/errors,https://github.com/pkg/errors/blob/v0.9.1/LICENSE,BSD-2-Clause
-github.com/prometheus/client_golang/prometheus,https://github.com/prometheus/client_golang/blob/v1.16.0/LICENSE,Apache-2.0
-github.com/prometheus/client_model/go,https://github.com/prometheus/client_model/blob/v0.4.0/LICENSE,Apache-2.0
-github.com/prometheus/common,https://github.com/prometheus/common/blob/v0.44.0/LICENSE,Apache-2.0
-github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg,https://github.com/prometheus/common/blob/v0.44.0/internal/bitbucket.org/ww/goautoneg/README.txt,BSD-3-Clause
-github.com/prometheus/procfs,https://github.com/prometheus/procfs/blob/v0.10.1/LICENSE,Apache-2.0
-github.com/rogpeppe/go-internal/fmtsort,https://github.com/rogpeppe/go-internal/blob/v1.11.0/LICENSE,BSD-3-Clause
+github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil,https://github.com/prometheus/client_golang/blob/v1.20.4/internal/github.com/golang/gddo/LICENSE,BSD-3-Clause
+github.com/prometheus/client_golang/prometheus,https://github.com/prometheus/client_golang/blob/v1.20.4/LICENSE,Apache-2.0
+github.com/prometheus/client_model/go,https://github.com/prometheus/client_model/blob/v0.6.1/LICENSE,Apache-2.0
+github.com/prometheus/common,https://github.com/prometheus/common/blob/v0.55.0/LICENSE,Apache-2.0
+github.com/prometheus/procfs,https://github.com/prometheus/procfs/blob/v0.15.1/LICENSE,Apache-2.0
+github.com/rogpeppe/go-internal/fmtsort,https://github.com/rogpeppe/go-internal/blob/v1.12.0/LICENSE,BSD-3-Clause
 github.com/ryanuber/go-glob,https://github.com/ryanuber/go-glob/blob/v1.0.0/LICENSE,MIT
 github.com/sirupsen/logrus,https://github.com/sirupsen/logrus/blob/v1.9.3/LICENSE,MIT
-github.com/spf13/cobra,https://github.com/spf13/cobra/blob/v1.7.0/LICENSE.txt,Apache-2.0
+github.com/sosodev/duration,https://github.com/sosodev/duration/blob/v1.3.1/LICENSE,MIT
+github.com/spf13/cobra,https://github.com/spf13/cobra/blob/v1.8.1/LICENSE.txt,Apache-2.0
 github.com/spf13/pflag,https://github.com/spf13/pflag/blob/v1.0.5/LICENSE,BSD-3-Clause
-github.com/stoewer/go-strcase,https://github.com/stoewer/go-strcase/blob/v1.2.0/LICENSE,MIT
-github.com/youmark/pkcs8,https://github.com/youmark/pkcs8/blob/1326539a0a0a/LICENSE,MIT
-go.etcd.io/etcd/api/v3,https://github.com/etcd-io/etcd/blob/api/v3.5.9/api/LICENSE,Apache-2.0
-go.etcd.io/etcd/client/pkg/v3,https://github.com/etcd-io/etcd/blob/client/pkg/v3.5.9/client/pkg/LICENSE,Apache-2.0
-go.etcd.io/etcd/client/v3,https://github.com/etcd-io/etcd/blob/client/v3.5.9/client/v3/LICENSE,Apache-2.0
+github.com/stoewer/go-strcase,https://github.com/stoewer/go-strcase/blob/v1.3.0/LICENSE,MIT
+github.com/vektah/gqlparser/v2,https://github.com/vektah/gqlparser/blob/v2.5.15/LICENSE,MIT
+github.com/x448/float16,https://github.com/x448/float16/blob/v0.8.4/LICENSE,MIT
+github.com/youmark/pkcs8,https://github.com/youmark/pkcs8/blob/3c2c7870ae76/LICENSE,MIT
+go.etcd.io/etcd/api/v3,https://github.com/etcd-io/etcd/blob/api/v3.5.14/api/LICENSE,Apache-2.0
+go.etcd.io/etcd/client/pkg/v3,https://github.com/etcd-io/etcd/blob/client/pkg/v3.5.14/client/pkg/LICENSE,Apache-2.0
+go.etcd.io/etcd/client/v3,https://github.com/etcd-io/etcd/blob/client/v3.5.14/client/v3/LICENSE,Apache-2.0
 go.opencensus.io,https://github.com/census-instrumentation/opencensus-go/blob/v0.24.0/LICENSE,Apache-2.0
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc,https://github.com/open-telemetry/opentelemetry-go-contrib/blob/instrumentation/google.golang.org/grpc/otelgrpc/v0.46.0/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE,Apache-2.0
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp,https://github.com/open-telemetry/opentelemetry-go-contrib/blob/instrumentation/net/http/otelhttp/v0.46.0/instrumentation/net/http/otelhttp/LICENSE,Apache-2.0
-go.opentelemetry.io/otel,https://github.com/open-telemetry/opentelemetry-go/blob/v1.20.0/LICENSE,Apache-2.0
-go.opentelemetry.io/otel/exporters/otlp/otlptrace,https://github.com/open-telemetry/opentelemetry-go/blob/exporters/otlp/otlptrace/v1.20.0/exporters/otlp/otlptrace/LICENSE,Apache-2.0
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc,https://github.com/open-telemetry/opentelemetry-go/blob/exporters/otlp/otlptrace/otlptracegrpc/v1.20.0/exporters/otlp/otlptrace/otlptracegrpc/LICENSE,Apache-2.0
-go.opentelemetry.io/otel/metric,https://github.com/open-telemetry/opentelemetry-go/blob/metric/v1.20.0/metric/LICENSE,Apache-2.0
-go.opentelemetry.io/otel/sdk,https://github.com/open-telemetry/opentelemetry-go/blob/sdk/v1.20.0/sdk/LICENSE,Apache-2.0
-go.opentelemetry.io/otel/trace,https://github.com/open-telemetry/opentelemetry-go/blob/trace/v1.20.0/trace/LICENSE,Apache-2.0
-go.opentelemetry.io/proto/otlp,https://github.com/open-telemetry/opentelemetry-proto-go/blob/otlp/v1.0.0/otlp/LICENSE,Apache-2.0
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc,https://github.com/open-telemetry/opentelemetry-go-contrib/blob/instrumentation/google.golang.org/grpc/otelgrpc/v0.54.0/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE,Apache-2.0
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp,https://github.com/open-telemetry/opentelemetry-go-contrib/blob/instrumentation/net/http/otelhttp/v0.54.0/instrumentation/net/http/otelhttp/LICENSE,Apache-2.0
+go.opentelemetry.io/otel,https://github.com/open-telemetry/opentelemetry-go/blob/v1.29.0/LICENSE,Apache-2.0
+go.opentelemetry.io/otel/exporters/otlp/otlptrace,https://github.com/open-telemetry/opentelemetry-go/blob/exporters/otlp/otlptrace/v1.28.0/exporters/otlp/otlptrace/LICENSE,Apache-2.0
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc,https://github.com/open-telemetry/opentelemetry-go/blob/exporters/otlp/otlptrace/otlptracegrpc/v1.27.0/exporters/otlp/otlptrace/otlptracegrpc/LICENSE,Apache-2.0
+go.opentelemetry.io/otel/metric,https://github.com/open-telemetry/opentelemetry-go/blob/metric/v1.29.0/metric/LICENSE,Apache-2.0
+go.opentelemetry.io/otel/sdk,https://github.com/open-telemetry/opentelemetry-go/blob/sdk/v1.28.0/sdk/LICENSE,Apache-2.0
+go.opentelemetry.io/otel/trace,https://github.com/open-telemetry/opentelemetry-go/blob/trace/v1.29.0/trace/LICENSE,Apache-2.0
+go.opentelemetry.io/proto/otlp,https://github.com/open-telemetry/opentelemetry-proto-go/blob/otlp/v1.3.1/otlp/LICENSE,Apache-2.0
 go.uber.org/multierr,https://github.com/uber-go/multierr/blob/v1.11.0/LICENSE.txt,MIT
-go.uber.org/zap,https://github.com/uber-go/zap/blob/v1.25.0/LICENSE.txt,MIT
-golang.org/x/crypto,https://cs.opensource.google/go/x/crypto/+/v0.14.0:LICENSE,BSD-3-Clause
-golang.org/x/exp,https://cs.opensource.google/go/x/exp/+/92128663:LICENSE,BSD-3-Clause
-golang.org/x/net,https://cs.opensource.google/go/x/net/+/v0.17.0:LICENSE,BSD-3-Clause
-golang.org/x/oauth2,https://cs.opensource.google/go/x/oauth2/+/v0.12.0:LICENSE,BSD-3-Clause
-golang.org/x/sync,https://cs.opensource.google/go/x/sync/+/v0.3.0:LICENSE,BSD-3-Clause
-golang.org/x/sys,https://cs.opensource.google/go/x/sys/+/v0.14.0:LICENSE,BSD-3-Clause
-golang.org/x/term,https://cs.opensource.google/go/x/term/+/v0.13.0:LICENSE,BSD-3-Clause
-golang.org/x/text,https://cs.opensource.google/go/x/text/+/v0.13.0:LICENSE,BSD-3-Clause
-golang.org/x/time/rate,https://cs.opensource.google/go/x/time/+/v0.3.0:LICENSE,BSD-3-Clause
+go.uber.org/zap,https://github.com/uber-go/zap/blob/v1.27.0/LICENSE,MIT
+golang.org/x/crypto,https://cs.opensource.google/go/x/crypto/+/v0.31.0:LICENSE,BSD-3-Clause
+golang.org/x/exp,https://cs.opensource.google/go/x/exp/+/8a7402ab:LICENSE,BSD-3-Clause
+golang.org/x/net,https://cs.opensource.google/go/x/net/+/v0.33.0:LICENSE,BSD-3-Clause
+golang.org/x/oauth2,https://cs.opensource.google/go/x/oauth2/+/v0.23.0:LICENSE,BSD-3-Clause
+golang.org/x/sync,https://cs.opensource.google/go/x/sync/+/v0.10.0:LICENSE,BSD-3-Clause
+golang.org/x/sys,https://cs.opensource.google/go/x/sys/+/v0.28.0:LICENSE,BSD-3-Clause
+golang.org/x/term,https://cs.opensource.google/go/x/term/+/v0.27.0:LICENSE,BSD-3-Clause
+golang.org/x/text,https://cs.opensource.google/go/x/text/+/v0.21.0:LICENSE,BSD-3-Clause
+golang.org/x/time/rate,https://cs.opensource.google/go/x/time/+/v0.6.0:LICENSE,BSD-3-Clause
 gomodules.xyz/jsonpatch/v2,https://github.com/gomodules/jsonpatch/blob/v2.4.0/v2/LICENSE,Apache-2.0
-google.golang.org/api,https://github.com/googleapis/google-api-go-client/blob/v0.140.0/LICENSE,BSD-3-Clause
-google.golang.org/api/internal/third_party/uritemplates,https://github.com/googleapis/google-api-go-client/blob/v0.140.0/internal/third_party/uritemplates/LICENSE,BSD-3-Clause
-google.golang.org/genproto/googleapis/api,https://github.com/googleapis/go-genproto/blob/b8732ec3820d/googleapis/api/LICENSE,Apache-2.0
-google.golang.org/genproto/googleapis/rpc,https://github.com/googleapis/go-genproto/blob/2d3300fd4832/googleapis/rpc/LICENSE,Apache-2.0
-google.golang.org/grpc,https://github.com/grpc/grpc-go/blob/v1.59.0/LICENSE,Apache-2.0
-google.golang.org/protobuf,https://github.com/protocolbuffers/protobuf-go/blob/v1.31.0/LICENSE,BSD-3-Clause
+google.golang.org/api,https://github.com/googleapis/google-api-go-client/blob/v0.198.0/LICENSE,BSD-3-Clause
+google.golang.org/api/internal/third_party/uritemplates,https://github.com/googleapis/google-api-go-client/blob/v0.198.0/internal/third_party/uritemplates/LICENSE,BSD-3-Clause
+google.golang.org/genproto/googleapis/api,https://github.com/googleapis/go-genproto/blob/7e3bb234dfed/googleapis/api/LICENSE,Apache-2.0
+google.golang.org/genproto/googleapis/rpc,https://github.com/googleapis/go-genproto/blob/8af14fe29dc1/googleapis/rpc/LICENSE,Apache-2.0
+google.golang.org/grpc,https://github.com/grpc/grpc-go/blob/v1.66.2/LICENSE,Apache-2.0
+google.golang.org/protobuf,https://github.com/protocolbuffers/protobuf-go/blob/v1.34.2/LICENSE,BSD-3-Clause
+gopkg.in/evanphx/json-patch.v4,https://github.com/evanphx/json-patch/blob/v4.12.0/LICENSE,BSD-3-Clause
 gopkg.in/inf.v0,https://github.com/go-inf/inf/blob/v0.9.1/LICENSE,BSD-3-Clause
-gopkg.in/ini.v1,https://github.com/go-ini/ini/blob/v1.62.0/LICENSE,Apache-2.0
+gopkg.in/ini.v1,https://github.com/go-ini/ini/blob/v1.67.0/LICENSE,Apache-2.0
 gopkg.in/natefinch/lumberjack.v2,https://github.com/natefinch/lumberjack/blob/v2.2.1/LICENSE,MIT
 gopkg.in/yaml.v2,https://github.com/go-yaml/yaml/blob/v2.4.0/LICENSE,Apache-2.0
 gopkg.in/yaml.v3,https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE,MIT
-k8s.io/api,https://github.com/kubernetes/api/blob/v0.28.1/LICENSE,Apache-2.0
-k8s.io/apiextensions-apiserver/pkg,https://github.com/kubernetes/apiextensions-apiserver/blob/v0.28.1/LICENSE,Apache-2.0
-k8s.io/apimachinery/pkg,https://github.com/kubernetes/apimachinery/blob/v0.28.1/LICENSE,Apache-2.0
-k8s.io/apimachinery/third_party/forked/golang,https://github.com/kubernetes/apimachinery/blob/v0.28.1/third_party/forked/golang/LICENSE,BSD-3-Clause
-k8s.io/apiserver,https://github.com/kubernetes/apiserver/blob/v0.28.1/LICENSE,Apache-2.0
-k8s.io/client-go,https://github.com/kubernetes/client-go/blob/v0.28.1/LICENSE,Apache-2.0
-k8s.io/component-base,https://github.com/kubernetes/component-base/blob/v0.28.1/LICENSE,Apache-2.0
-k8s.io/klog/v2,https://github.com/kubernetes/klog/blob/v2.100.1/LICENSE,Apache-2.0
-k8s.io/kms,https://github.com/kubernetes/kms/blob/v0.28.1/LICENSE,Apache-2.0
-k8s.io/kube-aggregator/pkg/apis/apiregistration,https://github.com/kubernetes/kube-aggregator/blob/v0.28.1/LICENSE,Apache-2.0
-k8s.io/kube-openapi/pkg,https://github.com/kubernetes/kube-openapi/blob/d090da108d2f/LICENSE,Apache-2.0
-k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json,https://github.com/kubernetes/kube-openapi/blob/d090da108d2f/pkg/internal/third_party/go-json-experiment/json/LICENSE,BSD-3-Clause
-k8s.io/kube-openapi/pkg/validation/errors,https://github.com/kubernetes/kube-openapi/blob/d090da108d2f/pkg/validation/errors/LICENSE,Apache-2.0
-k8s.io/kube-openapi/pkg/validation/spec,https://github.com/kubernetes/kube-openapi/blob/d090da108d2f/pkg/validation/spec/LICENSE,Apache-2.0
-k8s.io/kube-openapi/pkg/validation/strfmt,https://github.com/kubernetes/kube-openapi/blob/d090da108d2f/pkg/validation/strfmt/LICENSE,Apache-2.0
-k8s.io/utils,https://github.com/kubernetes/utils/blob/3b25d923346b/LICENSE,Apache-2.0
-k8s.io/utils/internal/third_party/forked/golang,https://github.com/kubernetes/utils/blob/3b25d923346b/internal/third_party/forked/golang/LICENSE,BSD-3-Clause
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client,https://github.com/kubernetes-sigs/apiserver-network-proxy/blob/konnectivity-client/v0.1.2/konnectivity-client/LICENSE,Apache-2.0
-sigs.k8s.io/controller-runtime,https://github.com/kubernetes-sigs/controller-runtime/blob/v0.16.1/LICENSE,Apache-2.0
-sigs.k8s.io/gateway-api,https://github.com/kubernetes-sigs/gateway-api/blob/v0.8.0/LICENSE,Apache-2.0
+k8s.io/api,https://github.com/kubernetes/api/blob/v0.31.1/LICENSE,Apache-2.0
+k8s.io/apiextensions-apiserver/pkg,https://github.com/kubernetes/apiextensions-apiserver/blob/v0.31.1/LICENSE,Apache-2.0
+k8s.io/apimachinery/pkg,https://github.com/kubernetes/apimachinery/blob/v0.31.1/LICENSE,Apache-2.0
+k8s.io/apimachinery/third_party/forked/golang,https://github.com/kubernetes/apimachinery/blob/v0.31.1/third_party/forked/golang/LICENSE,BSD-3-Clause
+k8s.io/apiserver,https://github.com/kubernetes/apiserver/blob/v0.31.1/LICENSE,Apache-2.0
+k8s.io/client-go,https://github.com/kubernetes/client-go/blob/v0.31.1/LICENSE,Apache-2.0
+k8s.io/component-base,https://github.com/kubernetes/component-base/blob/v0.31.1/LICENSE,Apache-2.0
+k8s.io/klog/v2,https://github.com/kubernetes/klog/blob/v2.130.1/LICENSE,Apache-2.0
+k8s.io/kms,https://github.com/kubernetes/kms/blob/v0.31.1/LICENSE,Apache-2.0
+k8s.io/kube-aggregator/pkg/apis/apiregistration,https://github.com/kubernetes/kube-aggregator/blob/v0.31.1/LICENSE,Apache-2.0
+k8s.io/kube-openapi/pkg,https://github.com/kubernetes/kube-openapi/blob/9e1beecbcb38/LICENSE,Apache-2.0
+k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json,https://github.com/kubernetes/kube-openapi/blob/9e1beecbcb38/pkg/internal/third_party/go-json-experiment/json/LICENSE,BSD-3-Clause
+k8s.io/kube-openapi/pkg/validation/errors,https://github.com/kubernetes/kube-openapi/blob/9e1beecbcb38/pkg/validation/errors/LICENSE,Apache-2.0
+k8s.io/kube-openapi/pkg/validation/spec,https://github.com/kubernetes/kube-openapi/blob/9e1beecbcb38/pkg/validation/spec/LICENSE,Apache-2.0
+k8s.io/kube-openapi/pkg/validation/strfmt,https://github.com/kubernetes/kube-openapi/blob/9e1beecbcb38/pkg/validation/strfmt/LICENSE,Apache-2.0
+k8s.io/utils,https://github.com/kubernetes/utils/blob/49e7df575cb6/LICENSE,Apache-2.0
+k8s.io/utils/internal/third_party/forked/golang,https://github.com/kubernetes/utils/blob/49e7df575cb6/internal/third_party/forked/golang/LICENSE,BSD-3-Clause
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client,https://github.com/kubernetes-sigs/apiserver-network-proxy/blob/konnectivity-client/v0.30.3/konnectivity-client/LICENSE,Apache-2.0
+sigs.k8s.io/controller-runtime,https://github.com/kubernetes-sigs/controller-runtime/blob/v0.19.0/LICENSE,Apache-2.0
+sigs.k8s.io/gateway-api,https://github.com/kubernetes-sigs/gateway-api/blob/v1.1.0/LICENSE,Apache-2.0
 sigs.k8s.io/json,https://github.com/kubernetes-sigs/json/blob/bc3834ca7abd/LICENSE,Apache-2.0
 sigs.k8s.io/json,https://github.com/kubernetes-sigs/json/blob/bc3834ca7abd/LICENSE,BSD-3-Clause
-sigs.k8s.io/structured-merge-diff/v4,https://github.com/kubernetes-sigs/structured-merge-diff/blob/v4.3.0/LICENSE,Apache-2.0
-sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.3.0/LICENSE,MIT
-sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.3.0/LICENSE,BSD-3-Clause
-software.sslmate.com/src/go-pkcs12,https://github.com/SSLMate/go-pkcs12/blob/v0.2.1/LICENSE,BSD-3-Clause
+sigs.k8s.io/structured-merge-diff/v4,https://github.com/kubernetes-sigs/structured-merge-diff/blob/v4.4.1/LICENSE,Apache-2.0
+sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE,MIT
+sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE,Apache-2.0
+sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE,BSD-3-Clause
+sigs.k8s.io/yaml/goyaml.v2,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/goyaml.v2/LICENSE,Apache-2.0
+software.sslmate.com/src/go-pkcs12,https://github.com/SSLMate/go-pkcs12/blob/v0.5.0/LICENSE,BSD-3-Clause
diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/const.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/const.go
index 287ed2f6d7d..ffc76533ac1 100644
--- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/const.go
+++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/const.go
@@ -17,5 +17,6 @@ limitations under the License.
 package v1
 
 const (
-	ACMEFinalizer = "finalizer.acme.cert-manager.io"
+	ACMELegacyFinalizer          = "finalizer.acme.cert-manager.io"
+	ACMEDomainQualifiedFinalizer = "acme.cert-manager.io/finalizer"
 )
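The constant rename above is worth calling out: code that referenced ACMEFinalizer no longer compiles against the vendored v1.16 API, and objects written by older controllers may still carry the legacy finalizer string. A minimal sketch of how a consumer could migrate a finalizer list to the new domain-qualified value; the migrateFinalizers helper and the package main wrapper are illustrative, not part of cert-manager:

// migrate_finalizers.go: illustrative sketch only, not part of this patch.
package main

import (
	"fmt"

	acmev1 "github.com/cert-manager/cert-manager/pkg/apis/acme/v1"
)

// migrateFinalizers returns a copy of the given finalizer list with the
// legacy cert-manager ACME finalizer replaced by the domain-qualified one.
// The boolean reports whether a migration was needed.
func migrateFinalizers(finalizers []string) ([]string, bool) {
	out := make([]string, 0, len(finalizers))
	hadLegacy, hasNew := false, false
	for _, f := range finalizers {
		switch f {
		case acmev1.ACMELegacyFinalizer:
			hadLegacy = true // drop the legacy entry; re-added below in qualified form
		case acmev1.ACMEDomainQualifiedFinalizer:
			hasNew = true
			out = append(out, f)
		default:
			out = append(out, f)
		}
	}
	if hadLegacy && !hasNew {
		out = append(out, acmev1.ACMEDomainQualifiedFinalizer)
	}
	return out, hadLegacy
}

func main() {
	fs, migrated := migrateFinalizers([]string{"finalizer.acme.cert-manager.io"})
	fmt.Println(fs, migrated) // [acme.cert-manager.io/finalizer] true
}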
// +optional Annotations map[string]string `json:"annotations,omitempty"` @@ -337,7 +343,11 @@ type ACMEChallengeSolverHTTP01IngressPodSpec struct { // If specified, the pod's imagePullSecrets // +optional - ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchMergeKey:"name" patchStrategy:"merge"` + + // If specified, the pod's security context + // +optional + SecurityContext *ACMEChallengeSolverHTTP01IngressPodSecurityContext `json:"securityContext,omitempty"` } type ACMEChallengeSolverHTTP01IngressTemplate struct { @@ -408,6 +418,80 @@ type ACMEChallengeSolverDNS01 struct { Webhook *ACMEIssuerDNS01ProviderWebhook `json:"webhook,omitempty"` } +type ACMEChallengeSolverHTTP01IngressPodSecurityContext struct { + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + SELinuxOptions *corev1.SELinuxOptions `json:"seLinuxOptions,omitempty"` + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + RunAsUser *int64 `json:"runAsUser,omitempty"` + // The GID to run the entrypoint of the container process. + // Uses runtime default if unset. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + RunAsGroup *int64 `json:"runAsGroup,omitempty"` + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` + // A list of groups applied to the first process run in each container, in addition + // to the container's primary GID, the fsGroup (if specified), and group memberships + // defined in the container image for the uid of the container process. If unspecified, + // no additional groups are added to any container. Note that group memberships + // defined in the container image for the uid of the container process are still effective, + // even if they are not included in this list. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` + // A special supplemental group that applies to all containers in a pod. 
+ // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + FSGroup *int64 `json:"fsGroup,omitempty"` + // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + // sysctls (by the container runtime) might fail to launch. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + Sysctls []corev1.Sysctl `json:"sysctls,omitempty"` + // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + // before being exposed inside Pod. This field will only apply to + // volume types which support fsGroup based ownership(and permissions). + // It will have no effect on ephemeral volume types such as: secret, configmaps + // and emptydir. + // Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + FSGroupChangePolicy *corev1.PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty"` + // The seccomp options to use by the containers in this pod. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + SeccompProfile *corev1.SeccompProfile `json:"seccompProfile,omitempty"` +} + // CNAMEStrategy configures how the DNS01 provider should handle CNAME records // when found in DNS zones. // By default, the None strategy will be applied (i.e. do not follow CNAMEs). @@ -478,6 +562,10 @@ type ACMEIssuerDNS01ProviderDigitalOcean struct { // ACMEIssuerDNS01ProviderRoute53 is a structure containing the Route 53 // configuration for AWS type ACMEIssuerDNS01ProviderRoute53 struct { + // Auth configures how cert-manager authenticates. + // +optional + Auth *Route53Auth `json:"auth,omitempty"` + // The AccessKeyID is used for authentication. // Cannot be set when SecretAccessKeyID is set. // If neither the Access Key nor Key ID are set, we fall-back to using env @@ -507,29 +595,88 @@ type ACMEIssuerDNS01ProviderRoute53 struct { // +optional Role string `json:"role,omitempty"` - // If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + // If set, the provider will manage only this zone in Route53 and will not do a lookup using the route53:ListHostedZonesByName api call. // +optional HostedZoneID string `json:"hostedZoneID,omitempty"` - // Always set the region when using AccessKeyID and SecretAccessKey - Region string `json:"region"` + // Override the AWS region. + // + // Route53 is a global service and does not have regional endpoints but the + // region specified here (or via environment variables) is used as a hint to + // help compute the correct AWS credential scope and partition when it + // connects to Route53. 
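// Illustrative sketch, not part of the vendored upstream source: locking down
// the HTTP01 solver pods via the securityContext type defined above. ptrTo is
// a hypothetical helper used by these sketches; corev1 is this file's core/v1
// import.
func ptrTo[T any](v T) *T { return &v }

var exampleSolverSecurityContext = ACMEChallengeSolverHTTP01IngressPodSecurityContext{
	RunAsNonRoot: ptrTo(true),        // kubelet refuses to start the pod as UID 0
	RunAsUser:    ptrTo(int64(1000)), // run the solver entrypoint as UID 1000
	SeccompProfile: &corev1.SeccompProfile{
		Type: corev1.SeccompProfileTypeRuntimeDefault, // container runtime's default profile
	},
}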
See: + // - [Amazon Route 53 endpoints and quotas](https://docs.aws.amazon.com/general/latest/gr/r53.html) + // - [Global services](https://docs.aws.amazon.com/whitepapers/latest/aws-fault-isolation-boundaries/global-services.html) + // + // If you omit this region field, cert-manager will use the region from + // AWS_REGION and AWS_DEFAULT_REGION environment variables, if they are set + // in the cert-manager controller Pod. + // + // The `region` field is not needed if you use [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). + // Instead, an AWS_REGION environment variable is added to the cert-manager controller Pod by: + // [Amazon EKS Pod Identity Webhook](https://github.com/aws/amazon-eks-pod-identity-webhook). + // In this case, this `region` field value is ignored. + // + // The `region` field is not needed if you use [EKS Pod Identities](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html). + // Instead, an AWS_REGION environment variable is added to the cert-manager controller Pod by: + // [Amazon EKS Pod Identity Agent](https://github.com/aws/eks-pod-identity-agent). + // In this case, this `region` field value is ignored. + // + // +optional + Region string `json:"region,omitempty"` +} + +// Route53Auth is the configuration used to authenticate with Route53. +type Route53Auth struct { + // Kubernetes authenticates with Route53 using AssumeRoleWithWebIdentity + // by passing a bound ServiceAccount token. + Kubernetes *Route53KubernetesAuth `json:"kubernetes"` +} + +// Route53KubernetesAuth is a configuration to authenticate against Route53 +// using a bound Kubernetes ServiceAccount token. +type Route53KubernetesAuth struct { + // A reference to a service account that will be used to request a bound + // token (also known as a "projected token"). To use this field, you must + // configure an RBAC rule to let cert-manager request a token. + ServiceAccountRef *ServiceAccountRef `json:"serviceAccountRef"` +} + +// ServiceAccountRef is a service account used by cert-manager to request a +// token. The expiration of the token is also set by cert-manager to 10 minutes. +type ServiceAccountRef struct { + // Name of the ServiceAccount used to request a token. + Name string `json:"name"` + + // TokenAudiences is an optional list of audiences to include in the + // token passed to AWS. The default token, consisting of the issuer's namespace + // and name, is always included. + // If unset, the audience defaults to `sts.amazonaws.com`. + // +optional + TokenAudiences []string `json:"audiences,omitempty"` } // ACMEIssuerDNS01ProviderAzureDNS is a structure containing the // configuration for Azure DNS type ACMEIssuerDNS01ProviderAzureDNS struct { - // if both this and ClientSecret are left unset MSI will be used + // Auth: Azure Service Principal: + // The ClientID of the Azure Service Principal used to authenticate with Azure DNS. + // If set, ClientSecret and TenantID must also be set. // +optional ClientID string `json:"clientID,omitempty"` - // if both this and ClientID are left unset MSI will be used + // Auth: Azure Service Principal: + // A reference to a Secret containing the password associated with the Service Principal. + // If set, ClientID and TenantID must also be set.
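// Illustrative sketch, not part of the vendored upstream source: a Route53
// solver using the new auth field above, where cert-manager requests a bound
// ServiceAccount token and exchanges it for AWS credentials via
// AssumeRoleWithWebIdentity. The role ARN and ServiceAccount name are invented.
var exampleRoute53 = ACMEIssuerDNS01ProviderRoute53{
	Region: "us-east-1",                                         // scope/partition hint, per the doc above
	Role:   "arn:aws:iam::111122223333:role/cert-manager-dns01", // hypothetical role to assume
	Auth: &Route53Auth{
		Kubernetes: &Route53KubernetesAuth{
			ServiceAccountRef: &ServiceAccountRef{
				Name: "cert-manager-route53", // hypothetical ServiceAccount; audience defaults to sts.amazonaws.com
			},
		},
	},
}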
// +optional ClientSecret *cmmeta.SecretKeySelector `json:"clientSecretSecretRef,omitempty"` // ID of the Azure subscription SubscriptionID string `json:"subscriptionID"` - // when specifying ClientID and ClientSecret then this field is also needed + // Auth: Azure Service Principal: + // The TenantID of the Azure Service Principal used to authenticate with Azure DNS. + // If set, ClientID and ClientSecret must also be set. // +optional TenantID string `json:"tenantID,omitempty"` @@ -544,17 +691,23 @@ type ACMEIssuerDNS01ProviderAzureDNS struct { // +optional Environment AzureDNSEnvironment `json:"environment,omitempty"` - // managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + // Auth: Azure Workload Identity or Azure Managed Service Identity: + // Settings to enable Azure Workload Identity or Azure Managed Service Identity + // If set, ClientID, ClientSecret and TenantID must not be set. // +optional ManagedIdentity *AzureManagedIdentity `json:"managedIdentity,omitempty"` } +// AzureManagedIdentity contains the configuration for Azure Workload Identity or Azure Managed Service Identity +// If the AZURE_FEDERATED_TOKEN_FILE environment variable is set, the Azure Workload Identity will be used. +// Otherwise, we fall-back to using Azure Managed Service Identity. type AzureManagedIdentity struct { // client ID of the managed identity, can not be used at the same time as resourceID // +optional ClientID string `json:"clientID,omitempty"` // resource ID of the managed identity, can not be used at the same time as clientID + // Cannot be used for Azure Managed Service Identity // +optional ResourceID string `json:"resourceID,omitempty"` } diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go index e9a50a30134..f945a43ff00 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go @@ -223,7 +223,7 @@ const ( Processing State = "processing" // Invalid signifies that an ACME resource is invalid for some reason. - // If an Order is marked 'invalid', one of its validations be have invalid for some reason. + // If an Order is marked 'invalid', one of its validations must be invalid for some reason. // This is a final state. Invalid State = "invalid" diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go index b5472216ccd..09f27f5cc5f 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go @@ -27,7 +27,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" + apisv1 "sigs.k8s.io/gateway-api/apis/v1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
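// Illustrative sketch, not part of the vendored upstream source: the AzureDNS
// managed-identity path described above. The IDs are invented; the
// service-principal fields (ClientID, ClientSecret, TenantID) must stay unset
// when managedIdentity is used.
var exampleAzureDNS = ACMEIssuerDNS01ProviderAzureDNS{
	SubscriptionID: "00000000-0000-0000-0000-000000000000", // hypothetical subscription
	ManagedIdentity: &AzureManagedIdentity{
		// With AZURE_FEDERATED_TOKEN_FILE set this selects Workload Identity,
		// otherwise Managed Service Identity; set ResourceID instead of ClientID if preferred.
		ClientID: "11111111-1111-1111-1111-111111111111",
	},
}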
@@ -202,11 +202,16 @@ func (in *ACMEChallengeSolverHTTP01GatewayHTTPRoute) DeepCopyInto(out *ACMEChall } if in.ParentRefs != nil { in, out := &in.ParentRefs, &out.ParentRefs - *out = make([]v1beta1.ParentReference, len(*in)) + *out = make([]apisv1.ParentReference, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.PodTemplate != nil { + in, out := &in.PodTemplate, &out.PodTemplate + *out = new(ACMEChallengeSolverHTTP01IngressPodTemplate) + (*in).DeepCopyInto(*out) + } return } @@ -316,6 +321,67 @@ func (in *ACMEChallengeSolverHTTP01IngressPodObjectMeta) DeepCopy() *ACMEChallen return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEChallengeSolverHTTP01IngressPodSecurityContext) DeepCopyInto(out *ACMEChallengeSolverHTTP01IngressPodSecurityContext) { + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(corev1.SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = **in + } + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(int64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]int64, len(*in)) + copy(*out, *in) + } + if in.FSGroup != nil { + in, out := &in.FSGroup, &out.FSGroup + *out = new(int64) + **out = **in + } + if in.Sysctls != nil { + in, out := &in.Sysctls, &out.Sysctls + *out = make([]corev1.Sysctl, len(*in)) + copy(*out, *in) + } + if in.FSGroupChangePolicy != nil { + in, out := &in.FSGroupChangePolicy, &out.FSGroupChangePolicy + *out = new(corev1.PodFSGroupChangePolicy) + **out = **in + } + if in.SeccompProfile != nil { + in, out := &in.SeccompProfile, &out.SeccompProfile + *out = new(corev1.SeccompProfile) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEChallengeSolverHTTP01IngressPodSecurityContext. +func (in *ACMEChallengeSolverHTTP01IngressPodSecurityContext) DeepCopy() *ACMEChallengeSolverHTTP01IngressPodSecurityContext { + if in == nil { + return nil + } + out := new(ACMEChallengeSolverHTTP01IngressPodSecurityContext) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ACMEChallengeSolverHTTP01IngressPodSpec) DeepCopyInto(out *ACMEChallengeSolverHTTP01IngressPodSpec) { *out = *in @@ -343,6 +409,11 @@ func (in *ACMEChallengeSolverHTTP01IngressPodSpec) DeepCopyInto(out *ACMEChallen *out = make([]corev1.LocalObjectReference, len(*in)) copy(*out, *in) } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(ACMEChallengeSolverHTTP01IngressPodSecurityContext) + (*in).DeepCopyInto(*out) + } return } @@ -588,6 +659,11 @@ func (in *ACMEIssuerDNS01ProviderRFC2136) DeepCopy() *ACMEIssuerDNS01ProviderRFC // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
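// Illustrative sketch, not part of the vendored upstream source: the generated
// helpers above clone every pointer and slice field, so a DeepCopy can be
// mutated without aliasing the original (ptrTo is the hypothetical helper from
// the earlier sketch).
func exampleDeepCopyIsolation(in *ACMEChallengeSolverHTTP01IngressPodSecurityContext) {
	cp := in.DeepCopy()
	cp.RunAsNonRoot = ptrTo(false) // only the copy changes
	_ = in.RunAsNonRoot            // the original still holds its own value (or nil)
}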
func (in *ACMEIssuerDNS01ProviderRoute53) DeepCopyInto(out *ACMEIssuerDNS01ProviderRoute53) { *out = *in + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = new(Route53Auth) + (*in).DeepCopyInto(*out) + } if in.SecretAccessKeyID != nil { in, out := &in.SecretAccessKeyID, &out.SecretAccessKeyID *out = new(metav1.SecretKeySelector) @@ -917,3 +993,66 @@ func (in *OrderStatus) DeepCopy() *OrderStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Route53Auth) DeepCopyInto(out *Route53Auth) { + *out = *in + if in.Kubernetes != nil { + in, out := &in.Kubernetes, &out.Kubernetes + *out = new(Route53KubernetesAuth) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route53Auth. +func (in *Route53Auth) DeepCopy() *Route53Auth { + if in == nil { + return nil + } + out := new(Route53Auth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Route53KubernetesAuth) DeepCopyInto(out *Route53KubernetesAuth) { + *out = *in + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = new(ServiceAccountRef) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route53KubernetesAuth. +func (in *Route53KubernetesAuth) DeepCopy() *Route53KubernetesAuth { + if in == nil { + return nil + } + out := new(Route53KubernetesAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountRef) DeepCopyInto(out *ServiceAccountRef) { + *out = *in + if in.TokenAudiences != nil { + in, out := &in.TokenAudiences, &out.TokenAudiences + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountRef. +func (in *ServiceAccountRef) DeepCopy() *ServiceAccountRef { + if in == nil { + return nil + } + out := new(ServiceAccountRef) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/const.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/const.go index 7b8a8b0b678..9d61baebaf2 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/const.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/const.go @@ -40,4 +40,9 @@ const ( // (/v1/auth/kubernetes). The endpoint will then be called at `/login`, so // left as the default, `/v1/auth/kubernetes/login` will be called. DefaultVaultKubernetesAuthMountPath = "/v1/auth/kubernetes" + + // Default mount path location for client certificate authentication + // (/v1/auth/cert). The endpoint will then be called at `/login`, so + // left as the default, `/v1/auth/cert/login` will be called. 
+ DefaultVaultClientCertificateAuthMountPath = "/v1/auth/cert" ) diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types.go index 276722793e9..1afeedf022e 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types.go @@ -50,6 +50,9 @@ const ( // Annotation key for certificate renewBefore. RenewBeforeAnnotationKey = "cert-manager.io/renew-before" + // Annotation key for certificate renewBeforePercentage. + RenewBeforePercentageAnnotationKey = "cert-manager.io/renew-before-percentage" + // Annotation key for emails subjectAltNames. EmailsAnnotationKey = "cert-manager.io/email-sans" @@ -145,6 +148,10 @@ const ( // controller only processes Ingresses with this annotation either unset, or // set to either the configured value or the empty string. IngressClassAnnotationKey = "kubernetes.io/ingress.class" + + // IngressSecretTemplate can be used to set the secretTemplate field in the generated Certificate. + // The value is a JSON representation of secretTemplate and must not have any unknown fields. + IngressSecretTemplate = "cert-manager.io/secret-template" ) // Annotation names for CertificateRequests diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go index 64e789443bb..68e2ccfb753 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go @@ -22,7 +22,7 @@ import ( cmmeta "github.com/cert-manager/cert-manager/pkg/apis/meta/v1" ) -// NOTE: Be mindful of adding OpenAPI validation- see https://github.com/cert-manager/cert-manager/issues/3644 +// NOTE: Be mindful of adding OpenAPI validation - see https://github.com/cert-manager/cert-manager/issues/3644 // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -128,9 +128,6 @@ type CertificateSpec struct { // More info: https://github.com/cert-manager/cert-manager/issues/4424 // // Cannot be set if the `subject` or `commonName` field is set. - // This is an Alpha Feature and is only enabled with the - // `--feature-gates=LiteralCertificateSubject=true` option set on both - // the controller and webhook components. // +optional LiteralSubject string `json:"literalSubject,omitempty"` @@ -167,9 +164,27 @@ type CertificateSpec struct { // If unset, this defaults to 1/3 of the issued certificate's lifetime. // Minimum accepted value is 5 minutes. // Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration. + // Cannot be set if the `renewBeforePercentage` field is set. // +optional RenewBefore *metav1.Duration `json:"renewBefore,omitempty"` + // `renewBeforePercentage` is like `renewBefore`, except it is a relative percentage + // rather than an absolute duration. For example, if a certificate is valid for 60 + // minutes, and `renewBeforePercentage=25`, cert-manager will begin to attempt to + // renew the certificate 45 minutes after it was issued (i.e. when there are 15 + // minutes (25%) remaining until the certificate is no longer valid). + // + // NOTE: The actual lifetime of the issued certificate is used to determine the + // renewal time. 
If an issuer returns a certificate with a different lifetime than + // the one requested, cert-manager will use the lifetime of the issued certificate. + // + // Value must be an integer in the range (0,100). The minimum effective + // `renewBefore` derived from the `renewBeforePercentage` and `duration` fields is 5 + // minutes. + // Cannot be set if the `renewBefore` field is set. + // +optional + RenewBeforePercentage *int32 `json:"renewBeforePercentage,omitempty"` + // Requested DNS subject alternative names. // +optional DNSNames []string `json:"dnsNames,omitempty"` @@ -182,6 +197,13 @@ type CertificateSpec struct { // +optional URIs []string `json:"uris,omitempty"` + // `otherNames` is an escape hatch for SANs that allows any type; we currently restrict support to string-like otherNames, cf. RFC 5280 p. 37. + // Any UTF8 String-valued otherName can be passed by setting the keys oid: x.x.x.x and UTF8Value: somevalue for `otherName`. + // Most commonly this would be a UPN set with oid: 1.3.6.1.4.1.311.20.2.3. + // You should ensure that any OID passed is valid for the UTF8String type, as we do not explicitly validate this. + // +optional + OtherNames []OtherName `json:"otherNames,omitempty"` + // Requested email subject alternative names. // +optional EmailAddresses []string `json:"emailAddresses,omitempty"` @@ -258,11 +280,31 @@ type CertificateSpec struct { // Defines extra output formats of the private key and signed certificate chain // to be written to this Certificate's target Secret. // - // This is an Alpha Feature and is only enabled with the - // `--feature-gates=AdditionalCertificateOutputFormats=true` option set on both + // This is a Beta Feature enabled by default. It can be disabled with the + // `--feature-gates=AdditionalCertificateOutputFormats=false` option set on both // the controller and webhook components. // +optional AdditionalOutputFormats []CertificateAdditionalOutputFormat `json:"additionalOutputFormats,omitempty"` + + // x.509 certificate NameConstraint extension which MUST NOT be used in a non-CA certificate. + // More Info: https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.10 + // + // This is an Alpha Feature and is only enabled with the + // `--feature-gates=NameConstraints=true` option set on both + // the controller and webhook components. + // +optional + NameConstraints *NameConstraints `json:"nameConstraints,omitempty"` +} + +type OtherName struct { + // OID is the object identifier for the otherName SAN. + // The object identifier must be expressed as a dotted string, for + // example, "1.2.840.113556.1.4.221". + OID string `json:"oid,omitempty"` + + // utf8Value is the string value of the otherName SAN. + // The utf8Value accepts any valid UTF8 string to set as the value for the otherName SAN. + UTF8Value string `json:"utf8Value,omitempty"` } // CertificatePrivateKey contains configuration options for private keys @@ -274,7 +316,7 @@ type CertificatePrivateKey struct { // re-issuance is being processed. // // If set to `Never`, a private key will only be generated if one does not - // already exist in the target `spec.secretName`. If one does exists but it + // already exist in the target `spec.secretName`. If one does exist but it // does not have the correct algorithm or size, a warning will be raised // to await user intervention.
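// Illustrative sketch, not part of the vendored upstream source: how the
// effective renewal point falls out of renewBeforePercentage per the doc
// comment above. A 60-minute certificate with renewBeforePercentage=25 is
// renewed 15 minutes before expiry, i.e. 45 minutes after issuance. Assumes a
// "time" import; pct must be in (0,100).
func exampleRenewalTime(notBefore, notAfter time.Time, pct int32) time.Time {
	lifetime := notAfter.Sub(notBefore) // actual issued lifetime, not the requested one
	renewBefore := time.Duration(int64(lifetime) * int64(pct) / 100)
	return notAfter.Add(-renewBefore) // renewal attempts begin here
}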
// If set to `Always`, a private key matching the specified requirements @@ -323,7 +365,7 @@ type PrivateKeyRotationPolicy string var ( // RotationPolicyNever means a private key will only be generated if one // does not already exist in the target `spec.secretName`. - // If one does exists but it does not have the correct algorithm or size, + // If one does exist but it does not have the correct algorithm or size, // a warning will be raised to await user intervention. RotationPolicyNever PrivateKeyRotationPolicy = "Never" @@ -433,6 +475,11 @@ type JKSKeystore struct { // PasswordSecretRef is a reference to a key in a Secret resource // containing the password used to encrypt the JKS keystore. PasswordSecretRef cmmeta.SecretKeySelector `json:"passwordSecretRef"` + + // Alias specifies the alias of the key in the keystore, required by the JKS format. + // If not provided, the default alias `certificate` will be used. + // +optional + Alias *string `json:"alias,omitempty"` } // PKCS12 configures options for storing a PKCS12 keystore in the @@ -452,8 +499,34 @@ type PKCS12Keystore struct { // PasswordSecretRef is a reference to a key in a Secret resource // containing the password used to encrypt the PKCS12 keystore. PasswordSecretRef cmmeta.SecretKeySelector `json:"passwordSecretRef"` + + // Profile specifies the key and certificate encryption algorithms and the HMAC algorithm + // used to create the PKCS12 keystore. Default value is `LegacyRC2` for backward compatibility. + // + // If provided, allowed values are: + // `LegacyRC2`: Deprecated. Not supported by default in OpenSSL 3 or Java 20. + // `LegacyDES`: Less secure algorithm. Use this option for maximal compatibility. + // `Modern2023`: Secure algorithm. Use this option in case you have to always use secure algorithms + // (eg. because of company policy). Please note that the security of the algorithm is not that important + // in reality, because the unencrypted certificate and private key are also stored in the Secret. + // +optional + Profile PKCS12Profile `json:"profile,omitempty"` } +// +kubebuilder:validation:Enum=LegacyRC2;LegacyDES;Modern2023 +type PKCS12Profile string + +const ( + // see: https://pkg.go.dev/software.sslmate.com/src/go-pkcs12#LegacyRC2 + LegacyRC2PKCS12Profile PKCS12Profile = "LegacyRC2" + + // see: https://pkg.go.dev/software.sslmate.com/src/go-pkcs12#LegacyDES + LegacyDESPKCS12Profile PKCS12Profile = "LegacyDES" + + // see: https://pkg.go.dev/software.sslmate.com/src/go-pkcs12#Modern2023 + Modern2023PKCS12Profile PKCS12Profile = "Modern2023" +) + // CertificateStatus defines the observed state of Certificate type CertificateStatus struct { // List of status conditions to indicate the status of certificates. @@ -463,7 +536,7 @@ type CertificateStatus struct { // +optional Conditions []CertificateCondition `json:"conditions,omitempty"` - // LastFailureTime is set only if the lastest issuance for this + // LastFailureTime is set only if the latest issuance for this // Certificate failed and contains the time of the failure. If an // issuance has failed, the delay till the next issuance will be // calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - @@ -522,7 +595,7 @@ type CertificateStatus struct { FailedIssuanceAttempts *int `json:"failedIssuanceAttempts,omitempty"` } -// CertificateCondition contains condition information for an Certificate. +// CertificateCondition contains condition information for a Certificate. 
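// Illustrative sketch, not part of the vendored upstream source: requesting
// both keystore flavors with the new alias and profile knobs. The Secret name
// is invented; ptrTo is the hypothetical helper from an earlier sketch, and
// Create is the existing opt-in flag on both keystore types.
var exampleKeystores = CertificateKeystores{
	JKS: &JKSKeystore{
		Create: true,
		Alias:  ptrTo("tls"), // overrides the default alias "certificate"
		PasswordSecretRef: cmmeta.SecretKeySelector{
			LocalObjectReference: cmmeta.LocalObjectReference{Name: "keystore-password"},
			Key:                  "password",
		},
	},
	PKCS12: &PKCS12Keystore{
		Create:  true,
		Profile: Modern2023PKCS12Profile, // avoids the LegacyRC2 default
		PasswordSecretRef: cmmeta.SecretKeySelector{
			LocalObjectReference: cmmeta.LocalObjectReference{Name: "keystore-password"},
			Key:                  "password",
		},
	},
}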
type CertificateCondition struct { // Type of the condition, known values are (`Ready`, `Issuing`). Type CertificateConditionType `json:"type"` @@ -554,7 +627,7 @@ type CertificateCondition struct { ObservedGeneration int64 `json:"observedGeneration,omitempty"` } -// CertificateConditionType represents an Certificate condition value. +// CertificateConditionType represents a Certificate condition value. type CertificateConditionType string const ( @@ -596,3 +669,41 @@ type CertificateSecretTemplate struct { // +optional Labels map[string]string `json:"labels,omitempty"` } + +// NameConstraints is a type to represent x509 NameConstraints +type NameConstraints struct { + // if true then the name constraints are marked critical. + // + // +optional + Critical bool `json:"critical,omitempty"` + // Permitted contains the constraints in which the names must be located. + // + // +optional + Permitted *NameConstraintItem `json:"permitted,omitempty"` + // Excluded contains the constraints which must be disallowed. Any name matching a + // restriction in the excluded field is invalid regardless + // of information appearing in the permitted + // + // +optional + Excluded *NameConstraintItem `json:"excluded,omitempty"` +} + +type NameConstraintItem struct { + // DNSDomains is a list of DNS domains that are permitted or excluded. + // + // +optional + DNSDomains []string `json:"dnsDomains,omitempty"` + // IPRanges is a list of IP Ranges that are permitted or excluded. + // This should be a valid CIDR notation. + // + // +optional + IPRanges []string `json:"ipRanges,omitempty"` + // EmailAddresses is a list of Email Addresses that are permitted or excluded. + // + // +optional + EmailAddresses []string `json:"emailAddresses,omitempty"` + // URIDomains is a list of URI domains that are permitted or excluded. + // + // +optional + URIDomains []string `json:"uriDomains,omitempty"` +} diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go index 59797c76c75..8f31d84c0a2 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go @@ -220,7 +220,7 @@ type CertificateRequestCondition struct { Message string `json:"message,omitempty"` } -// CertificateRequestConditionType represents an Certificate condition value. +// CertificateRequestConditionType represents a Certificate condition value. type CertificateRequestConditionType string const ( diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go index c901d9e7a40..efb1f5286da 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go @@ -149,9 +149,9 @@ type VenafiTPP struct { // for example: "https://tpp.example.com/vedsdk". URL string `json:"url"` - // CredentialsRef is a reference to a Secret containing the username and - // password for the TPP server. - // The secret must contain two keys, 'username' and 'password'. + // CredentialsRef is a reference to a Secret containing the Venafi TPP API credentials. 
+ // The secret must contain the key 'access-token' for Access Token Authentication, + // or two keys, 'username' and 'password', for API Keys Authentication. CredentialsRef cmmeta.LocalObjectReference `json:"credentialsRef"` // Base64-encoded bundle of PEM CAs which will be used to validate the certificate @@ -160,6 +160,14 @@ type VenafiTPP struct { // is used to validate the chain. // +optional CABundle []byte `json:"caBundle,omitempty"` + + // Reference to a Secret containing a base64-encoded bundle of PEM CAs + // which will be used to validate the certificate chain presented by the TPP server. + // Only used if using HTTPS; ignored for HTTP. Mutually exclusive with CABundle. + // If neither CABundle nor CABundleSecretRef is defined, the certificate bundle in + // the cert-manager controller container is used to validate the TLS connection. + // +optional + CABundleSecretRef *cmmeta.SecretKeySelector `json:"caBundleSecretRef,omitempty"` } // VenafiCloud defines connection configuration details for Venafi Cloud @@ -218,10 +226,20 @@ type VaultIssuer struct { // If no key for the Secret is specified, cert-manager will default to 'ca.crt'. // +optional CABundleSecretRef *cmmeta.SecretKeySelector `json:"caBundleSecretRef,omitempty"` + + // Reference to a Secret containing a PEM-encoded Client Certificate to use when the + // Vault server requires mTLS. + // +optional + ClientCertSecretRef *cmmeta.SecretKeySelector `json:"clientCertSecretRef,omitempty"` + + // Reference to a Secret containing a PEM-encoded Client Private Key to use when the + // Vault server requires mTLS. + // +optional + ClientKeySecretRef *cmmeta.SecretKeySelector `json:"clientKeySecretRef,omitempty"` } // VaultAuth is configuration used to authenticate with a Vault server. The -// order of precedence is [`tokenSecretRef`, `appRole` or `kubernetes`]. +// order of precedence is [`tokenSecretRef`, `appRole`, `clientCertificate` or `kubernetes`]. type VaultAuth struct { // TokenSecretRef authenticates with Vault by presenting a token. // +optional @@ -232,6 +250,12 @@ type VaultAuth struct { // +optional AppRole *VaultAppRole `json:"appRole,omitempty"` + // ClientCertificate authenticates with Vault by presenting a client + // certificate during the request's TLS handshake. + // Works only when using the HTTPS protocol. + // +optional + ClientCertificate *VaultClientCertificateAuth `json:"clientCertificate,omitempty"` + // Kubernetes authenticates with Vault by passing the ServiceAccount // token stored in the named Secret resource to the Vault server. // +optional @@ -256,6 +280,28 @@ type VaultAppRole struct { SecretRef cmmeta.SecretKeySelector `json:"secretRef"` } +// VaultClientCertificateAuth is used to authenticate against Vault using a client +// certificate stored in a Secret. +type VaultClientCertificateAuth struct { + // The Vault mountPath here is the mount path to use when authenticating with + // Vault. For example, setting a value of `/v1/auth/foo` will use the path + // `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the + // default value "/v1/auth/cert" will be used. + // +optional + Path string `json:"mountPath,omitempty"` + + // Reference to a Kubernetes Secret of type "kubernetes.io/tls" (hence containing + // tls.crt and tls.key) used to authenticate to Vault using TLS client + // authentication. + // +optional + SecretName string `json:"secretName,omitempty"` + + // Name of the certificate role to authenticate against. + // If not set, any available certificate role will be matched.
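// Illustrative sketch, not part of the vendored upstream source: Vault
// client-certificate auth via the new clientCertificate member of VaultAuth.
// Leaving the mount path empty means login goes to the default
// "/v1/auth/cert" + "/login". The Secret and role names are invented.
var exampleVaultAuth = VaultAuth{
	ClientCertificate: &VaultClientCertificateAuth{
		SecretName: "vault-client-tls", // kubernetes.io/tls Secret holding tls.crt and tls.key
		Name:       "cert-manager",     // Vault certificate role; leave empty to match any role
	},
}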
+ // +optional + Name string `json:"name,omitempty"` +} + // Authenticate against Vault using a Kubernetes ServiceAccount token stored in // a Secret. type VaultKubernetesAuth struct { @@ -287,13 +333,17 @@ type VaultKubernetesAuth struct { } // ServiceAccountRef is a service account used by cert-manager to request a -// token. The audience cannot be configured. The audience is generated by +// token. Default audience is generated by // cert-manager and takes the form `vault://namespace-name/issuer-name` for an // Issuer and `vault://issuer-name` for a ClusterIssuer. The expiration of the // token is also set by cert-manager to 10 minutes. type ServiceAccountRef struct { // Name of the ServiceAccount used to request a token. Name string `json:"name"` + // TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. The default token + // consisting of the issuer's namespace and name is always included. + // +optional + TokenAudiences []string `json:"audiences,omitempty"` } type CAIssuer struct { @@ -314,6 +364,12 @@ type CAIssuer struct { // OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". // +optional OCSPServers []string `json:"ocspServers,omitempty"` + + // IssuingCertificateURLs is a list of URLs which this issuer should embed into certificates + // it creates. See https://www.rfc-editor.org/rfc/rfc5280#section-4.2.2.1 for more details. + // As an example, such a URL might be "http://ca.domain.com/ca.crt". + // +optional + IssuingCertificateURLs []string `json:"issuingCertificateURLs,omitempty"` } // IssuerStatus contains status information about an Issuer diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/zz_generated.deepcopy.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/zz_generated.deepcopy.go index 8ba5ea3aaf6..9c024c6afe6 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/zz_generated.deepcopy.go @@ -41,6 +41,11 @@ func (in *CAIssuer) DeepCopyInto(out *CAIssuer) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.IssuingCertificateURLs != nil { + in, out := &in.IssuingCertificateURLs, &out.IssuingCertificateURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -124,7 +129,7 @@ func (in *CertificateKeystores) DeepCopyInto(out *CertificateKeystores) { if in.JKS != nil { in, out := &in.JKS, &out.JKS *out = new(JKSKeystore) - **out = **in + (*in).DeepCopyInto(*out) } if in.PKCS12 != nil { in, out := &in.PKCS12, &out.PKCS12 @@ -411,6 +416,11 @@ func (in *CertificateSpec) DeepCopyInto(out *CertificateSpec) { *out = new(metav1.Duration) **out = **in } + if in.RenewBeforePercentage != nil { + in, out := &in.RenewBeforePercentage, &out.RenewBeforePercentage + *out = new(int32) + **out = **in + } if in.DNSNames != nil { in, out := &in.DNSNames, &out.DNSNames *out = make([]string, len(*in)) @@ -426,6 +436,11 @@ func (in *CertificateSpec) DeepCopyInto(out *CertificateSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.OtherNames != nil { + in, out := &in.OtherNames, &out.OtherNames + *out = make([]OtherName, len(*in)) + copy(*out, *in) + } if in.EmailAddresses != nil { in, out := &in.EmailAddresses, &out.EmailAddresses *out = make([]string, len(*in)) @@ -467,6 +482,11 @@ func (in *CertificateSpec) DeepCopyInto(out *CertificateSpec) { *out = make([]CertificateAdditionalOutputFormat, len(*in)) copy(*out, *in) } + if 
in.NameConstraints != nil { + in, out := &in.NameConstraints, &out.NameConstraints + *out = new(NameConstraints) + (*in).DeepCopyInto(*out) + } return } @@ -766,6 +786,11 @@ func (in *IssuerStatus) DeepCopy() *IssuerStatus { func (in *JKSKeystore) DeepCopyInto(out *JKSKeystore) { *out = *in out.PasswordSecretRef = in.PasswordSecretRef + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } return } @@ -779,6 +804,84 @@ func (in *JKSKeystore) DeepCopy() *JKSKeystore { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameConstraintItem) DeepCopyInto(out *NameConstraintItem) { + *out = *in + if in.DNSDomains != nil { + in, out := &in.DNSDomains, &out.DNSDomains + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IPRanges != nil { + in, out := &in.IPRanges, &out.IPRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EmailAddresses != nil { + in, out := &in.EmailAddresses, &out.EmailAddresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.URIDomains != nil { + in, out := &in.URIDomains, &out.URIDomains + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameConstraintItem. +func (in *NameConstraintItem) DeepCopy() *NameConstraintItem { + if in == nil { + return nil + } + out := new(NameConstraintItem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameConstraints) DeepCopyInto(out *NameConstraints) { + *out = *in + if in.Permitted != nil { + in, out := &in.Permitted, &out.Permitted + *out = new(NameConstraintItem) + (*in).DeepCopyInto(*out) + } + if in.Excluded != nil { + in, out := &in.Excluded, &out.Excluded + *out = new(NameConstraintItem) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameConstraints. +func (in *NameConstraints) DeepCopy() *NameConstraints { + if in == nil { + return nil + } + out := new(NameConstraints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OtherName) DeepCopyInto(out *OtherName) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OtherName. +func (in *OtherName) DeepCopy() *OtherName { + if in == nil { + return nil + } + out := new(OtherName) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PKCS12Keystore) DeepCopyInto(out *PKCS12Keystore) { *out = *in @@ -820,6 +923,11 @@ func (in *SelfSignedIssuer) DeepCopy() *SelfSignedIssuer { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
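// Illustrative sketch, not part of the vendored upstream source: a CA
// certificate constrained to a single DNS subtree plus a UPN otherName SAN,
// using the NameConstraints and OtherName types introduced above. The domain
// and UPN are invented; NameConstraints needs the alpha feature gate named in
// its doc comment.
var exampleConstraints = NameConstraints{
	Critical: true, // mark the extension critical
	Permitted: &NameConstraintItem{
		DNSDomains: []string{".internal.example.com"}, // hypothetical permitted subtree
	},
}

var exampleOtherName = OtherName{
	OID:       "1.3.6.1.4.1.311.20.2.3", // UPN, as called out in the otherNames doc comment
	UTF8Value: "upn@example.com",        // hypothetical principal name
}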
func (in *ServiceAccountRef) DeepCopyInto(out *ServiceAccountRef) { *out = *in + if in.TokenAudiences != nil { + in, out := &in.TokenAudiences, &out.TokenAudiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -863,6 +971,11 @@ func (in *VaultAuth) DeepCopyInto(out *VaultAuth) { *out = new(VaultAppRole) **out = **in } + if in.ClientCertificate != nil { + in, out := &in.ClientCertificate, &out.ClientCertificate + *out = new(VaultClientCertificateAuth) + **out = **in + } if in.Kubernetes != nil { in, out := &in.Kubernetes, &out.Kubernetes *out = new(VaultKubernetesAuth) @@ -881,6 +994,22 @@ func (in *VaultAuth) DeepCopy() *VaultAuth { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultClientCertificateAuth) DeepCopyInto(out *VaultClientCertificateAuth) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultClientCertificateAuth. +func (in *VaultClientCertificateAuth) DeepCopy() *VaultClientCertificateAuth { + if in == nil { + return nil + } + out := new(VaultClientCertificateAuth) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VaultIssuer) DeepCopyInto(out *VaultIssuer) { *out = *in @@ -895,6 +1024,16 @@ func (in *VaultIssuer) DeepCopyInto(out *VaultIssuer) { *out = new(apismetav1.SecretKeySelector) **out = **in } + if in.ClientCertSecretRef != nil { + in, out := &in.ClientCertSecretRef, &out.ClientCertSecretRef + *out = new(apismetav1.SecretKeySelector) + **out = **in + } + if in.ClientKeySecretRef != nil { + in, out := &in.ClientKeySecretRef, &out.ClientKeySecretRef + *out = new(apismetav1.SecretKeySelector) + **out = **in + } return } @@ -915,7 +1054,7 @@ func (in *VaultKubernetesAuth) DeepCopyInto(out *VaultKubernetesAuth) { if in.ServiceAccountRef != nil { in, out := &in.ServiceAccountRef, &out.ServiceAccountRef *out = new(ServiceAccountRef) - **out = **in + (*in).DeepCopyInto(*out) } return } @@ -982,6 +1121,11 @@ func (in *VenafiTPP) DeepCopyInto(out *VenafiTPP) { *out = make([]byte, len(*in)) copy(*out, *in) } + if in.CABundleSecretRef != nil { + in, out := &in.CABundleSecretRef, &out.CABundleSecretRef + *out = new(apismetav1.SecretKeySelector) + **out = **in + } return } diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 00000000000..402433593c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 00000000000..d31b3781527 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 00000000000..a22953805c6 --- /dev/null +++ 
b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,123 @@ +# This is an example goreleaser.yaml file with some sane defaults. +# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + - ./gen.sh + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 00000000000..87d55747778 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,304 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------
+
+Files: s2/cmd/internal/filepathx/*
+
+Copyright 2016 The filepathx Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
new file mode 100644
index 00000000000..05c7359e481
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -0,0 +1,700 @@
+# compress
+
+This package provides various compression algorithms.
+
+* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go.
+* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy.
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams.
+* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
+* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) provides client and server wrappers for handling gzipped requests efficiently.
+* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
+ +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + +* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) + * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 + * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 + * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 +https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 + +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + +* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) + * zstd: Fix rare *CORRUPTION* output in "best" mode. 
See https://github.com/klauspost/compress/pull/876 + +* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) + * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 + * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + +* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) + * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 + * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 + * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 + * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 + * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 + * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+
+	See changes to v1.16.x
+
+
+* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
+	* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
+	* s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832
+
+* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6)
+	* zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806
+	* zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824
+	* gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815
+	* s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663
+
+* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5)
+	* zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802
+	* gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804
+
+* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
+	* zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
+	* zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792
+	* zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785
+	* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
+	* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
+	* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
+	* gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+
+* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
+	* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
+	* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
+	* s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766
+	* zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773
+	* huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774
+
+* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)
+	* s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685
+	* s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752
+	* s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755
+	* s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
+	* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
+	* s2c/s2sx: Use concurrent decoding.
https://github.com/klauspost/compress/pull/746 +
+ +
+
+	See changes to v1.15.x
+
+* Jan 21st, 2023 (v1.15.15)
+	* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
+	* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
+	* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
+	* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
+
+* Jan 3rd, 2023 (v1.15.14)
+
+	* flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718
+	* zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720
+	* export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722
+	* s2: Add example for indexing an existing stream https://github.com/klauspost/compress/pull/723
+
+* Dec 11, 2022 (v1.15.13)
+	* zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691
+	* zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708
+
+* Oct 26, 2022 (v1.15.12)
+
+	* zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680
+	* gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683
+
+* Sept 26, 2022 (v1.15.11)
+
+	* flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678
+	* zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677
+	* zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668
+	* zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667
+
+* Sept 16, 2022 (v1.15.10)
+
+	* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
+	* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
+	* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
+	* zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
+	* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
+	* s2: Improve "better" compression.
https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593
+	* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
+	* flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+
+
+* May 11, 2022 (v1.15.4)
+	* huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
+	* inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
+	* zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
+	* zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
+
+* May 5, 2022 (v1.15.3)
+	* zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
+	* s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
+
+* Apr 26, 2022 (v1.15.2)
+	* zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
+	* zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
+	* s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
+	* Minimum version is Go 1.16, added CI test on 1.18.
+
+* Mar 11, 2022 (v1.15.1)
+	* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+	* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+	* zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+	* zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+	* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
+* Mar 3, 2022 (v1.15.0)
+	* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+	* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+	* huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+	* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+	* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+	* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Stream decompression is now faster in asynchronous mode, since the goroutine allocation splits the workload much more effectively. On typical streams this will use 2 cores fully for decompression. When a stream has finished decoding, no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
+
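+A quick illustration of the synchronous mode described above (a minimal sketch, assuming only the `WithEncoderConcurrency`/`WithDecoderConcurrency` options documented for this package):
+
+```go
+package main
+
+import (
+	"bytes"
+	"io"
+	"strings"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+func main() {
+	var buf bytes.Buffer
+	// Concurrency 1 makes the encoder operate synchronously,
+	// without spawning goroutines.
+	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderConcurrency(1))
+	if err != nil {
+		panic(err)
+	}
+	if _, err := io.Copy(enc, strings.NewReader("some data to compress")); err != nil {
+		panic(err)
+	}
+	if err := enc.Close(); err != nil {
+		panic(err)
+	}
+
+	// The same applies to the decoder.
+	dec, err := zstd.NewReader(&buf, zstd.WithDecoderConcurrency(1))
+	if err != nil {
+		panic(err)
+	}
+	defer dec.Close()
+	if _, err := io.ReadAll(dec); err != nil {
+		panic(err)
+	}
+}
+```
+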
+ +
+
+	See changes to v1.14.x
+
+* Feb 22, 2022 (v1.14.4)
+	* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
+	* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
+	* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)
+	* huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+
+* Feb 17, 2022 (v1.14.3)
+	* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
+	* flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
+	* s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
+
+* Jan 25, 2022 (v1.14.2)
+	* zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
+	* zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
+	* zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
+	* zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
+	* flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
+	* zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
+
+* Jan 11, 2022 (v1.14.1)
+	* s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
+	* flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
+	* zstd: Performance improvement in [#420](https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
+	* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
+	* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+
+ +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+
+	See changes to v1.12.x
+
+* May 25, 2021 (v1.12.3)
+	* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
+	* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
+	* zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373)
+
+* Apr 27, 2021 (v1.12.2)
+	* zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365)
+	* zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363)
+	* deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367)
+	* s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359)
+	* s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362)
+	* s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368)
+
+* Apr 14, 2021 (v1.12.1)
+	* snappy package removed. Upstream added as dependency.
+	* s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353)
+	* s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352)
+	* s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348)
+	* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
+	* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
+	* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+
+ +
+
+	See changes to v1.11.x
+
+* Mar 26, 2021 (v1.11.13)
+	* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
+	* zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
+	* deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338)
+	* s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341)
+
+* Mar 5, 2021 (v1.11.12)
+	* s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives).
+	* s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328)
+
+* Mar 1, 2021 (v1.11.9)
+	* s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324)
+	* s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325)
+	* s2: Fix binaries.
+
+* Feb 25, 2021 (v1.11.8)
+	* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
+	* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
+	* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
+	* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
+	* zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313)
+
+* Jan 14, 2021 (v1.11.7)
+	* Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309)
+	* s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310)
+	* s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311)
+	* s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308)
+	* s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312)
+
+* Jan 7, 2021 (v1.11.6)
+	* zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306)
+	* zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305)
+
+* Dec 20, 2020 (v1.11.4)
+	* zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304)
+	* Add header decoder [#299](https://github.com/klauspost/compress/pull/299)
+	* s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297)
+	* Simplify/speed up small blocks with known max size.
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+
+	See changes prior to v1.10.0
+
+* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
+* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
+* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
+* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases.
+* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
+* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
+* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
+* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features
+* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
+* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
+* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
+* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
+* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
+* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
+* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
+* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate.
+* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
+* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
+* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
+* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
+* Nov 11, 2019: Reduce inflate memory use by 1KB.
+* Nov 10, 2019: Less allocations in deflate bit writer.
+* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
+* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
+* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
+* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
+* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
+
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
+* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
+* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
+* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
+* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
+* May 28, 2017: Reduce allocations when resetting decoder.
+* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
+* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
+* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before.
+* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
+* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
+* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
+* Mar 24, 2016: Small speedup for level 1-3.
+* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
+* Feb 19, 2016: Handle small payloads faster in level 1-3.
+* Feb 19, 2016: Added faster level 2 + 3 compression modes.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
+* Feb 14, 2016: Snappy: Merge upstream changes.
+* Feb 14, 2016: Snappy: Fix aggressive skipping.
+* Feb 14, 2016: Snappy: Update benchmark.
+* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
+* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard-to-compress content.
+* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
+* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
+* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
+* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
+* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
+* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update!
+* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
+* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
+* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
+* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+
+
+# deflate usage
+
+The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
+
+| old import         | new import                              | Documentation      |
+|--------------------|-----------------------------------------|--------------------|
+| `compress/gzip`    | `github.com/klauspost/compress/gzip`    | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) |
+| `compress/zlib`    | `github.com/klauspost/compress/zlib`    | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) |
+| `archive/zip`      | `github.com/klauspost/compress/zip`     | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) |
+| `compress/flate`   | `github.com/klauspost/compress/flate`   | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
+
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop-in replacement for gzip that supports multithreaded compression on big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+
+Currently there is only a minor speedup on decompression (mostly CRC32 calculation).
+
+Memory usage is typically 1MB for a Writer. The stdlib is in the same range.
+If you expect to have a lot of concurrently allocated Writers, consider using
+the stateless compression described below.
+
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
+To disable all assembly add `-tags=noasm`. This works across all packages.
+
+# Stateless compression
+
+This package offers stateless compression as a special option for gzip/deflate.
+It will do compression but without maintaining any state between Write calls.
+
+This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
+
+This is only relevant in cases where you expect to run many thousands of compressors concurrently,
+but with very little activity. This is *not* intended for regular web servers serving individual requests.
+
+Because of this, the size of actual Write calls will affect output size.
+
+In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
+
+For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter)
+
+A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
+
+```go
+	// replace 'ioutil.Discard' with your output.
+	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
+	if err != nil {
+		return err
+	}
+	defer gzw.Close()
+
+	w := bufio.NewWriterSize(gzw, 4096)
+	defer w.Flush()
+
+	// Write to 'w'
+```
+
+This will only use up to 4KB in memory when the writer is idle.
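+
+For direct deflate, a minimal sketch of the stateless writer could look like this (assuming only the `NewStatelessWriter` entry point documented above):
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/flate"
+)
+
+func main() {
+	var buf bytes.Buffer
+	// NewStatelessWriter keeps no state between Write calls,
+	// so each Write is emitted as an independent deflate block.
+	w := flate.NewStatelessWriter(&buf)
+	if _, err := w.Write([]byte("hello stateless world")); err != nil {
+		panic(err)
+	}
+	if err := w.Close(); err != nil {
+		panic(err)
+	}
+	fmt.Println("compressed bytes:", buf.Len())
+}
+```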
+
+Compression is almost always worse than the fastest compression level
+and each write will allocate (a little) memory.
+
+# Performance Update 2018
+
+It has been a while since we last compared the speed of this package to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+
+## Overall differences.
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide a smoother transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 have no significant gains - often spending a lot more time than can be justified by the achieved compression.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
+
+Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and speed than levels 6 & 7 of this package respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real-life terms we are dealing with something like a highly redundant stream of data, etc.
+
+It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams.
+
+We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
+
+
+## Huffman only compression
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content.
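+
+Selecting this mode is a one-line change; a rough sketch (assuming the stdlib-compatible `flate.HuffmanOnly` constant, which this package mirrors):
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/flate"
+)
+
+func main() {
+	var buf bytes.Buffer
+	// HuffmanOnly disables match searching entirely; only entropy coding is done.
+	w, err := flate.NewWriter(&buf, flate.HuffmanOnly)
+	if err != nil {
+		panic(err)
+	}
+	if _, err := w.Write(bytes.Repeat([]byte("some text "), 1000)); err != nil {
+		panic(err)
+	}
+	if err := w.Close(); err != nil {
+		panic(err)
+	}
+	fmt.Println("compressed size:", buf.Len())
+}
+```
+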
+For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a roughly 30% size increase for a 4 times speedup.
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented in Go 1.7 as "Huffman Only" mode, though it is not exposed for gzip.
+
+# Other packages
+
+Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):
+
+* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
+* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
+* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
+* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.
+
+# License
+
+This code is licensed under the same conditions as the original Go code. See LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md
new file mode 100644
index 00000000000..ca6685e2b72
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/SECURITY.md
@@ -0,0 +1,25 @@
+# Security Policy
+
+## Supported Versions
+
+Security updates are applied only to the latest release.
+
+## Vulnerability Definition
+
+A security vulnerability is a bug that, given certain input, triggers a crash or an infinite loop. Most calls will have varying execution time, and only in rare cases will slow operation be considered a security vulnerability.
+
+Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
+
+Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory even when appropriate options have been applied to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue.
+
+It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
+
+Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity, this package may or may not implement a workaround.
+
+## Reporting a Vulnerability
+
+If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
+
+Please disclose it via a [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
+
+This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go
new file mode 100644
index 00000000000..ea5a692d513
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/compressible.go
@@ -0,0 +1,85 @@
+package compress
+
+import "math"
+
+// Estimate returns a normalized compressibility estimate of block b.
+// Values close to zero are likely uncompressible.
+// Values above 0.1 are likely to be compressible.
+// Values above 0.5 are very compressible.
+// Very small lengths will return 0.
+func Estimate(b []byte) float64 {
+	if len(b) < 16 {
+		return 0
+	}
+
+	// Correctly predicted order 1
+	hits := 0
+	lastMatch := false
+	var o1 [256]byte
+	var hist [256]int
+	c1 := byte(0)
+	for _, c := range b {
+		if c == o1[c1] {
+			// We only count a hit if there were two correct predictions in a row.
+			if lastMatch {
+				hits++
+			}
+			lastMatch = true
+		} else {
+			lastMatch = false
+		}
+		o1[c1] = c
+		c1 = c
+		hist[c]++
+	}
+
+	// Use x^0.6 to give better spread
+	prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
+
+	// Calculate histogram distribution
+	variance := float64(0)
+	avg := float64(len(b)) / 256
+
+	for _, v := range hist {
+		Δ := float64(v) - avg
+		variance += Δ * Δ
+	}
+
+	stddev := math.Sqrt(float64(variance)) / float64(len(b))
+	exp := math.Sqrt(1 / float64(len(b)))
+
+	// Subtract expected stddev
+	stddev -= exp
+	if stddev < 0 {
+		stddev = 0
+	}
+	stddev *= 1 + exp
+
+	// Use x^0.4 to give better spread
+	entropy := math.Pow(stddev, 0.4)
+
+	// 50/50 weight between prediction and histogram distribution
+	return math.Pow((prediction+entropy)/2, 0.9)
+}
+
+// ShannonEntropyBits returns the minimum number of bits required to represent
+// an entropy encoding of the input bytes.
+// https://en.wiktionary.org/wiki/Shannon_entropy
+func ShannonEntropyBits(b []byte) int {
+	if len(b) == 0 {
+		return 0
+	}
+	var hist [256]int
+	for _, c := range b {
+		hist[c]++
+	}
+	shannon := float64(0)
+	invTotal := 1.0 / float64(len(b))
+	for _, v := range hist[:] {
+		if v > 0 {
+			n := float64(v)
+			shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+		}
+	}
+	return int(math.Ceil(shannon))
+}
diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md
new file mode 100644
index 00000000000..ea7324da671
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/README.md
@@ -0,0 +1,79 @@
+# Finite State Entropy
+
+This package provides Finite State Entropy encoding and decoding.
+
+Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
+encoding provides a fast near-optimal symbol encoding/decoding
+for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
+
+## News
+
+ * Feb 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low level interface that allows compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                   |
+|---------------------|-------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                             |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                      |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated   |
+| `(error)`           | An internal error occurred.                                                   |
+
+As can be seen above, some errors will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
+
+# Performance
+
+Many factors affect speed. Block sizes and the compressibility of the material are primary factors.
+All compression functions currently run only on the calling goroutine, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be
+beneficial to transpose all your input values down by 64.
+
+With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and
+around 300MB/s for decompression.
+
+The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
+
+# Plans
+
+At some point, more internals will be exposed to facilitate more "expert" usage of the components.
+
+A streaming interface is also likely to be implemented, likely compatible with the [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt open an issue before writing the PR.
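+
+For orientation, a minimal round trip might look like the sketch below (using only the API described above; error handling is abbreviated and the skewed sample input is purely illustrative):
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/fse"
+)
+
+func main() {
+	// A skewed byte distribution compresses well with pure entropy coding.
+	in := bytes.Repeat([]byte("aaaaaabbbc"), 100)
+
+	var s fse.Scratch
+	comp, err := fse.Compress(in, &s)
+	switch err {
+	case nil:
+		fmt.Printf("compressed %d -> %d bytes\n", len(in), len(comp))
+	case fse.ErrIncompressible, fse.ErrUseRLE:
+		// Expected outcomes under normal operation; the caller must
+		// store the block uncompressed or as RLE instead.
+		fmt.Println("not entropy coded:", err)
+		return
+	default:
+		panic(err)
+	}
+
+	// The output lives in the scratch buffer, so copy it and reset Out
+	// before re-using the scratch (see the note on `Out` above).
+	comp = append([]byte(nil), comp...)
+	s.Out = nil
+
+	got, err := fse.Decompress(comp, &s)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("round trip ok:", bytes.Equal(got, in))
+}
+```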
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 00000000000..f65eb3909cf --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. 
+ b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 00000000000..e82fa3bb7b6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,167 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. 
+func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 00000000000..abade2d6052 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 00000000000..074018d8f94 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,683 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. 
+func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + src = src[:ip] + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + default: + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + s.bw.close() + return nil +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
+func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if charnum > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m, symlen := uint32(0), s.symbolLen + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + symlen = uint16(i) + 1 + } + s.symbolLen = symlen + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
+func (s *Scratch) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.norm[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 15
+)
+
+// Decompress a block of data.
+// You can provide a scratch buffer to avoid allocations.
+// If nil is provided a temporary one will be allocated.
+// It is possible, but by no way guaranteed that corrupt data will
+// return an error.
+// It is up to the caller to verify integrity of the returned data.
+// Use a predefined Scratch to set maximum acceptable output size.
+func Decompress(b []byte, s *Scratch) ([]byte, error) {
+	s, err := s.prepare(b)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.Out[:0]
+	err = s.readNCount()
+	if err != nil {
+		return nil, err
+	}
+	err = s.buildDtable()
+	if err != nil {
+		return nil, err
+	}
+	err = s.decompress()
+	if err != nil {
+		return nil, err
+	}
+
+	return s.Out, nil
+}
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *Scratch) readNCount() error {
+	var (
+		charnum   uint16
+		previous0 bool
+		b         = &s.br
+	)
+	iend := b.remain()
+	if iend < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32()
+	nbBits := uint(bitStream&0xF) + minTablelog // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 {
+		if previous0 {
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				n0 += 24
+				if b.off < iend-5 {
+					b.advance(2)
+					bitStream = b.Uint32() >> bitCount
+				} else {
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			for charnum < n0 {
+				s.norm[charnum&0xff] = 0
+				charnum++
+			}
+
+			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				bitStream = b.Uint32() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*(threshold) - 1) - (remaining)
+		var count int32
+
+		if (int32(bitStream) & (threshold - 1)) < max {
+			count = int32(bitStream) & (threshold - 1)
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		count-- // extra accuracy
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+		}
+		bitStream = b.Uint32() >> (bitCount & 31)
+	}
+	s.symbolLen = charnum
+
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return nil
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+type decSymbol struct {
+	newState uint16
+	symbol   uint8
+	nbBits   uint8
+}
+
+// allocDtable will allocate decoding tables if they are not big enough.
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + if err := br.init(s.br.unread()); err != nil { + return err + } + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 00000000000..535cbadfdea --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+ ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. + DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. 
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 00000000000..aff942205f1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 00000000000..b3d262958f8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 00000000000..8b6e5c66383
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
+a Huffman codec designed for modern CPUs, featuring OoO (Out of Order) operation on multiple ALUs
+(Arithmetic Logic Units) and achieving extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low level interface that allows compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                   |
+|---------------------|-------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                             |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                      |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated   |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)        |
+| `(error)`           | An internal error occurred.                                                   |
+
+
+As can be seen above, some errors will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
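+
+A minimal single-stream round trip might look like the sketch below (default table and reuse settings assumed; error handling abbreviated and the sample input purely illustrative):
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("entropy coding works on skewed data "), 200)
+
+	var s huff0.Scratch
+	comp, _, err := huff0.Compress1X(in, &s)
+	switch err {
+	case nil:
+	case huff0.ErrIncompressible, huff0.ErrUseRLE:
+		// Normal outcomes; store the block without entropy coding instead.
+		fmt.Println("not entropy coded:", err)
+		return
+	default:
+		panic(err)
+	}
+	fmt.Printf("compressed %d -> %d bytes\n", len(in), len(comp))
+
+	// Copy the output and reset Out before re-using the scratch
+	// (see the note on buffer re-use below).
+	comp = append([]byte(nil), comp...)
+	s.Out = nil
+
+	// The block starts with the table; ReadTable returns the data part.
+	dec, data, err := huff0.ReadTable(comp, &s)
+	if err != nil {
+		panic(err)
+	}
+	got, err := dec.Decompress1X(data)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("round trip ok:", bytes.Equal(got, in))
+}
+```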
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do however note that this information is *not* stored in the output block, and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable` and it will return the data part of the block
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 00000000000..e36d9742f94
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,229 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// bitReaderBytes reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderBytes struct {
+ in []byte
+ off uint // next byte to read is at in[off - 1]
+ value uint64
+ bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderBytes) init(in []byte) error {
+ if len(in) < 1 {
+ return errors.New("corrupt stream: too short")
+ }
+ b.in = in
+ b.off = uint(len(in))
+ // The highest bit of the last byte indicates where to start
+ v := in[len(in)-1]
+ if v == 0 {
+ return errors.New("corrupt stream, did not find end of stream")
+ }
+ b.bitsRead = 64
+ b.value = 0
+ if len(in) >= 8 {
+ b.fillFastStart()
+ } else {
+ b.fill()
+ b.fill()
+ }
+ b.advance(8 - uint8(highBit32(uint32(v))))
+ return nil
+}
+
+// peekByteFast requires that at least one byte is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReaderBytes) peekByteFast() uint8 {
+ got := uint8(b.value >> 56)
+ return got
+}
+
+func (b *bitReaderBytes) advance(n uint8) {
+ b.bitsRead += n
+ b.value <<= n & 63
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReaderBytes) fillFast() {
+ if b.bitsRead < 32 {
+ return
+ }
+
+ // 2 bounds checks.
+ v := b.in[b.off-4 : b.off]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value |= uint64(low) << (b.bitsRead - 32)
+ b.bitsRead -= 32
+ b.off -= 4
+}
+
+// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read.
+func (b *bitReaderBytes) fillFastStart() {
+ // Do single re-slice to avoid bounds checks.
+ b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.bitsRead = 0
+ b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReaderBytes) fill() {
+ if b.bitsRead < 32 {
+ return
+ }
+ if b.off > 4 {
+ v := b.in[b.off-4 : b.off]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value |= uint64(low) << (b.bitsRead - 32)
+ b.bitsRead -= 32
+ b.off -= 4
+ return
+ }
+ for b.off > 0 {
+ b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8)
+ b.bitsRead -= 8
+ b.off--
+ }
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReaderBytes) finished() bool {
+ return b.off == 0 && b.bitsRead >= 64
+}
+
+func (b *bitReaderBytes) remaining() uint {
+ return b.off*8 + uint(64-b.bitsRead)
+}
+
+// close the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReaderBytes) close() error {
+ // Release reference.
+ b.in = nil
+ if b.remaining() > 0 {
+ return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
+ }
+ if b.bitsRead > 64 {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+
+// bitReaderShifted reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderShifted struct {
+ in []byte
+ off uint // next byte to read is at in[off - 1]
+ value uint64
+ bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 00000000000..0ebc9aaac76 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,102 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. 
value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. +func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 00000000000..84aa3d12f00 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,742 @@ +package huff0 + +import ( + "fmt" + "math" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. 
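+//
+// Annotation (derived from the implementation below): the data section of
+// the 4X output starts with a 6-byte jump table that stores the little-endian
+// uint16 lengths of the first three streams; the fourth length is implied by
+// the remaining input.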
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. 
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src), nil +} + +func (s *Scratch) compress1xDo(dst, src []byte) []byte { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + bw.close() + return bw.out +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + idx := len(s.Out) + s.Out = s.compress1xDo(s.Out, toDo) + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. 
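+// Note (annotation): Compress4X above currently keeps this parallel path
+// behind a disabled `if false` branch, since it was measured to be only
+// slightly faster than the serial compress4X.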
+func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + _ = s.count // Assert that s != nil to speed up the following loop. + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +//lint:ignore U1000 used for debugging +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
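+// For example (annotation): with srcLen = 1000 and symbolLen = 64,
+// minBitsSrc = highBit32(1000)+1 = 10 and minBitsSymbols = highBit32(63)+2 = 7,
+// so the minimum table log would be 7.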
+func (s *Scratch) minTableLog() uint8 {
+ minBitsSrc := highBit32(uint32(s.srcLen)) + 1
+ minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
+ if minBitsSrc < minBitsSymbols {
+ return uint8(minBitsSrc)
+ }
+ return uint8(minBitsSymbols)
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *Scratch) optimalTableLog() {
+ tableLog := s.TableLog
+ minBits := s.minTableLog()
+ maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1
+ if maxBitsSrc < tableLog {
+ // Accuracy can be reduced
+ tableLog = maxBitsSrc
+ }
+ if minBits > tableLog {
+ tableLog = minBits
+ }
+ // Need a minimum to safely represent all symbol values
+ if tableLog < minTablelog {
+ tableLog = minTablelog
+ }
+ if tableLog > tableLogMax {
+ tableLog = tableLogMax
+ }
+ s.actualTableLog = tableLog
+}
+
+type cTableEntry struct {
+ val uint16
+ nBits uint8
+ // We have 8 bits extra
+}
+
+const huffNodesMask = huffNodesLen - 1
+
+func (s *Scratch) buildCTable() error {
+ s.optimalTableLog()
+ s.huffSort()
+ if cap(s.cTable) < maxSymbolValue+1 {
+ s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1)
+ } else {
+ s.cTable = s.cTable[:s.symbolLen]
+ for i := range s.cTable {
+ s.cTable[i] = cTableEntry{}
+ }
+ }
+
+ var startNode = int16(s.symbolLen)
+ nonNullRank := s.symbolLen - 1
+
+ nodeNb := startNode
+ huffNode := s.nodes[1 : huffNodesLen+1]
+
+ // This overlays the slice above, but allows "-1" index lookups.
+ // Different from reference implementation.
+ huffNode0 := s.nodes[0 : huffNodesLen+1]
+
+ for huffNode[nonNullRank].count() == 0 {
+ nonNullRank--
+ }
+
+ lowS := int16(nonNullRank)
+ nodeRoot := nodeNb + lowS - 1
+ lowN := nodeNb
+ huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
+ huffNode[lowS].setParent(nodeNb)
+ huffNode[lowS-1].setParent(nodeNb)
+ nodeNb++
+ lowS -= 2
+ for n := nodeNb; n <= nodeRoot; n++ {
+ huffNode[n].setCount(1 << 30)
+ }
+ // fake entry, strong barrier
+ huffNode0[0].setCount(1 << 31)
+
+ // create parents
+ for nodeNb <= nodeRoot {
+ var n1, n2 int16
+ if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
+ n1 = lowS
+ lowS--
+ } else {
+ n1 = lowN
+ lowN++
+ }
+ if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
+ n2 = lowS
+ lowS--
+ } else {
+ n2 = lowN
+ lowN++
+ }
+
+ huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
+ huffNode0[n1+1].setParent(nodeNb)
+ huffNode0[n2+1].setParent(nodeNb)
+ nodeNb++
+ }
+
+ // distribute weights (unlimited tree height)
+ huffNode[nodeRoot].setNbBits(0)
+ for n := nodeRoot - 1; n >= startNode; n-- {
+ huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
+ }
+ for n := uint16(0); n <= nonNullRank; n++ {
+ huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
+ }
+ s.actualTableLog = s.setMaxHeight(int(nonNullRank))
+ maxNbBits := s.actualTableLog
+
+ // fill result into tree (val, nbBits)
+ if maxNbBits > tableLogMax {
+ return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
+ }
+ var nbPerRank [tableLogMax + 1]uint16
+ var valPerRank [16]uint16
+ for _, v := range huffNode[:nonNullRank+1] {
+ nbPerRank[v.nbBits()]++
+ }
+ // determine starting value per rank
+ {
+ min := uint16(0)
+ for n := maxNbBits; n > 0; n-- {
+ // get starting value within each rank
+ valPerRank[n] = min
+ min += nbPerRank[n]
+ min >>= 1
+ }
+ }
+
+ // push nbBits per symbol, symbol order
+ for _, v := range huffNode[:nonNullRank+1] {
+ s.cTable[v.symbol()].nBits = v.nbBits()
+ }
+
+ //
assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. + var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count() { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits() + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits() == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits() >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits() // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits() == maxNbBits { + n-- + } + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. +type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 +} + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 00000000000..54bd08b25c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1167 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
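+//
+// Annotation on the expected layout (see the implementation): the first byte
+// is the table header. A value >= 128 means the weights follow uncompressed
+// as 4-bit values, two per byte, with header-127 weights present; a smaller
+// value is the byte length of an FSE-compressed weight stream.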
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(nil) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, fmt.Errorf("fse decompress returned: %w", err) + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + 
single[i] = d + } + *rank += length + } + + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + bufs: &s.decPool, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 0 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + const shift = 56 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (56 + (8 - d.actualTableLog)) & 63 + + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0
+ const stream2 = 1
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
+ }
+
+ {
+ const stream = 2
+ const stream2 = 3
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
+ }
+
+ off += 4
+
+ if off == 0 {
+ if bufoff > dstEvery {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 1")
+ }
+ // There must at least be 3 buffers left.
+ if len(out)-bufoff < dstEvery*3 {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[uint8(br.value>>shift)].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 56 + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. 
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ if br.finished() {
+ d.bufs.Put(buf)
+ return nil, io.ErrUnexpectedEOF
+ }
+ if br.bitsRead >= 56 {
+ if br.off >= 4 {
+ v := br.in[br.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ br.value |= uint64(low) << (br.bitsRead - 32)
+ br.bitsRead -= 32
+ br.off -= 4
+ } else {
+ for br.off > 0 {
+ br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ }
+ // end inline...
+ if offset >= endsAt {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ v := single[br.peekByteFast()].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ d.bufs.Put(buf)
+ return nil, err
+ }
+ }
+ d.bufs.Put(buf)
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+ if s == nil || len(s.dt.single) == 0 {
+ return
+ }
+ dt := s.dt.single[:1<<s.actualTableLog]
+ tablelog := s.actualTableLog
+ ok := 0
+ broken := 0
+ for sym, enc := range ct {
+ errs := 0
+ broken++
+ if enc.nBits == 0 {
+ for _, dec := range dt {
+ if uint8(dec.entry>>8) == byte(sym) {
+ fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+ errs++
+ break
+ }
+ }
+ if errs == 0 {
+ broken--
+ }
+ continue
+ }
+ // Unused bits in input
+ ub := tablelog - enc.nBits
+ top := enc.val << ub
+ // decoder looks at top bits.
+ dec := dt[top]
+ if uint8(dec.entry) != enc.nBits {
+ fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+ errs++
+ }
+ if uint8(dec.entry>>8) != uint8(sym) {
+ fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+ errs++
+ }
+ if errs > 0 {
+ fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+ continue
+ }
+ // Ensure that all combinations are covered.
+ for i := uint16(0); i < (1 << ub); i++ {
+ vval := top | i
+ dec := dt[vval]
+ if uint8(dec.entry) != enc.nBits {
+ fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+ errs++
+ }
+ if uint8(dec.entry>>8) != uint8(sym) {
+ fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+ errs++
+ }
+ if errs > 20 {
+ fmt.Fprintf(w, "%d errors, stopping\n", errs)
+ break
+ }
+ }
+ if errs == 0 {
+ ok++
+ broken--
+ }
+ }
+ if broken > 0 {
+ fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 00000000000..ba7e8e6b027
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,226 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// and Decoder.Decompress1X that use an asm implementation of their main loops.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/klauspost/compress/internal/cpuinfo"
+)
+
+// decompress4x_main_loop_amd64 is an x86 assembler implementation
+// of Decompress4X when tablelog > 8.
+//
+//go:noescape
+func decompress4x_main_loop_amd64(ctx *decompress4xContext)
+
+// decompress4x_8b_main_loop_amd64 is an x86 assembler implementation
+// of Decompress4X when tablelog <= 8 which decodes 4 entries
+// per loop.
+//
+//go:noescape
+func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
+
+// fallback8BitSize is the size where using Go version is faster.
+const fallback8BitSize = 800
+
+type decompress4xContext struct {
+ pbr *[4]bitReaderShifted
+ peekBits uint8
+ out *byte
+ dstEvery int
+ tbl *dEntrySingle
+ decoded int
+ limit *byte
+}
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
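+//
+// For illustration (annotation, with hypothetical names): a typical call,
+// where dec is a *Decoder obtained from Scratch.Decoder(), is:
+//
+//	dst := make([]byte, 0, uncompressedSize)
+//	dst, err := dec.Decompress4X(dst, compressedBlock)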
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	if len(src) < 6+(4*1) {
+		return nil, errors.New("input too small")
+	}
+
+	use8BitTables := d.actualTableLog <= 8
+	if cap(dst) < fallback8BitSize && use8BitTables {
+		return d.decompress4X8bit(dst, src)
+	}
+
+	var br [4]bitReaderShifted
+	// Decode "jump table"
+	start := 6
+	for i := 0; i < 3; i++ {
+		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+		if start+length >= len(src) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err := br[i].init(src[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err := br[3].init(src[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// destination, offset to match first output
+	dstSize := cap(dst)
+	dst = dst[:dstSize]
+	out := dst
+	dstEvery := (dstSize + 3) / 4
+
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	single := d.dt.single[:tlSize]
+
+	var decoded int
+
+	if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
+		ctx := decompress4xContext{
+			pbr:      &br,
+			peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+			out:      &out[0],
+			dstEvery: dstEvery,
+			tbl:      &single[0],
+			limit:    &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
+		}
+		if use8BitTables {
+			decompress4x_8b_main_loop_amd64(&ctx)
+		} else {
+			decompress4x_main_loop_amd64(&ctx)
+		}
+
+		decoded = ctx.decoded
+		out = out[decoded/4:]
+	}
+
+	// Decode remaining.
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			br.fill()
+			if offset >= endsAt {
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			val := br.peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// decompress1x_main_loop_amd64 is an x86 assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+
+// decompress1x_main_loop_bmi2 is an x86 with BMI2 assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+
+type decompress1xContext struct {
+	pbr      *bitReaderShifted
+	peekBits uint8
+	out      *byte
+	outCap   int
+	tbl      *dEntrySingle
+	decoded  int
+}
+
+// Error reported by asm implementations
+const error_max_decoded_size_exeeded = -1
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 00000000000..c4c7ab2d1fe --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,830 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. + +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ (R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 + + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 48(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 + + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := 
table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 96(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 + + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 144(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 + + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 + + // exhausted += 
(br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 + + // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 + + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + 
+ // val1 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 + + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + 
+bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 00000000000..908c17de63f --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. 
+// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. 
+ if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
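+
+	// Tail: with fewer than 8 bytes of input left, decode the remaining
+	// symbols one at a time, refilling the bit buffer as needed.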
+	// br < 8, so uint8 is fine
+	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+	for bitsLeft > 0 {
+		br.fill()
+		if false && br.bitsRead >= 32 {
+			if br.off >= 4 {
+				v := br.in[br.off-4:]
+				v = v[:4]
+				low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+				br.value = (br.value << 32) | uint64(low)
+				br.bitsRead -= 32
+				br.off -= 4
+			} else {
+				for br.off > 0 {
+					br.value = (br.value << 8) | uint64(br.in[br.off-1])
+					br.bitsRead -= 8
+					br.off--
+				}
+			}
+		}
+		if len(dst) >= maxDecodedSize {
+			d.bufs.Put(bufs)
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+		nBits := uint8(v.entry)
+		br.advance(nBits)
+		bitsLeft -= nBits
+		dst = append(dst, uint8(v.entry>>8))
+	}
+	d.bufs.Put(bufs)
+	return dst, br.close()
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
new file mode 100644
index 00000000000..77ecd68e0a7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -0,0 +1,337 @@
+// Package huff0 provides fast huffman encoding as used in zstd.
+//
+// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details.
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+	"sync"
+
+	"github.com/klauspost/compress/fse"
+)
+
+const (
+	maxSymbolValue = 255
+
+	// zstandard limits tablelog to 11, see:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
+	tableLogMax     = 11
+	tableLogDefault = 11
+	minTablelog     = 5
+	huffNodesLen    = 512
+
+	// BlockSizeMax is the maximum input size for a single block uncompressed.
+	BlockSizeMax = 1<<18 - 1
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+
+	// ErrTooBig is returned if input is too large for a single block.
+	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned if the output is too large for a single block.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
+)
+
+type ReusePolicy uint8
+
+const (
+	// ReusePolicyAllow will allow reuse if it produces smaller output.
+	ReusePolicyAllow ReusePolicy = iota
+
+	// ReusePolicyPrefer will re-use aggressively if possible.
+	// This will not check if a new table will produce smaller output,
+	// except if the current table is impossible to use or
+	// the compressed output is bigger than the input.
+	ReusePolicyPrefer
+
+	// ReusePolicyNone will disable re-use of tables.
+	// This is slightly faster than ReusePolicyAllow but may produce larger output.
+	ReusePolicyNone
+
+	// ReusePolicyMust must allow reuse and produce smaller output.
+	ReusePolicyMust
+)
+
+type Scratch struct {
+	count [maxSymbolValue + 1]uint32
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is the output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next Compression/Decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// OutTable will contain the table data only, if a new table has been generated.
+	// Slice of the returned data.
+	OutTable []byte
+
+	// OutData will contain the compressed data.
+	// Slice of the returned data.
+	OutData []byte
+
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
+	srcLen int
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	// Must be <= 11 and >= 5.
+	TableLog uint8
+
+	// Reuse will specify the reuse policy.
+	Reuse ReusePolicy
+
+	// WantLogLess allows specifying a log2 reduction that should at least be achieved,
+	// otherwise the block will be returned as incompressible.
+	// The reduction should then at least be (input size >> WantLogLess).
+	// If WantLogLess == 0 any improvement will do.
+	WantLogLess uint8
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	maxCount       int    // count of the most probable symbol
+	clearCount     bool   // clear count
+	actualTableLog uint8  // Selected tablelog.
+	prevTableLog   uint8  // Tablelog for previous table
+	prevTable      cTable // Table used for previous compression.
+	cTable         cTable // compression table
+	dt             dTable // decompression table
+	nodes          []nodeElt
+	tmpOut         [4][]byte
+	fse            *fse.Scratch
+	decPool        sync.Pool // *[4][256]byte buffers.
+	huffWeight     [maxSymbolValue + 1]byte
+}
+
+// TransferCTable will transfer the previously used compression table.
+func (s *Scratch) TransferCTable(src *Scratch) {
+	if cap(s.prevTable) < len(src.prevTable) {
+		s.prevTable = make(cTable, 0, maxSymbolValue+1)
+	}
+	s.prevTable = s.prevTable[:len(src.prevTable)]
+	copy(s.prevTable, src.prevTable)
+	s.prevTableLog = src.prevTableLog
+}
+
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if len(in) > BlockSizeMax {
+		return nil, ErrTooBig
+	}
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = maxSymbolValue
+	}
+	if s.TableLog == 0 {
+		s.TableLog = tableLogDefault
+	}
+	if s.TableLog > tableLogMax || s.TableLog < minTablelog {
+		return nil, fmt.Errorf("invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
+	}
+	if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
+		s.MaxDecodedSize = BlockSizeMax
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	s.Out = s.Out[:0]
+
+	s.OutTable = nil
+	s.OutData = nil
+	if cap(s.nodes) < huffNodesLen+1 {
+		s.nodes = make([]nodeElt, 0, huffNodesLen+1)
+	}
+	s.nodes = s.nodes[:0]
+	if s.fse == nil {
+		s.fse = &fse.Scratch{}
+	}
+	s.srcLen = len(in)
+
+	return s, nil
+}
+
+type cTable []cTableEntry
+
+func (c cTable) write(s *Scratch) error {
+	var (
+		// precomputed conversion table
+		bitsToWeight [tableLogMax + 1]byte
+		huffLog      = s.actualTableLog
+		// last weight is not saved.
+		maxSymbolValue = uint8(s.symbolLen - 1)
+		huffWeight     = s.huffWeight[:256]
+	)
+	const (
+		maxFSETableLog = 6
+	)
+	// convert to weight
+	bitsToWeight[0] = 0
+	for n := uint8(1); n < huffLog+1; n++ {
+		bitsToWeight[n] = huffLog + 1 - n
+	}
+
+	// Acquire histogram for FSE.
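+	// Weights fit in 4 bits (tablelog is at most 11), so only hist[:16] is used.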
+ hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. + op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. 
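+// For example (illustrative): a uniform histogram over 256 symbols costs
+// log2(256) = 8 bits per symbol, so minSize is roughly the input length.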
+func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 00000000000..3954c51219b --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. +func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 00000000000..e802579c4f9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 00000000000..4465fbe9e90 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 00000000000..6050c10f4c8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 00000000000..40796a49d65 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. 
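+//
+// Typical call (illustrative; compressed is a placeholder name):
+//
+//	out, err := snapref.Decode(nil, compressed) // a nil dst is valid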
+func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. 
Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 00000000000..77395a6b8b9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 00000000000..13c6040a5de --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. 
+const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. 
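+//
+// Typical usage (illustrative sketch; dst and data are placeholder names):
+//
+//	w := snapref.NewBufferedWriter(dst)
+//	_, err := w.Write(data)
+//	// check err, write more as needed, then:
+//	err = w.Close()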
+func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. 
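+		// Header layout: obuf[len(magicChunk)] holds the chunk type, the
+		// next three bytes hold the little-endian chunk length (checksum
+		// plus body), and the four bytes after that hold the little-endian
+		// CRC-32C of the uncompressed data.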
+ w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go new file mode 100644 index 00000000000..2754bac6f16 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -0,0 +1,250 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). 
The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// EncodeBlockInto exposes encodeBlock but checks dst size. +func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. 
This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 00000000000..34d01f4aa63 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. 
+// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. 
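+	// (That is, MaxEncodedLen(65536) = 32 + 65536 + 65536/6 = 76490.)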
+ maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 00000000000..5a4412f9070 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.19 + diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 00000000000..92e2347bbc0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,441 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. +It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. + +This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. + +This package is pure Go and without use of "unsafe". + +The `zstd` package is provided as open source software using a Go standard license. + +Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. + +For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). + +## Installation + +Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. + +[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) + +## Compressor + +### Status: + +STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively +used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. + +There may still be specific combinations of data types/size/settings that could lead to edge cases, +so as always, testing is recommended. + +For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. + +* The "Fastest" compression ratio is roughly equivalent to zstd level 1. +* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Better" compression ratio is roughly equivalent to zstd level 7. +* The "Best" compression ratio is roughly equivalent to zstd level 11. + +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +The compression ratio compared to stdlib is around level 3, but usually 3x as fast. 
+
+
+### Usage
+
+An Encoder can be used for either compressing a stream via the
+`io.WriteCloser` interface supported by the Encoder or as multiple independent
+tasks via the `EncodeAll` function.
+For smaller encodes, use of the `EncodeAll` function is encouraged.
+Use `NewWriter` to create a new instance that can be used for both.
+
+To create a writer with default options, do the following:
+
+```Go
+// Compress input to output.
+func Compress(in io.Reader, out io.Writer) error {
+	enc, err := zstd.NewWriter(out)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(enc, in)
+	if err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```
+
+Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called.
+Even if your encode fails, you should still call `Close()` to release any resources that may be held up.
+
+The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
+
+To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output.
+This will allow the encoder to reuse all resources and avoid wasteful allocations.
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part
+of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change
+in the future. So if you want to limit concurrency for future updates, specify the concurrency
+you would like.
+
+If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`,
+which will compress input as each block is completed, blocking on writes until each has completed.
+
+You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined
+compression settings can be specified.
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However, the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently.
+Each call will only run on the same goroutine as the caller.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
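+
+As a minimal sketch of the concatenation property (the function name
+`ConcatDemo` is illustrative; error handling is omitted):
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+var enc, _ = zstd.NewWriter(nil)
+var dec, _ = zstd.NewReader(nil)
+
+// ConcatDemo returns "hello world": two blocks encoded independently
+// with EncodeAll decode as one combined stream.
+func ConcatDemo() ([]byte, error) {
+	a := enc.EncodeAll([]byte("hello "), nil)
+	b := enc.EncodeAll([]byte("world"), nil)
+	return dec.DecodeAll(append(a, b...), nil)
+}
+```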
+ +Especially when encoding blocks you should take special care to reuse the encoder. +This will effectively make it run without allocations after a warmup period. +To make it run completely without allocations, supply a destination buffer with space for all content. + +```Go +import "github.com/klauspost/compress/zstd" + +// Create a writer that caches compressors. +// For this operation type we supply a nil Reader. +var encoder, _ = zstd.NewWriter(nil) + +// Compress a buffer. +// If you have a destination buffer, the allocation in the call can also be eliminated. +func Compress(src []byte) []byte { + return encoder.EncodeAll(src, make([]byte, 0, len(src))) +} +``` + +You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` +option when creating the writer. + +Using the Encoder for both a stream and individual blocks concurrently is safe. + +### Performance + +I have collected some speed examples to compare speed and compression against other compressors. + +* `file` is the input file. +* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. +* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". +* `insize`/`outsize` is the input/output size. +* `millis` is the number of milliseconds used for compression. +* `mb/s` is megabytes (2^20 bytes) per second. + +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 + +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 + +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. 
+http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 + +Highly compressible JSON file. +https://files.klauspost.com/compress/github-june-2days-2019.json.zst + +file out level insize outsize millis mb/s +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 + +github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 +github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 +github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 +github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 + +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 + +VM Image, Linux mint with a few installed applications: +https://files.klauspost.com/compress/rawstudio-mint14.7z + +file out level insize outsize millis mb/s +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 + +rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 +rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 +rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 +rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 + +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 + +CSV data: +https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst + +file out level insize outsize millis mb/s +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 + +nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 +nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 +nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 +nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 + +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 +``` + +## Decompressor + +Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. + +This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), +kindly supplied by [fuzzit.dev](https://fuzzit.dev/). +The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, +or run it past its limits with ANY input provided. + +### Usage + +The package has been designed for two main usages, big streams of data and smaller in-memory buffers. 
+Both are accessed by creating a `Decoder`.
+
+For streaming use, a simple setup could look like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+func Decompress(in io.Reader, out io.Writer) error {
+	d, err := zstd.NewReader(in)
+	if err != nil {
+		return err
+	}
+	defer d.Close()
+
+	// Copy content...
+	_, err = io.Copy(out, d)
+	return err
+}
+```
+
+When running with default settings, it is important to call "Close" once you no longer
+need the Reader, so that its goroutines are stopped.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data
+only as it is requested.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+	return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary, use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+A dictionary will be used automatically for any data that specifies it.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary, use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used,
+and it will likely be used even if it doesn't improve compression.
+
+The same dictionary must then be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used, the output may be slightly larger than using no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time, so be sure to test performance when implementing.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+ +This means that you should *store* the decoder for best performance. +To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream. +A decoder can safely be re-used even if the previous stream failed. + +To release the resources, you must call the `Close()` function on a decoder. +After this it can *no longer be reused*, but all running goroutines will be stopped. +So you *must* use this if you will no longer need the Reader. + +For decompressing smaller buffers a single decoder can be used. +When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. +In this case no unneeded allocations should be made. + +### Concurrency + +The buffer decoder does everything on the same goroutine and does nothing concurrently. +It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. + +The stream decoder will create goroutines that: + +1) Reads input and splits the input into blocks. +2) Decompression of literals. +3) Decompression of sequences. +4) Reconstruction of output stream. + +So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. + +The concurrency level will, for streams, determine how many blocks ahead the compression will start. + +Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. + +In practice this means that concurrency is often limited to utilizing about 3 cores effectively. + +### Benchmarks + +The first two are streaming decodes and the last are smaller inputs. + +Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. + +``` +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op +``` + +This reflects the performance around May 2022, but this may be out of date. + +## Zstd inside ZIP files + +It is possible to use zstandard to compress individual files inside zip archives. 
+While this isn't widely supported it can be useful for internal files. + +To support the compression and decompression of these files you must register a compressor and decompressor. + +It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT +use the global registration functions. The main reason for this is that 2 registrations from +different packages will result in a panic. + +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 00000000000..25ca983941d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,136 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math/bits" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + value uint64 // Maybe use [16]byte, but shifting is awkward. + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return int(b.get32BitsFast(n)) +} + +// get32BitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) get32BitsFast(n uint8) uint32 { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. 
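+// It loads the last 8 bytes of the input as a single little-endian uint64.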
+func (b *bitReader) fillFastStart() { + v := b.in[len(b.in)-8:] + b.in = b.in[:len(b.in)-8] + b.value = binary.LittleEndian.Uint64(v) + b.bitsRead = 0 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if len(b.in) >= 4 { + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + return + } + + b.bitsRead -= uint8(8 * len(b.in)) + for len(b.in) > 0 { + b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) + b.in = b.in[:len(b.in)-1] + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return len(b.in) == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 00000000000..1952f175b0d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,112 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 31 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. 
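+// Wider values are emitted as two 32-bit halves with a flush32 in between.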
+func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. +// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 00000000000..03744fbc765 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,729 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. 
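+	// (A 30-bit offset always fits in a positive 32-bit int.)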
+ maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. + WindowSize uint64 + + err error + + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool + + // Frame to use for singlethreaded decoding. + // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + + // Block is RLE, this is the size. + RLESize uint32 + + Type blockType + + // Is this the last block of a frame? + Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + } + return &b +} + +// reset will reset the block. +// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + maxSize := maxCompressedBlockSizeAlloc + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debugDecoder { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSizeAlloc + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + compressedBlockOverAlloc + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debugDecoder { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } + case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. 
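+		// In lowMem mode (or for oversized blocks) allocate only what this
+		// block needs; otherwise allocate the maximum once so the buffer can
+		// be reused.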
+ if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) + } else { + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) + } + } + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debugDecoder { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err +} + +// Close will release resources. +// Closed blockDec cannot be reset. +func (b *blockDec) Close() { +} + +// decodeBuf +func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxCompressedBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } + err := b.decodeCompressed(hist) + if debugDecoder { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return in, ErrBlockTooSmall + } + + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + var literals []byte + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
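+			// The 3-byte header packs 4 bits of type and size format,
+			// followed by 20 bits: Regenerated_Size in the low 10 bits and
+			// Compressed_Size in the high 10 bits.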
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return in, ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return in, ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. 
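+		// b.literalBuf[:0:litRegenSize] hands huff0 an empty slice with
+		// exactly litRegenSize capacity, matching MaxDecodedSize set below.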
+ huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return in, err + } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize + // Use our out buffer. + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return in, err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
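+		// Literal-only block: with no sequences there is nothing further
+		// to execute.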
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + var nSeqs int + seqHeader := in[0] + switch { + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + seq.fse.setRLE(symb) + if debugDecoder { + printf("RLE set to 0x%x, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = seq.fse.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) + } + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") + } + + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] + } + return nil + } + br := seqs.br + if 
br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + } + + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) + } + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history anymore. + if hist.dict != nil { + hist.dict.content = nil + } + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) + if err != nil { + return err + } + return b.updateHistory(hist) +} + +func (b *blockDec) updateHistory(hist *history) error { + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } + } + hist.decoders.out, hist.decoders.literals = nil, nil + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 00000000000..32a7f401d5d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,909 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. +func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// swapEncoders will swap the sequence coders and the literal encoder with +// those of the provided block, so encoder state is carried over between blocks. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information.
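// Layout note (informal, mirrors the setters below): bits 0-1 hold the
// literalsBlockType, bits 2-3 the size format, and the regenerated/compressed
// sizes are packed from bit 4 upward; the top four bits are used internally to
// remember how many bytes appendTo must emit. For example:
//
//	var lh literalsHeader
//	lh.setType(literalsBlockCompressed)
//	lh.setSizes(195, 200, false) // both fit in 10 bits -> 3-byte header, four streams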
+type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. +func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: %d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// popOffsets will restore the recent offsets from the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset.
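// Worked example (informal): with recent offsets {1, 4, 8} (the initial state
// set by initNewEncode) and lits > 0, a match at absolute offset 4 is emitted
// as repeat code 2 and the table rotates to {4, 1, 8}; an offset not present
// in the table is emitted verbatim as offset+3 and pushed to the front.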
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. + if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debugEncoder { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRawTo will append a raw block representation of src to dst. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debugEncoder { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. +func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 16 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + if err == nil && len(out)+5 > len(lits) { + // If we are close, we may still be worse or equal to raw.
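// Numeric sanity check (illustrative): 200 literals compressed to 195 bytes
// need a 3-byte literals header, and 195+3 < 200, so the compressed form is
// kept; at 197 compressed bytes, 197+3 >= 200 and the block falls back to raw.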
+ var lh literalsHeader + lh.setSizes(len(out), len(lits), single) + if len(out)+lh.size() >= len(lits) { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram() + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 6) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. 
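// The "saved" margin above is roughly size - literals - size/64; e.g. a
// 4096-byte block with 4000 literal bytes yields 4096 - 4000 - 64 = 32 >= 16,
// so sequence compression is attempted, while anything tighter is routed to
// encodeLits instead.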
+ bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 16 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + if err == nil && len(out)+5 > len(b.literals) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSize(len(b.literals)) + szRaw := lh.size() + lh.setSizes(len(out), len(b.literals), single) + szComp := lh.size() + if len(out)+szComp >= len(b.literals)+szRaw { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... + if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). 
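// The penalty applied below works out to (nSize+256)>>4, i.e. roughly 6% plus
// 16 bits (sizes here are in bits, hence the >>3 in the debug prints); e.g. a
// new table estimated at 800 bits is charged 800 + 66 = 866 bits before being
// compared against the predefined and previous tables.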
+ nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + // Store sequences in reverse... 
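// The mode byte assembled above packs two bits per table, high to low:
// literal lengths, offsets, match lengths, with the bottom two bits reserved
// (the decoder rejects the block if they are non-zero). For example, FSE
// literal lengths, repeat offsets and RLE match lengths give
// (2<<6)|(3<<4)|(1<<2) = 0b10110100.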
+ for seq >= 0 { + s = b.sequences[seq] + + ofB := ofTT[s.ofCode] + wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. + //of.encode(ofB) + nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 + dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) + wr.addBits16NC(of.state, uint8(nbBitsOut)) + of.state = of.stateTable[dstState] + + // Accumulate extra bits. + outBits := ofB.outBits & 31 + extraBits := uint64(s.offset & bitMask32[outBits]) + extraBitsN := outBits + + mlB := mlTT[s.mlCode] + //ml.encode(mlB) + nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16 + dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState) + wr.addBits16NC(ml.state, uint8(nbBitsOut)) + ml.state = ml.stateTable[dstState] + + outBits = mlB.outBits & 31 + extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits]) + extraBitsN += outBits + + llB := llTT[s.llCode] + //ll.encode(llB) + nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16 + dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState) + wr.addBits16NC(ll.state, uint8(nbBitsOut)) + ll.state = ll.stateTable[dstState] + + outBits = llB.outBits & 31 + extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits]) + extraBitsN += outBits + + wr.flush32() + wr.addBits64NC(extraBits, extraBitsN) + + if debugSequences { + println("Encoded seq", seq, s) + } + + seq-- + } + ml.flush(mlEnc.actualTableLog) + of.flush(ofEnc.actualTableLog) + ll.flush(llEnc.actualTableLog) + wr.close() + b.output = wr.out + + // Maybe even add a bigger margin. + if len(b.output)-3-bhOffset >= b.size { + // Discard and encode as raw block. + b.output = b.encodeRawTo(b.output[:bhOffset], org) + b.popOffsets() + b.litEnc.Reuse = huff0.ReusePolicyNone + return nil + } + + // Size is output minus block header. + bh.setSize(uint32(len(b.output)-bhOffset) - 3) + if debugEncoder { + println("Rewriting block header", bh) + } + _ = bh.appendTo(b.output[bhOffset:bhOffset]) + b.coders.setPrev(llEnc, mlEnc, ofEnc) + return nil +} + +var errIncompressible = errors.New("incompressible") + +func (b *blockEnc) genCodes() { + if len(b.sequences) == 0 { + // nothing to do + return + } + if len(b.sequences) > math.MaxUint16 { + panic("can only encode up to 64K sequences") + } + // No bounds checks after here: + llH := b.coders.llEnc.Histogram() + ofH := b.coders.ofEnc.Histogram() + mlH := b.coders.mlEnc.Histogram() + for i := range llH { + llH[i] = 0 + } + for i := range ofH { + ofH[i] = 0 + } + for i := range mlH { + mlH[i] = 0 + } + + var llMax, ofMax, mlMax uint8 + for i := range b.sequences { + seq := &b.sequences[i] + v := llCode(seq.litLen) + seq.llCode = v + llH[v]++ + if v > llMax { + llMax = v + } + + v = ofCode(seq.offset) + seq.ofCode = v + ofH[v]++ + if v > ofMax { + ofMax = v + } + + v = mlCode(seq.matchLen) + seq.mlCode = v + mlH[v]++ + if v > mlMax { + mlMax = v + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) + } + } + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) + } + if debugAsserts && ofMax > maxOffsetBits { + panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) + } + if debugAsserts && llMax > maxLiteralLengthSymbol { + panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) + } + + b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) + b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) + b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) +} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go new file mode 100644 index 00000000000..01a01e486e1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go @@ -0,0 +1,85 @@ +// Code generated by "stringer
-type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. + +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 00000000000..55a388553df --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,131 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) + + // Read >8 bytes. + // MAY use the destination slice. 
+ readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. + skipN(n int64) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, io.ErrUnexpectedEOF + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int64) error { + bb := *b + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := io.ReadFull(r.r, r.tmp[:1]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 00000000000..0e59a242d8d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,82 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. 
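// For example (illustrative): with b.b = []byte{0x78, 0x56, 0x34, 0x12} and
// off = 0, both Int32 and Uint32 assemble the little endian value 0x12345678.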
+func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 00000000000..6a5a2988b6f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,261 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. 
+ // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. + // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
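// A typical probe looks like this (illustrative sketch; error handling
// elided, and "buf" is assumed to hold the start of a frame):
//
//	var h Header
//	n := len(buf)
//	if n > HeaderMaxSize {
//		n = HeaderMaxSize
//	}
//	if err := h.Decode(buf[:n]); err == nil && h.HasFCS {
//		// h.FrameContentSize bytes are expected after decompression.
//	}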
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { + *h = Header{} + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { + return nil, ErrMagicMismatch + } + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return in[4:], nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return nil, errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch len(b) { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch len(b) { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return in, nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. 
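// e.g. (illustrative) block header bytes {0x25, 0x03, 0x00} give bh = 0x000325:
// last = 1, block type = (bh>>1)&3 = 2 (compressed), and bh>>3 = 100 below.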
+ cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return in, nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) + f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 00000000000..bbca17234aa --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,948 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "context" + "encoding/binary" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Current read position used for Reader functionality. + current decoderState + + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + + // Custom dictionaries. + dicts map[uint32]*dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. +// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. 
It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which will considerably +// reduce the allocations normally caused by NewReader. +func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.crc = xxhash.New() + d.current.flushed = true + + if r == nil { + d.current.err = ErrDecoderNilInput + } + + // Transfer option dicts. + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes written and any error that occurred. +// When the stream is done, io.EOF will be returned. +func (d *Decoder) Read(p []byte) (int, error) { + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, d.current.err + } + } + } + if len(d.current.b) > 0 { + if debugDecoder { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debugDecoder { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder to the supplied stream after the current one has finished processing. +// Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. +func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + + d.drainOutput() + + d.syncStream.br.r = nil + if r == nil { + d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } + d.current.flushed = true + return nil + } + + // If the input is a bytes buffer and below 5MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { + bb2 := bb + if debugDecoder { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb2.Bytes() + var dst []byte + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] + } + + dst, err := d.DecodeAll(b, dst) + if err == nil { + err = io.EOF + } + // Save output buffer + d.syncStream.dstBuf = dst + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debugDecoder { + println("sync decode to", len(dst), "bytes, err:", err) + } + return nil + } + // Remove current block. + d.stashDecoder() + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.flushed = false + d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running...
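// Typical stream reuse of this API (illustrative sketch; streams, dst and the
// enclosing function returning error are assumed caller-side):
//
//	dec, _ := zstd.NewReader(nil) // nil reader: call Reset before reading
//	defer dec.Close()
//	for _, r := range streams {
//		if err := dec.Reset(r); err != nil {
//			return err
//		}
//		if _, err := io.Copy(dst, dec); err != nil {
//			return err
//		}
//	}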
+ d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. +func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + if debugDecoder { + println("cancelling current") + } + d.current.cancel() + d.current.cancel = nil + } + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + } + d.current.output = nil + d.current.flushed = true +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { + d.current.err = err2 + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.decoders == nil { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. 
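// Caller-side view (illustrative sketch): out, err := dec.DecodeAll(compressed, nil)
// appends to the supplied slice, so passing make([]byte, 0, expectedSize) with a
// caller-side size hint avoids regrowing the destination.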
+ block := <-d.decoders + frame := block.localFrame + initialSize := len(dst) + defer func() { + if debugDecoder { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } + return dst, err + } + if err = d.setDict(frame); err != nil { + return nil, err + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded + } + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + + if cap(dst) == 0 && !d.o.limitToCap { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if len(frame.bBuf) == 0 { + if debugDecoder { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... 
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + d.current.crc.Write(next.b) + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } + if d.current.err != nil { + return false + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. 
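+// Close is idempotent: a second call returns immediately, and after Close
+// any DecodeAll call reports ErrDecoderClosed.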
+func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.current.cancel != nil { + d.current.cancel() + d.streamWg.Wait() + d.current.cancel = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil +} + +// Create Decoder: +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { + defer d.streamWg.Done() + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... + go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
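+	// (This goroutine is stage 2 in the numbering above: the decodeStream
+	// loop at the bottom of this function reads frames and decodes block
+	// literals, the goroutine above decodes sequences, and this one
+	// executes them and sends decodeOutput values on the output channel.)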
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } + } + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil { + err = d.setDict(frame) + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", 
d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } + } + } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 00000000000..774c5f00fe4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,169 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math/bits" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []*dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, + } + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency sets the number of created decoders. 
+// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n < 0 { + return errors.New("concurrency must be at least 1") + } + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. +// This can be used to control memory usage of potentially hostile content. +// Maximum is 1 << 63 bytes. Default is 64GiB. +func WithDecoderMaxMemory(n uint64) DOption { + return func(o *decoderOptions) error { + if n == 0 { + return errors.New("WithDecoderMaxMemory must be at least 1") + } + if n > 1<<63 { + return errors.New("WithDecoderMaxmemory must be less than 1 << 63") + } + o.maxDecodedSize = n + return nil + } +} + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts = append(o.dicts, d) + } + return nil + } +} + +// WithDecoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + return nil + } +} + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. +// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. 
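+//
+// An illustrative sketch, assuming dec was created with NewReader(nil) and
+// this option enabled:
+//
+//	dst := make([]byte, 0, 1<<20) // allow at most 1 MiB of output
+//	out, err := dec.DecodeAll(compressed, dst)
+//	// err is ErrDecoderSizeExceeded if the frame would need more than
+//	// cap(dst)-len(dst) bytes.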
+func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 00000000000..b7b83164bc7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,565 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + offsets [3]int + content []byte +} + +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if string(b[:4]) != dictMagic { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, fmt.Errorf("loading literal table: %w", err) + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
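+		// (The predefined flag below keeps these tables out of the
+		// per-frame reuse path.) Dictionaries parsed here are normally
+		// registered when the decoder is created; an illustrative sketch,
+		// where dictBlob holds a trained dictionary:
+		//
+		//	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dictBlob))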
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} + +type BuildDictOptions struct { + // Dictionary ID. + ID uint32 + + // Content to use to create dictionary tables. + Contents [][]byte + + // History to use for all blocks. + History []byte + + // Offsets to use. + Offsets [3]int + + // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. + // See https://github.com/facebook/zstd/issues/3724 + CompatV155 bool + + // Use the specified encoder level. + // The dictionary will be built using the specified encoder level, + // which will reflect speed and make the dictionary tailored for that level. + // If not set SpeedBestCompression will be used. + Level EncoderLevel + + // DebugOut will write stats and other details here if set. + DebugOut io.Writer +} + +func BuildDict(o BuildDictOptions) ([]byte, error) { + initPredefined() + hist := o.History + contents := o.Contents + debug := o.DebugOut != nil + println := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintln(o.DebugOut, args...) + } + } + printf := func(s string, args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintf(o.DebugOut, s, args...) + } + } + print := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprint(o.DebugOut, args...) 
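+			// An illustrative BuildDict call (hedged sketch: samples and the
+			// ID are placeholders, and {1, 4, 8} are the format's default
+			// repeat offsets, as also used by WithDecoderDictRaw above):
+			//
+			//	d, err := zstd.BuildDict(zstd.BuildDictOptions{
+			//		ID:       12345,
+			//		Contents: samples,    // [][]byte of representative inputs
+			//		History:  samples[0], // shared history for all blocks
+			//		Offsets:  [3]int{1, 4, 8},
+			//	})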
+ } + } + + if int64(len(hist)) > dictMaxLength { + return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) + } + if len(hist) < 8 { + return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) + } + if len(contents) == 0 { + return nil, errors.New("no content provided") + } + d := dict{ + id: o.ID, + litEnc: nil, + llDec: sequenceDec{}, + ofDec: sequenceDec{}, + mlDec: sequenceDec{}, + offsets: o.Offsets, + content: hist, + } + block := blockEnc{lowMem: false} + block.init() + enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) + if o.Level != 0 { + eOpts := encoderOptions{ + level: o.Level, + blockSize: maxMatchLen, + windowSize: maxMatchLen, + dict: &d, + lowMem: false, + } + enc = eOpts.encoder() + } else { + o.Level = SpeedBestCompression + } + var ( + remain [256]int + ll [256]int + ml [256]int + of [256]int + ) + addValues := func(dst *[256]int, src []byte) { + for _, v := range src { + dst[v]++ + } + } + addHist := func(dst *[256]int, src *[256]uint32) { + for i, v := range src { + dst[i] += int(v) + } + } + seqs := 0 + nUsed := 0 + litTotal := 0 + newOffsets := make(map[uint32]int, 1000) + for _, b := range contents { + block.reset(nil) + if len(b) < 8 { + continue + } + nUsed++ + enc.Reset(&d, true) + enc.Encode(&block, b) + addValues(&remain, block.literals) + litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } + seqs += len(block.sequences) + block.genCodes() + addHist(&ll, block.coders.llEnc.Histogram()) + addHist(&ml, block.coders.mlEnc.Histogram()) + addHist(&of, block.coders.ofEnc.Histogram()) + for i, seq := range block.sequences { + if i > 3 { + break + } + offset := seq.offset + if offset == 0 { + continue + } + if int(offset) >= len(o.History) { + continue + } + if offset > 3 { + newOffsets[offset-3]++ + } else { + newOffsets[uint32(o.Offsets[offset-1])]++ + } + } + } + // Find most used offsets. + var sortedOffsets []uint32 + for k := range newOffsets { + sortedOffsets = append(sortedOffsets, k) + } + sort.Slice(sortedOffsets, func(i, j int) bool { + a, b := sortedOffsets[i], sortedOffsets[j] + if a == b { + // Prefer the longer offset + return sortedOffsets[i] > sortedOffsets[j] + } + return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] + }) + if len(sortedOffsets) > 3 { + if debug { + print("Offsets:") + for i, v := range sortedOffsets { + if i > 20 { + break + } + printf("[%d: %d],", v, newOffsets[v]) + } + println("") + } + + sortedOffsets = sortedOffsets[:3] + } + for i, v := range sortedOffsets { + o.Offsets[i] = int(v) + } + if debug { + println("New repeat offsets", o.Offsets) + } + + if nUsed == 0 || seqs == 0 { + return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) + } + if debug { + println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) + } + if seqs/nUsed < 512 { + // Use 512 as minimum. + nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } + } + copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { + hist := dst.Histogram() + var maxSym uint8 + var maxCount int + var fakeLength int + for i, v := range src { + if v > 0 { + v = v / nUsed + if v == 0 { + v = 1 + } + } + if v > maxCount { + maxCount = v + } + if v != 0 { + maxSym = uint8(i) + } + fakeLength += v + hist[i] = uint32(v) + } + + // Ensure we aren't trying to represent RLE. 
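+		// (If a single symbol held every slot, the normalized table would
+		// collapse to RLE; bumping a neighboring count keeps the written
+		// FSE table a real distribution.)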
+ if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + + dst.HistogramFinished(maxSym, maxCount) + dst.reUsed = false + dst.useRLE = false + err := dst.normalizeCount(fakeLength) + if err != nil { + return nil, err + } + if debug { + println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) + } + return dst.writeCount(nil) + } + if debug { + print("Literal lengths: ") + } + llTable, err := copyHist(block.coders.llEnc, &ll) + if err != nil { + return nil, err + } + if debug { + print("Match lengths: ") + } + mlTable, err := copyHist(block.coders.mlEnc, &ml) + if err != nil { + return nil, err + } + if debug { + print("Offsets: ") + } + ofTable, err := copyHist(block.coders.ofEnc, &of) + if err != nil { + return nil, err + } + + // Literal table + avgSize := litTotal + if avgSize > huff0.BlockSizeMax/2 { + avgSize = huff0.BlockSizeMax / 2 + } + huffBuff := make([]byte, 0, avgSize) + // Target size + div := litTotal / avgSize + if div < 1 { + div = 1 + } + if debug { + println("Huffman weights:") + } + for i, n := range remain[:] { + if n > 0 { + n = n / div + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + if debug { + printf("[%d: %d], ", i, n) + } + } + } + if o.CompatV155 && remain[255]/div == 0 { + huffBuff = append(huffBuff, 255) + } + scratch := &huff0.Scratch{TableLog: 11} + for tries := 0; tries < 255; tries++ { + scratch = &huff0.Scratch{TableLog: 11} + _, _, err = huff0.Compress1X(huffBuff, scratch) + if err == nil { + break + } + if debug { + printf("Try %d: Huffman error: %v\n", tries+1, err) + } + huffBuff = huffBuff[:0] + if tries == 250 { + if debug { + println("Huffman: Bailing out with predefined table") + } + + // Bail out.... Just generate something + huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) + for i := 0; i < 128; i++ { + huffBuff = append(huffBuff, byte(i)) + } + continue + } + if errors.Is(err, huff0.ErrIncompressible) { + // Try truncating least common. + for i, n := range remain[:] { + if n > 0 { + n = n / (div * (i + 1)) + if n > 0 { + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + } + } + } + if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { + huffBuff = append(huffBuff, 255) + } + if len(huffBuff) == 0 { + huffBuff = append(huffBuff, 0, 255) + } + } + if errors.Is(err, huff0.ErrUseRLE) { + for i, n := range remain[:] { + n = n / (div * (i + 1)) + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
+ } + } + } + + var out bytes.Buffer + out.Write([]byte(dictMagic)) + out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) + out.Write(scratch.OutTable) + if debug { + println("huff table:", len(scratch.OutTable), "bytes") + println("of table:", len(ofTable), "bytes") + println("ml table:", len(mlTable), "bytes") + println("ll table:", len(llTable), "bytes") + } + out.Write(ofTable) + out.Write(mlTable) + out.Write(llTable) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) + out.Write(hist) + if debug { + _, err := loadDict(out.Bytes()) + if err != nil { + panic(err) + } + i, err := InspectDictionary(out.Bytes()) + if err != nil { + panic(err) + } + println("ID:", i.ID()) + println("Content size:", i.ContentSize()) + println("Encoder:", i.LitEncoder() != nil) + println("Offsets:", i.Offsets()) + var totalSize int + for _, b := range contents { + totalSize += len(b) + } + + encWith := func(opts ...EOption) int { + enc, err := NewWriter(nil, opts...) + if err != nil { + panic(err) + } + defer enc.Close() + var dst []byte + var totalSize int + for _, b := range contents { + dst = enc.EncodeAll(b, dst[:0]) + totalSize += len(dst) + } + return totalSize + } + plain := encWith(WithEncoderLevel(o.Level)) + withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) + println("Input size:", totalSize) + println("Plain Compressed:", plain) + println("Dict Compressed:", withDict) + println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") + } + return out.Bytes(), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 00000000000..5ca46038ad9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,173 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 6 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + bufferReset int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. 
+func (e *fastBase) Block() *blockEnc { + return e.blk +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > e.bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.ensureHist(len(src)) + } else { + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// ensureHist will ensure that history can keep at least this many bytes. +func (e *fastBase) ensureHist(n int) { + if cap(e.hist) >= n { + return + } + l := e.maxMatchOff + if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { + l += maxCompressedBlockSize + } else { + l += e.maxMatchOff + } + // Make it at least 1MB. + if l < 1<<20 && !e.lowMem { + l = 1 << 20 + } + // Make it at least the requested size. + if l < int32(n) { + l = int32(n) + } + e.hist = make([]byte, 0, l) +} + +// useBlock will replace the block with the provided one, +// but transfer recent offsets from the previous. +func (e *fastBase) UseBlock(enc *blockEnc) { + enc.reset(e.blk) + e.blk = enc +} + +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { + if debugAsserts { + if s < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if t < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if s-t > e.maxMatchOff { + err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) + panic(err) + } + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } + } + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastBase) resetBase(d *dict, singleBlock bool) { + if e.blk == nil { + e.blk = &blockEnc{lowMem: e.lowMem} + e.blk.init() + } else { + e.blk.reset(nil) + } + e.blk.initNewEncode() + if e.crc == nil { + e.crc = xxhash.New() + } else { + e.crc.Reset() + } + e.blk.dictLitEnc = nil + if d != nil { + low := e.lowMem + if singleBlock { + e.lowMem = true + } + e.ensureHist(d.ContentSize() + maxCompressedBlockSize) + e.lowMem = low + } + + // We offset current position so everything will be out of reach. + // If above reset line, history will be purged. + if e.cur < e.bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] + if d != nil { + // Set offsets (currently not used) + for i, off := range d.offsets { + e.blk.recentOffsets[i] = uint32(off) + e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] + } + // Transfer litenc. + e.blk.dictLitEnc = d.litEnc + e.hist = append(e.hist, d.content...) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go new file mode 100644 index 00000000000..4613724e9d1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -0,0 +1,560 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
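+//
+// This file holds the "best" match finder; an illustrative way to select
+// it (assuming it backs SpeedBestCompression, as dict.go above suggests):
+//
+//	enc, err := zstd.NewWriter(w, zstd.WithEncoderLevel(zstd.SpeedBestCompression))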
+ +package zstd + +import ( + "bytes" + "fmt" + + "github.com/klauspost/compress" +) + +const ( + bestLongTableBits = 22 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + bestShortTableBits = 18 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + +) + +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 +} + +const highScore = maxMatchLen * 8 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep) & 3) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
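+		// (Table entries store absolute positions, offset by e.cur;
+		// everything older than the match window becomes 0, and surviving
+		// entries are re-based so they stay valid once e.cur is reset to
+		// e.maxMatchOff below.)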
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. + bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + const goodEnough = 250 + + cv := load6432(src, s) + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + // Set m to a match at offset if it looks like that will improve compression. + improve := func(m *match, offset int32, s int32, first uint32, rep int32) { + delta := s - offset + if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { + return + } + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. + if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if m.rep <= 0 { + // Extend candidate match backwards as far as possible. + // Do not extend repeats as we can assume they are optimal + // and offsets change if s == nextEmit. 
+ tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + if debugAsserts { + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + } + if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { + panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } + } + + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) + + if canRepeat && best.length < goodEnough { + if s == nextEmit { + // Check repeats straight after a match. + improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + index0 := s + 1 + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + continue + } + + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) + // Long at s+1, s+2 + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) + if false { + // Short at s+3. + // Too often worse... + improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) + } + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. 
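+			// (Hash the bytes where the current best match ends; a long-table
+			// entry whose occurrence would end at the same position points
+			// back at a candidate start that may yield a longer full match.)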
+ if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } + } + } + } + } + + if debugAsserts { + if best.offset >= best.s { + panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) + } + if best.s < nextEmit { + panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) + } + if best.offset < s-e.maxMatchOff { + panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) + } + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + s = best.s + if best.rep > 0 { + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + addLiterals(&seq, best.s) + + // Repeat. If bit 4 is set, this is a non-lit repeat. + seq.offset = uint32(best.rep & 3) + if debugSequences { + println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) + } + blk.sequences = append(blk.sequences, seq) + + // Index old s + 1 -> s - 1 + s = best.s + best.length + nextEmit = s + + // Index skipped... + end := s + if s > sLimit+4 { + end = sLimit + 4 + } + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + + switch best.rep { + case 2, 4 | 1: + offset1, offset2 = offset2, offset1 + case 3, 4 | 2: + offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 + } + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop + } + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Write our sequence + var seq seq + l := best.length + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + + // Index old s + 1 -> s - 1 or sLimit + end := s + if s > sLimit-4 { + end = sLimit - 4 + } + + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + off++ + } + if s >= sLimit { + break encodeLoop + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git 
a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 00000000000..a4f5bf91fc6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1252 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
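+					// (A sequence with zero literals changes how repeat
+					// offsets are interpreted in the zstd format, so keep at
+					// least one literal in front of the repeat.)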
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
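
// --- editor's example (illustrative sketch, not part of the vendored file) ---
// The loops above extend a found match backwards one byte at a time while
// the bytes before both positions agree, stopping at the window edge (tMin)
// and before consuming the whole pending literal run (sMin stands in for the
// startLimit/nextEmit guard). A stand-alone version of that rule:
package main

import "fmt"

func extendBackwards(src []byte, s, t, tMin, sMin int) (int, int, int) {
	n := 0
	for t > tMin && s > sMin && src[t-1] == src[s-1] {
		s--
		t--
		n++
	}
	return s, t, n
}

func main() {
	src := []byte("xyabcdxyabcd")
	// Match found: src[8:] vs src[2:] ("abcd"); both are preceded by "xy".
	s, t, n := extendBackwards(src, 8, 2, 0, 1)
	fmt.Println(s, t, n) // 6 0 2 - the match grew backwards by two bytes
}
// --- end editor's example ---
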
+ prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
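
// --- editor's example (illustrative sketch, not part of the vendored file) ---
// When no match is found, the main loop above advances by stepSize plus a
// term that grows with the current literal run, so incompressible input is
// scanned progressively faster. With this encoder's kSearchStrength of 9 the
// skip grows by one byte per 256 unmatched bytes:
package main

import "fmt"

func main() {
	const stepSize = 1
	const kSearchStrength = 9
	for _, run := range []int32{0, 255, 256, 1024, 4096} {
		skip := stepSize + (run >> (kSearchStrength - 1))
		fmt.Printf("literal run %4d -> advance %2d bytes\n", run, skip)
	}
}
// --- end editor's example ---
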
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. 
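
// --- editor's example (illustrative sketch, not part of the vendored file) ---
// Sequences store offsets in zstd's combined form, which is why the code
// above writes seq.offset = uint32(s-t) + 3 for a fresh match but 1 or 2 for
// repeat matches: codes 1-3 select a recent offset, larger values carry the
// literal offset shifted by 3. A simplified decoder-side view of that
// convention (the full spec also special-cases litLen == 0 and code 3):
package main

import "fmt"

func resolveOffset(code uint32, recent *[3]uint32) uint32 {
	if code > 3 {
		off := code - 3
		recent[2], recent[1], recent[0] = recent[1], recent[0], off
		return off
	}
	off := recent[code-1]
	if code != 1 { // promote the used offset to the front
		recent[code-1], recent[0] = recent[0], off
	}
	return off
}

func main() {
	recent := [3]uint32{1, 4, 8}
	fmt.Println(resolveOffset(103, &recent)) // 100: literal offset (103 - 3)
	fmt.Println(resolveOffset(1, &recent))   // 100: repeat offset 0
	fmt.Println(recent)                      // [100 1 4]
}
// --- end editor's example ---
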
+// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. 
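
// --- editor's example (illustrative sketch, not part of the vendored file) ---
// The dictionary variant below calls markLongShardDirty/markShortShardDirty
// on every table write so that Reset can restore only the touched shards
// from the cached dictionary tables instead of copying everything. A minimal
// model of that bookkeeping, with made-up sizes:
package main

import "fmt"

const (
	tableSize = 1 << 8
	shardCnt  = 1 << 3
	shardSize = tableSize / shardCnt
)

type shardedTable struct {
	table [tableSize]int32
	dirty [shardCnt]bool
}

func (t *shardedTable) set(i uint32, v int32) {
	t.table[i] = v
	t.dirty[i/shardSize] = true
}

// reset restores dirty shards from a pristine copy and clears the flags.
func (t *shardedTable) reset(pristine *[tableSize]int32) (restored int) {
	for i := range t.dirty {
		if !t.dirty[i] {
			continue
		}
		copy(t.table[i*shardSize:(i+1)*shardSize], pristine[i*shardSize:(i+1)*shardSize])
		t.dirty[i] = false
		restored++
	}
	return restored
}

func main() {
	var pristine [tableSize]int32
	var t shardedTable
	t.set(5, 42)
	t.set(200, 7)
	fmt.Println(t.reset(&pristine)) // 2 - only two shards copied back
}
// --- end editor's example ---
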
+ start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
+ matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
+ matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = betterShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + e.dictTable[nextHash3] = tableEntry{ + val: uint32(cv >> 24), + offset: i + 3, + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Reset table to initial state + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + 
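
// --- editor's example (illustrative sketch, not part of the vendored file) ---
// The dictLongTable loop above maintains an 8-byte little-endian window with
// one shift and one OR per position (cv = cv>>8 | next<<56) instead of
// reloading 8 bytes each step. A self-contained equivalence check against
// direct loads:
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	data := []byte("0123456789abcdef")
	cv := binary.LittleEndian.Uint64(data) // window at position 0
	for i := 1; i+8 <= len(data); i++ {
		cv = cv>>8 | uint64(data[i+7])<<56 // slide the window by one byte
		if cv != binary.LittleEndian.Uint64(data[i:]) {
			panic("windows diverged")
		}
	}
	fmt.Println("rolling window matches direct loads")
}
// --- end editor's example ---
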
for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } + } + } + e.cur = e.maxMatchOff + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 00000000000..a154c18f741 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,1123 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash + + dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastShortLen = 5 // Bytes used for table hash + +) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [dFastShortTableSize]tableEntry{} + e.longTable = [dFastLongTableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. 
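
// --- editor's example (illustrative sketch, not part of the vendored file) ---
// The wraparound handling in these encoders rebases stored table offsets so
// that the oldest still-reachable position maps to maxMatchOff, and zeroes
// anything older than minOff. A compact model of that rebasing rule:
package main

import "fmt"

func rebase(offsets []int32, cur, histLen, maxMatchOff int32) {
	minOff := cur + histLen - maxMatchOff
	for i, v := range offsets {
		if v < minOff {
			offsets[i] = 0 // too far back: forget the entry
		} else {
			offsets[i] = v - cur + maxMatchOff // shift into the new window
		}
	}
}

func main() {
	offs := []int32{100, 900, 1500}
	rebase(offs, 1000, 600, 800) // minOff = 800
	fmt.Println(offs)            // [0 700 1300]
}
// --- end editor's example ---
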
+ const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
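
// --- editor's example (illustrative sketch, not part of the vendored file) ---
// EncodeNoHist above keeps the tables but does not keep src, so stale
// entries would point at memory that no longer backs the recorded offsets.
// Advancing e.cur by the block length makes every stale entry fail the
// max-match-offset distance test on the next call. A simplified model of
// that invariant (the real check also rebases candidate offsets by e.cur):
package main

import "fmt"

func main() {
	const maxMatchOff = 1 << 10
	storedOff := int32(300) // absolute position recorded during this block
	cur := int32(0)

	cur += 2048 // EncodeNoHist: advance cur past the un-kept block

	s := int32(100) // some position in the next block
	distance := s + cur - storedOff
	fmt.Println(distance < maxMatchOff) // false - stale entry is ignored
}
// --- end editor's example ---
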
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + allDirty = true + } + // Reset table to initial state + e.cur = e.maxMatchOff + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 00000000000..f45a3da7dae --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,891 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
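
// --- editor's example (illustrative sketch, not part of the vendored file) ---
// The Reset above replaces the commented-out copy() calls with Go 1.17+
// slice-to-array-pointer conversions, which let the compiler treat the
// restore as a fixed-size copy after a single length check (the conversion
// panics if the slice is shorter than the array). The same idea in
// miniature:
package main

import "fmt"

func main() {
	src := make([]int32, 8)
	for i := range src {
		src[i] = int32(i)
	}
	var dst [8]int32
	dst = *(*[8]int32)(src)     // one length check, then a fixed-size copy
	src[0] = 99                 // dst is an independent copy
	fmt.Println(dst[0], dst[7]) // 0 7
}
// --- end editor's example ---
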
+ maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry +} + +type fastEncoderDict struct { + fastEncoder + dictTable []tableEntry + tableShardDirty [tableShardCnt]bool + allDirty bool +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debugEncoder { + if len(src) > maxCompressedBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [tableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 2 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 00000000000..72af7ef0fe0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,619 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + "math" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
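+//
+// A minimal streaming sketch (illustrative only; w is an io.Writer,
+// r an io.Reader, and error handling is elided):
+//
+//	enc, _ := NewWriter(w)
+//	_, _ = io.Copy(enc, r)
+//	_ = enc.Close()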
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + s.filling = s.filling[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) 
+ p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.EncodeAll(s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst := fh.appendTo(tmp[:0]) + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. 
+ enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. 
+// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + e.encoders <- enc + }() + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst = fh.appendTo(dst) + + // If we can do everything in one block, prefer that. + if len(src) <= e.o.blockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + oldout := blk.output + // Output directly to dst + blk.output = dst + + err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = blk.output + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = append(dst, blk.output...) + blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + var err error + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. +func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. 
+	// There cannot be 0 blocks.
+	blocks := (size + e.o.blockSize) / e.o.blockSize
+
+	// Combine, add padding.
+	maxSz := frameHeader + 3*blocks + size
+	if e.o.pad > 1 {
+		maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
+	}
+	return maxSz
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
new file mode 100644
index 00000000000..20671dcb91d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -0,0 +1,339 @@
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+	"runtime"
+	"strings"
+)
+
+// EOption is an option for creating an encoder.
+type EOption func(*encoderOptions) error
+
+// encoderOptions retains the accumulated state of multiple options.
+type encoderOptions struct {
+	concurrent      int
+	level           EncoderLevel
+	single          *bool
+	pad             int
+	blockSize       int
+	windowSize      int
+	crc             bool
+	fullZero        bool
+	noEntropy       bool
+	allLitEntropy   bool
+	customWindow    bool
+	customALEntropy bool
+	customBlockSize bool
+	lowMem          bool
+	dict            *dict
+}
+
+func (o *encoderOptions) setDefault() {
+	*o = encoderOptions{
+		concurrent:    runtime.GOMAXPROCS(0),
+		crc:           true,
+		single:        nil,
+		blockSize:     maxCompressedBlockSize,
+		windowSize:    8 << 20,
+		level:         SpeedDefault,
+		allLitEntropy: false,
+		lowMem:        false,
+	}
+}
+
+// encoder returns an encoder with the selected options.
+func (o encoderOptions) encoder() encoder {
+	switch o.level {
+	case SpeedFastest:
+		if o.dict != nil {
+			return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
+		}
+		return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
+
+	case SpeedDefault:
+		if o.dict != nil {
+			return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
+		}
+		return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
+	case SpeedBetterCompression:
+		if o.dict != nil {
+			return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
+		}
+		return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
+	case SpeedBestCompression:
+		return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
+	}
+	panic("unknown compression level")
+}
+
+// WithEncoderCRC will add a CRC value to the output.
+// Output will be 4 bytes larger.
+func WithEncoderCRC(b bool) EOption {
+	return func(o *encoderOptions) error { o.crc = b; return nil }
+}
+
+// WithEncoderConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// For streams, setting a value of 1 will disable async compression.
+// By default this will be set to GOMAXPROCS.
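+//
+// For example (illustrative): NewWriter(w, WithEncoderConcurrency(1))
+// yields synchronous, single-goroutine stream compression.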
+func WithEncoderConcurrency(n int) EOption {
+	return func(o *encoderOptions) error {
+		if n <= 0 {
+			return fmt.Errorf("concurrency must be at least 1")
+		}
+		o.concurrent = n
+		return nil
+	}
+}
+
+// WithWindowSize will set the maximum allowed back-reference distance.
+// The value must be a power of two between MinWindowSize and MaxWindowSize.
+// A larger value will enable better compression but allocate more memory and,
+// for above-default values, take considerably longer.
+// The default value is determined by the compression level and is at most 8MB.
+func WithWindowSize(n int) EOption {
+	return func(o *encoderOptions) error {
+		switch {
+		case n < MinWindowSize:
+			return fmt.Errorf("window size must be at least %d", MinWindowSize)
+		case n > MaxWindowSize:
+			return fmt.Errorf("window size must be at most %d", MaxWindowSize)
+		case (n & (n - 1)) != 0:
+			return errors.New("window size must be a power of 2")
+		}
+
+		o.windowSize = n
+		o.customWindow = true
+		if o.blockSize > o.windowSize {
+			o.blockSize = o.windowSize
+			o.customBlockSize = true
+		}
+		return nil
+	}
+}
+
+// WithEncoderPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 1GB, 1<<30 bytes.
+// The padded area will be filled with data from crypto/rand.Reader.
+// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this.
+func WithEncoderPadding(n int) EOption {
+	return func(o *encoderOptions) error {
+		if n <= 0 {
+			return fmt.Errorf("padding must be at least 1")
+		}
+		// No need to waste our time.
+		if n == 1 {
+			n = 0
+		}
+		if n > 1<<30 {
+			return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)")
+		}
+		o.pad = n
+		return nil
+	}
+}
+
+// EncoderLevel predefines encoder compression levels.
+// Only use the constants made available, since the actual mapping
+// of these values is very likely to change and your compression could change
+// unpredictably when upgrading the library.
+type EncoderLevel int
+
+const (
+	speedNotSet EncoderLevel = iota
+
+	// SpeedFastest will choose the fastest reasonable compression.
+	// This is roughly equivalent to the fastest Zstandard mode.
+	SpeedFastest
+
+	// SpeedDefault is the default "pretty fast" compression option.
+	// This is roughly equivalent to the default Zstandard mode (level 3).
+	SpeedDefault
+
+	// SpeedBetterCompression will yield better compression than the default.
+	// Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage.
+	// Note that its CPU usage may increase in future versions.
+	SpeedBetterCompression
+
+	// SpeedBestCompression will choose the best available compression option.
+	// This will offer the best compression no matter the CPU cost.
+	SpeedBestCompression
+
+	// speedLast should be kept as the last actual compression option.
+	// It is not for external usage, but is used to keep track of the valid options.
+	speedLast
+)
+
+// EncoderLevelFromString will convert a string representation of an encoding level back
+// to a compression level. The comparison is not case sensitive.
+// If the string wasn't recognized, (false, SpeedDefault) will be returned.
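+//
+// For example (illustrative):
+//
+//	ok, lvl := EncoderLevelFromString("best") // ok == true, lvl == SpeedBestCompression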
+func EncoderLevelFromString(s string) (bool, EncoderLevel) {
+	for l := speedNotSet + 1; l < speedLast; l++ {
+		if strings.EqualFold(s, l.String()) {
+			return true, l
+		}
+	}
+	return false, SpeedDefault
+}
+
+// EncoderLevelFromZstd will return the encoder level that most closely matches
+// the compression ratio of a specific zstd compression level.
+// Many input values will provide the same compression level.
+func EncoderLevelFromZstd(level int) EncoderLevel {
+	switch {
+	case level < 3:
+		return SpeedFastest
+	case level >= 3 && level < 6:
+		return SpeedDefault
+	case level >= 6 && level < 10:
+		return SpeedBetterCompression
+	default:
+		return SpeedBestCompression
+	}
+}
+
+// String provides a string representation of the compression level.
+func (e EncoderLevel) String() string {
+	switch e {
+	case SpeedFastest:
+		return "fastest"
+	case SpeedDefault:
+		return "default"
+	case SpeedBetterCompression:
+		return "better"
+	case SpeedBestCompression:
+		return "best"
+	default:
+		return "invalid"
+	}
+}
+
+// WithEncoderLevel specifies a predefined compression level.
+func WithEncoderLevel(l EncoderLevel) EOption {
+	return func(o *encoderOptions) error {
+		switch {
+		case l <= speedNotSet || l >= speedLast:
+			return fmt.Errorf("unknown encoder level")
+		}
+		o.level = l
+		if !o.customWindow {
+			switch o.level {
+			case SpeedFastest:
+				o.windowSize = 4 << 20
+				if !o.customBlockSize {
+					o.blockSize = 1 << 16
+				}
+			case SpeedDefault:
+				o.windowSize = 8 << 20
+			case SpeedBetterCompression:
+				o.windowSize = 8 << 20
+			case SpeedBestCompression:
+				o.windowSize = 8 << 20
+			}
+		}
+		if !o.customALEntropy {
+			o.allLitEntropy = l > SpeedDefault
+		}
+
+		return nil
+	}
+}
+
+// WithZeroFrames will encode 0 length input as full frames.
+// This may be needed for compatibility with zstandard usage,
+// but is not needed for this package.
+func WithZeroFrames(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.fullZero = b
+		return nil
+	}
+}
+
+// WithAllLitEntropyCompression will apply entropy compression if no matches are found.
+// Disabling this will skip incompressible data faster, but in cases with no matches
+// and a skewed character distribution, compression is lost.
+// The default value depends on the compression level selected.
+func WithAllLitEntropyCompression(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.customALEntropy = true
+		o.allLitEntropy = b
+		return nil
+	}
+}
+
+// WithNoEntropyCompression will always skip entropy compression of literals.
+// This can be useful if content has matches, but is unlikely to benefit from entropy
+// compression. Usually the slight speed improvement is not worth enabling this.
+func WithNoEntropyCompression(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.noEntropy = b
+		return nil
+	}
+}
+
+// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
+// If this flag is set, data must be regenerated within a single continuous memory segment.
+// In this case, the Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
+// As a consequence, the decoder must allocate a memory segment equal to or larger than the size of the content.
+// To protect the decoder from unreasonable memory requirements,
+// a decoder is allowed to reject a compressed frame which requests a memory size beyond the decoder's authorized range.
+// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
+// This is only a recommendation; each decoder is free to support higher or lower limits, depending on local limitations.
+// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
+// This setting has no effect on streamed encodes.
+func WithSingleSegment(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.single = &b
+		return nil
+	}
+}
+
+// WithLowerEncoderMem will, in some cases, trade lower memory usage for
+// slower encoding speed.
+// This will not change the window size, which is the primary mechanism for reducing
+// memory usage. See WithWindowSize.
+func WithLowerEncoderMem(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.lowMem = b
+		return nil
+	}
+}
+
+// WithEncoderDict registers a dictionary that will be used for the encode.
+//
+// The slice dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
+// The encoder *may* choose to use no dictionary instead for certain payloads.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
+func WithEncoderDict(dict []byte) EOption {
+	return func(o *encoderOptions) error {
+		d, err := loadDict(dict)
+		if err != nil {
+			return err
+		}
+		o.dict = d
+		return nil
+	}
+}
+
+// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
+//
+// The slice content may contain arbitrary data. It will be used as an initial
+// history.
+func WithEncoderDictRaw(id uint32, content []byte) EOption {
+	return func(o *encoderOptions) error {
+		if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+			return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+		}
+		o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
+		return nil
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
new file mode 100644
index 00000000000..53e160f7e5a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -0,0 +1,413 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"io"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type frameDec struct {
+	o   decoderOptions
+	crc *xxhash.Digest
+
+	WindowSize uint64
+
+	// Frame history passed between blocks
+	history history
+
+	rawInput byteBuffer
+
+	// Byte buffer that can be reused for small input blocks.
+	bBuf byteBuf
+
+	FrameContentSize uint64
+
+	DictionaryID  uint32
+	HasCheckSum   bool
+	SingleSegment bool
+}
+
+const (
+	// MinWindowSize is the minimum Window Size, which is 1 KB.
+	MinWindowSize = 1 << 10
+
+	// MaxWindowSize is the maximum encoder window size
+	// and the default decoder maximum window size.
+	MaxWindowSize = 1 << 29
+)
+
+const (
+	frameMagic          = "\x28\xb5\x2f\xfd"
+	skippableFrameMagic = "\x2a\x4d\x18"
+)
+
+func newFrameDec(o decoderOptions) *frameDec {
+	if o.maxWindowSize > o.maxDecodedSize {
+		o.maxWindowSize = o.maxDecodedSize
+	}
+	d := frameDec{
+		o: o,
+	}
+	return &d
+}
+
+// reset will read the frame header and prepare for block decoding.
+// If nothing can be read from the input, io.EOF will be returned.
+// Any other error indicates that the stream contained data, but
+// there was a problem.
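+// (The loop below first skips any skippable frames, whose magic numbers are
+// 0x184D2A50 through 0x184D2A5F, before parsing the actual frame header.)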
+func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var signature [4]byte + for { + var err error + // Check if we can read more... + b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + case nil: + signature[0] = b[0] + default: + return err + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + case nil: + copy(signature[1:], b) + default: + return err + } + + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) + } + // Break if not skippable frame. + break + } + // Read size to skip + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err = br.skipN(int64(n)) + if err != nil { + if debugDecoder { + println("Reading discarded frame", err) + } + return err + } + } + if string(signature[:]) != frameMagic { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) + } + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Window_Descriptor", err) + } + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = 0 + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err + } + var id uint32 + switch len(b) { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debugDecoder { + println("Dict size", size, "ID:", id) + } + d.DictionaryID = id + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = fcsUnknown + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch len(b) { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. 
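+			// For example (illustrative): bytes {0x00, 0x01} decode to 256 + 256 = 512.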
+ d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("Read FCS:", d.FrameContentSize) + } + } + + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } + } + + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. + d.history.allocFrameBuffer = d.history.windowSize * 2 + } else { + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + println("decoding new block") + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + block.sendErr(err) + return err + } + return nil +} + +// checkCRC will check the checksum, assuming the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + // We can overwrite upper tmp now + buf, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { + if debugDecoder { + printf("CRC check failed: got %08x, want %08x\n", got, want) + } + return ErrCRCMismatch + } + if debugDecoder { + printf("CRC ok %08x\n", got) + } + return nil +} + +// consumeCRC skips over the checksum, assuming the frame has one. +func (d *frameDec) consumeCRC() error { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + } + return err +} + +// runDecoder will run the decoder for the remainder of the frame. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. 
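+	// The destination buffer temporarily becomes the history window; the
+	// previous history buffer is restored from saved before returning.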
+ d.history.b = dst + d.history.ignoreBuffer = len(dst) + // Store input length, so we only check new data. + crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded + break + } + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded + break + } + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } + } + dst = d.history.b + if err == nil { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 00000000000..667ca06794e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) []byte { + dst = append(dst, frameMagic...) 
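+	// Frame_Header_Descriptor layout: bits 0-1 Dictionary_ID field size,
+	// bit 2 Content_Checksum flag, bit 5 Single_Segment flag and
+	// bits 6-7 Frame_Content_Size field size.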
+	var fhd uint8
+	if f.Checksum {
+		fhd |= 1 << 2
+	}
+	if f.SingleSegment {
+		fhd |= 1 << 5
+	}
+
+	var dictIDContent []byte
+	if f.DictID > 0 {
+		var tmp [4]byte
+		if f.DictID < 256 {
+			fhd |= 1
+			tmp[0] = uint8(f.DictID)
+			dictIDContent = tmp[:1]
+		} else if f.DictID < 1<<16 {
+			fhd |= 2
+			binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID))
+			dictIDContent = tmp[:2]
+		} else {
+			fhd |= 3
+			binary.LittleEndian.PutUint32(tmp[:4], f.DictID)
+			dictIDContent = tmp[:4]
+		}
+	}
+	var fcs uint8
+	if f.ContentSize >= 256 {
+		fcs++
+	}
+	if f.ContentSize >= 65536+256 {
+		fcs++
+	}
+	if f.ContentSize >= 0xffffffff {
+		fcs++
+	}
+
+	fhd |= fcs << 6
+
+	dst = append(dst, fhd)
+	if !f.SingleSegment {
+		const winLogMin = 10
+		windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
+		dst = append(dst, uint8(windowLog))
+	}
+	if f.DictID > 0 {
+		dst = append(dst, dictIDContent...)
+	}
+	switch fcs {
+	case 0:
+		if f.SingleSegment {
+			dst = append(dst, uint8(f.ContentSize))
+		}
+		// Unless SingleSegment is set, frame sizes < 256 are not stored.
+	case 1:
+		f.ContentSize -= 256
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
+	case 2:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
+	case 3:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
+			uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
+	default:
+		panic("invalid fcs")
+	}
+	return dst
+}
+
+const skippableFrameHeader = 4 + 4
+
+// calcSkippableFrame will return the total size of a skippable frame that
+// must be added for written to be divisible by wantMultiple.
+// The value is 0 if written is already aligned;
+// otherwise it is always >= skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+	if wantMultiple <= 0 {
+		panic("wantMultiple <= 0")
+	}
+	if written < 0 {
+		panic("written < 0")
+	}
+	leftOver := written % wantMultiple
+	if leftOver == 0 {
+		return 0
+	}
+	toAdd := wantMultiple - leftOver
+	for toAdd < skippableFrameHeader {
+		toAdd += wantMultiple
+	}
+	return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of total bytes.
+// total should be >= skippableFrameHeader and < math.MaxUint32.
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+	if total == 0 {
+		return dst, nil
+	}
+	if total < skippableFrameHeader {
+		return dst, fmt.Errorf("requested skippable frame (%d) < 8", total)
+	}
+	if int64(total) > math.MaxUint32 {
+		return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total)
+	}
+	dst = append(dst, 0x50, 0x2a, 0x4d, 0x18)
+	f := uint32(total - skippableFrameHeader)
+	dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24))
+	start := len(dst)
+	dst = append(dst, make([]byte, f)...)
+	_, err := io.ReadFull(r, dst[start:])
+	return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
new file mode 100644
index 00000000000..2f8860a722b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -0,0 +1,307 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. +func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + 
charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+			// The check above should make sure we can read 32 bits
+			bitStream = b.Uint32NC() >> (bitCount & 31)
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+			bitStream = b.Uint32() >> (bitCount & 31)
+		}
+	}
+	s.symbolLen = charnum
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return s.buildDtable()
+}
+
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+	fatalErr := func(err error) {
+		if err != nil {
+			panic(err)
+		}
+	}
+	// dt [maxTablesize]decSymbol // Decompression table.
+	// symbolLen uint16 // Length of active part of the symbol table.
+	// actualTableLog uint8 // Selected tablelog.
+	// maxBits uint8 // Maximum number of additional bits
+	// // used for table creation to avoid allocations.
+	// stateTable [256]uint16
+	// norm [maxSymbolValue + 1]int16
+	// preDefined bool
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+	return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+	return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+	return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+	return uint16(d >> 16)
+}
+
+func (d decSymbol) baselineInt() int {
+	return int(d >> 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+	const mask = 0xffffffffffffff00
+	*d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+	const mask = 0xffffffffffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+	const mask = 0xffffffff0000ffff
+	*d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+	const mask = 0xffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+	if int(symb) >= len(t) {
+		return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+	}
+	lu := t[symb]
+	return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder to RLE mode.
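+// In RLE mode the table has a single entry, so no state bits are consumed
+// between symbols; only the symbol's extra bits (if any) are read.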
+func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 00000000000..d04a829b0a0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 00000000000..bcde3986953 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. 
DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 00000000000..8adfebb0297 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,73 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. 
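+// It lays out the low-probability symbols, spreads the remaining symbols
+// across the table, and assigns each cell its bit count and next state.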
+func (s *fseDecoder) buildDtable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	symbolNext := s.stateTable[:256]
+
+	// Init, lay down lowprob symbols
+	{
+		for i, v := range s.norm[:s.symbolLen] {
+			if v == -1 {
+				s.dt[highThreshold].setAddBits(uint8(i))
+				highThreshold--
+				v = 1
+			}
+			symbolNext[i] = uint16(v)
+		}
+	}
+
+	// Spread symbols
+	{
+		tableMask := tableSize - 1
+		step := tableStep(tableSize)
+		position := uint32(0)
+		for ss, v := range s.norm[:s.symbolLen] {
+			for i := 0; i < int(v); i++ {
+				s.dt[position].setAddBits(uint8(ss))
+				for {
+					// lowprob area
+					position = (position + step) & tableMask
+					if position <= highThreshold {
+						break
+					}
+				}
+			}
+		}
+		if position != 0 {
+			// position must reach all cells once, otherwise normalizedCounter is incorrect
+			return errors.New("corrupted input (position != 0)")
+		}
+	}
+
+	// Build Decoding table
+	{
+		tableSize := uint16(1 << s.actualTableLog)
+		for u, v := range s.dt[:tableSize] {
+			symbol := v.addBits()
+			nextState := symbolNext[symbol]
+			symbolNext[symbol] = nextState + 1
+			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+			s.dt[u&maxTableMask].setNBits(nBits)
+			newState := (nextState << nBits) - tableSize
+			if newState > tableSize {
+				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+			}
+			if newState == uint16(u) && nBits == 0 {
+				// Seems weird that this is possible with nbits > 0.
+				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+			}
+			s.dt[u&maxTableMask].setNewState(newState)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
new file mode 100644
index 00000000000..ab26326a8ff
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -0,0 +1,701 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"math"
+)
+
+const (
+	// For encoding we only support tablelogs up to 8 (maxEncTableLog).
+	maxEncTableLog    = 8
+	maxEncTablesize   = 1 << maxTableLog
+	maxEncTableMask   = (1 << maxTableLog) - 1
+	minEncTablelog    = 5
+	maxEncSymbolValue = maxMatchLengthSymbol
+)
+
+// fseEncoder provides temporary storage for compression.
+type fseEncoder struct {
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	ct             cTable // Compression tables.
+	maxCount       int    // count of the most probable symbol
+	zeroBits       bool   // a symbol has prob > 50%, so it may be emitted with zero bits.
+	clearCount     bool   // clear count
+	useRLE         bool   // This encoder is for RLE
+	preDefined     bool   // This encoder is predefined.
+	reUsed         bool   // Set to know when the encoder has been reused.
+	rleVal         uint8  // RLE Symbol
+	maxBits        uint8  // Maximum output bits after transform.
+
+	// TODO: Technically zstd should be fine with 64 bytes.
+	count [256]uint32
+	norm  [256]int16
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaNbBits    uint32
+	deltaFindState int16
+	outBits        uint8
+}
+
+// String prints values as a human-readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits)
+}
+
+// Histogram returns the pre-allocated histogram so it can be populated
+// directly, skipping that step during compression.
+// It can also be used to inspect the histogram after compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+func (s *fseEncoder) Histogram() *[256]uint32 {
+	return &s.count
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *fseEncoder) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < tableSize {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *fseEncoder) buildCTable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	var cumul [256]int16
+
+	s.allocCtable()
+	tableSymbol := s.ct.tableSymbol[:tableSize]
+	// symbol start positions
+	{
+		cumul[0] = 0
+		for ui, v := range s.norm[:s.symbolLen-1] {
+			u := byte(ui) // one less than reference
+			if v == -1 {
+				// Low proba symbol
+				cumul[u+1] = cumul[u] + 1
+				tableSymbol[highThreshold] = u
+				highThreshold--
+			} else {
+				cumul[u+1] = cumul[u] + v
+			}
+		}
+		// Encode last symbol separately to avoid overflowing u
+		u := int(s.symbolLen - 1)
+		v := s.norm[s.symbolLen-1]
+		if v == -1 {
+			// Low proba symbol
+			cumul[u+1] = cumul[u] + 1
+			tableSymbol[highThreshold] = byte(u)
+			highThreshold--
+		} else {
+			cumul[u+1] = cumul[u] + v
+		}
+		if uint32(cumul[s.symbolLen]) != tableSize {
+			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+		}
+		cumul[s.symbolLen] = int16(tableSize) + 1
+	}
+	// Spread symbols
+	s.zeroBits = false
+	{
+		step := tableStep(tableSize)
+		tableMask := tableSize - 1
+		var position uint32
+		// if any symbol > largeLimit, we may have 0 bits output.
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. 
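+// If a single symbol accounts for the entire input, the encoder switches
+// to RLE mode instead and no tables are built.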
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - 
sStart
+			)
+			if weight < 1 {
+				return errors.New("weight < 1")
+			}
+			s.norm[i] = int16(weight)
+			tmpTotal = end
+		}
+	}
+	return nil
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog.
+func (s *fseEncoder) optimalTableLog(length int) {
+	tableLog := uint8(maxEncTableLog)
+	minBitsSrc := highBit(uint32(length)) + 1
+	minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2
+	minBits := uint8(minBitsSymbols)
+	if minBitsSrc < minBitsSymbols {
+		minBits = uint8(minBitsSrc)
+	}
+
+	maxBitsSrc := uint8(highBit(uint32(length-1))) - 2
+	if maxBitsSrc < tableLog {
+		// Accuracy can be reduced
+		tableLog = maxBitsSrc
+	}
+	if minBits > tableLog {
+		tableLog = minBits
+	}
+	// Need a minimum to safely represent all symbol values
+	if tableLog < minEncTablelog {
+		tableLog = minEncTablelog
+	}
+	if tableLog > maxEncTableLog {
+		tableLog = maxEncTableLog
+	}
+	s.actualTableLog = tableLog
+}
+
+// validateNorm validates the normalized histogram table.
+func (s *fseEncoder) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.norm[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+	if s.useRLE {
+		return append(out, s.rleVal), nil
+	}
+	if s.preDefined || s.reUsed {
+		// Never write predefined.
+		return out, nil
+	}
+
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		// maximum header size plus 2 extra bytes for final output if bitCount == 0.
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minEncTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+		outP      = len(out)
+	)
+	if cap(out) < outP+maxHeaderSize {
+		out = append(out, make([]byte, maxHeaderSize*3)...)
+		out = out[:len(out)-maxHeaderSize*3]
+	}
+	out = out[:outP+maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return nil, errors.New("internal error: remaining < 1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	if outP+2 > len(out) {
+		return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen])
+	}
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += int((bitCount + 7) / 8)
+
+	if charnum > s.symbolLen {
+		return nil, errors.New("internal error: charnum > s.symbolLen")
+	}
+	return out[:outP], nil
+}
+
+// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
+// note 1 : assume symbolValue is valid (<= maxSymbolValue)
+// note 2 : if freq[symbolValue]==0, returns a fake cost of tableLog+1 bits
+func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
+	minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
+	threshold := (minNbBits + 1) << 16
+	if debugAsserts {
+		if !(s.actualTableLog < 16) {
+			panic("!s.actualTableLog < 16")
+		}
+		// ensure enough room for renormalization double shift
+		if !(uint8(accuracyLog) < 31-s.actualTableLog) {
+			panic("!uint8(accuracyLog) < 31-s.actualTableLog")
+		}
+	}
+	tableSize := uint32(1) << s.actualTableLog
+	deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize)
+	// linear interpolation (very approximate)
+	normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
+	bitMultiplier := uint32(1) << accuracyLog
+	if debugAsserts {
+		if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
+			panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
+		}
+		if normalizedDeltaFromThreshold > bitMultiplier {
+			panic("normalizedDeltaFromThreshold > bitMultiplier")
+		}
+	}
+	return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold
+}
+
+// Returns the cost in bits of encoding the distribution in count using ctable.
+// Histogram should only be up to the last non-zero symbol.
+// Returns math.MaxUint32 if ctable cannot represent all the symbols in count.
+func (s *fseEncoder) approxSize(hist []uint32) uint32 {
+	if int(s.symbolLen) < len(hist) {
+		// More symbols than we have.
+		return math.MaxUint32
+	}
+	if s.useRLE {
+		// We will never reuse RLE encoders.
+		return math.MaxUint32
+	}
+	const kAccuracyLog = 8
+	badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog
+	var cost uint32
+	for i, v := range hist {
+		if v == 0 {
+			continue
+		}
+		if s.norm[i] == 0 {
+			return math.MaxUint32
+		}
+		bitCost := s.bitCost(uint8(i), kAccuracyLog)
+		if bitCost > badCost {
+			return math.MaxUint32
+		}
+		cost += v * bitCost
+	}
+	return cost >> kAccuracyLog
+}
+
+// maxHeaderSize returns the maximum header size in bits.
+// This is not the exact size, but we want a penalty for new tables anyway.
+func (s *fseEncoder) maxHeaderSize() uint32 {
+	if s.preDefined {
+		return 0
+	}
+	if s.useRLE {
+		return 8
+	}
+	return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8
+}
+
+// cState contains the compression state of a stream.
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 00000000000..474cb77d2b9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) {
+	if len(bits) != len(dst) {
+		panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits)))
+	}
+	for i, bit := range bits {
+		if base > math.MaxInt32 {
+			panic("invalid decoding table, base overflows int32")
+		}
+
+		dst[i] = baseOffset{
+			baseLine: base,
+			addBits:  bit,
+		}
+		base += 1 << bit
+	}
+}
+
+var predef sync.Once
+
+func initPredefined() {
+	predef.Do(func() {
+		// Literals length codes
+		tmp := make([]baseOffset, 36)
+		for i := range tmp[:16] {
+			tmp[i] = baseOffset{
+				baseLine: uint32(i),
+				addBits:  0,
+			}
+		}
+		fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+		symbolTableX[tableLiteralLengths] = tmp
+
+		// Match length codes
+		tmp = make([]baseOffset, 53)
+		for i := range tmp[:32] {
+			tmp[i] = baseOffset{
+				// The transformation adds 3 to the length.
+				baseLine: uint32(i) + 3,
+				addBits:  0,
+			}
+		}
+		fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+		symbolTableX[tableMatchLengths] = tmp
+
+		// Offset codes
+		tmp = make([]baseOffset, maxOffsetBits+1)
+		tmp[1] = baseOffset{
+			baseLine: 1,
+			addBits:  1,
+		}
+		fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)
+		symbolTableX[tableOffsets] = tmp
+
+		// Fill predefined tables and transform them.
+		// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+		for i := range fsePredef[:] {
+			f := &fsePredef[i]
+			switch tableIndex(i) {
+			case tableLiteralLengths:
+				// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243
+				f.actualTableLog = 6
+				copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+					2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+					-1, -1, -1, -1})
+				f.symbolLen = 36
+			case tableOffsets:
+				// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281
+				f.actualTableLog = 5
+				copy(f.norm[:], []int16{
+					1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1})
+				f.symbolLen = 29
+			case tableMatchLengths:
+				// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304
+				f.actualTableLog = 6
+				copy(f.norm[:], []int16{
+					1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
+					-1, -1, -1, -1, -1})
+				f.symbolLen = 53
+			}
+			if err := f.buildDtable(); err != nil {
+				panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+			}
+			if err := f.transform(symbolTableX[i]); err != nil {
+				panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+			}
+			f.preDefined = true
+
+			// Create encoder as well
+			enc := &fsePredefEnc[i]
+			copy(enc.norm[:], f.norm[:])
+			enc.symbolLen = f.symbolLen
+			enc.actualTableLog = f.actualTableLog
+			if err := enc.buildCTable(); err != nil {
+				panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err))
+			}
+			enc.setBits(bitTables[i])
+			enc.preDefined = true
+		}
+	})
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go
new file mode 100644
index 00000000000..5d73c21ebdd
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/hash.go
@@ -0,0 +1,35 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+const (
+	prime3bytes = 506832829
+	prime4bytes = 2654435761
+	prime5bytes = 889523592379
+	prime6bytes = 227718039650203
+	prime7bytes = 58295818150454627
+	prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+// hashLen returns a hash of the lowest mls bytes of u, with length output bits.
+// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be a constant for inlining.
+func hashLen(u uint64, length, mls uint8) uint32 {
+	switch mls {
+	case 3:
+		return (uint32(u<<8) * prime3bytes) >> (32 - length)
+	case 5:
+		return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
+	case 6:
+		return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
+	case 7:
+		return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
+	case 8:
+		return uint32((u * prime8bytes) >> (64 - length))
+	default:
+		return (uint32(u) * prime4bytes) >> (32 - length)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
new file mode 100644
index 00000000000..09164856d22
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -0,0 +1,116 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"github.com/klauspost/compress/huff0"
+)
+
+// history contains the information transferred between blocks.
+type history struct {
+	// Literal decompression
+	huffTree *huff0.Scratch
+
+	// Sequence decompression
+	decoders      sequenceDecs
+	recentOffsets [3]int
+
+	// History buffer...
+	b []byte
+
+	// ignoreBuffer is meant to ignore a number of bytes
+	// when checking for matches in history
+	ignoreBuffer int
+
+	windowSize       int
+	allocFrameBuffer int // pre-allocation size for the frame history buffer
+	error            bool
+	dict             *dict
+}
+
+// reset will reset the history to the initial state of a frame.
+// The history must already have been initialized to the desired size.
+func (h *history) reset() {
+	h.b = h.b[:0]
+	h.ignoreBuffer = 0
+	h.error = false
+	h.recentOffsets = [3]int{1, 4, 8}
+	h.decoders.freeDecoders()
+	h.decoders = sequenceDecs{br: h.decoders.br}
+	h.freeHuffDecoder()
+	h.huffTree = nil
+	h.dict = nil
+	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
+}
+
+func (h *history) freeHuffDecoder() {
+	if h.huffTree != nil {
+		if h.dict == nil || h.dict.litEnc != h.huffTree {
+			huffDecoderPool.Put(h.huffTree)
+			h.huffTree = nil
+		}
+	}
+}
+
+func (h *history) setDict(dict *dict) {
+	if dict == nil {
+		return
+	}
+	h.dict = dict
+	h.decoders.litLengths = dict.llDec
+	h.decoders.offsets = dict.ofDec
+	h.decoders.matchLengths = dict.mlDec
+	h.decoders.dict = dict.content
+	h.recentOffsets = dict.offsets
+	h.huffTree = dict.litEnc
+}
+
+// append bytes to the history.
+// It assumes the buffer has been allocated with enough extra space;
+// in that case no reallocation will take place.
+func (h *history) append(b []byte) {
+	if len(b) >= h.windowSize {
+		// Discard all history by simply overwriting
+		h.b = h.b[:h.windowSize]
+		copy(h.b, b[len(b)-h.windowSize:])
+		return
+	}
+
+	// If there is space, append it.
+	if len(b) < cap(h.b)-len(h.b) {
+		h.b = append(h.b, b...)
+		return
+	}
+
+	// Move data down so we only have window size left.
+ // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 00000000000..24b53065f40 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 00000000000..777290d44ce --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,71 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. 
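+
+A minimal usage sketch of the two APIs (both calls print the same hash):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/cespare/xxhash/v2"
+)
+
+func main() {
+	// One-shot hashing.
+	fmt.Printf("%016x\n", xxhash.Sum64String("hello"))
+
+	// Streaming hashing with a Digest.
+	d := xxhash.New()
+	d.WriteString("hel")
+	d.WriteString("lo")
+	fmt.Printf("%016x\n", d.Sum64())
+}
+```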
+ +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 00000000000..fc40c820016 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,230 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = primes[0] + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -primes[0] + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. 
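+		// (Top up mem to a full 32 bytes from b, consume it as four 8-byte
+		// rounds, then continue with the remainder of b.)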
+ c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 00000000000..ddb63aa91b1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,210 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
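+	// v1 = prime1 + prime2, v2 = prime2, v3 = 0, v4 = -prime1, matching
+	// the initial state set up by Digest.Reset in the Go code.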
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 00000000000..ae7d4d3295a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,184 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
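+// Each pass loads two pairs of lanes with LDP, applies one round per lane,
+// and decrements nblocks = n>>5, so p advances 32 bytes per iteration.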
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(s *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD s+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go new file mode 100644 index 00000000000..d4221edf4fd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -0,0 +1,16 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 00000000000..0be16cefc7f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 00000000000..6f3b0cb1026 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 00000000000..f41932b7a4f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+
+package zstd
+
+// matchLen returns how many bytes match in a and b.
+//
+// It assumes that:
+//
+//	len(a) <= len(b) and len(a) > 0
+//
+//go:noescape
+func matchLen(a []byte, b []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
new file mode 100644
index 00000000000..0782b86e3d1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
@@ -0,0 +1,66 @@
+// Copied from S2 implementation.
+
+//go:build !appengine && !noasm && gc
+
+#include "textflag.h"
+
+// func matchLen(a []byte, b []byte) int
+TEXT ·matchLen(SB), NOSPLIT, $0-56
+	MOVQ a_base+0(FP), AX
+	MOVQ b_base+24(FP), CX
+	MOVQ a_len+8(FP), DX
+
+	// matchLen
+	XORL SI, SI
+	CMPL DX, $0x08
+	JB   matchlen_match4_standalone
+
+matchlen_loopback_standalone:
+	MOVQ (AX)(SI*1), BX
+	XORQ (CX)(SI*1), BX
+	JZ   matchlen_loop_standalone
+
+#ifdef GOAMD64_v3
+	TZCNTQ BX, BX
+#else
+	BSFQ BX, BX
+#endif
+	SHRL $0x03, BX
+	LEAL (SI)(BX*1), SI
+	JMP  gen_match_len_end
+
+matchlen_loop_standalone:
+	LEAL -8(DX), DX
+	LEAL 8(SI), SI
+	CMPL DX, $0x08
+	JAE  matchlen_loopback_standalone
+
+matchlen_match4_standalone:
+	CMPL DX, $0x04
+	JB   matchlen_match2_standalone
+	MOVL (AX)(SI*1), BX
+	CMPL (CX)(SI*1), BX
+	JNE  matchlen_match2_standalone
+	LEAL -4(DX), DX
+	LEAL 4(SI), SI
+
+matchlen_match2_standalone:
+	CMPL DX, $0x02
+	JB   matchlen_match1_standalone
+	MOVW (AX)(SI*1), BX
+	CMPW (CX)(SI*1), BX
+	JNE  matchlen_match1_standalone
+	LEAL -2(DX), DX
+	LEAL 2(SI), SI
+
+matchlen_match1_standalone:
+	CMPL DX, $0x01
+	JB   gen_match_len_end
+	MOVB (AX)(SI*1), BL
+	CMPB (CX)(SI*1), BL
+	JNE  gen_match_len_end
+	INCL SI
+
+gen_match_len_end:
+	MOVQ SI, ret+48(FP)
+	RET
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
new file mode 100644
index 00000000000..57b9c31c027
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
@@ -0,0 +1,33 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"math/bits"
+)
+
+// matchLen returns the maximum common prefix length of a and b.
+// a must be the shorter of the two.
+func matchLen(a, b []byte) (n int) {
+	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
+		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
+		if diff != 0 {
+			return n + bits.TrailingZeros64(diff)>>3
+		}
+		n += 8
+	}
+
+	for i := range a {
+		if a[i] != b[i] {
+			break
+		}
+		n++
+	}
+	return n
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
new file mode 100644
index 00000000000..d7fe6d82d93
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -0,0 +1,503 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+type seq struct {
+	litLen   uint32
+	matchLen uint32
+	offset   uint32
+
+	// Codes are stored here for the encoder
+	// so they only have to be looked up once.
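+	// (offset uses the wire encoding: values 1-3 denote repeat offsets,
+	// and real offsets carry a +3 bias; see String below.)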
+	llCode, mlCode, ofCode uint8
+}
+
+type seqVals struct {
+	ll, ml, mo int
+}
+
+func (s seq) String() string {
+	if s.offset <= 3 {
+		if s.offset == 0 {
+			return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)")
+		}
+		return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)")
+	}
+	return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)")
+}
+
+type seqCompMode uint8
+
+const (
+	compModePredefined seqCompMode = iota
+	compModeRLE
+	compModeFSE
+	compModeRepeat
+)
+
+type sequenceDec struct {
+	// decoder keeps track of the current state and updates it from the bitstream.
+	fse    *fseDecoder
+	state  fseState
+	repeat bool
+}
+
+// init the state of the decoder with input from the stream.
+func (s *sequenceDec) init(br *bitReader) error {
+	if s.fse == nil {
+		return errors.New("sequence decoder not defined")
+	}
+	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
+	return nil
+}
+
+// sequenceDecs contains all three decoders and their state.
+type sequenceDecs struct {
+	litLengths   sequenceDec
+	offsets      sequenceDec
+	matchLengths sequenceDec
+	prevOffset   [3]int
+	dict         []byte
+	literals     []byte
+	out          []byte
+	nSeqs        int
+	br           *bitReader
+	seqSize      int
+	windowSize   int
+	maxBits      uint8
+	maxSyncLen   uint64
+}
+
+// execute will execute the decoded sequences with the provided history.
+// The sequences must have been decoded before being sent.
+func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
+	if len(s.dict) == 0 {
+		return s.executeSimple(seqs, hist)
+	}
+
+	// Ensure we have enough output size...
+	if len(s.out)+s.seqSize > cap(s.out) {
+		addBytes := s.seqSize + len(s.out)
+		s.out = append(s.out, make([]byte, addBytes)...)
+		s.out = s.out[:len(s.out)-addBytes]
+	}
+
+	if debugDecoder {
+		printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
+	}
+
+	var t = len(s.out)
+	out := s.out[:t+s.seqSize]
+
+	for _, seq := range seqs {
+		// Add literals
+		copy(out[t:], s.literals[:seq.ll])
+		t += seq.ll
+		s.literals = s.literals[seq.ll:]
+
+		// Copy from dictionary...
+		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+			if len(s.dict) == 0 {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+			}
+
+			// we may be in dictionary.
+			dictO := len(s.dict) - (seq.mo - (t + len(hist)))
+			if dictO < 0 || dictO >= len(s.dict) {
+				return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
+			}
+			end := dictO + seq.ml
+			if end > len(s.dict) {
+				n := len(s.dict) - dictO
+				copy(out[t:], s.dict[dictO:])
+				t += n
+				seq.ml -= n
+			} else {
+				copy(out[t:], s.dict[dictO:end])
+				t += end - dictO
+				continue
+			}
+		}
+
+		// Copy from history.
+		if v := seq.mo - t; v > 0 {
+			// v is the start position in history from the end.
+			start := len(hist) - v
+			if seq.ml > v {
+				// Some goes into the current block.
+				// Copy remainder of history
+				copy(out[t:], hist[start:])
+				t += v
+				seq.ml -= v
+			} else {
+				copy(out[t:], hist[start:start+seq.ml])
+				t += seq.ml
+				continue
+			}
+		}
+		// We must be in current buffer now
+		if seq.ml > 0 {
+			start := t - seq.mo
+			if seq.ml <= t-start {
+				// No overlap
+				copy(out[t:], out[start:start+seq.ml])
+				t += seq.ml
+				continue
+			} else {
+				// Overlapping copy
+				// Extend destination slice and copy one byte at a time.
+				src := out[start : start+seq.ml]
+				dst := out[t:]
+				dst = dst[:len(src)]
+				t += len(src)
+				// Destination is the space we just added.
+				for i := range src {
+					dst[i] = src[i]
+				}
+			}
+		}
+	}
+
+	// Add final literals
+	copy(out[t:], s.literals)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+		}
+	}
+	s.out = out
+
+	return nil
+}
+
+// decode sequences from the stream with the provided history.
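+// decodeSync first tries the assembly fast path (decodeSyncSimple) and only
+// runs the generic loop below when that path reports the input as
+// unsupported, e.g. when a dictionary is in use. A rough sketch of the
+// dispatch, as implemented at the top of the function:
+//
+//	if supported, err := s.decodeSyncSimple(hist); supported {
+//		return err // the asm path decoded and executed in one pass
+//	}
+//	// ...fall through to the generic decode+execute loop below.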
+func (s *sequenceDecs) decodeSync(hist []byte) error {
+	supported, err := s.decodeSyncSimple(hist)
+	if supported {
+		return err
+	}
+
+	br := s.br
+	seqs := s.nSeqs
+	startSize := len(s.out)
+	// Grab the full-size tables to avoid bounds checks.
+	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+	out := s.out
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	if debugDecoder {
+		println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
+	}
+	for i := seqs - 1; i >= 0; i-- {
+		if br.overread() {
+			printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain())
+			return io.ErrUnexpectedEOF
+		}
+		var ll, mo, ml int
+		if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
+			// inlined function:
+			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+			// Final will not read from stream.
+			var llB, mlB, moB uint8
+			ll, llB = llState.final()
+			ml, mlB = mlState.final()
+			mo, moB = ofState.final()
+
+			// extra bits are stored in reverse order.
+			br.fillFast()
+			mo += br.getBits(moB)
+			if s.maxBits > 32 {
+				br.fillFast()
+			}
+			ml += br.getBits(mlB)
+			ll += br.getBits(llB)
+
+			if moB > 1 {
+				s.prevOffset[2] = s.prevOffset[1]
+				s.prevOffset[1] = s.prevOffset[0]
+				s.prevOffset[0] = mo
+			} else {
+				// mo = s.adjustOffset(mo, ll, moB)
+				// Inlined for rather big speedup
+				if ll == 0 {
+					// There is an exception though, when current sequence's literals_length = 0.
+					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+					mo++
+				}
+
+				if mo == 0 {
+					mo = s.prevOffset[0]
+				} else {
+					var temp int
+					if mo == 3 {
+						temp = s.prevOffset[0] - 1
+					} else {
+						temp = s.prevOffset[mo]
+					}
+
+					if temp == 0 {
+						// 0 is not valid; input is corrupted; force offset to 1
+						println("WARNING: temp was 0")
+						temp = 1
+					}
+
+					if mo != 1 {
+						s.prevOffset[2] = s.prevOffset[1]
+					}
+					s.prevOffset[1] = s.prevOffset[0]
+					s.prevOffset[0] = temp
+					mo = temp
+				}
+			}
+			br.fillFast()
+		} else {
+			ll, mo, ml = s.next(br, llState, mlState, ofState)
+			br.fill()
+		}
+
+		if debugSequences {
+			println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+		}
+
+		if ll > len(s.literals) {
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
+		}
+		size := ll + ml + len(out)
+		if size-startSize > maxBlockSize {
+			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+		}
+		if size > cap(out) {
+			// Not enough space, which can happen under high-volume block streaming conditions,
+			// or when the destination slice is too small for sync operations.
+			// Over-allocating here can create a large amount of GC pressure, so we try to keep
+			// it as contained as possible.
+			used := len(out) - startSize
+			addBytes := 256 + ll + ml + used>>2
+			// Clamp to max block size.
+			if used+addBytes > maxBlockSize {
+				addBytes = maxBlockSize - used
+			}
+			out = append(out, make([]byte, addBytes)...)
+			out = out[:len(out)-addBytes]
+		}
+		if ml > maxMatchLen {
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+		}
+
+		// Add literals
+		out = append(out, s.literals[:ll]...)
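+		// (Literal bytes always precede the match; the match bytes are
+		// then sourced from the dictionary, the history window, or the
+		// output built so far, in that order of the checks below.)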
+		s.literals = s.literals[ll:]
+
+		if mo == 0 && ml > 0 {
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+		}
+
+		if mo > len(out)+len(hist) || mo > s.windowSize {
+			if len(s.dict) == 0 {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
+			}
+
+			// we may be in dictionary.
+			dictO := len(s.dict) - (mo - (len(out) + len(hist)))
+			if dictO < 0 || dictO >= len(s.dict) {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
+			}
+			end := dictO + ml
+			if end > len(s.dict) {
+				out = append(out, s.dict[dictO:]...)
+				ml -= len(s.dict) - dictO
+			} else {
+				out = append(out, s.dict[dictO:end]...)
+				mo = 0
+				ml = 0
+			}
+		}
+
+		// Copy from history.
+		// TODO: Blocks without history could be made to ignore this completely.
+		if v := mo - len(out); v > 0 {
+			// v is the start position in history from the end.
+			start := len(hist) - v
+			if ml > v {
+				// Some goes into the current block.
+				// Copy remainder of history
+				out = append(out, hist[start:]...)
+				ml -= v
+			} else {
+				out = append(out, hist[start:start+ml]...)
+				ml = 0
+			}
+		}
+		// We must be in current buffer now
+		if ml > 0 {
+			start := len(out) - mo
+			if ml <= len(out)-start {
+				// No overlap
+				out = append(out, out[start:start+ml]...)
+			} else {
+				// Overlapping copy
+				// Extend destination slice and copy one byte at a time.
+				out = out[:len(out)+ml]
+				src := out[start : start+ml]
+				// Destination is the space we just added.
+				dst := out[len(out)-ml:]
+				dst = dst[:len(src)]
+				for i := range src {
+					dst[i] = src[i]
+				}
+			}
+		}
+		if i == 0 {
+			// This is the last sequence, so we shouldn't update state.
+			break
+		}
+
+		// Manually inlined, ~ 5-20% faster
+		// Update all 3 states at once. Approx 20% faster.
+		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+		if nBits == 0 {
+			llState = llTable[llState.newState()&maxTableMask]
+			mlState = mlTable[mlState.newState()&maxTableMask]
+			ofState = ofTable[ofState.newState()&maxTableMask]
+		} else {
+			bits := br.get32BitsFast(nBits)
+
+			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+			lowBits = uint16(bits >> (ofState.nbBits() & 31))
+			lowBits &= bitMask[mlState.nbBits()&15]
+			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
+		}
+	}
+
+	if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+	}
+
+	// Add final literals
+	s.out = append(out, s.literals...)
+	return br.close()
+}
+
+var bitMask [16]uint16
+
+func init() {
+	for i := range bitMask[:] {
+		bitMask[i] = uint16((1 << uint(i)) - 1)
+	}
+}
+
+func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
+	// Final will not read from stream.
+	ll, llB := llState.final()
+	ml, mlB := mlState.final()
+	mo, moB := ofState.final()
+
+	// extra bits are stored in reverse order.
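+	// (The encoder emits the ll, ml, and offset extra bits in that order;
+	// because the zstd bitstream is read backwards, the offset bits are
+	// consumed first here.)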
+	br.fill()
+	mo += br.getBits(moB)
+	if s.maxBits > 32 {
+		br.fill()
+	}
+	// matchlength+literal length, max 32 bits
+	ml += br.getBits(mlB)
+	ll += br.getBits(llB)
+	mo = s.adjustOffset(mo, ll, moB)
+	return
+}
+
+func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
+	if offsetB > 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+		s.prevOffset[1] = s.prevOffset[0]
+		s.prevOffset[0] = offset
+		return offset
+	}
+
+	if litLen == 0 {
+		// There is an exception though, when current sequence's literals_length = 0.
+		// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+		// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+		offset++
+	}
+
+	if offset == 0 {
+		return s.prevOffset[0]
+	}
+	var temp int
+	if offset == 3 {
+		temp = s.prevOffset[0] - 1
+	} else {
+		temp = s.prevOffset[offset]
+	}
+
+	if temp == 0 {
+		// 0 is not valid; input is corrupted; force offset to 1
+		println("temp was 0")
+		temp = 1
+	}
+
+	if offset != 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+	}
+	s.prevOffset[1] = s.prevOffset[0]
+	s.prevOffset[0] = temp
+	return temp
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
new file mode 100644
index 00000000000..8adabd82877
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -0,0 +1,394 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package zstd
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/klauspost/compress/internal/cpuinfo"
+)
+
+type decodeSyncAsmContext struct {
+	llTable     []decSymbol
+	mlTable     []decSymbol
+	ofTable     []decSymbol
+	llState     uint64
+	mlState     uint64
+	ofState     uint64
+	iteration   int
+	litRemain   int
+	out         []byte
+	outPosition int
+	literals    []byte
+	litPosition int
+	history     []byte
+	windowSize  int
+	ll          int // set on error (not for all errors, please refer to _generate/gen.go)
+	ml          int // set on error (not for all errors, please refer to _generate/gen.go)
+	mo          int // set on error (not for all errors, please refer to _generate/gen.go)
+}
+
+// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than the output buffer.
+//
+//go:noescape
+func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than the output buffer.
+//
+//go:noescape
+func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// decode sequences from the stream with the provided history but without a dictionary.
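+// It reports supported == false when it cannot handle the input (a
+// dictionary is set, or too little output capacity is pre-allocated), in
+// which case the caller falls back to the generic Go implementation.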
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+	if len(s.dict) > 0 {
+		return false, nil
+	}
+	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
+		return false, nil
+	}
+
+	// FIXME: Using unsafe memory copies leads to rare, random crashes
+	// with fuzz testing. It is therefore disabled for now.
+	const useSafe = true
+	/*
+		useSafe := false
+		if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
+			useSafe = true
+		}
+		if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
+			useSafe = true
+		}
+		if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+			useSafe = true
+		}
+	*/
+
+	br := s.br
+
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	ctx := decodeSyncAsmContext{
+		llTable:     s.litLengths.fse.dt[:maxTablesize],
+		mlTable:     s.matchLengths.fse.dt[:maxTablesize],
+		ofTable:     s.offsets.fse.dt[:maxTablesize],
+		llState:     uint64(s.litLengths.state.state),
+		mlState:     uint64(s.matchLengths.state.state),
+		ofState:     uint64(s.offsets.state.state),
+		iteration:   s.nSeqs - 1,
+		litRemain:   len(s.literals),
+		out:         s.out,
+		outPosition: len(s.out),
+		literals:    s.literals,
+		windowSize:  s.windowSize,
+		history:     hist,
+	}
+
+	s.seqSize = 0
+	startSize := len(s.out)
+
+	var errCode int
+	if cpuinfo.HasBMI2() {
+		if useSafe {
+			errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
+		}
+	} else {
+		if useSafe {
+			errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
+		}
+	}
+	switch errCode {
+	case noError:
+		break
+
+	case errorMatchLenOfsMismatch:
+		return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
+
+	case errorMatchLenTooBig:
+		return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
+
+	case errorMatchOffTooBig:
+		return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
+			ctx.mo, ctx.outPosition+len(hist)-startSize)
+
+	case errorNotEnoughLiterals:
+		return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
+			ctx.ll, ctx.litRemain+ctx.ll)
+
+	case errorOverread:
+		return true, io.ErrUnexpectedEOF
+
+	case errorNotEnoughSpace:
+		size := ctx.outPosition + ctx.ll + ctx.ml
+		if debugDecoder {
+			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
+		}
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+
+	default:
+		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
+	}
+
+	s.seqSize += ctx.litRemain
+	if s.seqSize > maxBlockSize {
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+		return true, err
+	}
+
+	s.literals = s.literals[ctx.litPosition:]
+	t := ctx.outPosition
+	s.out = s.out[:t]
+
+	// Add final literals
+	s.out = append(s.out, s.literals...)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(s.out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+		}
+	}
+
+	return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+	llTable   []decSymbol
+	mlTable   []decSymbol
+	ofTable   []decSymbol
+	llState   uint64
+	mlState   uint64
+	ofState   uint64
+	iteration int
+	seqs      []seqVals
+	litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// error reported when bits are overread.
+const errorOverread = 6
+
+// sequenceDecs_decode_amd64 implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_56_amd64 implements the main loop of sequenceDecs in x86 asm,
+// for the case where the bits read per sequence fit in 56 bits.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_bmi2 implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_56_bmi2 implements the main loop of sequenceDecs in x86 asm with BMI2 extensions,
+// for the case where the bits read per sequence fit in 56 bits.
+//
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// decode sequences from the stream without the provided history.
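+// Decoded values are only validated and buffered in seqs here; the bytes
+// are materialized afterwards by executeSimple. A hedged sketch of the
+// intended calling sequence (caller code is illustrative, not from this
+// file):
+//
+//	seqs := make([]seqVals, s.nSeqs)
+//	if err := s.decode(seqs); err != nil {
+//		return err
+//	}
+//	return s.executeSimple(seqs, hist) // copy literals and matches to s.out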
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+	br := s.br
+
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	ctx := decodeAsmContext{
+		llTable:   s.litLengths.fse.dt[:maxTablesize],
+		mlTable:   s.matchLengths.fse.dt[:maxTablesize],
+		ofTable:   s.offsets.fse.dt[:maxTablesize],
+		llState:   uint64(s.litLengths.state.state),
+		mlState:   uint64(s.matchLengths.state.state),
+		ofState:   uint64(s.offsets.state.state),
+		seqs:      seqs,
+		iteration: len(seqs) - 1,
+		litRemain: len(s.literals),
+	}
+
+	if debugDecoder {
+		println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
+	}
+
+	s.seqSize = 0
+	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
+	var errCode int
+	if cpuinfo.HasBMI2() {
+		if lte56bits {
+			errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
+		}
+	} else {
+		if lte56bits {
+			errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decode_amd64(s, br, &ctx)
+		}
+	}
+	if errCode != 0 {
+		i := len(seqs) - ctx.iteration - 1
+		switch errCode {
+		case errorMatchLenOfsMismatch:
+			ml := ctx.seqs[i].ml
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+
+		case errorMatchLenTooBig:
+			ml := ctx.seqs[i].ml
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+
+		case errorNotEnoughLiterals:
+			ll := ctx.seqs[i].ll
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+		case errorOverread:
+			return io.ErrUnexpectedEOF
+		}
+
+		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
+	}
+
+	if ctx.litRemain < 0 {
+		return fmt.Errorf("literal count is too big: total available %d, total requested %d",
+			len(s.literals), len(s.literals)-ctx.litRemain)
+	}
+
+	s.seqSize += ctx.litRemain
+	if s.seqSize > maxBlockSize {
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+	}
+	if debugDecoder {
+		println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+	}
+	return err
+}
+
+// --------------------------------------------------------------------------------
+
+type executeAsmContext struct {
+	seqs        []seqVals
+	seqIndex    int
+	out         []byte
+	history     []byte
+	literals    []byte
+	outPosition int
+	litPosition int
+	windowSize  int
+}
+
+// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
+//
+// Returns false if a match offset is too big.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+
+// Same as above, but with safe memcopies.
+//
+//go:noescape
+func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+
+// executeSimple handles cases when a dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+	// Ensure we have enough output size...
+	if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
+		addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
+		s.out = append(s.out, make([]byte, addBytes)...)
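+		// (The append plus the re-slice below grows cap(s.out) without
+		// changing len(s.out); the compressedBlockOverAlloc headroom lets
+		// the assembly copy in wide chunks without running past the end.)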
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 00000000000..5b06174b898 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4151 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
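+	// (The zstd bitstream is consumed from the end of the input, so the
+	// refill below steps the pointer backwards and reloads 8 bytes at a
+	// time, falling back to byte-by-byte when fewer than 8 bytes remain.)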
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + 
MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 
152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + 
+sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + 
+sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
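+ // Note (editorial): the _56 variants refill the bit buffer only once per
+ // sequence; the offset, match length and literal length codes together
+ // fit in the 56 bits one refill guarantees (hence the name), so the
+ // second fill step of the generic decoder is omitted below.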
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
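+ // Note (editorial): a repeat offset that resolves to zero would be
+ // invalid, so the check below substitutes 1.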
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + 
+copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + MOVB -1(SI)(R11*1), R15 + MOVB 
R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (R11), R12
+ MOVB -1(R11)(R13*1), R14
+ MOVB R12, (BX)
+ MOVB R14, -1(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (R11), R12
+ MOVB 2(R11), R14
+ MOVW R12, (BX)
+ MOVB R14, 2(BX)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (R11), R12
+ MOVL -4(R11)(R13*1), R14
+ MOVL R12, (BX)
+ MOVL R14, -4(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (R11), R12
+ MOVQ -8(R11)(R13*1), R14
+ MOVQ R12, (BX)
+ MOVQ R14, -8(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+
+copy_2_end:
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, DI
+
+copy_slow_3:
+ MOVB (R11), R12
+ MOVB R12, (BX)
+ INCQ R11
+ INCQ BX
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+
+loop_finished:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ SUBQ 80(AX), SI
+ MOVQ SI, 112(AX)
+ RET
+
+error_match_off_too_big:
+ // Return value
+ MOVB $0x00, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ SUBQ 80(AX), SI
+ MOVQ SI, 112(AX)
+ RET
+
+empty_seqs:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+ RET
+
+// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
+ MOVQ br+8(FP), CX
+ MOVQ 24(CX), DX
+ MOVBQZX 32(CX), BX
+ MOVQ (CX), AX
+ MOVQ 8(CX), SI
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ XORQ CX, CX
+ MOVQ CX, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ CX, 24(SP)
+ MOVQ 112(AX), R10
+ MOVQ 128(AX), CX
+ MOVQ CX, 32(SP)
+ MOVQ 144(AX), R11
+ MOVQ 136(AX), R12
+ MOVQ 200(AX), CX
+ MOVQ CX, 56(SP)
+ MOVQ 176(AX), CX
+ MOVQ CX, 48(SP)
+ MOVQ 184(AX), AX
+ MOVQ AX, 40(SP)
+ MOVQ 40(SP), AX
+ ADDQ AX, 48(SP)
+
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R10, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R12, R10
+
+sequenceDecs_decodeSync_amd64_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
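+ // Note (editorial): decodeSync decodes and executes in a single pass:
+ // each sequence's literals and match are written straight into s.out
+ // inside this loop, with an explicit capacity check instead of a
+ // separate execute step.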
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + 
MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + 
ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 
32(BX), DX
+ MOVQ (BX), CX
+ MOVQ 8(BX), BX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ XORQ R9, R9
+ MOVQ R9, 8(SP)
+ MOVQ R9, 16(SP)
+ MOVQ R9, 24(SP)
+ MOVQ 112(CX), R9
+ MOVQ 128(CX), R10
+ MOVQ R10, 32(SP)
+ MOVQ 144(CX), R10
+ MOVQ 136(CX), R11
+ MOVQ 200(CX), R12
+ MOVQ R12, 56(SP)
+ MOVQ 176(CX), R12
+ MOVQ R12, 48(SP)
+ MOVQ 184(CX), CX
+ MOVQ CX, 40(SP)
+ MOVQ 40(SP), CX
+ ADDQ CX, 48(SP)
+
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R9, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R11, R9
+
+sequenceDecs_decodeSync_bmi2_main_loop:
+ MOVQ (SP), R12
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_bmi2_fill_end
+
+sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_bmi2_fill_check_overread
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+
+sequenceDecs_decodeSync_bmi2_fill_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 8(SP)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 16(SP)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_bmi2_fill_2_end
+
+sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_bmi2_fill_2_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R12, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R12
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_bmi2_skip_update
+ LEAQ (SI)(DI*1), R13
+ ADDQ R8, R13
+ MOVBQZX R13, R13
+ LEAQ (DX)(R13*1), CX
+ MOVQ AX, R14
+ MOVQ CX, DX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+
+ // Update Offset State
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ SHRL $0x10, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ SHRL $0x10, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal 
Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + 
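+ // Note (editorial): history copy complete; copy_4_end advances the
+ // output position by the match length and re-enters the main loop.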
+copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 
24(SP)
+ MOVQ 112(AX), R10
+ MOVQ 128(AX), CX
+ MOVQ CX, 32(SP)
+ MOVQ 144(AX), R11
+ MOVQ 136(AX), R12
+ MOVQ 200(AX), CX
+ MOVQ CX, 56(SP)
+ MOVQ 176(AX), CX
+ MOVQ CX, 48(SP)
+ MOVQ 184(AX), AX
+ MOVQ AX, 40(SP)
+ MOVQ 40(SP), AX
+ ADDQ AX, 48(SP)
+
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R10, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R12, R10
+
+sequenceDecs_decodeSync_safe_amd64_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_end
+
+sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+
+sequenceDecs_decodeSync_safe_amd64_fill_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_safe_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_safe_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_of_update_zero:
+ MOVQ AX, 8(SP)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_ml_update_zero:
+ MOVQ AX, 16(SP)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
+ MOVQ AX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ 
JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + 
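+ // Note (editorial): the _safe variants copy literals with length-exact
+ // moves (1-2, 3, 4-7 and 8-16 bytes) so no store lands past the end of
+ // the destination, unlike the 16-byte-granular fast path of the
+ // non-safe decoder.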
+copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + +copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (AX), CL
+ MOVB -1(AX)(R13*1), R14
+ MOVB CL, (R10)
+ MOVB R14, -1(R10)(R13*1)
+ ADDQ R13, AX
+ ADDQ R13, R10
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (AX), CX
+ MOVB 2(AX), R14
+ MOVW CX, (R10)
+ MOVB R14, 2(R10)
+ ADDQ R13, AX
+ ADDQ R13, R10
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (AX), CX
+ MOVL -4(AX)(R13*1), R14
+ MOVL CX, (R10)
+ MOVL R14, -4(R10)(R13*1)
+ ADDQ R13, AX
+ ADDQ R13, R10
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (AX), CX
+ MOVQ -8(AX)(R13*1), R14
+ MOVQ CX, (R10)
+ MOVQ R14, -8(R10)(R13*1)
+ ADDQ R13, AX
+ ADDQ R13, R10
+
+copy_2_end:
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R12
+
+copy_slow_3:
+ MOVB (AX), CL
+ MOVB CL, (R10)
+ INCQ AX
+ INCQ R10
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decodeSync_safe_amd64_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), AX
+ MOVQ DX, 24(AX)
+ MOVB BL, 32(AX)
+ MOVQ SI, 8(AX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R12, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R11
+ MOVQ R11, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: BMI, BMI2, CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
+ MOVQ br+8(FP), BX
+ MOVQ 24(BX), AX
+ MOVBQZX 32(BX), DX
+ MOVQ (BX), CX
+ MOVQ 8(BX), BX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ XORQ R9, R9
+ MOVQ R9, 8(SP)
+ MOVQ R9, 16(SP)
+ MOVQ R9, 24(SP)
+ MOVQ 112(CX), R9
+ MOVQ 128(CX), R10
+ MOVQ R10, 32(SP)
+ MOVQ 144(CX), R10
+ MOVQ 136(CX), R11
+ MOVQ 200(CX), R12
+ MOVQ R12, 56(SP)
+ MOVQ 176(CX), R12
+ MOVQ R12, 48(SP)
+ MOVQ 184(CX), CX
+ MOVQ CX, 40(SP)
+ MOVQ 40(SP), CX
+ ADDQ CX, 48(SP)
+
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R9, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R11, R9
+
+sequenceDecs_decodeSync_safe_bmi2_main_loop:
+ MOVQ (SP), R12
+
+ // Fill bitreader to have enough for the offset and match length.
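+ // Note (editorial): same loop as sequenceDecs_decodeSync_bmi2 above,
+ // paired with the length-exact copy routines so stores never extend
+ // past s.out.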
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 
2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error 
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 00000000000..2fb35b788c1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
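+				// History supplies the first v bytes of this match; the
+				// remainder is copied from the current buffer further down.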
+				// Copy remainder of history
+				copy(out[t:], hist[start:])
+				t += v
+				seq.ml -= v
+			} else {
+				copy(out[t:], hist[start:start+seq.ml])
+				t += seq.ml
+				continue
+			}
+		}
+
+		// We must be in the current buffer now
+		if seq.ml > 0 {
+			start := t - seq.mo
+			if seq.ml <= t-start {
+				// No overlap
+				copy(out[t:], out[start:start+seq.ml])
+				t += seq.ml
+			} else {
+				// Overlapping copy
+				// Extend destination slice and copy one byte at a time.
+				src := out[start : start+seq.ml]
+				dst := out[t:]
+				dst = dst[:len(src)]
+				t += len(src)
+				// Destination is the space we just added.
+				for i := range src {
+					dst[i] = src[i]
+				}
+			}
+		}
+	}
+	// Add final literals
+	copy(out[t:], s.literals)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+		}
+	}
+	s.out = out
+
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go
new file mode 100644
index 00000000000..8014174a771
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go
@@ -0,0 +1,114 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "math/bits"
+
+type seqCoders struct {
+	llEnc, ofEnc, mlEnc    *fseEncoder
+	llPrev, ofPrev, mlPrev *fseEncoder
+}
+
+// swap coders with another (block).
+func (s *seqCoders) swap(other *seqCoders) {
+	*s, *other = *other, *s
+}
+
+// setPrev will update the previous encoders to the actually used ones
+// and make sure a fresh one is in the main slot.
+func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) {
+	compareSwap := func(used *fseEncoder, current, prev **fseEncoder) {
+		// We used the new one, move current to history and reuse the previous history
+		if *current == used {
+			*prev, *current = *current, *prev
+			c := *current
+			p := *prev
+			c.reUsed = false
+			p.reUsed = true
+			return
+		}
+		if used == *prev {
+			return
+		}
+		// Ensure we cannot reuse by accident
+		prevEnc := *prev
+		prevEnc.symbolLen = 0
+	}
+	compareSwap(ll, &s.llEnc, &s.llPrev)
+	compareSwap(ml, &s.mlEnc, &s.mlPrev)
+	compareSwap(of, &s.ofEnc, &s.ofPrev)
+}
+
+func highBit(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
+
+var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7,
+	8, 9, 10, 11, 12, 13, 14, 15,
+	16, 16, 17, 17, 18, 18, 19, 19,
+	20, 20, 20, 20, 21, 21, 21, 21,
+	22, 22, 22, 22, 22, 22, 22, 22,
+	23, 23, 23, 23, 23, 23, 23, 23,
+	24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24}
+
+// Up to 6 bits
+const maxLLCode = 35
+
+// llBitsTable translates from ll code to number of bits.
+var llBitsTable = [maxLLCode + 1]byte{
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 2, 2, 3, 3,
+	4, 6, 7, 8, 9, 10, 11, 12,
+	13, 14, 15, 16}
+
+// llCode returns the code that represents the literal length requested.
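+// Lengths 0-15 map one to one to codes 0-15; lengths up to 63 are resolved
+// via llCodeTable, and anything longer derives its code from the position
+// of its highest set bit plus llDeltaCode.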
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 00000000000..ec13594e89b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,434 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+	ErrSnappyTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrSnappyUnsupported reports that the input isn't supported.
+	ErrSnappyUnsupported = errors.New("snappy: unsupported input")
+
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// SnappyConverter can read Snappy-compressed streams and convert them to zstd.
+// Conversion is done by converting the stream directly from Snappy without intermediate
+// full decoding.
+// Therefore the compression ratio is much less than what can be done by a full decompression
+// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without
+// any errors being generated.
+// No CRC value is being generated and not all CRC values of the Snappy stream are checked.
+// However, it provides really fast recompression of Snappy streams.
+// The converter can be reused to avoid allocations, even after errors.
+type SnappyConverter struct {
+	r     io.Reader
+	err   error
+	buf   []byte
+	block *blockEnc
+}
+
+// Convert the Snappy stream supplied in 'in' and write the Zstandard stream to 'w'.
+// If any error is detected on the Snappy stream it is returned.
+// The number of bytes written is returned.
+func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
+	initPredefined()
+	r.err = nil
+	r.r = in
+	if r.block == nil {
+		r.block = &blockEnc{}
+		r.block.init()
+	}
+	r.block.initNewEncode()
+	if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize {
+		r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize)
+	}
+	r.block.litEnc.Reuse = huff0.ReusePolicyNone
+	var written int64
+	var readHeader bool
+	{
+		header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+
+		var n int
+		n, r.err = w.Write(header)
+		if r.err != nil {
+			return written, r.err
+		}
+		written += int64(n)
+	}
+
+	for {
+		if !r.readFull(r.buf[:4], true) {
+			// Add empty last block
+			r.block.reset(nil)
+			r.block.last = true
+			err := r.block.encodeLits(r.block.literals, false)
+			if err != nil {
+				return written, err
+			}
+			n, err := w.Write(r.block.output)
+			if err != nil {
+				return written, err
+			}
+			written += int64(n)
+
+			return written, r.err
+		}
+		chunkType := r.buf[0]
+		if !readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				println("chunkType != chunkTypeStreamIdentifier", chunkType)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			println("chunkLen > len(r.buf)", chunkType)
+			r.err = ErrSnappyUnsupported
+			return written, r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
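+			// The chunk body is a 4-byte masked CRC of the uncompressed data
+			// followed by the Snappy-compressed payload; the CRC is skipped
+			// rather than verified on this path.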
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
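+		// The framing format requires a decoder to report these as an error
+		// rather than skip them.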
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
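+		// (The enabled fallback below stores the plain offset with a +3 bias,
+		// which appears to keep values 1-3 free for the encoder's
+		// repeat-offset codes.)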
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 00000000000..29c15c8c4ef --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) 
+ if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 00000000000..4be7cc73671 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,121 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "log" + "math" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. 
+ ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Printf(format, a...) + } +} + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE index dd878a30ee9..b9cc55abbb0 100644 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. 
-
-Support for streaming Protocol Buffer messages for the Go language (golang).
-https://github.com/matttproud/golang_protobuf_extensions
-Copyright 2013 Matt T. Proud
-Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
new file mode 100644
index 00000000000..65d761bc9f2
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
new file mode 100644
index 00000000000..8547c8dfd18
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
@@ -0,0 +1,145 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// Package header provides functions for parsing HTTP headers.
+package header
+
+import (
+	"net/http"
+	"strings"
+)
+
+// Octet types from RFC 2616.
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// AcceptSpec describes an Accept* header.
+type AcceptSpec struct {
+	Value string
+	Q     float64
+}
+
+// ParseAccept parses Accept* headers.
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
+loop:
+	for _, s := range header[key] {
+		for {
+			var spec AcceptSpec
+			spec.Value, s = expectTokenSlash(s)
+			if spec.Value == "" {
+				continue loop
+			}
+			spec.Q = 1.0
+			s = skipSpace(s)
+			if strings.HasPrefix(s, ";") {
+				s = skipSpace(s[1:])
+				if !strings.HasPrefix(s, "q=") {
+					continue loop
+				}
+				spec.Q, s = expectQuality(s[2:])
+				if spec.Q < 0.0 {
+					continue loop
+				}
+			}
+			specs = append(specs, spec)
+			s = skipSpace(s)
+			if !strings.HasPrefix(s, ",") {
+				continue loop
+			}
+			s = skipSpace(s[1:])
+		}
+	}
+	return
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectTokenSlash(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		b := s[i]
+		if (octetTypes[b]&isToken == 0) && b != '/' {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectQuality(s string) (q float64, rest string) {
+	switch {
+	case len(s) == 0:
+		return -1, ""
+	case s[0] == '0':
+		q = 0
+	case s[0] == '1':
+		q = 1
+	default:
+		return -1, ""
+	}
+	s = s[1:]
+	if !strings.HasPrefix(s, ".") {
+		return q, s
+	}
+	s = s[1:]
+	i := 0
+	n := 0
+	d := 1
+	for ; i < len(s); i++ {
+		b := s[i]
+		if b < '0' || b > '9' {
+			break
+		}
+		n = n*10 + int(b) - '0'
+		d *= 10
+	}
+	return q + float64(n)/float64(d), s[i:]
+}
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
new file mode 100644
index 00000000000..2e45780b74b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
@@ -0,0 +1,36 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+package httputil
+
+import (
+	"net/http"
+
+	"github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header"
+)
+
+// NegotiateContentEncoding returns the best offered content encoding for the
+// request's Accept-Encoding header. If two offers match with equal weight,
+// then the offer earlier in the list is preferred. If no offers are
+// acceptable, then "" is returned.
+func NegotiateContentEncoding(r *http.Request, offers []string) string { + bestOffer := "identity" + bestQ := -1.0 + specs := header.ParseAccept(r.Header, "Accept-Encoding") + for _, offer := range offers { + for _, spec := range specs { + if spec.Q > bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ad9a71a5e0d..520cbd7d418 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -22,13 +22,13 @@ import ( // goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. // From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so // while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. +// populated using runtime/metrics. Those are the defaults we can't alter. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", + "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, @@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", + "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, @@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained from system. Equals to /memory/classes/total:byte.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, }, { desc: NewDesc( memstatNamespace("mallocs_total"), - "Total number of mallocs.", + // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. + "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, @@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("frees_total"), - "Total number of frees.", + "Total number of heap objects frees. 
Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, @@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", + "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, @@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", + "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, @@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", + "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, @@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", + "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, @@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, @@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_objects"), - "Number of allocated objects.", + "Number of currently allocated objects. Equals to /gc/heap/objects:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, @@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", + "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, @@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", + "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, @@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", + "Number of bytes in use by mspan structures. 
Equals to /memory/classes/metadata/mspan/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, @@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", + "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, @@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", + "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, @@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", + "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, @@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", + "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, @@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", + "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, @@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", + "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, @@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", + "Number of heap bytes when next garbage collection will take place. 
Equals to /gc/heap/goal:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, @@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector { nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", + "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( "go_memstats_last_gc_time_seconds", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 2d8d9f64f43..51174641729 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -17,6 +17,7 @@ package prometheus import ( + "fmt" "math" "runtime" "runtime/metrics" @@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions { "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, }, RuntimeMetricRules: []internal.GoCollectorRule{ - //{Matcher: regexp.MustCompile("")}, + // Recommended metrics we want by default from runtime/metrics. + {Matcher: internal.GoCollectorDefaultRuntimeMetrics}, }, } } @@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } + help := attachOriginalName(d.Description.Description, d.Name) sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] @@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description.Description, + help, nil, nil, ), @@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }, ) } else { @@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }) } metricSet = append(metricSet, m) @@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } } +func attachOriginalName(desc, origName string) string { + return fmt.Sprintf("%s Sourced from %s", desc, origName) +} + // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { c.base.Describe(ch) @@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 { // // This should never happen because we always populate our metric // set from the runtime/metrics package. - panic("unexpected unsupported metric") + panic("unexpected bad kind metric") default: // Unsupported metric kind. // // This should never happen because we check for this during initialization // and flag and filter metrics whose kinds we don't understand. 
- panic("unexpected unsupported metric kind") + panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind())) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index b5c8bcb395a..519db348a74 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -440,7 +440,7 @@ type HistogramOpts struct { // constant (or any negative float value). NativeHistogramZeroThreshold float64 - // The remaining fields define a strategy to limit the number of + // The next three fields define a strategy to limit the number of // populated sparse buckets. If NativeHistogramMaxBucketNumber is left // at zero, the number of buckets is not limited. (Note that this might // lead to unbounded memory consumption if the values observed by the @@ -473,6 +473,22 @@ type HistogramOpts struct { NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + // NativeHistogramMaxExemplars limits the number of exemplars + // that are kept in memory for each native histogram. If you leave it at + // zero, a default value of 10 is used. If no exemplars should be kept specifically + // for native histograms, set it to a negative value. (Scrapers can + // still use the exemplars exposed for classic buckets, which are managed + // independently.) + NativeHistogramMaxExemplars int + // NativeHistogramExemplarTTL is only checked once + // NativeHistogramMaxExemplars is exceeded. In that case, the + // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL. + // Otherwise, the older exemplar in the pair of exemplars that are closest + // together (on an exponential scale) is removed. + // If NativeHistogramExemplarTTL is left at its zero value, a default value of + // 5m is used. To always delete the oldest exemplar, set it to a negative value. + NativeHistogramExemplarTTL time.Duration + // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -532,6 +548,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.afterFunc == nil { opts.afterFunc = time.AfterFunc } + h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -556,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) + h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -725,7 +743,8 @@ type histogram struct { // resetScheduled is protected by mtx. It is true if a reset is // scheduled for a later time (when nativeHistogramMinResetDuration has // passed). - resetScheduled bool + resetScheduled bool + nativeExemplars nativeExemplars // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -742,6 +761,9 @@ func (h *histogram) Observe(v float64) { h.observe(v, h.findBucket(v)) } +// ObserveWithExemplar should not be called in a high-frequency setting +// for a native histogram with configured exemplars. For this case, +// the implementation isn't lock-free and might suffer from lock contention. 
func (h *histogram) ObserveWithExemplar(v float64, e Labels) { i := h.findBucket(v) h.observe(v, i) @@ -821,6 +843,13 @@ func (h *histogram) Write(out *dto.Metric) error { Length: proto.Uint32(0), }} } + + if h.nativeExemplars.isEnabled() { + h.nativeExemplars.Lock() + his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...) + h.nativeExemplars.Unlock() + } + } addAndResetCounts(hotCounts, coldCounts) return nil @@ -1091,8 +1120,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) { deleteSyncMap(&counts.nativeHistogramBucketsPositive) } -// updateExemplar replaces the exemplar for the provided bucket. With empty -// labels, it's a no-op. It panics if any of the labels is invalid. +// updateExemplar replaces the exemplar for the provided classic bucket. +// With empty labels, it's a no-op. It panics if any of the labels is invalid. +// If the histogram is a native histogram, the exemplar is also cached in +// nativeExemplars, which evicts one stored exemplar whenever its size limit is reached. func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { if l == nil { return @@ -1102,6 +1133,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { panic(err) } h.exemplars[bucket].Store(e) + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) + if doSparse { + h.nativeExemplars.addExemplar(e) + } } // HistogramVec is a Collector that bundles a set of Histograms that all share the @@ -1336,6 +1371,48 @@ func MustNewConstHistogram( return m } +// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp. +func NewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where +// NewConstHistogramWithCreatedTimestamp would have returned an error. +func MustNewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} + type buckSort []*dto.Bucket func (s buckSort) Len() int { @@ -1575,3 +1652,186 @@ func addAndResetCounts(hot, cold *histogramCounts) { atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) } + +type nativeExemplars struct { + sync.Mutex + + // Time-to-live for exemplars; it is set to -1 if exemplars are disabled (that is, NativeHistogramMaxExemplars is below 0). + // The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
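+ // A ttl of -1 therefore also serves as the "disabled" marker that + // isEnabled checks, letting addExemplar return before taking the lock + // when exemplars are turned off.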
+ ttl time.Duration + + exemplars []*dto.Exemplar +} + +func (n *nativeExemplars) isEnabled() bool { + return n.ttl != -1 +} + +func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { + if ttl == 0 { + ttl = 5 * time.Minute + } + + if maxCount == 0 { + maxCount = 10 + } + + if maxCount < 0 { + maxCount = 0 + ttl = -1 + } + + return nativeExemplars{ + ttl: ttl, + exemplars: make([]*dto.Exemplar, 0, maxCount), + } +} + +func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { + if !n.isEnabled() { + return + } + + n.Lock() + defer n.Unlock() + + // While the number of exemplars is still below cap(n.exemplars), + // insert the new exemplar directly. + if len(n.exemplars) < cap(n.exemplars) { + var nIdx int + for nIdx = 0; nIdx < len(n.exemplars); nIdx++ { + if *e.Value < *n.exemplars[nIdx].Value { + break + } + } + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...) + return + } + + if len(n.exemplars) == 1 { + // When the number of exemplars is 1, then + // replace the existing exemplar with the new exemplar. + n.exemplars[0] = e + return + } + // From this point on, the number of exemplars is greater than 1. + + // When the number of exemplars exceeds the limit, remove one exemplar. + var ( + ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop. + otIdx = -1 // Index of the exemplar with the oldest timestamp. + + md = -1.0 // Logarithm of the delta of the closest pair of exemplars. + + // The insertion point of the new exemplar in the exemplars slice after insertion. + // This is calculated purely based on the order of the exemplars by value. + // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end. + nIdx = -1 + + // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar. + // The aim is to keep a good spread of exemplars by value and not let them bunch up too much. + // It is calculated in 3 steps: + // 1. First we set rIdx to the index of the older exemplar within the closest pair by value. + // That is, the following will be true (on a log scale): + // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have + // the closest values to each other from all pairs. + // For example, suppose the values are distributed like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // Or like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // 2. If there is an exemplar that expired, then we simply reset rIdx to that index. + // 3. We check if by inserting the new exemplar we would create a closer pair at + // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to + // keep the spread of exemplars by value; otherwise we keep rIdx as it is. + rIdx = -1 + cLog float64 // Logarithm of the current exemplar. + pLog float64 // Logarithm of the previous exemplar. + ) + + for i, exemplar := range n.exemplars { + // Find the exemplar with the oldest timestamp. + if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) { + ot = exemplar.Timestamp.AsTime() + otIdx = i + } + + // Find the index at which to insert the new exemplar. + if nIdx == -1 && *e.Value <= *exemplar.Value { + nIdx = i + } + + // Find the two closest exemplars and pick the one with the older timestamp.
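+ // Note that distances are computed between logarithms of the values, + // so "closest" means the smallest ratio between neighbouring exemplar + // values, not the smallest absolute difference.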
+ pLog = cLog + cLog = math.Log(exemplar.GetValue()) + if i == 0 { + continue + } + diff := math.Abs(cLog - pLog) + if md == -1 || diff < md { + // The closest exemplar pair is at index: i-1, i. + // Choose the exemplar with the older timestamp for replacement. + md = diff + if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) { + rIdx = i + } else { + rIdx = i - 1 + } + } + + } + + // If all existing exemplars are smaller than the new exemplar, + // then the new exemplar is inserted at the end. + if nIdx == -1 { + nIdx = len(n.exemplars) + } + // Here, we have the following relationships: + // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0) + // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars)) + + if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl { + // If the oldest exemplar has expired, then replace it with the new exemplar. + rIdx = otIdx + } else { + // The previous for loop did not take the new exemplar into account + // when calculating the closest pair of exemplars, + // so we need to recalculate with the new exemplar included. + elog := math.Log(e.GetValue()) + if nIdx > 0 { + diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue())) + if diff < md { + // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-n-----------x----------------x----x-----| + // nIdx-1--^ ^--new exemplar value + // Do not make the spread worse, replace nIdx-1 and not rIdx. + md = diff + rIdx = nIdx - 1 + } + } + if nIdx < len(n.exemplars) { + diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog) + if diff < md { + // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-----------n-x----------------x----x-----| + // new exemplar value--^ ^--nIdx + // Do not make the spread worse, replace nIdx and not rIdx. + rIdx = nIdx + } + } + } + + // Adjust the slice according to rIdx and nIdx. + switch { + case rIdx == nIdx: + n.exemplars[nIdx] = e + case rIdx < nIdx: + n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...) + case rIdx > nIdx: + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+ } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go index 723b45d6444..a4fa6eabd78 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -30,3 +30,5 @@ type GoCollectorOptions struct { RuntimeMetricSumForHist map[string]string RuntimeMetricRules []GoCollectorRule } + +var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index f018e57237d..9d9b81ab448 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -234,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { ) for i, e := range exemplars { ts := e.Timestamp - if ts == (time.Time{}) { + if ts.IsZero() { ts = now } exs[i], err = newExemplar(e.Value, ts, e.Labels) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 8548dd18ed5..62a4e7ad9a0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -22,14 +22,15 @@ import ( ) type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc + inBytes, outBytes *Desc } // ProcessCollectorOpts defines the behavior of a process metrics collector @@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { "Start time of the process since unix epoch in seconds.", nil, nil, ), + inBytes: NewDesc( + ns+"process_network_receive_bytes_total", + "Number of bytes received by the process over the network.", + nil, nil, + ), + outBytes: NewDesc( + ns+"process_network_transmit_bytes_total", + "Number of bytes sent by the process over the network.", + nil, nil, + ), } if opts.PidFn == nil { @@ -129,6 +140,8 @@ func (c *processCollector) Describe(ch chan<- *Desc) { ch <- c.maxVsize ch <- c.rss ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes } // Collect returns the current state of all metrics of the collector. 
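With this bump, the Go collector additionally exports /gc/gogc:percent, /gc/gomemlimit:bytes and /sched/gomaxprocs:threads by default, and further runtime/metrics names can be opted in via rules. A minimal sketch of how a consumer of this vendored library might do that, assuming the documented helpers in client_golang's collectors package (the helpers themselves are not part of this diff):

	package main

	import (
		"regexp"

		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/collectors"
	)

	func main() {
		reg := prometheus.NewRegistry()
		// Keep the new defaults and additionally expose scheduler latencies.
		reg.MustRegister(collectors.NewGoCollector(
			collectors.WithGoCollectorRuntimeMetrics(
				collectors.GoRuntimeMetricsRule{Matcher: regexp.MustCompile(`^/sched/latencies:seconds$`)},
			),
		))
	}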
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index 8c1136ceea3..14d56d2d068 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) { } else { c.reportError(ch, nil, err) } + + if netstat, err := p.Netstat(); err == nil { + var inOctets, outOctets float64 + if netstat.IpExt.InOctets != nil { + inOctets = *netstat.IpExt.InOctets + } + if netstat.IpExt.OutOctets != nil { + outOctets = *netstat.IpExt.OutOctets + } + ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) + ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) + } else { + c.reportError(ch, nil, err) + } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 9819917b83b..315eab5f179 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { return n, err } +// Unwrap lets http.ResponseController get the underlying http.ResponseWriter, +// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface. +func (r *responseWriterDelegator) Unwrap() http.ResponseWriter { + return r.ResponseWriter +} + type ( closeNotifierDelegator struct{ *responseWriterDelegator } flusherDelegator struct{ *responseWriterDelegator } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 09b8d2fbead..e598e66e688 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -38,12 +38,13 @@ import ( "io" "net/http" "strconv" - "strings" "sync" "time" + "github.com/klauspost/compress/zstd" "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" "github.com/prometheus/client_golang/prometheus" ) @@ -54,6 +55,18 @@ const ( processStartTimeHeader = "Process-Start-Time-Unix" ) +// Compression represents the content encodings handlers support for the HTTP +// responses. +type Compression string + +const ( + Identity Compression = "identity" + Gzip Compression = "gzip" + Zstd Compression = "zstd" +) + +var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd} + var gzipPool = sync.Pool{ New: func() interface{} { return gzip.NewWriter(nil) @@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } } + // Select compression formats to offer based on default or user choice. 
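+ // The configured order is preserved here; negotiateEncodingWriter later + // matches this list against the request's Accept-Encoding header.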
+ var compressions []string + if !opts.DisableCompression { + offers := defaultCompressionFormats + if len(opts.OfferedCompressions) > 0 { + offers = opts.OfferedCompressions + } + for _, comp := range offers { + compressions = append(compressions, string(comp)) + } + } + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if !opts.ProcessStartTime.IsZero() { rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) @@ -165,21 +190,23 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } else { contentType = expfmt.Negotiate(req.Header) } - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) + rsp.Header().Set(contentTypeHeader, string(contentType)) - w := io.Writer(rsp) - if !opts.DisableCompression && gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) + w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions) + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error getting writer", err) + } + w = io.Writer(rsp) + encodingHeader = string(Identity) + } - gz.Reset(w) - defer gz.Close() + defer closeWriter() - w = gz + // Set Content-Encoding only when data is compressed + if encodingHeader != string(Identity) { + rsp.Header().Set(contentEncodingHeader, encodingHeader) } - enc := expfmt.NewEncoder(w, contentType) // handleError handles the error according to opts.ErrorHandling @@ -343,9 +370,19 @@ type HandlerOpts struct { // no effect on the HTTP status code because ErrorHandling is set to // ContinueOnError. Registry prometheus.Registerer - // If DisableCompression is true, the handler will never compress the - // response, even if requested by the client. + // DisableCompression disables the response encoding (compression) and + // encoding negotiation. If true, the handler will + // never compress the response, even if requested + // by the client and the OfferedCompressions field is set. DisableCompression bool + // OfferedCompressions is a set of encodings (compressions) the handler will + // try to offer when negotiating with the client. This defaults to identity, gzip + // and zstd. + // NOTE: If the handler can't agree with the client on the encodings, or if + // unsupported or empty encodings are set in OfferedCompressions, the + // handler always falls back to no compression (identity) for + // compatibility reasons. In such cases ErrorLog will be used if set. + OfferedCompressions []Compression // The number of concurrent HTTP requests is limited to // MaxRequestsInFlight. Additional requests are responded to with 503 // Service Unavailable and a suitable message in the body. If @@ -381,19 +418,6 @@ type HandlerOpts struct { ProcessStartTime time.Time } -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - // httpError removes any content-encoding header and then calls http.Error with // the provided error and http.StatusInternalServerError. Error contents are // supposed to be uncompressed plain text.
Same as with a plain http.Error, this @@ -406,3 +430,38 @@ func httpError(rsp http.ResponseWriter, err error) { http.StatusInternalServerError, ) } + +// negotiateEncodingWriter reads the Accept-Encoding header from a request and +// selects the right compression based on an allow-list of supported +// compressions. It returns a writer implementing the compression and the +// correct value that the caller can set in the response header. +func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) { + if len(compressions) == 0 { + return rw, string(Identity), func() {}, nil + } + + // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented. + selected := httputil.NegotiateContentEncoding(r, compressions) + + switch selected { + case "zstd": + // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented. + z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest)) + if err != nil { + return nil, "", func() {}, err + } + + z.Reset(rw) + return z, selected, func() { _ = z.Close() }, nil + case "gzip": + gz := gzipPool.Get().(*gzip.Writer) + gz.Reset(rw) + return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil + case "identity": + // This means the content is not compressed. + return rw, selected, func() {}, nil + default: + // The negotiated content encoding is not implemented. + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 5e2ced25a02..c6fd2f58b74 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error { if dimHash != desc.dimHash { return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash + continue + } + + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) } + continue } + newDimHashesByName[desc.fqName] = desc.dimHash } // A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 1462704446c..1ab0e479655 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -783,3 +783,45 @@ func MustNewConstSummary( } return m } + +// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp. +func NewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where +// NewConstSummaryWithCreatedTimestamp would have returned an error. +func MustNewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 955cfd59f83..2c808eece0a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues( return metric } -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// getOrCreateMetricWithLabels retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. diff --git a/vendor/k8s.io/gengo/v2/generator/snippet_writer.go b/vendor/k8s.io/gengo/v2/generator/snippet_writer.go index 7f4610c00f7..550214e1635 100644 --- a/vendor/k8s.io/gengo/v2/generator/snippet_writer.go +++ b/vendor/k8s.io/gengo/v2/generator/snippet_writer.go @@ -123,25 +123,28 @@ func (s *SnippetWriter) Do(format string, args interface{}) *SnippetWriter { // SnippetWriter.Do. type Args map[interface{}]interface{} -// With makes a copy of a and adds the given key, value pair. +// With makes a copy of a and adds the given key, value pair. If key overlaps, +// the new value wins. func (a Args) With(key, value interface{}) Args { - a2 := Args{key: value} + result := Args{} for k, v := range a { - a2[k] = v + result[k] = v } - return a2 + result[key] = value + return result } -// WithArgs makes a copy of a and adds the given arguments. +// WithArgs makes a copy of a and adds the given arguments. If any keys +// overlap, the values from rhs win. 
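+// For example (the keys and values here are illustrative): +// +//	base := Args{"type": t1} +//	merged := base.WithArgs(Args{"type": t2, "name": "n"}) +//	// merged["type"] == t2, merged["name"] == "n"; base is unchanged.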
func (a Args) WithArgs(rhs Args) Args { - a2 := Args{} - for k, v := range rhs { - a2[k] = v - } + result := Args{} for k, v := range a { - a2[k] = v + result[k] = v + } + for k, v := range rhs { + result[k] = v } - return a2 + return result } func (s *SnippetWriter) Out() io.Writer { @@ -152,3 +155,16 @@ func (s *SnippetWriter) Out() io.Writer { func (s *SnippetWriter) Error() error { return s.err } + +// Dup creates an exact duplicate SnippetWriter with a different io.Writer. +func (s *SnippetWriter) Dup(w io.Writer) *SnippetWriter { + ret := *s + ret.w = w + return &ret +} + +// Append adds the contents of the io.Reader to this SnippetWriter's buffer. +func (s *SnippetWriter) Append(r io.Reader) error { + _, err := io.Copy(s.w, r) + return err +} diff --git a/vendor/k8s.io/gengo/v2/namer/namer.go b/vendor/k8s.io/gengo/v2/namer/namer.go index e82fe66adcf..37877eb4921 100644 --- a/vendor/k8s.io/gengo/v2/namer/namer.go +++ b/vendor/k8s.io/gengo/v2/namer/namer.go @@ -281,12 +281,12 @@ func (ns *NameStrategy) Name(t *types.Type) string { case types.Func: // TODO: add to name test parts := []string{"Func"} - for _, pt := range t.Signature.Parameters { - parts = append(parts, ns.removePrefixAndSuffix(ns.Name(pt))) + for _, param := range t.Signature.Parameters { + parts = append(parts, ns.removePrefixAndSuffix(ns.Name(param.Type))) } parts = append(parts, "Returns") - for _, rt := range t.Signature.Results { - parts = append(parts, ns.removePrefixAndSuffix(ns.Name(rt))) + for _, result := range t.Signature.Results { + parts = append(parts, ns.removePrefixAndSuffix(ns.Name(result.Type))) } name = ns.Join(ns.Prefix, parts, ns.Suffix) default: @@ -374,12 +374,12 @@ func (r *rawNamer) Name(t *types.Type) string { case types.Func: // TODO: add to name test params := []string{} - for _, pt := range t.Signature.Parameters { - params = append(params, r.Name(pt)) + for _, param := range t.Signature.Parameters { + params = append(params, r.Name(param.Type)) } results := []string{} - for _, rt := range t.Signature.Results { - results = append(results, r.Name(rt)) + for _, result := range t.Signature.Results { + results = append(results, r.Name(result.Type)) } name = "func(" + strings.Join(params, ",") + ")" if len(results) == 1 { diff --git a/vendor/k8s.io/gengo/v2/parser/parse.go b/vendor/k8s.io/gengo/v2/parser/parse.go index a5993d16393..da9488b8e89 100644 --- a/vendor/k8s.io/gengo/v2/parser/parse.go +++ b/vendor/k8s.io/gengo/v2/parser/parse.go @@ -572,6 +572,9 @@ func goVarNameToName(in string) types.Name { return goNameToName(nameParts[1]) } +// goNameToName converts a go name string to a gengo types.Name. +// It operates solely on the string on a best effort basis. The name may be updated +// in walkType for generics. func goNameToName(in string) types.Name { // Detect anonymous type names. (These may have '.' characters because // embedded types may have packages, so we detect them specially.) @@ -587,14 +590,25 @@ func goNameToName(in string) types.Name { return types.Name{Name: in} } + // There may be '.' characters within a generic. Temporarily remove + // the generic. + genericIndex := strings.IndexRune(in, '[') + if genericIndex == -1 { + genericIndex = len(in) + } + // Otherwise, if there are '.' characters present, the name has a // package path in front. - nameParts := strings.Split(in, ".") + nameParts := strings.Split(in[:genericIndex], ".") name := types.Name{Name: in} if n := len(nameParts); n >= 2 { // The final "." 
is the name of the type--previous ones must // have been in the package path. name.Package, name.Name = strings.Join(nameParts[:n-1], "."), nameParts[n-1] + // Add back the generic component now that the package and type name have been separated. + if genericIndex != len(in) { + name.Name = name.Name + in[genericIndex:] + } } return name } @@ -602,12 +616,16 @@ func goNameToName(in string) types.Name { func (p *Parser) convertSignature(u types.Universe, t *gotypes.Signature) *types.Signature { signature := &types.Signature{} for i := 0; i < t.Params().Len(); i++ { - signature.Parameters = append(signature.Parameters, p.walkType(u, nil, t.Params().At(i).Type())) - signature.ParameterNames = append(signature.ParameterNames, t.Params().At(i).Name()) + signature.Parameters = append(signature.Parameters, &types.ParamResult{ + Name: t.Params().At(i).Name(), + Type: p.walkType(u, nil, t.Params().At(i).Type()), + }) } for i := 0; i < t.Results().Len(); i++ { - signature.Results = append(signature.Results, p.walkType(u, nil, t.Results().At(i).Type())) - signature.ResultNames = append(signature.ResultNames, t.Results().At(i).Name()) + signature.Results = append(signature.Results, &types.ParamResult{ + Name: t.Results().At(i).Name(), + Type: p.walkType(u, nil, t.Results().At(i).Type()), + }) } if r := t.Recv(); r != nil { signature.Receiver = p.walkType(u, nil, r.Type()) @@ -734,6 +752,27 @@ func (p *Parser) walkType(u types.Universe, useName *types.Name, in gotypes.Type } out.Kind = types.Alias out.Underlying = p.walkType(u, nil, t.Underlying()) + case *gotypes.Struct, *gotypes.Interface: + name := goNameToName(t.String()) + tpMap := map[string]*types.Type{} + if t.TypeParams().Len() != 0 { + // Remove generics, then readd them without the encoded + // type, e.g. Foo[T any] => Foo[T] + var tpNames []string + for i := 0; i < t.TypeParams().Len(); i++ { + tp := t.TypeParams().At(i) + tpName := tp.Obj().Name() + tpNames = append(tpNames, tpName) + tpMap[tpName] = p.walkType(u, nil, tp.Constraint()) + } + name.Name = fmt.Sprintf("%s[%s]", strings.SplitN(name.Name, "[", 2)[0], strings.Join(tpNames, ",")) + } + + if out := u.Type(name); out.Kind != types.Unknown { + return out // short circuit if we've already made this. + } + out = p.walkType(u, &name, t.Underlying()) + out.TypeParams = tpMap default: // gotypes package makes everything "named" with an // underlying anonymous type--we remove that annoying @@ -760,6 +799,15 @@ func (p *Parser) walkType(u types.Universe, useName *types.Name, in gotypes.Type } } return out + case *gotypes.TypeParam: + // DO NOT retrieve the type from the universe. The default type-param name is only the + // generic variable name. Ideally, it would be namespaced by package and struct but it is + // not. Thus, if we try to use the universe, we would start polluting it. + // e.g. if Foo[T] and Bar[T] exists, we'd mistakenly use the same type T for both. + return &types.Type{ + Name: name, + Kind: types.TypeParam, + } default: out := u.Type(name) if out.Kind != types.Unknown { diff --git a/vendor/k8s.io/gengo/v2/types/types.go b/vendor/k8s.io/gengo/v2/types/types.go index e9c8319c656..7bbca017332 100644 --- a/vendor/k8s.io/gengo/v2/types/types.go +++ b/vendor/k8s.io/gengo/v2/types/types.go @@ -98,6 +98,7 @@ const ( DeclarationOf Kind = "DeclarationOf" Unknown Kind = "" Unsupported Kind = "Unsupported" + TypeParam Kind = "TypeParam" // Protobuf is protobuf type. 
Protobuf Kind = "Protobuf" @@ -324,6 +325,9 @@ type Type struct { // If Kind == Struct Members []Member + // If Kind == Struct + TypeParams map[string]*Type + // If Kind == Map, Slice, Pointer, or Chan Elem *Type @@ -423,14 +427,20 @@ func (m Member) String() string { return m.Name + " " + m.Type.String() } +// ParamResult represents a parameter or a result of a method's signature. +type ParamResult struct { + // The name of the parameter or result. + Name string + // The type of this parameter or result. + Type *Type +} + // Signature is a function's signature. type Signature struct { // If a method of some type, this is the type it's a member of. - Receiver *Type - Parameters []*Type - ParameterNames []string - Results []*Type - ResultNames []string + Receiver *Type + Parameters []*ParamResult + Results []*ParamResult // True if the last in parameter is of the form ...T. Variadic bool diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go index b0fa8272a7c..c5c00938183 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go @@ -249,7 +249,7 @@ func methodReturnsValue(mt *types.Type, pkg, name string) bool { return false } r := mt.Signature.Results[0] - return r.Name.Name == name && r.Name.Package == pkg + return r.Type.Name.Name == name && r.Type.Name.Package == pkg } func hasOpenAPIV3DefinitionMethod(t *types.Type) bool { diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go index af30edc5ed0..d7655f0d92c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go @@ -32,14 +32,6 @@ var ( "-", ) - // Blacklist of JSON names that should skip match evaluation - jsonNameBlacklist = sets.NewString( - // Empty name is used for inline struct field (e.g. metav1.TypeMeta) - "", - // Special case for object and list meta - "metadata", - ) - // List of substrings that aren't allowed in Go name and JSON name disallowedNameSubstrings = sets.NewString( // Underscore is not allowed in either name @@ -73,12 +65,11 @@ is also considered matched. HTTPJSONSpec httpjsonSpec true -NOTE: JSON names in jsonNameBlacklist should skip evaluation +NOTE: an empty JSON name is valid only for inlined structs or pointers to structs. +It cannot be empty for anything else because capitalization must be set explicitly. - true - podSpec true - podSpec - true - podSpec metadata true +NOTE: metav1.ListMeta and metav1.ObjectMeta by convention must have "metadata" as their JSON name. +Other fields may have that JSON name if the field name matches.
*/ type NamesMatch struct{} @@ -109,7 +100,7 @@ func (n *NamesMatch) Validate(t *types.Type) ([]string, error) { continue } jsonName := strings.Split(jsonTag, ",")[0] - if !namesMatch(goName, jsonName) { + if !nameIsOkay(m, jsonName) { fields = append(fields, goName) } } @@ -117,6 +108,22 @@ func (n *NamesMatch) Validate(t *types.Type) ([]string, error) { return fields, nil } +func nameIsOkay(member types.Member, jsonName string) bool { + if jsonName == "" { + return member.Type.Kind == types.Struct || + member.Type.Kind == types.Pointer && member.Type.Elem.Kind == types.Struct + } + + typeName := member.Type.String() + switch typeName { + case "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta": + return jsonName == "metadata" + } + + return namesMatch(member.Name, jsonName) +} + // namesMatch evaluates if goName and jsonName match the API rule // TODO: Use an off-the-shelf CamelCase solution instead of implementing this logic. The following existing // @@ -129,9 +136,6 @@ func (n *NamesMatch) Validate(t *types.Type) ([]string, error) { // about why they don't satisfy our need. What we need can be a function that detects an acronym at the // beginning of a string. func namesMatch(goName, jsonName string) bool { - if jsonNameBlacklist.Has(jsonName) { - return true - } if !isAllowedName(goName) || !isAllowedName(jsonName) { return false } diff --git a/vendor/k8s.io/utils/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go index e0811e8344c..f64d64955b4 100644 --- a/vendor/k8s.io/utils/integer/integer.go +++ b/vendor/k8s.io/utils/integer/integer.go @@ -18,7 +18,8 @@ package integer import "math" -// IntMax returns the maximum of the params +// IntMax returns the maximum of the params. +// Deprecated: for new code, use the max() builtin instead. func IntMax(a, b int) int { if b > a { return b @@ -26,7 +27,8 @@ func IntMax(a, b int) int { return a } -// IntMin returns the minimum of the params +// IntMin returns the minimum of the params. +// Deprecated: for new code, use the min() builtin instead. func IntMin(a, b int) int { if b < a { return b @@ -34,7 +36,8 @@ func IntMin(a, b int) int { return a } -// Int32Max returns the maximum of the params +// Int32Max returns the maximum of the params. +// Deprecated: for new code, use the max() builtin instead. func Int32Max(a, b int32) int32 { if b > a { return b @@ -42,7 +45,8 @@ func Int32Max(a, b int32) int32 { return a } -// Int32Min returns the minimum of the params +// Int32Min returns the minimum of the params. +// Deprecated: for new code, use the min() builtin instead. func Int32Min(a, b int32) int32 { if b < a { return b @@ -50,7 +54,8 @@ func Int32Min(a, b int32) int32 { return a } -// Int64Max returns the maximum of the params +// Int64Max returns the maximum of the params. +// Deprecated: for new code, use the max() builtin instead. func Int64Max(a, b int64) int64 { if b > a { return b @@ -58,7 +63,8 @@ func Int64Max(a, b int64) int64 { return a } -// Int64Min returns the minimum of the params +// Int64Min returns the minimum of the params. +// Deprecated: for new code, use the min() builtin instead. 
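+// For example, new code can write min(a, b) using the builtin available +// since Go 1.21 instead of integer.Int64Min(a, b).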
func Int64Min(a, b int64) int64 { if b < a { return b diff --git a/vendor/modules.txt b/vendor/modules.txt index fd1d0448440..f11d26eca8f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -31,8 +31,8 @@ github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1 github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1 github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1 github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1 -# github.com/cert-manager/cert-manager v1.13.3 -## explicit; go 1.20 +# github.com/cert-manager/cert-manager v1.16.3 +## explicit; go 1.22.0 github.com/cert-manager/cert-manager/pkg/apis/acme github.com/cert-manager/cert-manager/pkg/apis/acme/v1 github.com/cert-manager/cert-manager/pkg/apis/certmanager @@ -203,6 +203,15 @@ github.com/json-iterator/go # github.com/kelseyhightower/envconfig v1.4.0 ## explicit github.com/kelseyhightower/envconfig +# github.com/klauspost/compress v1.17.9 +## explicit; go 1.20 +github.com/klauspost/compress +github.com/klauspost/compress/fse +github.com/klauspost/compress/huff0 +github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/snapref +github.com/klauspost/compress/zstd +github.com/klauspost/compress/zstd/internal/xxhash # github.com/mailru/easyjson v0.7.7 ## explicit; go 1.12 github.com/mailru/easyjson/buffer @@ -244,8 +253,10 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v1.19.1 +# github.com/prometheus/client_golang v1.20.4 ## explicit; go 1.20 +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp @@ -433,8 +444,8 @@ golang.org/x/tools/internal/versions # gomodules.xyz/jsonpatch/v2 v2.4.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/api v0.183.0 -## explicit; go 1.20 +# google.golang.org/api v0.198.0 +## explicit; go 1.21 google.golang.org/api/support/bundler # google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a ## explicit; go 1.21 @@ -1068,7 +1079,7 @@ k8s.io/code-generator/third_party/forked/golang/reflect ## explicit; go 1.13 k8s.io/gengo/parser k8s.io/gengo/types -# k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 +# k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 ## explicit; go 1.20 k8s.io/gengo/v2 k8s.io/gengo/v2/generator @@ -1087,7 +1098,7 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8 +# k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 ## explicit; go 1.20 k8s.io/kube-openapi/cmd/openapi-gen k8s.io/kube-openapi/cmd/openapi-gen/args @@ -1103,7 +1114,7 @@ k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/sets k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 +# k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -1312,9 +1323,9 @@ knative.dev/reconciler-test/pkg/resources/serviceaccount knative.dev/reconciler-test/pkg/state knative.dev/reconciler-test/pkg/tracing 
knative.dev/reconciler-test/resources/certificate -# sigs.k8s.io/gateway-api v0.8.0 -## explicit; go 1.20 -sigs.k8s.io/gateway-api/apis/v1beta1 +# sigs.k8s.io/gateway-api v1.1.0 +## explicit; go 1.22.0 +sigs.k8s.io/gateway-api/apis/v1 # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd ## explicit; go 1.18 sigs.k8s.io/json diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/doc.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/doc.go similarity index 83% rename from vendor/sigs.k8s.io/gateway-api/apis/v1beta1/doc.go rename to vendor/sigs.k8s.io/gateway-api/apis/v1/doc.go index 328100aee81..f2c7aa2b551 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/doc.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/doc.go @@ -14,9 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package v1beta1 contains API Schema definitions for the -// gateway.networking.k8s.io API group. +// Package v1 contains API Schema definitions for the gateway.networking.k8s.io +// API group. // +// +k8s:openapi-gen=true // +kubebuilder:object:generate=true // +groupName=gateway.networking.k8s.io -package v1beta1 +package v1 diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/gateway_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/gateway_types.go similarity index 70% rename from vendor/sigs.k8s.io/gateway-api/apis/v1beta1/gateway_types.go rename to vendor/sigs.k8s.io/gateway-api/apis/v1/gateway_types.go index 033c1c2a16a..caa5e96bf3a 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/gateway_types.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/gateway_types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -23,8 +23,8 @@ import ( // +genclient // +kubebuilder:object:root=true // +kubebuilder:resource:categories=gateway-api,shortName=gtw -// +kubebuilder:storageversion // +kubebuilder:subresource:status +// +kubebuilder:storageversion // +kubebuilder:printcolumn:name="Class",type=string,JSONPath=`.spec.gatewayClassName` // +kubebuilder:printcolumn:name="Address",type=string,JSONPath=`.status.addresses[*].value` // +kubebuilder:printcolumn:name="Programmed",type=string,JSONPath=`.status.conditions[?(@.type=="Programmed")].status` @@ -57,8 +57,8 @@ type GatewayList struct { // GatewaySpec defines the desired state of Gateway. // // Not all possible combinations of options specified in the Spec are -// valid. Some invalid configurations can be caught synchronously via a -// webhook, but there are many cases that will require asynchronous +// valid. Some invalid configurations can be caught synchronously via CRD +// validation, but there are many cases that will require asynchronous // signaling via the GatewayStatus block. type GatewaySpec struct { // GatewayClassName used for this Gateway. This is the name of a @@ -69,54 +69,117 @@ type GatewaySpec struct { // logical endpoints that are bound on this Gateway's addresses. // At least one Listener MUST be specified. // - // Each listener in a Gateway must have a unique combination of Hostname, - // Port, and Protocol. - // - // Within the HTTP Conformance Profile, the below combinations of port and - // protocol are considered Core and MUST be supported: - // - // 1. Port: 80, Protocol: HTTP - // 2. 
Port: 443, Protocol: HTTPS - // - // Within the TLS Conformance Profile, the below combinations of port and - // protocol are considered Core and MUST be supported: - // - // 1. Port: 443, Protocol: TLS - // - // Port and protocol combinations not listed above are considered Extended. - // - // An implementation MAY group Listeners by Port and then collapse each - // group of Listeners into a single Listener if the implementation - // determines that the Listeners in the group are "compatible". An - // implementation MAY also group together and collapse compatible - // Listeners belonging to different Gateways. - // - // For example, an implementation might consider Listeners to be - // compatible with each other if all of the following conditions are - // met: - // - // 1. Either each Listener within the group specifies the "HTTP" - // Protocol or each Listener within the group specifies either - // the "HTTPS" or "TLS" Protocol. - // - // 2. Each Listener within the group specifies a Hostname that is unique - // within the group. - // - // 3. As a special case, one Listener within a group may omit Hostname, - // in which case this Listener matches when no other Listener - // matches. - // - // If the implementation does collapse compatible Listeners, the - // hostname provided in the incoming client request MUST be - // matched to a Listener to find the correct set of Routes. - // The incoming hostname MUST be matched using the Hostname - // field for each Listener in order of most to least specific. - // That is, exact matches must be processed before wildcard - // matches. - // - // If this field specifies multiple Listeners that have the same - // Port value but are not compatible, the implementation must raise - // a "Conflicted" condition in the Listener status. + // Each Listener in a set of Listeners (for example, in a single Gateway) + // MUST be _distinct_, in that a traffic flow MUST be able to be assigned to + // exactly one listener. (This section uses "set of Listeners" rather than + // "Listeners in a single Gateway" because implementations MAY merge configuration + // from multiple Gateways onto a single data plane, and these rules _also_ + // apply in that case). + // + // Practically, this means that each listener in a set MUST have a unique + // combination of Port, Protocol, and, if supported by the protocol, Hostname. + // + // Some combinations of port, protocol, and TLS settings are considered + // Core support and MUST be supported by implementations based on their + // targeted conformance profile: + // + // HTTP Profile + // + // 1. HTTPRoute, Port: 80, Protocol: HTTP + // 2. HTTPRoute, Port: 443, Protocol: HTTPS, TLS Mode: Terminate, TLS keypair provided + // + // TLS Profile + // + // 1. TLSRoute, Port: 443, Protocol: TLS, TLS Mode: Passthrough + // + // "Distinct" Listeners have the following property: + // + // The implementation can match inbound requests to a single distinct + // Listener. When multiple Listeners share values for fields (for + // example, two Listeners with the same Port value), the implementation + // can match requests to only one of the Listeners using other + // Listener fields. + // + // For example, the following Listener scenarios are distinct: + // + // 1. Multiple Listeners with the same Port that all use the "HTTP" + // Protocol that all have unique Hostname values. + // 2. Multiple Listeners with the same Port that use either the "HTTPS" or + // "TLS" Protocol that all have unique Hostname values. + // 3. 
A mixture of "TCP" and "UDP" Protocol Listeners, where no Listener + // with the same Protocol has the same Port value. + // + // Some fields in the Listener struct have possible values that affect + // whether the Listener is distinct. Hostname is particularly relevant + // for HTTP or HTTPS protocols. + // + // When using the Hostname value to select between same-Port, same-Protocol + // Listeners, the Hostname value must be different on each Listener for the + // Listener to be distinct. + // + // When the Listeners are distinct based on Hostname, inbound request + // hostnames MUST match from the most specific to least specific Hostname + // values to choose the correct Listener and its associated set of Routes. + // + // Exact matches must be processed before wildcard matches, and wildcard + // matches must be processed before fallback (empty Hostname value) + // matches. For example, `"foo.example.com"` takes precedence over + // `"*.example.com"`, and `"*.example.com"` takes precedence over `""`. + // + // Additionally, if there are multiple wildcard entries, more specific + // wildcard entries must be processed before less specific wildcard entries. + // For example, `"*.foo.example.com"` takes precedence over `"*.example.com"`. + // The precise definition here is that the higher the number of dots in the + // hostname to the right of the wildcard character, the higher the precedence. + // + // The wildcard character will match any number of characters _and dots_ to + // the left, however, so `"*.example.com"` will match both + // `"foo.bar.example.com"` _and_ `"bar.example.com"`. + // + // If a set of Listeners contains Listeners that are not distinct, then those + // Listeners are Conflicted, and the implementation MUST set the "Conflicted" + // condition in the Listener Status to "True". + // + // Implementations MAY choose to accept a Gateway with some Conflicted + // Listeners only if they only accept the partial Listener set that contains + // no Conflicted Listeners. To put this another way, implementations may + // accept a partial Listener set only if they throw out *all* the conflicting + // Listeners. No picking one of the conflicting listeners as the winner. + // This also means that the Gateway must have at least one non-conflicting + // Listener in this case, otherwise it violates the requirement that at + // least one Listener must be present. + // + // The implementation MUST set a "ListenersNotValid" condition on the + // Gateway Status when the Gateway contains Conflicted Listeners whether or + // not they accept the Gateway. That Condition SHOULD clearly + // indicate in the Message which Listeners are conflicted, and which are + // Accepted. Additionally, the Listener status for those listeners SHOULD + // indicate which Listeners are conflicted and not Accepted. + // + // A Gateway's Listeners are considered "compatible" if: + // + // 1. They are distinct. + // 2. The implementation can serve them in compliance with the Addresses + // requirement that all Listeners are available on all assigned + // addresses. + // + // Compatible combinations in Extended support are expected to vary across + // implementations. A combination that is compatible for one implementation + // may not be compatible for another. + // + // For example, an implementation that cannot serve both TCP and UDP listeners + // on the same address, or cannot mix HTTPS and generic TLS listens on the same port + // would not consider those cases compatible, even though they are distinct. 
+ // + // Note that requests SHOULD match at most one Listener. For example, if + // Listeners are defined for "foo.example.com" and "*.example.com", a + // request to "foo.example.com" SHOULD only be routed using routes attached + // to the "foo.example.com" Listener (and not the "*.example.com" Listener). + // This concept is known as "Listener Isolation". Implementations that do + // not support Listener Isolation MUST clearly document this. + // + // Implementations MAY merge separate Gateways onto a single set of + // Addresses if all Listeners across all Gateways are compatible. // // Support: Core // @@ -124,11 +187,11 @@ type GatewaySpec struct { // +listMapKey=name // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=64 - // +kubebuilder:validation:XValidation:message="tls must be specified for protocols ['HTTPS', 'TLS']",rule="self.all(l, l.protocol in ['HTTPS', 'TLS'] ? has(l.tls) : true)" // +kubebuilder:validation:XValidation:message="tls must not be specified for protocols ['HTTP', 'TCP', 'UDP']",rule="self.all(l, l.protocol in ['HTTP', 'TCP', 'UDP'] ? !has(l.tls) : true)" + // +kubebuilder:validation:XValidation:message="tls mode must be Terminate for protocol HTTPS",rule="self.all(l, (l.protocol == 'HTTPS' && has(l.tls)) ? (l.tls.mode == '' || l.tls.mode == 'Terminate') : true)" // +kubebuilder:validation:XValidation:message="hostname must not be specified for protocols ['TCP', 'UDP']",rule="self.all(l, l.protocol in ['TCP', 'UDP'] ? (!has(l.hostname) || l.hostname == '') : true)" // +kubebuilder:validation:XValidation:message="Listener name must be unique within the Gateway",rule="self.all(l1, self.exists_one(l2, l1.name == l2.name))" - // +kubebuilder:validation:XValidation:message="Combination of port, protocol and hostname must be unique for each listener",rule="self.all(l1, self.exists_one(l2, l1.port == l2.port && l1.protocol == l2.protocol && (has(l1.hostname) && has(l2.hostname) ? l1.hostname == l2.hostname : true)))" + // +kubebuilder:validation:XValidation:message="Combination of port, protocol and hostname must be unique for each listener",rule="self.all(l1, self.exists_one(l2, l1.port == l2.port && l1.protocol == l2.protocol && (has(l1.hostname) && has(l2.hostname) ? l1.hostname == l2.hostname : !has(l1.hostname) && !has(l2.hostname))))" Listeners []Listener `json:"listeners"` // Addresses requested for this Gateway. This is optional and behavior can @@ -142,9 +205,6 @@ type GatewaySpec struct { // other networking infrastructure, or some other address that traffic will // be sent to. // - // The .listener.hostname field is used to route traffic that has already - // arrived at the Gateway to the correct in-cluster destination. - // // If no Addresses are specified, the implementation MAY schedule the // Gateway in an implementation-specific manner, assigning an appropriate // set of Addresses. @@ -161,6 +221,14 @@ type GatewaySpec struct { // +kubebuilder:validation:XValidation:message="IPAddress values must be unique",rule="self.all(a1, a1.type == 'IPAddress' ? self.exists_one(a2, a2.type == a1.type && a2.value == a1.value) : true )" // +kubebuilder:validation:XValidation:message="Hostname values must be unique",rule="self.all(a1, a1.type == 'Hostname' ? self.exists_one(a2, a2.type == a1.type && a2.value == a1.value) : true )" Addresses []GatewayAddress `json:"addresses,omitempty"` + + // Infrastructure defines infrastructure level attributes about this Gateway instance. 
+ //
+ // Support: Core
+ //
+ //
+ // +optional
+ Infrastructure *GatewayInfrastructure `json:"infrastructure,omitempty"`
 }
 
 // Listener embodies the concept of a logical endpoint where a Gateway accepts
@@ -308,18 +376,19 @@ const (
 
 // GatewayTLSConfig describes a TLS configuration.
 //
-// +kubebuilder:validation:XValidation:message="certificateRefs must be specified when TLSModeType is Terminate",rule="self.mode == 'Terminate' ? size(self.certificateRefs) > 0 : true"
+// +kubebuilder:validation:XValidation:message="certificateRefs or options must be specified when mode is Terminate",rule="self.mode == 'Terminate' ? size(self.certificateRefs) > 0 || size(self.options) > 0 : true"
 type GatewayTLSConfig struct {
 	// Mode defines the TLS behavior for the TLS session initiated by the client.
 	// There are two possible modes:
 	//
-	// - Terminate: The TLS session between the downstream client
-	//   and the Gateway is terminated at the Gateway. This mode requires
-	//   certificateRefs to be set and contain at least one element.
+	// - Terminate: The TLS session between the downstream client and the
+	//   Gateway is terminated at the Gateway. This mode requires certificates
+	//   to be specified in some way, such as populating the certificateRefs
+	//   field.
 	// - Passthrough: The TLS session is NOT terminated by the Gateway. This
 	//   implies that the Gateway can't decipher the TLS stream except for
-	//   the ClientHello message of the TLS protocol.
-	//   CertificateRefs field is ignored in this mode.
+	//   the ClientHello message of the TLS protocol. The certificateRefs field
+	//   is ignored in this mode.
 	//
 	// Support: Core
 	//
@@ -356,6 +425,18 @@ type GatewayTLSConfig struct {
 	// +kubebuilder:validation:MaxItems=64
 	CertificateRefs []SecretObjectReference `json:"certificateRefs,omitempty"`
 
+ // FrontendValidation holds configuration information for validating the frontend (client).
+ // Setting this field will require clients to send a client certificate
+ // for validation during the TLS handshake. In browsers this may result in a dialog appearing
+ // that requests a user to specify the client certificate.
+ // The maximum depth of a certificate chain accepted in verification is implementation-specific.
+ //
+ // Support: Extended
+ //
+ // +optional
+ //
+ FrontendValidation *FrontendTLSValidation `json:"frontendValidation,omitempty"`
+
 	// Options are a list of key/value pairs to enable extended TLS
 	// configuration for each implementation. For example, configuring the
 	// minimum TLS version or supported cipher suites.
@@ -390,6 +471,36 @@ const (
 	TLSModePassthrough TLSModeType = "Passthrough"
 )
 
+// FrontendTLSValidation holds configuration information that can be used to validate
+// the frontend initiating the TLS connection.
+type FrontendTLSValidation struct {
+ // CACertificateRefs contains one or more references to
+ // Kubernetes objects that contain TLS certificates of
+ // the Certificate Authorities that can be used
+ // as a trust anchor to validate the certificates presented by the client.
+ //
+ // A single CA certificate reference to a Kubernetes ConfigMap
+ // has "Core" support.
+ // Implementations MAY choose to support attaching multiple CA certificates to
+ // a Listener, but this behavior is implementation-specific.
+ //
+ // Support: Core - A single reference to a Kubernetes ConfigMap
+ // with the CA certificate in a key named `ca.crt`.
+ //
+ // Support: Implementation-specific (More than one reference, or other kinds
+ // of resources).
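+ //
+ // An illustrative sketch only (the Secret and ConfigMap names are
+ // hypothetical) of a Terminate-mode Listener that also validates client
+ // certificates against a CA stored in a ConfigMap:
+ //
+ // ```
+ // tls:
+ //   mode: Terminate
+ //   certificateRefs:
+ //   - name: server-cert
+ //   frontendValidation:
+ //     caCertificateRefs:
+ //     - group: ""
+ //       kind: ConfigMap
+ //       name: client-ca   # expected to carry the CA in a key named ca.crt
+ // ```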
+ //
+ // References to a resource in a different namespace are invalid UNLESS there
+ // is a ReferenceGrant in the target namespace that allows the certificate
+ // to be attached. If a ReferenceGrant does not allow this reference, the
+ // "ResolvedRefs" condition MUST be set to False for this listener with the
+ // "RefNotPermitted" reason.
+ //
+ // +kubebuilder:validation:MaxItems=8
+ // +kubebuilder:validation:MinItems=1
+ CACertificateRefs []ObjectReference `json:"caCertificateRefs,omitempty"`
+}
+
 // AllowedRoutes defines which Routes may be attached to this Listener.
 type AllowedRoutes struct {
 	// Namespaces indicates namespaces from which Routes may be attached to this
@@ -493,7 +604,7 @@ type GatewayAddress struct {
 	Value string `json:"value"`
 }
 
-// GatewayStatusAddress describes an address that is bound to a Gateway.
+// GatewayStatusAddress describes a network address that is bound to a Gateway.
 //
 // +kubebuilder:validation:XValidation:message="Hostname value must only contain valid characters (matching ^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$)",rule="self.type == 'Hostname' ? self.value.matches(r\"\"\"^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\"\"\"): true"
 type GatewayStatusAddress struct {
@@ -515,10 +626,15 @@ type GatewayStatusAddress struct {
 
 // GatewayStatus defines the observed state of Gateway.
 type GatewayStatus struct {
-	// Addresses lists the IP addresses that have actually been
-	// bound to the Gateway. These addresses may differ from the
-	// addresses in the Spec, e.g. if the Gateway automatically
-	// assigns an address from a reserved pool.
+ // Addresses lists the network addresses that have been bound to the
+ // Gateway.
+ //
+ // This list may differ from the addresses provided in the spec under some
+ // conditions:
+ //
+ // * no addresses are specified, all addresses are dynamically assigned
+ // * a combination of specified and dynamic addresses are assigned
+ // * a specified address was unusable (e.g. already in use)
 	//
 	// +optional
 	//
@@ -554,6 +670,66 @@ type GatewayStatus struct {
 	Listeners []ListenerStatus `json:"listeners,omitempty"`
 }
 
+// GatewayInfrastructure defines infrastructure level attributes about a Gateway instance.
+type GatewayInfrastructure struct {
+ // Labels that SHOULD be applied to any resources created in response to this Gateway.
+ //
+ // For implementations creating other Kubernetes objects, this should be the `metadata.labels` field on resources.
+ // For other implementations, this refers to any relevant (implementation specific) "labels" concepts.
+ //
+ // An implementation may choose to add additional implementation-specific labels as they see fit.
+ //
+ // Support: Extended
+ //
+ // +optional
+ // +kubebuilder:validation:MaxProperties=8
+ Labels map[AnnotationKey]AnnotationValue `json:"labels,omitempty"`
+
+ // Annotations that SHOULD be applied to any resources created in response to this Gateway.
+ //
+ // For implementations creating other Kubernetes objects, this should be the `metadata.annotations` field on resources.
+ // For other implementations, this refers to any relevant (implementation specific) "annotations" concepts.
+ //
+ // An implementation may choose to add additional implementation-specific annotations as they see fit.
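+ //
+ // An illustrative sketch (the label and annotation keys are hypothetical);
+ // an implementation that provisions a Deployment and a Service for this
+ // Gateway would copy these values onto the metadata of both objects:
+ //
+ // ```
+ // infrastructure:
+ //   labels:
+ //     app.example.com/team: platform
+ //   annotations:
+ //     networking.example.com/lb-type: internal
+ // ```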
+ //
+ // Support: Extended
+ //
+ // +optional
+ // +kubebuilder:validation:MaxProperties=8
+ Annotations map[AnnotationKey]AnnotationValue `json:"annotations,omitempty"`
+
+ // ParametersRef is a reference to a resource that contains the configuration
+ // parameters corresponding to the Gateway. This is optional if the
+ // controller does not require any additional configuration.
+ //
+ // This follows the same semantics as GatewayClass's `parametersRef`, but on a per-Gateway basis.
+ //
+ // The Gateway's GatewayClass may provide its own `parametersRef`. When both are specified,
+ // the merging behavior is implementation-specific.
+ // It is generally recommended that GatewayClass provides defaults that can be overridden by a Gateway.
+ //
+ // Support: Implementation-specific
+ //
+ // +optional
+ ParametersRef *LocalParametersReference `json:"parametersRef,omitempty"`
+}
+
+// LocalParametersReference identifies an API object containing a controller-specific
+// configuration resource within the namespace.
+type LocalParametersReference struct {
+ // Group is the group of the referent.
+ Group Group `json:"group"`
+
+ // Kind is the kind of the referent.
+ Kind Kind `json:"kind"`
+
+ // Name is the name of the referent.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=253
+ Name string `json:"name"`
+}
+
 // GatewayConditionType is a type of condition associated with a
 // Gateway. This type should be used with the GatewayStatus.Conditions
 // field.
@@ -599,8 +775,10 @@ const (
 	// true.
 	GatewayReasonProgrammed GatewayConditionReason = "Programmed"
 
-	// This reason is used with the "Programmed" and "Accepted" conditions when the Gateway is
-	// syntactically or semantically invalid.
+ // This reason is used with the "Programmed" and "Accepted" conditions when
+ // the Gateway is syntactically or semantically invalid. For example, this
+ // could include unspecified TLS configuration, or some unrecognized or
+ // invalid values in the TLS configuration.
 	GatewayReasonInvalid GatewayConditionReason = "Invalid"
 
 	// This reason is used with the "Programmed" condition when the
@@ -608,11 +786,34 @@ const (
 	// resources are available.
 	GatewayReasonNoResources GatewayConditionReason = "NoResources"
 
-	// This reason is used with the "Programmed" condition when none of the requested
-	// addresses have been assigned to the Gateway. This reason can be used to
-	// express a range of circumstances, including (but not limited to) IPAM
-	// address exhaustion, address not yet allocated, or a named address not being found.
+ // This reason is used with the "Programmed" condition when the underlying
+ // implementation and network have yet to dynamically assign addresses for a
+ // Gateway.
+ //
+ // Some example situations where this reason can be used:
+ //
+ // * IPAM address exhaustion
+ // * Address not yet allocated
+ //
+ // When this reason is used the implementation SHOULD provide a clear
+ // message explaining the underlying problem, ideally with some hints as to
+ // what actions can be taken that might resolve the problem.
 	GatewayReasonAddressNotAssigned GatewayConditionReason = "AddressNotAssigned"
+
+ // This reason is used with the "Programmed" condition when the underlying
+ // implementation (and possibly, network) are unable to use an address that
+ // was provided in the Gateway specification.
+ // + // Some example situations where this reason can be used: + // + // * a named address not being found + // * a provided static address can't be used + // * the address is already in use + // + // When this reason is used the implementation SHOULD provide prescriptive + // information on which address is causing the problem and how to resolve it + // in the condition message. + GatewayReasonAddressNotUsable GatewayConditionReason = "AddressNotUsable" ) const ( @@ -629,6 +830,7 @@ const ( // Possible reasons for this condition to be False are: // // * "Invalid" + // * "InvalidParameters" // * "NotReconciled" // * "UnsupportedAddress" // * "ListenersNotValid" @@ -658,13 +860,15 @@ const ( // the Gateway. GatewayReasonPending GatewayConditionReason = "Pending" - // This reason is used with the "Accepted" condition when the Gateway could not be configured - // because the requested address is not supported. This reason could be used in a number of - // instances, including: - // - // * The address is already in use. - // * The type of address is not supported by the implementation. + // This reason is used with the "Accepted" condition to indicate that the + // Gateway could not be accepted because an address that was provided is a + // type which is not supported by the implementation. GatewayReasonUnsupportedAddress GatewayConditionReason = "UnsupportedAddress" + + // This reason is used with the "Accepted" condition when the + // Gateway was not accepted because the parametersRef field + // was invalid, with more detail in the message. + GatewayReasonInvalidParameters GatewayConditionReason = "InvalidParameters" ) const ( @@ -721,8 +925,23 @@ type ListenerStatus struct { // +kubebuilder:validation:MaxItems=8 SupportedKinds []RouteGroupKind `json:"supportedKinds"` - // AttachedRoutes represents the total number of accepted Routes that have been + // AttachedRoutes represents the total number of Routes that have been // successfully attached to this Listener. + // + // Successful attachment of a Route to a Listener is based solely on the + // combination of the AllowedRoutes field on the corresponding Listener + // and the Route's ParentRefs field. A Route is successfully attached to + // a Listener when it is selected by the Listener's AllowedRoutes field + // AND the Route has a valid ParentRef selecting the whole Gateway + // resource or a specific Listener as a parent resource (more detail on + // attachment semantics can be found in the documentation on the various + // Route kinds ParentRefs fields). Listener or Route status does not impact + // successful attachment, i.e. the AttachedRoutes field count MUST be set + // for Listeners with condition Accepted: false and MUST count successfully + // attached Routes that may themselves have Accepted: false conditions. + // + // Uses for this field include troubleshooting Route attachment and + // measuring blast radius/impact of changes to a Listener. AttachedRoutes int32 `json:"attachedRoutes"` // Conditions describe the current condition of this listener. 
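For illustration, a minimal sketch of how AttachedRoutes surfaces in a
Gateway's status (the listener name, kinds, and count are hypothetical);
note that the count is populated even though the Listener is not Accepted:

    status:
      listeners:
      - name: https
        supportedKinds:
        - group: gateway.networking.k8s.io
          kind: HTTPRoute
        attachedRoutes: 2
        conditions:
        - type: Accepted
          status: "False"
          reason: UnsupportedProtocol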
diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/gatewayclass_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types.go
similarity index 74%
rename from vendor/sigs.k8s.io/gateway-api/apis/v1beta1/gatewayclass_types.go
rename to vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types.go
index 9d166700d02..21875dce196 100644
--- a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/gatewayclass_types.go
+++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package v1beta1
+package v1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -24,8 +24,8 @@ import (
 // +genclient:nonNamespaced
 // +kubebuilder:object:root=true
 // +kubebuilder:resource:categories=gateway-api,scope=Cluster,shortName=gc
-// +kubebuilder:storageversion
 // +kubebuilder:subresource:status
+// +kubebuilder:storageversion
 // +kubebuilder:printcolumn:name="Controller",type=string,JSONPath=`.spec.controllerName`
 // +kubebuilder:printcolumn:name="Accepted",type=string,JSONPath=`.status.conditions[?(@.type=="Accepted")].status`
 // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
@@ -96,6 +96,10 @@ type GatewayClassSpec struct {
 	// If the referent cannot be found, the GatewayClass's "InvalidParameters"
 	// status condition will be true.
 	//
+ // A Gateway for this GatewayClass may provide its own `parametersRef`. When both are specified,
+ // the merging behavior is implementation-specific.
+ // It is generally recommended that GatewayClass provides defaults that can be overridden by a Gateway.
+ //
 	// Support: Implementation-specific
 	//
 	// +optional
@@ -158,6 +162,7 @@ const (
 	// Possible reasons for this condition to be False are:
 	//
 	// * "InvalidParameters"
+ // * "UnsupportedVersion"
 	//
 	// Possible reasons for this condition to be Unknown are:
 	//
@@ -186,6 +191,51 @@ const (
 	GatewayClassReasonWaiting GatewayClassConditionReason = "Waiting"
 )
 
+const (
+ // This condition indicates whether the GatewayClass supports the version(s)
+ // of Gateway API CRDs present in the cluster. This condition MUST be set by
+ // a controller when it marks a GatewayClass "Accepted".
+ //
+ // The version of a Gateway API CRD is defined by the
+ // gateway.networking.k8s.io/bundle-version annotation on the CRD. If
+ // implementations detect any Gateway API CRDs that either do not have this
+ // annotation set, or have it set to a version that is not recognized or
+ // supported by the implementation, this condition MUST be set to false.
+ //
+ // Implementations MAY choose to provide "best effort" support when
+ // an unrecognized CRD version is present. This would be communicated by
+ // setting the "Accepted" condition to true and the "SupportedVersion"
+ // condition to false.
+ //
+ // Alternatively, implementations MAY choose not to support CRDs with
+ // unrecognized versions. This would be communicated by setting the
+ // "Accepted" condition to false with the reason "UnsupportedVersion".
+ //
+ // Possible reasons for this condition to be True are:
+ //
+ // * "SupportedVersion"
+ //
+ // Possible reasons for this condition to be False are:
+ //
+ // * "UnsupportedVersion"
+ //
+ // Controllers should prefer to use the values of GatewayClassConditionReason
+ // for the corresponding Reason, where appropriate.
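+ //
+ // An illustrative sketch of the "best effort" case described above (the
+ // version numbers in the message are hypothetical):
+ //
+ // ```
+ // status:
+ //   conditions:
+ //   - type: Accepted
+ //     status: "True"
+ //     reason: Accepted
+ //   - type: SupportedVersion
+ //     status: "False"
+ //     reason: UnsupportedVersion
+ //     message: detected bundle-version v1.99.0, supported versions are v1.0.x
+ // ```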
+ //
+ //
+ GatewayClassConditionStatusSupportedVersion GatewayClassConditionType = "SupportedVersion"
+
+ // This reason is used with the "SupportedVersion" condition when the
+ // condition is true.
+ GatewayClassReasonSupportedVersion GatewayClassConditionReason = "SupportedVersion"
+
+ // This reason is used with the "SupportedVersion" or "Accepted" condition
+ // when the condition is false. A message SHOULD be included in this
+ // condition that includes the detected CRD version(s) present in the
+ // cluster and the CRD version(s) that are supported by the GatewayClass.
+ GatewayClassReasonUnsupportedVersion GatewayClassConditionReason = "UnsupportedVersion"
+)
+
 // GatewayClassStatus is the current status for the GatewayClass.
 type GatewayClassStatus struct {
 	// Conditions is the current status from the controller for
@@ -200,6 +250,14 @@ type GatewayClassStatus struct {
 	// +kubebuilder:validation:MaxItems=8
 	// +kubebuilder:default={{type: "Accepted", status: "Unknown", message: "Waiting for controller", reason: "Pending", lastTransitionTime: "1970-01-01T00:00:00Z"}}
 	Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+ // SupportedFeatures is the set of features the GatewayClass supports.
+ // It MUST be sorted in ascending alphabetical order.
+ // +optional
+ // +listType=set
+ //
+ // +kubebuilder:validation:MaxItems=64
+ SupportedFeatures []SupportedFeature `json:"supportedFeatures,omitempty"`
 }
 
 // +kubebuilder:object:root=true
@@ -210,3 +268,7 @@ type GatewayClassList struct {
 	metav1.ListMeta `json:"metadata,omitempty"`
 	Items []GatewayClass `json:"items"`
 }
+
+// SupportedFeature is used to describe distinct features that are covered by
+// conformance tests.
+type SupportedFeature string
diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go
new file mode 100644
index 00000000000..91a8a3d2685
--- /dev/null
+++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go
@@ -0,0 +1,627 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:categories=gateway-api
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+// +kubebuilder:printcolumn:name="Hostnames",type=string,JSONPath=`.spec.hostnames`
+// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
+
+// GRPCRoute provides a way to route gRPC requests. This includes the capability
+// to match requests by hostname, gRPC service, gRPC method, or HTTP/2 header.
+// Filters can be used to specify additional processing steps. Backends specify
+// where matching requests will be routed.
+//
+// GRPCRoute falls under extended support within the Gateway API.
Within the +// following specification, the word "MUST" indicates that an implementation +// supporting GRPCRoute must conform to the indicated requirement, but an +// implementation not supporting this route type need not follow the requirement +// unless explicitly indicated. +// +// Implementations supporting `GRPCRoute` with the `HTTPS` `ProtocolType` MUST +// accept HTTP/2 connections without an initial upgrade from HTTP/1.1, i.e. via +// ALPN. If the implementation does not support this, then it MUST set the +// "Accepted" condition to "False" for the affected listener with a reason of +// "UnsupportedProtocol". Implementations MAY also accept HTTP/2 connections +// with an upgrade from HTTP/1. +// +// Implementations supporting `GRPCRoute` with the `HTTP` `ProtocolType` MUST +// support HTTP/2 over cleartext TCP (h2c, +// https://www.rfc-editor.org/rfc/rfc7540#section-3.1) without an initial +// upgrade from HTTP/1.1, i.e. with prior knowledge +// (https://www.rfc-editor.org/rfc/rfc7540#section-3.4). If the implementation +// does not support this, then it MUST set the "Accepted" condition to "False" +// for the affected listener with a reason of "UnsupportedProtocol". +// Implementations MAY also accept HTTP/2 connections with an upgrade from +// HTTP/1, i.e. without prior knowledge. +type GRPCRoute struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of GRPCRoute. + Spec GRPCRouteSpec `json:"spec,omitempty"` + + // Status defines the current state of GRPCRoute. + Status GRPCRouteStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GRPCRouteList contains a list of GRPCRoute. +type GRPCRouteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GRPCRoute `json:"items"` +} + +// GRPCRouteStatus defines the observed state of GRPCRoute. +type GRPCRouteStatus struct { + RouteStatus `json:",inline"` +} + +// GRPCRouteSpec defines the desired state of GRPCRoute +type GRPCRouteSpec struct { + CommonRouteSpec `json:",inline"` + + // Hostnames defines a set of hostnames to match against the GRPC + // Host header to select a GRPCRoute to process the request. This matches + // the RFC 1123 definition of a hostname with 2 notable exceptions: + // + // 1. IPs are not allowed. + // 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard + // label MUST appear by itself as the first label. + // + // If a hostname is specified by both the Listener and GRPCRoute, there + // MUST be at least one intersecting hostname for the GRPCRoute to be + // attached to the Listener. For example: + // + // * A Listener with `test.example.com` as the hostname matches GRPCRoutes + // that have either not specified any hostnames, or have specified at + // least one of `test.example.com` or `*.example.com`. + // * A Listener with `*.example.com` as the hostname matches GRPCRoutes + // that have either not specified any hostnames or have specified at least + // one hostname that matches the Listener hostname. For example, + // `test.example.com` and `*.example.com` would both match. On the other + // hand, `example.com` and `test.example.net` would not match. + // + // Hostnames that are prefixed with a wildcard label (`*.`) are interpreted + // as a suffix match. That means that a match for `*.example.com` would match + // both `test.example.com`, and `foo.test.example.com`, but not `example.com`. 
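+ //
+ // An illustrative GRPCRoute sketch (the route, Gateway, and Service names
+ // are hypothetical) using a wildcard hostname:
+ //
+ // ```
+ // apiVersion: gateway.networking.k8s.io/v1
+ // kind: GRPCRoute
+ // metadata:
+ //   name: grpc-example
+ // spec:
+ //   parentRefs:
+ //   - name: example-gateway
+ //   hostnames:
+ //   - "*.example.com"   # matches test.example.com, but not example.com
+ //   rules:
+ //   - backendRefs:
+ //     - name: grpc-backend
+ //       port: 9000
+ // ```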
+ //
+ // If both the Listener and GRPCRoute have specified hostnames, any
+ // GRPCRoute hostnames that do not match the Listener hostname MUST be
+ // ignored. For example, if a Listener specified `*.example.com`, and the
+ // GRPCRoute specified `test.example.com` and `test.example.net`,
+ // `test.example.net` MUST NOT be considered for a match.
+ //
+ // If both the Listener and GRPCRoute have specified hostnames, and none
+ // match with the criteria above, then the GRPCRoute MUST NOT be accepted by
+ // the implementation. The implementation MUST raise an 'Accepted' Condition
+ // with a status of `False` in the corresponding RouteParentStatus.
+ //
+ // If a Route (A) of type HTTPRoute or GRPCRoute is attached to a
+ // Listener and that listener already has another Route (B) of the other
+ // type attached and the intersection of the hostnames of A and B is
+ // non-empty, then the implementation MUST accept exactly one of these two
+ // routes, determined by the following criteria, in order:
+ //
+ // * The oldest Route based on creation timestamp.
+ // * The Route appearing first in alphabetical order by
+ //   "{namespace}/{name}".
+ //
+ // The rejected Route MUST raise an 'Accepted' condition with a status of
+ // 'False' in the corresponding RouteParentStatus.
+ //
+ // Support: Core
+ //
+ // +optional
+ // +kubebuilder:validation:MaxItems=16
+ Hostnames []Hostname `json:"hostnames,omitempty"`
+
+ // Rules are a list of GRPC matchers, filters, and actions.
+ //
+ // +optional
+ // +kubebuilder:validation:MaxItems=16
+ Rules []GRPCRouteRule `json:"rules,omitempty"`
+}
+
+// GRPCRouteRule defines the semantics for matching a gRPC request based on
+// conditions (matches), processing it (filters), and forwarding the request to
+// an API object (backendRefs).
+type GRPCRouteRule struct {
+ // Matches define conditions used for matching the rule against incoming
+ // gRPC requests. Each match is independent, i.e. this rule will be matched
+ // if **any** one of the matches is satisfied.
+ //
+ // For example, take the following matches configuration:
+ //
+ // ```
+ // matches:
+ // - method:
+ //     service: foo.bar
+ //   headers:
+ //     values:
+ //       version: 2
+ // - method:
+ //     service: foo.bar.v2
+ // ```
+ //
+ // For a request to match against this rule, it MUST satisfy
+ // EITHER of the two conditions:
+ //
+ // - service of foo.bar AND contains the header `version: 2`
+ // - service of foo.bar.v2
+ //
+ // See the documentation for GRPCRouteMatch on how to specify multiple
+ // match conditions to be ANDed together.
+ //
+ // If no matches are specified, the implementation MUST match every gRPC request.
+ //
+ // Proxy or Load Balancer routing configuration generated from GRPCRoutes
+ // MUST prioritize rules based on the following criteria, continuing on
+ // ties. Merging MUST NOT be done between GRPCRoutes and HTTPRoutes.
+ // Precedence MUST be given to the rule with the largest number of:
+ //
+ // * Characters in a matching non-wildcard hostname.
+ // * Characters in a matching hostname.
+ // * Characters in a matching service.
+ // * Characters in a matching method.
+ // * Header matches.
+ //
+ // If ties still exist across multiple Routes, matching precedence MUST be
+ // determined in order of the following criteria, continuing on ties:
+ //
+ // * The oldest Route based on creation timestamp.
+ // * The Route appearing first in alphabetical order by
+ //   "{namespace}/{name}".
+ //
+ // If ties still exist within the Route that has been given precedence,
+ // matching precedence MUST be granted to the first matching rule meeting
+ // the above criteria.
+ //
+ // +optional
+ // +kubebuilder:validation:MaxItems=8
+ Matches []GRPCRouteMatch `json:"matches,omitempty"`
+
+ // Filters define the filters that are applied to requests that match
+ // this rule.
+ //
+ // The effects of ordering of multiple behaviors are currently unspecified.
+ // This can change in the future based on feedback during the alpha stage.
+ //
+ // Conformance-levels at this level are defined based on the type of filter:
+ //
+ // - ALL core filters MUST be supported by all implementations that support
+ //   GRPCRoute.
+ // - Implementers are encouraged to support extended filters.
+ // - Implementation-specific custom filters have no API guarantees across
+ //   implementations.
+ //
+ // Specifying the same filter multiple times is not supported unless explicitly
+ // indicated in the filter.
+ //
+ // If an implementation cannot support a combination of filters, it must clearly
+ // document that limitation. In cases where incompatible or unsupported
+ // filters are specified and cause the `Accepted` condition to be set to status
+ // `False`, implementations may use the `IncompatibleFilters` reason to specify
+ // this configuration error.
+ //
+ // Support: Core
+ //
+ // +optional
+ // +kubebuilder:validation:MaxItems=16
+ // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1"
+ // +kubebuilder:validation:XValidation:message="ResponseHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1"
+ Filters []GRPCRouteFilter `json:"filters,omitempty"`
+
+ // BackendRefs defines the backend(s) where matching requests should be
+ // sent.
+ //
+ // Failure behavior here depends on how many BackendRefs are specified and
+ // how many are invalid.
+ //
+ // If *all* entries in BackendRefs are invalid, and there are also no filters
+ // specified in this route rule, *all* traffic which matches this rule MUST
+ // receive an `UNAVAILABLE` status.
+ //
+ // See the GRPCBackendRef definition for the rules about what makes a single
+ // GRPCBackendRef invalid.
+ //
+ // When a GRPCBackendRef is invalid, `UNAVAILABLE` statuses MUST be returned for
+ // requests that would have otherwise been routed to an invalid backend. If
+ // multiple backends are specified, and some are invalid, the proportion of
+ // requests that would otherwise have been routed to an invalid backend
+ // MUST receive an `UNAVAILABLE` status.
+ //
+ // For example, if two backends are specified with equal weights, and one is
+ // invalid, 50 percent of traffic MUST receive an `UNAVAILABLE` status.
+ // Implementations may choose how that 50 percent is determined.
+ //
+ // Support: Core for Kubernetes Service
+ //
+ // Support: Implementation-specific for any other resource
+ //
+ // Support for weight: Core
+ //
+ // +optional
+ // +kubebuilder:validation:MaxItems=16
+ BackendRefs []GRPCBackendRef `json:"backendRefs,omitempty"`
+
+ // SessionPersistence defines and configures session persistence
+ // for the route rule.
+ //
+ // Support: Extended
+ //
+ // +optional
+ //
+ SessionPersistence *SessionPersistence `json:"sessionPersistence,omitempty"`
+}
+
+// GRPCRouteMatch defines the predicate used to match requests to a given
+// action.
Multiple match types are ANDed together, i.e. the match will
+// evaluate to true only if all conditions are satisfied.
+//
+// For example, the match below will match a gRPC request only if its service
+// is `foo` AND it contains the `version: v1` header:
+//
+// ```
+// matches:
+// - method:
+//     type: Exact
+//     service: "foo"
+//   headers:
+//   - name: "version"
+//     value: "v1"
+//
+// ```
+type GRPCRouteMatch struct {
+ // Method specifies a gRPC request service/method matcher. If this field is
+ // not specified, all services and methods will match.
+ //
+ // +optional
+ Method *GRPCMethodMatch `json:"method,omitempty"`
+
+ // Headers specifies gRPC request header matchers. Multiple match values are
+ // ANDed together, meaning a request MUST match all the specified headers
+ // to select the route.
+ //
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ // +kubebuilder:validation:MaxItems=16
+ Headers []GRPCHeaderMatch `json:"headers,omitempty"`
+}
+
+// GRPCMethodMatch describes how to select a gRPC route by matching the gRPC
+// request service and/or method.
+//
+// At least one of Service and Method MUST be a non-empty string.
+//
+// +kubebuilder:validation:XValidation:message="One or both of 'service' or 'method' must be specified",rule="has(self.type) ? has(self.service) || has(self.method) : true"
+// +kubebuilder:validation:XValidation:message="service must only contain valid characters (matching ^(?i)\\.?[a-z_][a-z_0-9]*(\\.[a-z_][a-z_0-9]*)*$)",rule="(!has(self.type) || self.type == 'Exact') && has(self.service) ? self.service.matches(r\"\"\"^(?i)\\.?[a-z_][a-z_0-9]*(\\.[a-z_][a-z_0-9]*)*$\"\"\"): true"
+// +kubebuilder:validation:XValidation:message="method must only contain valid characters (matching ^[A-Za-z_][A-Za-z_0-9]*$)",rule="(!has(self.type) || self.type == 'Exact') && has(self.method) ? self.method.matches(r\"\"\"^[A-Za-z_][A-Za-z_0-9]*$\"\"\"): true"
+type GRPCMethodMatch struct {
+ // Type specifies how to match against the service and/or method.
+ // Support: Core (Exact with service and method specified)
+ //
+ // Support: Implementation-specific (Exact with method specified but no service specified)
+ //
+ // Support: Implementation-specific (RegularExpression)
+ //
+ // +optional
+ // +kubebuilder:default=Exact
+ Type *GRPCMethodMatchType `json:"type,omitempty"`
+
+ // Value of the service to match against. If left empty or omitted, will
+ // match any service.
+ //
+ // At least one of Service and Method MUST be a non-empty string.
+ //
+ // +optional
+ // +kubebuilder:validation:MaxLength=1024
+ Service *string `json:"service,omitempty"`
+
+ // Value of the method to match against. If left empty or omitted, will
+ // match any method.
+ //
+ // At least one of Service and Method MUST be a non-empty string.
+ //
+ // +optional
+ // +kubebuilder:validation:MaxLength=1024
+ Method *string `json:"method,omitempty"`
+}
+
+// GRPCMethodMatchType specifies the semantics of how gRPC methods and services are compared.
+// Valid GRPCMethodMatchType values, along with their conformance levels, are:
+//
+// * "Exact" - Core
+// * "RegularExpression" - Implementation Specific
+//
+// Exact methods MUST be syntactically valid:
+//
+// - Must not contain `/` character
+//
+// +kubebuilder:validation:Enum=Exact;RegularExpression
+type GRPCMethodMatchType string
+
+const (
+ // Matches the method or service exactly and with case sensitivity.
+ GRPCMethodMatchExact GRPCMethodMatchType = "Exact"
+
+ // Matches if the method or service matches the given regular expression with
+ // case sensitivity.
+ //
+ // Since `"RegularExpression"` has implementation-specific conformance,
+ // implementations can support POSIX, PCRE, RE2 or any other regular expression
+ // dialect.
+ // Please read the implementation's documentation to determine the supported
+ // dialect.
+ GRPCMethodMatchRegularExpression GRPCMethodMatchType = "RegularExpression"
+)
+
+// GRPCHeaderMatch describes how to select a gRPC route by matching gRPC request
+// headers.
+type GRPCHeaderMatch struct {
+ // Type specifies how to match against the value of the header.
+ //
+ // +optional
+ // +kubebuilder:default=Exact
+ Type *HeaderMatchType `json:"type,omitempty"`
+
+ // Name is the name of the gRPC Header to be matched.
+ //
+ // If multiple entries specify equivalent header names, only the first
+ // entry with an equivalent name MUST be considered for a match. Subsequent
+ // entries with an equivalent header name MUST be ignored. Due to the
+ // case-insensitivity of header names, "foo" and "Foo" are considered
+ // equivalent.
+ Name GRPCHeaderName `json:"name"`
+
+ // Value is the value of the gRPC Header to be matched.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=4096
+ Value string `json:"value"`
+}
+
+// GRPCHeaderMatchType specifies the semantics of how GRPC header values should
+// be compared. Valid GRPCHeaderMatchType values, along with their conformance
+// levels, are:
+//
+// * "Exact" - Core
+// * "RegularExpression" - Implementation Specific
+//
+// Note that new values may be added to this enum in future releases of the API;
+// implementations MUST ensure that unknown values will not cause a crash.
+//
+// Unknown values here MUST result in the implementation setting the Accepted
+// Condition for the Route to `status: False`, with a Reason of
+// `UnsupportedValue`.
+//
+// +kubebuilder:validation:Enum=Exact;RegularExpression
+type GRPCHeaderMatchType string
+
+// GRPCHeaderMatchType constants.
+const (
+ GRPCHeaderMatchExact GRPCHeaderMatchType = "Exact"
+ GRPCHeaderMatchRegularExpression GRPCHeaderMatchType = "RegularExpression"
+)
+
+type GRPCHeaderName HeaderName
+
+// GRPCRouteFilterType identifies a type of GRPCRoute filter.
+type GRPCRouteFilterType string
+
+const (
+ // GRPCRouteFilterRequestHeaderModifier can be used to add or remove a gRPC
+ // header from a gRPC request before it is sent to the upstream target.
+ //
+ // Support in GRPCRouteRule: Core
+ //
+ // Support in GRPCBackendRef: Extended
+ GRPCRouteFilterRequestHeaderModifier GRPCRouteFilterType = "RequestHeaderModifier"
+
+ // GRPCRouteFilterResponseHeaderModifier can be used to add or remove a gRPC
+ // header from a gRPC response before it is sent to the client.
+ //
+ // Support in GRPCRouteRule: Core
+ //
+ // Support in GRPCBackendRef: Extended
+ GRPCRouteFilterResponseHeaderModifier GRPCRouteFilterType = "ResponseHeaderModifier"
+
+ // GRPCRouteFilterRequestMirror can be used to mirror gRPC requests to a
+ // different backend. The responses from this backend MUST be ignored by
+ // the Gateway.
+ //
+ // Support in GRPCRouteRule: Extended
+ //
+ // Support in GRPCBackendRef: Extended
+ GRPCRouteFilterRequestMirror GRPCRouteFilterType = "RequestMirror"
+
+ // GRPCRouteFilterExtensionRef should be used for configuring custom
+ // gRPC filters.
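+ //
+ // An illustrative sketch (the group, kind, and name are hypothetical,
+ // echoing the ExtensionRef example further below):
+ //
+ // ```
+ // filters:
+ // - type: ExtensionRef
+ //   extensionRef:
+ //     group: networking.example.net
+ //     kind: MyRouteFilter
+ //     name: myroutefilter
+ // ```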
+ // + // Support in GRPCRouteRule: Implementation-specific + // + // Support in GRPCBackendRef: Implementation-specific + GRPCRouteFilterExtensionRef GRPCRouteFilterType = "ExtensionRef" +) + +// GRPCRouteFilter defines processing steps that must be completed during the +// request or response lifecycle. GRPCRouteFilters are meant as an extension +// point to express processing that may be done in Gateway implementations. Some +// examples include request or response modification, implementing +// authentication strategies, rate-limiting, and traffic shaping. API +// guarantee/conformance is defined based on the type of the filter. +// +// +kubebuilder:validation:XValidation:message="filter.requestHeaderModifier must be nil if the filter.type is not RequestHeaderModifier",rule="!(has(self.requestHeaderModifier) && self.type != 'RequestHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.requestHeaderModifier must be specified for RequestHeaderModifier filter.type",rule="!(!has(self.requestHeaderModifier) && self.type == 'RequestHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.responseHeaderModifier must be nil if the filter.type is not ResponseHeaderModifier",rule="!(has(self.responseHeaderModifier) && self.type != 'ResponseHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.responseHeaderModifier must be specified for ResponseHeaderModifier filter.type",rule="!(!has(self.responseHeaderModifier) && self.type == 'ResponseHeaderModifier')" +// +kubebuilder:validation:XValidation:message="filter.requestMirror must be nil if the filter.type is not RequestMirror",rule="!(has(self.requestMirror) && self.type != 'RequestMirror')" +// +kubebuilder:validation:XValidation:message="filter.requestMirror must be specified for RequestMirror filter.type",rule="!(!has(self.requestMirror) && self.type == 'RequestMirror')" +// +kubebuilder:validation:XValidation:message="filter.extensionRef must be nil if the filter.type is not ExtensionRef",rule="!(has(self.extensionRef) && self.type != 'ExtensionRef')" +// +kubebuilder:validation:XValidation:message="filter.extensionRef must be specified for ExtensionRef filter.type",rule="!(!has(self.extensionRef) && self.type == 'ExtensionRef')" +type GRPCRouteFilter struct { + // Type identifies the type of filter to apply. As with other API fields, + // types are classified into three conformance levels: + // + // - Core: Filter types and their corresponding configuration defined by + // "Support: Core" in this package, e.g. "RequestHeaderModifier". All + // implementations supporting GRPCRoute MUST support core filters. + // + // - Extended: Filter types and their corresponding configuration defined by + // "Support: Extended" in this package, e.g. "RequestMirror". Implementers + // are encouraged to support extended filters. + // + // - Implementation-specific: Filters that are defined and supported by specific vendors. + // In the future, filters showing convergence in behavior across multiple + // implementations will be considered for inclusion in extended or core + // conformance levels. Filter-specific configuration for such filters + // is specified using the ExtensionRef field. `Type` MUST be set to + // "ExtensionRef" for custom filters. + // + // Implementers are encouraged to define custom implementation types to + // extend the core API with implementation-specific behavior. + // + // If a reference to a custom filter type cannot be resolved, the filter + // MUST NOT be skipped. 
Instead, requests that would have been processed by
+ // that filter MUST receive an HTTP error response.
+ //
+ // +unionDiscriminator
+ // +kubebuilder:validation:Enum=ResponseHeaderModifier;RequestHeaderModifier;RequestMirror;ExtensionRef
+ //
+ Type GRPCRouteFilterType `json:"type"`
+
+ // RequestHeaderModifier defines a schema for a filter that modifies request
+ // headers.
+ //
+ // Support: Core
+ //
+ // +optional
+ RequestHeaderModifier *HTTPHeaderFilter `json:"requestHeaderModifier,omitempty"`
+
+ // ResponseHeaderModifier defines a schema for a filter that modifies response
+ // headers.
+ //
+ // Support: Extended
+ //
+ // +optional
+ ResponseHeaderModifier *HTTPHeaderFilter `json:"responseHeaderModifier,omitempty"`
+
+ // RequestMirror defines a schema for a filter that mirrors requests.
+ // Requests are sent to the specified destination, but responses from
+ // that destination are ignored.
+ //
+ // This filter can be used multiple times within the same rule. Note that
+ // not all implementations will be able to support mirroring to multiple
+ // backends.
+ //
+ // Support: Extended
+ //
+ // +optional
+ RequestMirror *HTTPRequestMirrorFilter `json:"requestMirror,omitempty"`
+
+ // ExtensionRef is an optional, implementation-specific extension to the
+ // "filter" behavior. For example, resource "myroutefilter" in group
+ // "networking.example.net". ExtensionRef MUST NOT be used for core and
+ // extended filters.
+ //
+ // Support: Implementation-specific
+ //
+ // This filter can be used multiple times within the same rule.
+ // +optional
+ ExtensionRef *LocalObjectReference `json:"extensionRef,omitempty"`
+}
+
+// GRPCBackendRef defines how a GRPCRoute forwards a gRPC request.
+//
+// Note that when a namespace different than the local namespace is specified, a
+// ReferenceGrant object is required in the referent namespace to allow that
+// namespace's owner to accept the reference. See the ReferenceGrant
+// documentation for details.
+//
+//
+//
+// When the BackendRef points to a Kubernetes Service, implementations SHOULD
+// honor the appProtocol field if it is set for the target Service Port.
+//
+// Implementations supporting appProtocol SHOULD recognize the Kubernetes
+// Standard Application Protocols defined in KEP-3726.
+//
+// If a Service appProtocol isn't specified, an implementation MAY infer the
+// backend protocol through its own means. Implementations MAY infer the
+// protocol from the Route type referring to the backend Service.
+//
+// If a Route is not able to send traffic to the backend using the specified
+// protocol then the backend is considered invalid. Implementations MUST set the
+// "ResolvedRefs" condition to "False" with the "UnsupportedProtocol" reason.
+//
+//
+type GRPCBackendRef struct {
+ // BackendRef is a reference to a backend to forward matched requests to.
+ //
+ // A BackendRef can be invalid for the following reasons. In all cases, the
+ // implementation MUST ensure the `ResolvedRefs` Condition on the Route
+ // is set to `status: False`, with a Reason and Message that indicate
+ // what is the cause of the error.
+ //
+ // A BackendRef is invalid if:
+ //
+ // * It refers to an unknown or unsupported kind of resource. In this
+ //   case, the Reason MUST be set to `InvalidKind` and Message of the
+ //   Condition MUST explain which kind of resource is unknown or unsupported.
+ //
+ // * It refers to a resource that does not exist.
In this case, the Reason MUST
+ //   be set to `BackendNotFound` and the Message of the Condition MUST explain
+ //   which resource does not exist.
+ //
+ // * It refers to a resource in another namespace when the reference has not been
+ //   explicitly allowed by a ReferenceGrant (or equivalent concept). In this
+ //   case, the Reason MUST be set to `RefNotPermitted` and the Message of the
+ //   Condition MUST explain which cross-namespace reference is not allowed.
+ //
+ // Support: Core for Kubernetes Service
+ //
+ // Support: Extended for Kubernetes ServiceImport
+ //
+ // Support: Implementation-specific for any other resource
+ //
+ // Support for weight: Core
+ //
+ // +optional
+ BackendRef `json:",inline"`
+
+ // Filters defined at this level MUST be executed if and only if the
+ // request is being forwarded to the backend defined here.
+ //
+ // Support: Implementation-specific (For broader support of filters, use the
+ // Filters field in GRPCRouteRule.)
+ //
+ // +optional
+ // +kubebuilder:validation:MaxItems=16
+ // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1"
+ // +kubebuilder:validation:XValidation:message="ResponseHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1"
+ Filters []GRPCRouteFilter `json:"filters,omitempty"`
+}
diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/httproute_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/httproute_types.go
similarity index 89%
rename from vendor/sigs.k8s.io/gateway-api/apis/v1beta1/httproute_types.go
rename to vendor/sigs.k8s.io/gateway-api/apis/v1/httproute_types.go
index e8216ed33cb..736e80982a0 100644
--- a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/httproute_types.go
+++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/httproute_types.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package v1beta1
+package v1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -23,8 +23,8 @@ import (
 // +genclient
 // +kubebuilder:object:root=true
 // +kubebuilder:resource:categories=gateway-api
-// +kubebuilder:storageversion
 // +kubebuilder:subresource:status
+// +kubebuilder:storageversion
 // +kubebuilder:printcolumn:name="Hostnames",type=string,JSONPath=`.spec.hostnames`
 // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
@@ -197,8 +197,19 @@ type HTTPRouteRule struct {
 	// Filters define the filters that are applied to requests that match
 	// this rule.
 	//
-	// The effects of ordering of multiple behaviors are currently unspecified.
-	// This can change in the future based on feedback during the alpha stage.
+ // Wherever possible, implementations SHOULD implement filters in the order
+ // they are specified.
+ //
+ // Implementations MAY choose to implement this ordering strictly, rejecting
+ // any combination or order of filters that cannot be supported. If implementations
+ // choose a strict interpretation of filter ordering, they MUST clearly document
+ // that behavior.
+ //
+ // To reject an invalid combination or order of filters, implementations SHOULD
+ // consider the Route Rules with this configuration invalid. If all Route Rules
+ // in a Route are invalid, the entire Route would be considered invalid. If only
+ // a portion of Route Rules are invalid, implementations MUST set the
+ // "PartiallyInvalid" condition for the Route.
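+ //
+ // An illustrative sketch (the header name and mirror Service are
+ // hypothetical) of two filters that SHOULD be applied in the order written:
+ //
+ // ```
+ // filters:
+ // - type: RequestHeaderModifier
+ //   requestHeaderModifier:
+ //     set:
+ //     - name: x-env
+ //       value: prod
+ // - type: RequestMirror
+ //   requestMirror:
+ //     backendRef:
+ //       name: audit-svc
+ //       port: 8080
+ // ```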
 //
 // Conformance-levels at this level are defined based on the type of filter:
 //
@@ -263,6 +274,75 @@ type HTTPRouteRule struct {
 	// +optional
 	// +kubebuilder:validation:MaxItems=16
 	BackendRefs []HTTPBackendRef `json:"backendRefs,omitempty"`
+
+ // Timeouts defines the timeouts that can be configured for an HTTP request.
+ //
+ // Support: Extended
+ //
+ // +optional
+ //
+ Timeouts *HTTPRouteTimeouts `json:"timeouts,omitempty"`
+
+ // SessionPersistence defines and configures session persistence
+ // for the route rule.
+ //
+ // Support: Extended
+ //
+ // +optional
+ //
+ SessionPersistence *SessionPersistence `json:"sessionPersistence,omitempty"`
+}
+
+// HTTPRouteTimeouts defines timeouts that can be configured for an HTTPRoute.
+// Timeout values are represented with Gateway API Duration formatting.
+//
+// +kubebuilder:validation:XValidation:message="backendRequest timeout cannot be longer than request timeout",rule="!(has(self.request) && has(self.backendRequest) && duration(self.request) != duration('0s') && duration(self.backendRequest) > duration(self.request))"
+type HTTPRouteTimeouts struct {
+ // Request specifies the maximum duration for a gateway to respond to an HTTP request.
+ // If the gateway has not been able to respond before this deadline is met, the gateway
+ // MUST return a timeout error.
+ //
+ // For example, setting the `rules.timeouts.request` field to the value `10s` in an
+ // `HTTPRoute` will cause a timeout if a client request is taking longer than 10 seconds
+ // to complete.
+ //
+ // Setting a timeout to the zero duration (e.g. "0s") SHOULD disable the timeout
+ // completely. Implementations that cannot completely disable the timeout MUST
+ // instead interpret the zero duration as the longest possible value to which
+ // the timeout can be set.
+ //
+ // This timeout is intended to cover as close to the whole request-response transaction
+ // as possible, although an implementation MAY choose to start the timeout after the entire
+ // request stream has been received instead of immediately after the transaction is
+ // initiated by the client.
+ //
+ // When this field is unspecified, request timeout behavior is implementation-specific.
+ //
+ // Support: Extended
+ //
+ // +optional
+ Request *Duration `json:"request,omitempty"`
+
+ // BackendRequest specifies a timeout for an individual request from the gateway
+ // to a backend. This covers the time from when the request first starts being
+ // sent from the gateway to when the full response has been received from the backend.
+ //
+ // Setting a timeout to the zero duration (e.g. "0s") SHOULD disable the timeout
+ // completely. Implementations that cannot completely disable the timeout MUST
+ // instead interpret the zero duration as the longest possible value to which
+ // the timeout can be set.
+ //
+ // An entire client HTTP transaction with a gateway, covered by the Request timeout,
+ // may result in more than one call from the gateway to the destination backend,
+ // for example, if automatic retries are supported.
+ //
+ // Because the Request timeout encompasses the BackendRequest timeout, the value of
+ // BackendRequest must be <= the value of Request timeout.
+ //
+ // Support: Extended
+ //
+ // +optional
+ BackendRequest *Duration `json:"backendRequest,omitempty"`
 }
 
 // PathMatchType specifies the semantics of how HTTP paths should be compared.
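For illustration, a minimal sketch of these timeouts in an HTTPRoute rule
(the backend Service name is hypothetical); backendRequest must be <= request:

    rules:
    - timeouts:
        request: 10s
        backendRequest: 5s
      backendRefs:
      - name: example-svc
        port: 8080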
@@ -771,7 +851,7 @@ type HTTPHeader struct {
 // HTTPHeaderFilter defines a filter that modifies the headers of an HTTP
 // request or response. Only one action for a given header name is permitted.
 // Filters specifying multiple actions of the same or different type for any one
-// header name are invalid and will be rejected by the webhook if installed.
+// header name are invalid and will be rejected by CRD validation.
 // Configuration to set or add multiple values for a header must use RFC 7230
 // header value formatting, separating each value with a comma.
 type HTTPHeaderFilter struct {
@@ -1059,7 +1139,30 @@ type HTTPRequestMirrorFilter struct {
 	BackendRef BackendObjectReference `json:"backendRef"`
 }
 
-// HTTPBackendRef defines how a HTTPRoute should forward an HTTP request.
+// HTTPBackendRef defines how an HTTPRoute forwards an HTTP request.
+//
+// Note that when a namespace different than the local namespace is specified, a
+// ReferenceGrant object is required in the referent namespace to allow that
+// namespace's owner to accept the reference. See the ReferenceGrant
+// documentation for details.
+//
+//
+//
+// When the BackendRef points to a Kubernetes Service, implementations SHOULD
+// honor the appProtocol field if it is set for the target Service Port.
+//
+// Implementations supporting appProtocol SHOULD recognize the Kubernetes
+// Standard Application Protocols defined in KEP-3726.
+//
+// If a Service appProtocol isn't specified, an implementation MAY infer the
+// backend protocol through its own means. Implementations MAY infer the
+// protocol from the Route type referring to the backend Service.
+//
+// If a Route is not able to send traffic to the backend using the specified
+// protocol then the backend is considered invalid. Implementations MUST set the
+// "ResolvedRefs" condition to "False" with the "UnsupportedProtocol" reason.
+//
+//
 type HTTPBackendRef struct {
 	// BackendRef is a reference to a backend to forward matched requests to.
 	//
@@ -1083,12 +1186,25 @@ type HTTPBackendRef struct {
 	//   case, the Reason must be set to `RefNotPermitted` and the Message of the
 	//   Condition must explain which cross-namespace reference is not allowed.
 	//
+ // * It refers to a Kubernetes Service that has an incompatible appProtocol
+ //   for the given Route type.
+ //
+ // * The BackendTLSPolicy object is installed in the cluster, a BackendTLSPolicy
+ //   is present that refers to the Service, and the implementation is unable
+ //   to meet the requirement. At the time of writing, BackendTLSPolicy is
+ //   experimental, but once it becomes standard, this will become a MUST
+ //   requirement.
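+ //
+ // An illustrative sketch (names and namespaces are hypothetical) of a
+ // cross-namespace backendRef together with the ReferenceGrant that
+ // permits it:
+ //
+ // ```
+ // backendRefs:
+ // - name: other-svc
+ //   namespace: other-ns
+ //   port: 8080
+ // ---
+ // apiVersion: gateway.networking.k8s.io/v1beta1
+ // kind: ReferenceGrant
+ // metadata:
+ //   name: allow-httproutes
+ //   namespace: other-ns
+ // spec:
+ //   from:
+ //   - group: gateway.networking.k8s.io
+ //     kind: HTTPRoute
+ //     namespace: default
+ //   to:
+ //   - group: ""
+ //     kind: Service
+ // ```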
+ //
 	// Support: Core for Kubernetes Service
 	//
 	// Support: Implementation-specific for any other resource
 	//
 	// Support for weight: Core
 	//
+ // Support for Kubernetes Service appProtocol: Extended
+ //
+ // Support for BackendTLSPolicy: Experimental and ImplementationSpecific
+ //
 	// +optional
 	BackendRef `json:",inline"`
 
diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/object_reference_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/object_reference_types.go
similarity index 80%
rename from vendor/sigs.k8s.io/gateway-api/apis/v1beta1/object_reference_types.go
rename to vendor/sigs.k8s.io/gateway-api/apis/v1/object_reference_types.go
index 4ef1c37891a..421572acebf 100644
--- a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/object_reference_types.go
+++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/object_reference_types.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package v1beta1
+package v1
 
 // LocalObjectReference identifies an API object within the namespace of the
 // referrer.
@@ -62,7 +62,7 @@ type SecretObjectReference struct {
 	// Name is the name of the referent.
 	Name ObjectName `json:"name"`
 
-	// Namespace is the namespace of the backend. When unspecified, the local
+	// Namespace is the namespace of the referenced object. When unspecified, the local
 	// namespace is inferred.
 	//
 	// Note that when a namespace different than the local namespace is specified,
@@ -145,3 +145,36 @@ type BackendObjectReference struct {
 	// +optional
 	Port *PortNumber `json:"port,omitempty"`
 }
+
+// ObjectReference identifies an API object including its namespace.
+//
+// The API object must be valid in the cluster; the Group and Kind must
+// be registered in the cluster for this reference to be valid.
+//
+// References to objects with invalid Group and Kind are not valid, and must
+// be rejected by the implementation, with appropriate Conditions set
+// on the containing object.
+type ObjectReference struct {
+ // Group is the group of the referent. For example, "gateway.networking.k8s.io".
+ // When unspecified or empty string, core API group is inferred.
+ Group Group `json:"group"`
+
+ // Kind is the kind of the referent. For example, "ConfigMap" or "Service".
+ Kind Kind `json:"kind"`
+
+ // Name is the name of the referent.
+ Name ObjectName `json:"name"`
+
+ // Namespace is the namespace of the referenced object. When unspecified, the local
+ // namespace is inferred.
+ //
+ // Note that when a namespace different than the local namespace is specified,
+ // a ReferenceGrant object is required in the referent namespace to allow that
+ // namespace's owner to accept the reference. See the ReferenceGrant
+ // documentation for details.
+ //
+ // Support: Core
+ //
+ // +optional
+ Namespace *Namespace `json:"namespace,omitempty"`
+}
diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/shared_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/shared_types.go
similarity index 73%
rename from vendor/sigs.k8s.io/gateway-api/apis/v1beta1/shared_types.go
rename to vendor/sigs.k8s.io/gateway-api/apis/v1/shared_types.go
index b18af1d0e06..bed2cc8b8ea 100644
--- a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/shared_types.go
+++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/shared_types.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
*/ -package v1beta1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,7 +25,7 @@ import ( // with "Core" support: // // * Gateway (Gateway conformance profile) -// * Service (Mesh conformance profile, experimental, ClusterIP Services only) +// * Service (Mesh conformance profile, ClusterIP Services only) // // This API may be extended in the future to support additional kinds of parent // resources. @@ -49,7 +49,7 @@ type ParentReference struct { // There are two kinds of parent resources with "Core" support: // // * Gateway (Gateway conformance profile) - // * Service (Mesh conformance profile, experimental, ClusterIP Services only) + // * Service (Mesh conformance profile, ClusterIP Services only) // // Support for other resources is Implementation-Specific. // @@ -66,6 +66,7 @@ type ParentReference struct { // Gateway has the AllowedRoutes field, and ReferenceGrant provides a // generic way to enable any other kind of cross-namespace reference. // + // // ParentRefs from a Route to a Service in the same namespace are "producer" // routes, which apply default routing rules to inbound connections from // any namespace to the Service. @@ -75,6 +76,7 @@ type ParentReference struct { // connections originating from the same namespace as the Route, for which // the intended destination of the connections is a Service targeted as a // ParentRef of the Route. + // // // Support: Core // @@ -89,14 +91,12 @@ type ParentReference struct { // SectionName is the name of a section within the target resource. In the // following resources, SectionName is interpreted as the following: // - // * Gateway: Listener Name. When both Port (experimental) and SectionName + // * Gateway: Listener name. When both Port (experimental) and SectionName // are specified, the name and port of the selected listener must match // both specified values. - // * Service: Port Name. When both Port (experimental) and SectionName + // * Service: Port name. When both Port (experimental) and SectionName // are specified, the name and port of the selected listener must match - // both specified values. Note that attaching Routes to Services as Parents - // is part of experimental Mesh support and is not supported for any other - // purpose. + // both specified values. // // Implementations MAY choose to support attaching Routes to other resources. // If that is the case, they MUST clearly document how SectionName is @@ -127,9 +127,11 @@ type ParentReference struct { // and SectionName are specified, the name and port of the selected listener // must match both specified values. // + // // When the parent resource is a Service, this targets a specific port in the // Service spec. When both Port (experimental) and SectionName are specified, // the name and port of the selected port must match both specified values. + // // // Implementations MAY choose to support other parent resources. // Implementations supporting other types of parent resources MUST clearly @@ -146,7 +148,6 @@ type ParentReference struct { // Support: Extended // // +optional - // Port *PortNumber `json:"port,omitempty"` } @@ -167,15 +168,30 @@ type CommonRouteSpec struct { // There are two kinds of parent resources with "Core" support: // // * Gateway (Gateway conformance profile) - // * Service (Mesh conformance profile, experimental, ClusterIP Services only) + // * Service (Mesh conformance profile, ClusterIP Services only) // // This API may be extended in the future to support additional kinds of parent // resources.
// - // It is invalid to reference an identical parent more than once. It is - // valid to reference multiple distinct sections within the same parent - // resource, such as two separate Listeners on the same Gateway or two separate - // ports on the same Service. + // ParentRefs must be _distinct_. This means either that: + // + // * They select different objects. If this is the case, then parentRef + // entries are distinct. In terms of fields, this means that the + // multi-part key defined by `group`, `kind`, `namespace`, and `name` must + // be unique across all parentRef entries in the Route. + // * They do not select different objects, but for each optional field used, + // each ParentRef that selects the same object must set the same set of + // optional fields to different values. If one ParentRef sets a + // combination of optional fields, all ParentRefs selecting the same + // object must set the same combination, each with different values. + // + // Some examples: + // + // * If one ParentRef sets `sectionName`, all ParentRefs referencing the + // same object must also set `sectionName`. + // * If one ParentRef sets `port`, all ParentRefs referencing the same + // object must also set `port`. + // * If one ParentRef sets `sectionName` and `port`, all ParentRefs + // referencing the same object must also set `sectionName` and `port`. // // It is possible to separately reference multiple distinct objects that may // be collapsed by an implementation. For example, some implementations may @@ -189,6 +205,7 @@ type CommonRouteSpec struct { // Gateway has the AllowedRoutes field, and ReferenceGrant provides a // generic way to enable other kinds of cross-namespace reference. // + // // ParentRefs from a Route to a Service in the same namespace are "producer" // routes, which apply default routing rules to inbound connections from // any namespace to the Service. @@ -198,12 +215,13 @@ type CommonRouteSpec struct { // connections originating from the same namespace as the Route, for which // the intended destination of the connections is a Service targeted as a // ParentRef of the Route. + // // // +optional // +kubebuilder:validation:MaxItems=32 - // + // // - // + // // ParentRefs []ParentReference `json:"parentRefs,omitempty"` } @@ -221,6 +239,28 @@ type PortNumber int32 // ReferenceGrant object is required in the referent namespace to allow that // namespace's owner to accept the reference. See the ReferenceGrant // documentation for details. +// +// +// +// When the BackendRef points to a Kubernetes Service, implementations SHOULD +// honor the appProtocol field if it is set for the target Service Port. +// +// Implementations supporting appProtocol SHOULD recognize the Kubernetes +// Standard Application Protocols defined in KEP-3726. +// +// If a Service appProtocol isn't specified, an implementation MAY infer the +// backend protocol through its own means. Implementations MAY infer the +// protocol from the Route type referring to the backend Service. +// +// If a Route is not able to send traffic to the backend using the specified +// protocol, then the backend is considered invalid. Implementations MUST set the +// "ResolvedRefs" condition to "False" with the "UnsupportedProtocol" reason. +// +// +// +// Note that when the BackendTLSPolicy object is enabled by the implementation, +// there are some extra rules about validity to consider here. See the fields +// where this struct is used for more information about the exact behavior. type BackendRef struct { // BackendObjectReference references a Kubernetes object.
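// Illustrative sketch (editorial): the ParentRefs distinctness rules above,
// expressed in Go terms. Two entries may select the same Gateway only when
// they are told apart by the same set of optional fields, here sectionName;
// all names are assumed values.
//
//	gw := ObjectName("shared-gateway")
//	httpSection := SectionName("http")
//	httpsSection := SectionName("https")
//	parentRefs := []ParentReference{
//		{Name: gw, SectionName: &httpSection},
//		{Name: gw, SectionName: &httpsSection}, // distinct: same object, different sectionName value
//	}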
BackendObjectReference `json:",inline"` @@ -256,7 +296,7 @@ const ( // This condition indicates whether the route has been accepted or rejected // by a Gateway, and why. // - // Possible reasons for this condition to be true are: + // Possible reasons for this condition to be True are: // // * "Accepted" // @@ -307,19 +347,22 @@ const ( // are incompatible filters present on a route rule (for example if // the URLRewrite and RequestRedirect are both present on an HTTPRoute). RouteReasonIncompatibleFilters RouteConditionReason = "IncompatibleFilters" +) +const ( // This condition indicates whether the controller was able to resolve all // the object references for the Route. // - // Possible reasons for this condition to be true are: + // Possible reasons for this condition to be True are: // // * "ResolvedRefs" // - // Possible reasons for this condition to be false are: + // Possible reasons for this condition to be False are: // // * "RefNotPermitted" // * "InvalidKind" // * "BackendNotFound" + // * "UnsupportedProtocol" // // Controllers may raise this condition with other reasons, // but should prefer to use the reasons listed above to improve @@ -344,6 +387,48 @@ const ( // This reason is used with the "ResolvedRefs" condition when one of the // Route's rules has a reference to a resource that does not exist. RouteReasonBackendNotFound RouteConditionReason = "BackendNotFound" + + // This reason is used with the "ResolvedRefs" condition when one of the + // Route's rules has a reference to a resource with an app protocol that + // is not supported by this implementation. + RouteReasonUnsupportedProtocol RouteConditionReason = "UnsupportedProtocol" +) + +const ( + // This condition indicates that the Route contains a combination of both + // valid and invalid rules. + // + // When this happens, implementations MUST take one of the following + // approaches: + // + // 1) Drop Rule(s): With this approach, implementations will drop the + // invalid Route Rule(s) until they are fully valid again. The message + // for this condition MUST start with the prefix "Dropped Rule" and + // include information about which Rules have been dropped. In this + // state, the "Accepted" condition MUST be set to "True" with the latest + // generation of the resource. + // 2) Fall Back: With this approach, implementations will fall back to the + // last known good state of the entire Route. The message for this + // condition MUST start with the prefix "Fall Back" and include + // information about why the current Rule(s) are invalid. To represent + // this, the "Accepted" condition MUST be set to "True" with the + // generation of the last known good state of the resource. + // + // Reverting to the last known good state should only be done by + // implementations that have a means of restoring that state if/when they + // are restarted. + // + // This condition MUST NOT be set if a Route is fully valid, fully invalid, + // or not accepted. By extension, that means that this condition MUST only + // be set when it is "True". + // + // Possible reasons for this condition to be True are: + // + // * "UnsupportedValue" + // + // Controllers may raise this condition with other reasons, but should + // prefer to use the reasons listed above to improve interoperability. 
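// Illustrative sketch (editorial): how an implementation might surface the
// "UnsupportedProtocol" reason discussed above, assuming the
// RouteConditionResolvedRefs constant defined alongside these reasons and
// metav1 from k8s.io/apimachinery/pkg/apis/meta/v1; the message text is
// invented.
//
//	cond := metav1.Condition{
//		Type:    string(RouteConditionResolvedRefs),
//		Status:  metav1.ConditionFalse,
//		Reason:  string(RouteReasonUnsupportedProtocol),
//		Message: "service echo: appProtocol kubernetes.io/h2c is not supported",
//	}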
+ RouteConditionPartiallyInvalid RouteConditionType = "PartiallyInvalid" ) // RouteParentStatus describes the status of a route with respect to an @@ -487,7 +572,7 @@ type Group string type Kind string // ObjectName refers to the name of a Kubernetes object. -// Object names can have a variety of forms, including RFC1123 subdomains, +// Object names can have a variety of forms, including RFC 1123 subdomains, // RFC 1123 labels, or RFC 1035 labels. // // +kubebuilder:validation:MinLength=1 @@ -517,11 +602,22 @@ type Namespace string // SectionName is the name of a section in a Kubernetes resource. // +// In the following resources, SectionName is interpreted as the following: +// +// * Gateway: Listener name +// * HTTPRoute: HTTPRouteRule name +// * Service: Port name +// +// Section names can have a variety of forms, including RFC 1123 subdomains, +// RFC 1123 labels, or RFC 1035 labels. +// // This validation is based off of the corresponding Kubernetes validation: // https://github.com/kubernetes/apimachinery/blob/02cfb53916346d085a6c6c7c66f882e3c6b0eca6/pkg/util/validation/validation.go#L208 // // Valid values include: // +// * "example" +// * "foo-example" // * "example.com" // * "foo.example.com" // @@ -610,6 +706,12 @@ type AddressType string // +k8s:deepcopy-gen=false type HeaderName string +// Duration is a string value representing a duration in time. The format is as specified +// in GEP-2257, a strict subset of the syntax parsed by Golang time.ParseDuration. +// +// +kubebuilder:validation:Pattern=`^([0-9]{1,5}(h|m|s|ms)){1,4}$` +type Duration string + const ( // A textual representation of a numeric IP address. IPv4 // addresses must be in dotted-decimal form. IPv6 addresses @@ -641,3 +743,115 @@ const ( // Support: Implementation-specific NamedAddressType AddressType = "NamedAddress" ) + +// SessionPersistence defines the desired state of SessionPersistence. +// +kubebuilder:validation:XValidation:message="AbsoluteTimeout must be specified when cookie lifetimeType is Permanent",rule="!has(self.cookieConfig.lifetimeType) || self.cookieConfig.lifetimeType != 'Permanent' || has(self.absoluteTimeout)" +type SessionPersistence struct { + // SessionName defines the name of the persistent session token, + // which may be reflected in the cookie or the header. Users + // should avoid reusing session names to prevent unintended + // consequences, such as rejection or unpredictable behavior. + // + // Support: Implementation-specific + // + // +optional + // +kubebuilder:validation:MaxLength=128 + SessionName *string `json:"sessionName,omitempty"` + + // AbsoluteTimeout defines the absolute timeout of the persistent + // session. Once the AbsoluteTimeout duration has elapsed, the + // session becomes invalid. + // + // Support: Extended + // + // +optional + AbsoluteTimeout *Duration `json:"absoluteTimeout,omitempty"` + + // IdleTimeout defines the idle timeout of the persistent session. + // Once the session has been idle for more than the specified + // IdleTimeout duration, the session becomes invalid. + // + // Support: Extended + // + // +optional + IdleTimeout *Duration `json:"idleTimeout,omitempty"` + + // Type defines the type of session persistence such as through + // the use of a header or cookie. Defaults to cookie-based session + // persistence.
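// Illustrative sketch (editorial): cookie-based session persistence with a
// GEP-2257 duration, built from the types declared in this hunk; the timeout
// value is an assumption.
//
//	absolute := Duration("1h") // valid GEP-2257 forms include "1h", "30m", "10s", "500ms"
//	kind := CookieBasedSessionPersistence
//	lifetime := SessionCookieLifetimeType
//	sp := SessionPersistence{
//		AbsoluteTimeout: &absolute,
//		Type:            &kind,
//		CookieConfig:    &CookieConfig{LifetimeType: &lifetime},
//	}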
+ // + // Support: Core for "Cookie" type + // + // Support: Extended for "Header" type + // + // +optional + // +kubebuilder:default=Cookie + Type *SessionPersistenceType `json:"type,omitempty"` + + // CookieConfig provides configuration settings that are specific + // to cookie-based session persistence. + // + // Support: Core + // + // +optional + CookieConfig *CookieConfig `json:"cookieConfig,omitempty"` +} + +// +kubebuilder:validation:Enum=Cookie;Header +type SessionPersistenceType string + +const ( + // CookieBasedSessionPersistence specifies cookie-based session + // persistence. + // + // Support: Core + CookieBasedSessionPersistence SessionPersistenceType = "Cookie" + + // HeaderBasedSessionPersistence specifies header-based session + // persistence. + // + // Support: Extended + HeaderBasedSessionPersistence SessionPersistenceType = "Header" +) + +// CookieConfig defines the configuration for cookie-based session persistence. +type CookieConfig struct { + // LifetimeType specifies whether the cookie has a permanent or + // session-based lifetime. A permanent cookie persists until its + // specified expiry time, defined by the Expires or Max-Age cookie + // attributes, while a session cookie is deleted when the current + // session ends. + // + // When set to "Permanent", AbsoluteTimeout indicates the + // cookie's lifetime via the Expires or Max-Age cookie attributes + // and is required. + // + // When set to "Session", AbsoluteTimeout indicates the + // absolute lifetime of the cookie tracked by the gateway and + // is optional. + // + // Support: Core for "Session" type + // + // Support: Extended for "Permanent" type + // + // +optional + // +kubebuilder:default=Session + LifetimeType *CookieLifetimeType `json:"lifetimeType,omitempty"` +} + +// +kubebuilder:validation:Enum=Permanent;Session +type CookieLifetimeType string + +const ( + // SessionCookieLifetimeType specifies the type for a session + // cookie. + // + // Support: Core + SessionCookieLifetimeType CookieLifetimeType = "Session" + + // PermanentCookieLifetimeType specifies the type for a permanent + // cookie. + // + // Support: Extended + PermanentCookieLifetimeType CookieLifetimeType = "Permanent" +) diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.deepcopy.go similarity index 73% rename from vendor/sigs.k8s.io/gateway-api/apis/v1beta1/zz_generated.deepcopy.go rename to vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.deepcopy.go index d1d46269d26..ddb9bb9d497 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/zz_generated.deepcopy.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.deepcopy.go @@ -18,11 +18,11 @@ limitations under the License. // Code generated by controller-gen. DO NOT EDIT. -package v1beta1 +package v1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -130,6 +130,327 @@ func (in *CommonRouteSpec) DeepCopy() *CommonRouteSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CookieConfig) DeepCopyInto(out *CookieConfig) { + *out = *in + if in.LifetimeType != nil { + in, out := &in.LifetimeType, &out.LifetimeType + *out = new(CookieLifetimeType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookieConfig. +func (in *CookieConfig) DeepCopy() *CookieConfig { + if in == nil { + return nil + } + out := new(CookieConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendTLSValidation) DeepCopyInto(out *FrontendTLSValidation) { + *out = *in + if in.CACertificateRefs != nil { + in, out := &in.CACertificateRefs, &out.CACertificateRefs + *out = make([]ObjectReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendTLSValidation. +func (in *FrontendTLSValidation) DeepCopy() *FrontendTLSValidation { + if in == nil { + return nil + } + out := new(FrontendTLSValidation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCBackendRef) DeepCopyInto(out *GRPCBackendRef) { + *out = *in + in.BackendRef.DeepCopyInto(&out.BackendRef) + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]GRPCRouteFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCBackendRef. +func (in *GRPCBackendRef) DeepCopy() *GRPCBackendRef { + if in == nil { + return nil + } + out := new(GRPCBackendRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCHeaderMatch) DeepCopyInto(out *GRPCHeaderMatch) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(HeaderMatchType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCHeaderMatch. +func (in *GRPCHeaderMatch) DeepCopy() *GRPCHeaderMatch { + if in == nil { + return nil + } + out := new(GRPCHeaderMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCMethodMatch) DeepCopyInto(out *GRPCMethodMatch) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(GRPCMethodMatchType) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCMethodMatch. +func (in *GRPCMethodMatch) DeepCopy() *GRPCMethodMatch { + if in == nil { + return nil + } + out := new(GRPCMethodMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GRPCRoute) DeepCopyInto(out *GRPCRoute) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRoute. +func (in *GRPCRoute) DeepCopy() *GRPCRoute { + if in == nil { + return nil + } + out := new(GRPCRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GRPCRoute) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteFilter) DeepCopyInto(out *GRPCRouteFilter) { + *out = *in + if in.RequestHeaderModifier != nil { + in, out := &in.RequestHeaderModifier, &out.RequestHeaderModifier + *out = new(HTTPHeaderFilter) + (*in).DeepCopyInto(*out) + } + if in.ResponseHeaderModifier != nil { + in, out := &in.ResponseHeaderModifier, &out.ResponseHeaderModifier + *out = new(HTTPHeaderFilter) + (*in).DeepCopyInto(*out) + } + if in.RequestMirror != nil { + in, out := &in.RequestMirror, &out.RequestMirror + *out = new(HTTPRequestMirrorFilter) + (*in).DeepCopyInto(*out) + } + if in.ExtensionRef != nil { + in, out := &in.ExtensionRef, &out.ExtensionRef + *out = new(LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteFilter. +func (in *GRPCRouteFilter) DeepCopy() *GRPCRouteFilter { + if in == nil { + return nil + } + out := new(GRPCRouteFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteList) DeepCopyInto(out *GRPCRouteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GRPCRoute, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteList. +func (in *GRPCRouteList) DeepCopy() *GRPCRouteList { + if in == nil { + return nil + } + out := new(GRPCRouteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GRPCRouteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteMatch) DeepCopyInto(out *GRPCRouteMatch) { + *out = *in + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(GRPCMethodMatch) + (*in).DeepCopyInto(*out) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]GRPCHeaderMatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteMatch. 
+func (in *GRPCRouteMatch) DeepCopy() *GRPCRouteMatch { + if in == nil { + return nil + } + out := new(GRPCRouteMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteRule) DeepCopyInto(out *GRPCRouteRule) { + *out = *in + if in.Matches != nil { + in, out := &in.Matches, &out.Matches + *out = make([]GRPCRouteMatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]GRPCRouteFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendRefs != nil { + in, out := &in.BackendRefs, &out.BackendRefs + *out = make([]GRPCBackendRef, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SessionPersistence != nil { + in, out := &in.SessionPersistence, &out.SessionPersistence + *out = new(SessionPersistence) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteRule. +func (in *GRPCRouteRule) DeepCopy() *GRPCRouteRule { + if in == nil { + return nil + } + out := new(GRPCRouteRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteSpec) DeepCopyInto(out *GRPCRouteSpec) { + *out = *in + in.CommonRouteSpec.DeepCopyInto(&out.CommonRouteSpec) + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]Hostname, len(*in)) + copy(*out, *in) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]GRPCRouteRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteSpec. +func (in *GRPCRouteSpec) DeepCopy() *GRPCRouteSpec { + if in == nil { + return nil + } + out := new(GRPCRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteStatus) DeepCopyInto(out *GRPCRouteStatus) { + *out = *in + in.RouteStatus.DeepCopyInto(&out.RouteStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteStatus. +func (in *GRPCRouteStatus) DeepCopy() *GRPCRouteStatus { + if in == nil { + return nil + } + out := new(GRPCRouteStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Gateway) DeepCopyInto(out *Gateway) { *out = *in @@ -266,11 +587,16 @@ func (in *GatewayClassStatus) DeepCopyInto(out *GatewayClassStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.SupportedFeatures != nil { + in, out := &in.SupportedFeatures, &out.SupportedFeatures + *out = make([]SupportedFeature, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayClassStatus. 
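// Illustrative sketch (editorial): every generated helper in this file follows
// the same contract; DeepCopy returns a fully independent copy, so mutating
// the copy never aliases the original. The method name is an assumed value.
//
//	method := "Echo"
//	match := GRPCRouteMatch{Method: &GRPCMethodMatch{Method: &method}}
//	copied := match.DeepCopy()
//	*copied.Method.Method = "Other" // match.Method.Method still points at "Echo"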
@@ -283,6 +609,40 @@ func (in *GatewayClassStatus) DeepCopy() *GatewayClassStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayInfrastructure) DeepCopyInto(out *GatewayInfrastructure) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[AnnotationKey]AnnotationValue, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[AnnotationKey]AnnotationValue, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ParametersRef != nil { + in, out := &in.ParametersRef, &out.ParametersRef + *out = new(LocalParametersReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayInfrastructure. +func (in *GatewayInfrastructure) DeepCopy() *GatewayInfrastructure { + if in == nil { + return nil + } + out := new(GatewayInfrastructure) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GatewayList) DeepCopyInto(out *GatewayList) { *out = *in @@ -332,6 +692,11 @@ func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Infrastructure != nil { + in, out := &in.Infrastructure, &out.Infrastructure + *out = new(GatewayInfrastructure) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec. @@ -356,7 +721,7 @@ func (in *GatewayStatus) DeepCopyInto(out *GatewayStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -415,6 +780,11 @@ func (in *GatewayTLSConfig) DeepCopyInto(out *GatewayTLSConfig) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.FrontendValidation != nil { + in, out := &in.FrontendValidation, &out.FrontendValidation + *out = new(FrontendTLSValidation) + (*in).DeepCopyInto(*out) + } if in.Options != nil { in, out := &in.Options, &out.Options *out = make(map[AnnotationKey]AnnotationValue, len(*in)) @@ -815,6 +1185,16 @@ func (in *HTTPRouteRule) DeepCopyInto(out *HTTPRouteRule) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Timeouts != nil { + in, out := &in.Timeouts, &out.Timeouts + *out = new(HTTPRouteTimeouts) + (*in).DeepCopyInto(*out) + } + if in.SessionPersistence != nil { + in, out := &in.SessionPersistence, &out.SessionPersistence + *out = new(SessionPersistence) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRule. @@ -871,6 +1251,31 @@ func (in *HTTPRouteStatus) DeepCopy() *HTTPRouteStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteTimeouts) DeepCopyInto(out *HTTPRouteTimeouts) { + *out = *in + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(Duration) + **out = **in + } + if in.BackendRequest != nil { + in, out := &in.BackendRequest, &out.BackendRequest + *out = new(Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteTimeouts. 
+func (in *HTTPRouteTimeouts) DeepCopy() *HTTPRouteTimeouts { + if in == nil { + return nil + } + out := new(HTTPRouteTimeouts) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPURLRewriteFilter) DeepCopyInto(out *HTTPURLRewriteFilter) { *out = *in @@ -938,7 +1343,7 @@ func (in *ListenerStatus) DeepCopyInto(out *ListenerStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -970,6 +1375,41 @@ func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalParametersReference) DeepCopyInto(out *LocalParametersReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalParametersReference. +func (in *LocalParametersReference) DeepCopy() *LocalParametersReference { + if in == nil { + return nil + } + out := new(LocalParametersReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(Namespace) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ParametersReference) DeepCopyInto(out *ParametersReference) { *out = *in @@ -1030,126 +1470,6 @@ func (in *ParentReference) DeepCopy() *ParentReference { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReferenceGrant) DeepCopyInto(out *ReferenceGrant) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceGrant. -func (in *ReferenceGrant) DeepCopy() *ReferenceGrant { - if in == nil { - return nil - } - out := new(ReferenceGrant) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReferenceGrant) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReferenceGrantFrom) DeepCopyInto(out *ReferenceGrantFrom) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceGrantFrom. -func (in *ReferenceGrantFrom) DeepCopy() *ReferenceGrantFrom { - if in == nil { - return nil - } - out := new(ReferenceGrantFrom) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *ReferenceGrantList) DeepCopyInto(out *ReferenceGrantList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReferenceGrant, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceGrantList. -func (in *ReferenceGrantList) DeepCopy() *ReferenceGrantList { - if in == nil { - return nil - } - out := new(ReferenceGrantList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReferenceGrantList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReferenceGrantSpec) DeepCopyInto(out *ReferenceGrantSpec) { - *out = *in - if in.From != nil { - in, out := &in.From, &out.From - *out = make([]ReferenceGrantFrom, len(*in)) - copy(*out, *in) - } - if in.To != nil { - in, out := &in.To, &out.To - *out = make([]ReferenceGrantTo, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceGrantSpec. -func (in *ReferenceGrantSpec) DeepCopy() *ReferenceGrantSpec { - if in == nil { - return nil - } - out := new(ReferenceGrantSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReferenceGrantTo) DeepCopyInto(out *ReferenceGrantTo) { - *out = *in - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(ObjectName) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceGrantTo. -func (in *ReferenceGrantTo) DeepCopy() *ReferenceGrantTo { - if in == nil { - return nil - } - out := new(ReferenceGrantTo) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RouteGroupKind) DeepCopyInto(out *RouteGroupKind) { *out = *in @@ -1180,7 +1500,7 @@ func (in *RouteNamespaces) DeepCopyInto(out *RouteNamespaces) { } if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) + *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } } @@ -1201,7 +1521,7 @@ func (in *RouteParentStatus) DeepCopyInto(out *RouteParentStatus) { in.ParentRef.DeepCopyInto(&out.ParentRef) if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1269,3 +1589,43 @@ func (in *SecretObjectReference) DeepCopy() *SecretObjectReference { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SessionPersistence) DeepCopyInto(out *SessionPersistence) { + *out = *in + if in.SessionName != nil { + in, out := &in.SessionName, &out.SessionName + *out = new(string) + **out = **in + } + if in.AbsoluteTimeout != nil { + in, out := &in.AbsoluteTimeout, &out.AbsoluteTimeout + *out = new(Duration) + **out = **in + } + if in.IdleTimeout != nil { + in, out := &in.IdleTimeout, &out.IdleTimeout + *out = new(Duration) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(SessionPersistenceType) + **out = **in + } + if in.CookieConfig != nil { + in, out := &in.CookieConfig, &out.CookieConfig + *out = new(CookieConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionPersistence. +func (in *SessionPersistence) DeepCopy() *SessionPersistence { + if in == nil { + return nil + } + out := new(SessionPersistence) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/zz_generated.register.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.register.go similarity index 93% rename from vendor/sigs.k8s.io/gateway-api/apis/v1beta1/zz_generated.register.go rename to vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.register.go index 05d7898b385..9c8db216af7 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/zz_generated.register.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.register.go @@ -1,3 +1,6 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + /* Copyright The Kubernetes Authors. @@ -16,7 +19,7 @@ limitations under the License. // Code generated by register-gen. DO NOT EDIT. -package v1beta1 +package v1 import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -28,11 +31,11 @@ import ( const GroupName = "gateway.networking.k8s.io" // GroupVersion specifies the group and the version used to register the objects. -var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1beta1"} +var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1"} // SchemeGroupVersion is group version used to register these objects // Deprecated: use GroupVersion instead. -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -43,7 +46,7 @@ var ( // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. SchemeBuilder runtime.SchemeBuilder localSchemeBuilder = &SchemeBuilder - // Depreciated: use Install instead + // Deprecated: use Install instead AddToScheme = localSchemeBuilder.AddToScheme Install = localSchemeBuilder.AddToScheme ) @@ -58,14 +61,14 @@ func init() { // Adds the list of known types to Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &GRPCRoute{}, + &GRPCRouteList{}, &Gateway{}, &GatewayClass{}, &GatewayClassList{}, &GatewayList{}, &HTTPRoute{}, &HTTPRouteList{}, - &ReferenceGrant{}, - &ReferenceGrantList{}, ) // AddToGroupVersion allows the serialization of client types like ListOptions. 
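// Illustrative sketch (editorial): consuming the renamed v1 package by
// registering its kinds with a scheme via Install (AddToScheme is the
// deprecated alias), assuming k8s.io/apimachinery/pkg/runtime.
//
//	scheme := runtime.NewScheme()
//	if err := Install(scheme); err != nil {
//		panic(err) // illustrative error handling only
//	}
//	// scheme now recognizes gateway.networking.k8s.io/v1 kinds such as HTTPRoute and GRPCRoute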
v1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/referencegrant_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/referencegrant_types.go deleted file mode 100644 index 0b0caf70882..00000000000 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1beta1/referencegrant_types.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +kubebuilder:object:root=true -// +kubebuilder:resource:categories=gateway-api,shortName=refgrant -// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` -// +kubebuilder:storageversion - -// ReferenceGrant identifies kinds of resources in other namespaces that are -// trusted to reference the specified kinds of resources in the same namespace -// as the policy. -// -// Each ReferenceGrant can be used to represent a unique trust relationship. -// Additional Reference Grants can be used to add to the set of trusted -// sources of inbound references for the namespace they are defined within. -// -// All cross-namespace references in Gateway API (with the exception of cross-namespace -// Gateway-route attachment) require a ReferenceGrant. -// -// ReferenceGrant is a form of runtime verification allowing users to assert -// which cross-namespace object references are permitted. Implementations that -// support ReferenceGrant MUST NOT permit cross-namespace references which have -// no grant, and MUST respond to the removal of a grant by revoking the access -// that the grant allowed. -type ReferenceGrant struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the desired state of ReferenceGrant. - Spec ReferenceGrantSpec `json:"spec,omitempty"` - - // Note that `Status` sub-resource has been excluded at the - // moment as it was difficult to work out the design. - // `Status` sub-resource may be added in future. -} - -// +kubebuilder:object:root=true -// ReferenceGrantList contains a list of ReferenceGrant. -type ReferenceGrantList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []ReferenceGrant `json:"items"` -} - -// ReferenceGrantSpec identifies a cross namespace relationship that is trusted -// for Gateway API. -type ReferenceGrantSpec struct { - // From describes the trusted namespaces and kinds that can reference the - // resources described in "To". Each entry in this list MUST be considered - // to be an additional place that references can be valid from, or to put - // this another way, entries MUST be combined using OR. - // - // Support: Core - // - // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:MaxItems=16 - From []ReferenceGrantFrom `json:"from"` - - // To describes the resources that may be referenced by the resources - // described in "From". 
Each entry in this list MUST be considered to be an - // additional place that references can be valid to, or to put this another - // way, entries MUST be combined using OR. - // - // Support: Core - // - // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:MaxItems=16 - To []ReferenceGrantTo `json:"to"` -} - -// ReferenceGrantFrom describes trusted namespaces and kinds. -type ReferenceGrantFrom struct { - // Group is the group of the referent. - // When empty, the Kubernetes core API group is inferred. - // - // Support: Core - Group Group `json:"group"` - - // Kind is the kind of the referent. Although implementations may support - // additional resources, the following types are part of the "Core" - // support level for this field. - // - // When used to permit a SecretObjectReference: - // - // * Gateway - // - // When used to permit a BackendObjectReference: - // - // * GRPCRoute - // * HTTPRoute - // * TCPRoute - // * TLSRoute - // * UDPRoute - Kind Kind `json:"kind"` - - // Namespace is the namespace of the referent. - // - // Support: Core - Namespace Namespace `json:"namespace"` -} - -// ReferenceGrantTo describes what Kinds are allowed as targets of the -// references. -type ReferenceGrantTo struct { - // Group is the group of the referent. - // When empty, the Kubernetes core API group is inferred. - // - // Support: Core - Group Group `json:"group"` - - // Kind is the kind of the referent. Although implementations may support - // additional resources, the following types are part of the "Core" - // support level for this field: - // - // * Secret when used to permit a SecretObjectReference - // * Service when used to permit a BackendObjectReference - Kind Kind `json:"kind"` - - // Name is the name of the referent. When unspecified, this policy - // refers to all resources of the specified Group and Kind in the local - // namespace. - // - // +optional - Name *ObjectName `json:"name,omitempty"` -}
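// Illustrative sketch (editorial): per the definitions deleted above, a grant
// allowing HTTPRoutes in namespace "apps" to reference Secrets in the grant's
// own namespace would be specified as follows; From and To entries are each
// combined using OR, and all names here are assumed values.
//
//	spec := ReferenceGrantSpec{
//		From: []ReferenceGrantFrom{{
//			Group:     Group("gateway.networking.k8s.io"),
//			Kind:      Kind("HTTPRoute"),
//			Namespace: Namespace("apps"),
//		}},
//		To: []ReferenceGrantTo{{Kind: Kind("Secret")}}, // empty Group selects the core API group
//	}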