From ad0b70ea801db2e8564f06ddfd3d52a37a74abad Mon Sep 17 00:00:00 2001
From: Pawel Winogrodzki
Date: Tue, 26 Nov 2024 09:49:30 -0800
Subject: [PATCH 1/4] Extended CVE-2024-10224 patch and fixed ptests in `perl-Module-ScanDeps`. (#11221)

---
 .../perl-Module-ScanDeps/CVE-2024-10224.patch | 36 +++++++++++++++++++
 .../perl-Module-ScanDeps.spec | 12 +++++--
 2 files changed, 46 insertions(+), 2 deletions(-)

diff --git a/SPECS/perl-Module-ScanDeps/CVE-2024-10224.patch b/SPECS/perl-Module-ScanDeps/CVE-2024-10224.patch
index 87ed0988758..c231e72652c 100644
--- a/SPECS/perl-Module-ScanDeps/CVE-2024-10224.patch
+++ b/SPECS/perl-Module-ScanDeps/CVE-2024-10224.patch
@@ -243,3 +243,39 @@ index 7bc9662..dd79c65 100644
 # e.g. for autosplit .ix and .al files. In the latter case,
 # the key may also start with "./" if found via a relative path in @INC.
 $key =~ s|\\|/|g;
+
+
+From 49468814a24221affe113664899be21aef60e846 Mon Sep 17 00:00:00 2001
+From: rschupp
+Date: Fri, 8 Nov 2024 19:17:30 +0100
+Subject: [PATCH] fix parsing of "use if ..."
+
+Fixes errors in PAR::Packer test t/90-rt59710.t
+---
+ lib/Module/ScanDeps.pm | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/lib/Module/ScanDeps.pm b/lib/Module/ScanDeps.pm
+index f911440..71d8b75 100644
+--- a/lib/Module/ScanDeps.pm
++++ b/lib/Module/ScanDeps.pm
+@@ -925,7 +925,7 @@ sub scan_line {
+ next CHUNK;
+ }
+
+- if (my ($pragma, $args) = /^use \s+ (autouse|if) \s+ (.+)/x)
++ if (my ($pragma, $args) = /^(?:use|no) \s+ (autouse|if) \s+ (.+)/x)
+ {
+ # NOTE: There are different ways the MODULE may
+ # be specified for the "autouse" and "if" pragmas, e.g.
+@@ -938,7 +938,9 @@
+ else {
+ # The syntax of the "if" pragma is
+ # use if COND, MODULE => ARGUMENTS
+- (undef, $module) = _parse_module_list($args);
++ # NOTE: This works only for simple conditions.
++ $args =~ s/.*? (?:,|=>) \s*//x;
++ ($module) = _parse_module_list($args);
+ }
+ $found{_mod2pm($pragma)}++;
+ $found{_mod2pm($module)}++ if $module;
diff --git a/SPECS/perl-Module-ScanDeps/perl-Module-ScanDeps.spec b/SPECS/perl-Module-ScanDeps/perl-Module-ScanDeps.spec
index 0004cef4881..e10dfd793f0 100644
--- a/SPECS/perl-Module-ScanDeps/perl-Module-ScanDeps.spec
+++ b/SPECS/perl-Module-ScanDeps/perl-Module-ScanDeps.spec
@@ -2,7 +2,7 @@ Summary: Recursively scan Perl code for dependencies
 Name: perl-Module-ScanDeps
 Version: 1.35
-Release: 2%{?dist}
+Release: 3%{?dist}
 License: GPL+ or Artistic
 Group: Development/Libraries
 Source0: https://cpan.metacpan.org/authors/id/R/RS/RSCHUPP/Module-ScanDeps-%{version}.tar.gz
@@ -15,10 +15,14 @@ BuildRequires: perl >= 5.28.0
 BuildRequires: perl(ExtUtils::MakeMaker)
 BuildRequires: perl-generators
 %if 0%{?with_check}
+BuildRequires: perl(AutoLoader)
+BuildRequires: perl(blib)
 BuildRequires: perl(CPAN)
 BuildRequires: perl(CPAN::Meta)
 BuildRequires: perl(FindBin)
+BuildRequires: perl(Test)
 BuildRequires: perl(Test::More)
+BuildRequires: perl(Test::Pod)
 %endif

 Requires: perl(:MODULE_COMPAT_%(eval "`perl -V:version`"; echo $version))
@@ -55,6 +59,7 @@ find %{buildroot} -type f -name .packlist -exec rm -f {} +
 export PERL_MM_USE_DEFAULT=1
 cpan local::lib
 cpan Test::Requires
+cpan IPC::Run3
 make %{?_smp_mflags} test

 %files
@@ -65,13 +70,16 @@ make %{?_smp_mflags} test
 %{_mandir}/man3/*

 %changelog
+* Mon Nov 25 2024 Pawel Winogrodzki - 1.35-3
+- Fixing perl-Module-ScanDeps tests.
+
 * Fri Nov 15 2024 Pawel Winogrodzki - 1.35-2
 - Patched CVE-2024-10224.

 * Mon Dec 18 2023 CBL-Mariner Servicing Account - 1.35-1
 - Auto-upgrade to 1.35 - Azure Linux 3.0 - package upgrades

-* Tue Aug 23 2020 Muhammad Falak - 1.31-2
+* Tue Aug 23 2022 Muhammad Falak - 1.31-2
 - Add BR on `perl-{(CPAN::*),(FindBin),(Test::More)}` to enable ptest

 * Fri Apr 22 2022 Mateusz Malisz - 1.31-1

From 2ec4001ad1ce916b413ccab5f7a877b39674132b Mon Sep 17 00:00:00 2001
From: Mykhailo Bykhovtsev <108374904+mbykhovtsev-ms@users.noreply.github.com>
Date: Tue, 26 Nov 2024 17:46:22 -0800
Subject: [PATCH 2/4] Changing name produced for cvm and marketplace images (#10403)

---
 toolkit/imageconfigs/{cvm.json => marketplace-gen2-cvm.json} | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
 rename toolkit/imageconfigs/{cvm.json => marketplace-gen2-cvm.json} (97%)

diff --git a/toolkit/imageconfigs/cvm.json b/toolkit/imageconfigs/marketplace-gen2-cvm.json
similarity index 97%
rename from toolkit/imageconfigs/cvm.json
rename to toolkit/imageconfigs/marketplace-gen2-cvm.json
index 158f3cdb4e7..094c7d44bdb 100644
--- a/toolkit/imageconfigs/cvm.json
+++ b/toolkit/imageconfigs/marketplace-gen2-cvm.json
@@ -5,7 +5,7 @@
 "MaxSize": 2048,
 "Artifacts": [
 {
- "Name": "cvm",
+ "Name": "cblmariner-gen2-cvm",
 "Type": "vhd"
 }
 ],

From 0d249c0d06d1cd19cfcdc415ce453a215c92b0c7 Mon Sep 17 00:00:00 2001
From: CBL-Mariner-Bot <75509084+CBL-Mariner-Bot@users.noreply.github.com>
Date: Tue, 26 Nov 2024 20:33:02 -0800
Subject: [PATCH 3/4] [AUTOPATCHER-CORE] Upgrade SymCrypt-OpenSSL to 1.6.1 bug fixes (#11237)

---
 SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.signatures.json | 6 +++---
 SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.spec | 5 ++++-
 cgmanifest.json | 4 ++--
 3 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.signatures.json b/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.signatures.json
index e9c666280bf..9166e5b5173 100644
--- a/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.signatures.json
+++ b/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.signatures.json
@@ -1,5 +1,5 @@
 {
- "Signatures": {
- "SymCrypt-OpenSSL-1.6.0.tar.gz": "7265ddd737b582418a7f0c29144ab11af1bdfd8ea65fbb1a92711068fa606f61"
- }
+ "Signatures": {
+ "SymCrypt-OpenSSL-1.6.1.tar.gz": "8766d2f5c977960b1aab0099c0d74190b0705bc29f29ff4b266dac3729644658"
+ }
 }
diff --git a/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.spec b/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.spec
index 55ad3e8e8b4..95f6c2150c5 100644
--- a/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.spec
+++ b/SPECS/SymCrypt-OpenSSL/SymCrypt-OpenSSL.spec
@@ -1,6 +1,6 @@
 Summary: The SymCrypt engine for OpenSSL (SCOSSL) allows the use of OpenSSL with SymCrypt as the provider for core cryptographic operations
 Name: SymCrypt-OpenSSL
-Version: 1.6.0
+Version: 1.6.1
 Release: 1%{?dist}
 License: MIT
 Vendor: Microsoft Corporation
@@ -67,6 +67,9 @@ install SymCryptProvider/symcrypt_prov.cnf %{buildroot}%{_sysconfdir}/pki/tls/sy
 %{_sysconfdir}/pki/tls/symcrypt_prov.cnf

 %changelog
+* Wed Nov 27 2024 CBL-Mariner Servicing Account - 1.6.1-1
+- Auto-upgrade to 1.6.1 - bug fixes
+
 * Mon Nov 25 2024 Tobias Brick - 1.6.0-1
 - Upgrade to SymCrypt-OpenSSL 1.6.0

diff --git a/cgmanifest.json b/cgmanifest.json
index 9c5e6085948..c275a93d590 100644
--- a/cgmanifest.json
+++ b/cgmanifest.json
@@ -28246,8 +28246,8 @@
 "type": "other",
 "other": {
 "name": "SymCrypt-OpenSSL",
- "version": "1.6.0",
- "downloadUrl": "https://github.com/microsoft/SymCrypt-OpenSSL/archive/v1.6.0.tar.gz"
+ "version": "1.6.1",
+ "downloadUrl": "https://github.com/microsoft/SymCrypt-OpenSSL/archive/v1.6.1.tar.gz"
 }
 }
 },

From 9c00c578d03a4e291eeb0449256e0b38d399c484 Mon Sep 17 00:00:00 2001
From: CBL-Mariner-Bot <75509084+CBL-Mariner-Bot@users.noreply.github.com>
Date: Tue, 26 Nov 2024 20:47:44 -0800
Subject: [PATCH 4/4] [AUTO-CHERRYPICK] fix CVE-2023-39325, CVE-2023-44487 and CVE-2023-45288 - branch 3.0-dev (#11238)

Co-authored-by: xiaohong
---
 SPECS/multus/CVE-2023-44487.patch | 71077 ++++++++++++++++++++++++++++
 SPECS/multus/CVE-2023-45288.patch | 8790 ++++
 SPECS/multus/multus.spec | 8 +-
 3 files changed, 79874 insertions(+), 1 deletion(-)
 create mode 100644 SPECS/multus/CVE-2023-44487.patch
 create mode 100644 SPECS/multus/CVE-2023-45288.patch

diff --git a/SPECS/multus/CVE-2023-44487.patch b/SPECS/multus/CVE-2023-44487.patch
new file mode 100644
index 00000000000..37d6dd702b3
--- /dev/null
+++ b/SPECS/multus/CVE-2023-44487.patch
@@ -0,0 +1,71077 @@
+From fcfccf7df38a19631c7a8740a2c4aee7688ad9c6 Mon Sep 17 00:00:00 2001
+From: xiaohongdeng <“worldsky86rough@gmail.com”>
+Date: Tue, 26 Nov 2024 23:44:28 +0000
+Subject: [PATCH] upgrade golang.org/x/net to 0.17.0 and google.golang.org/grpc
+ 1.56.3
+
+---
+ go.mod | 26 +-
+ go.sum | 41 +-
+ vendor/github.com/cespare/xxhash/v2/README.md | 31 +-
+ .../github.com/cespare/xxhash/v2/testall.sh | 10 +
+ vendor/github.com/cespare/xxhash/v2/xxhash.go | 47 +-
+ .../cespare/xxhash/v2/xxhash_amd64.s | 336 +-
+ .../cespare/xxhash/v2/xxhash_arm64.s | 183 +
+ .../v2/{xxhash_amd64.go => xxhash_asm.go} | 2 +
+ .../cespare/xxhash/v2/xxhash_other.go | 22 +-
+ .../cespare/xxhash/v2/xxhash_safe.go | 1 +
+ .../cespare/xxhash/v2/xxhash_unsafe.go | 3 +-
+ .../golang/protobuf/jsonpb/decode.go | 530 ++
+ .../golang/protobuf/jsonpb/encode.go | 559 ++
+ .../github.com/golang/protobuf/jsonpb/json.go | 69 +
+ .../x/net/context/ctxhttp/ctxhttp.go | 71 -
+ vendor/golang.org/x/net/html/doc.go | 21 +
+ vendor/golang.org/x/net/html/escape.go | 81 +
+ vendor/golang.org/x/net/html/render.go | 2 +-
+ vendor/golang.org/x/net/html/token.go | 19 +-
+ vendor/golang.org/x/net/http2/Dockerfile | 51 -
+ vendor/golang.org/x/net/http2/Makefile | 3 -
+ vendor/golang.org/x/net/http2/pipe.go | 6 +-
+ vendor/golang.org/x/net/http2/server.go | 102 +-
+ vendor/golang.org/x/net/http2/transport.go | 81 +-
+ vendor/golang.org/x/net/http2/writesched.go | 3 +-
+ .../x/net/http2/writesched_roundrobin.go | 119 +
+ vendor/golang.org/x/net/idna/idna9.0.0.go | 2 +-
+ vendor/golang.org/x/net/idna/tables13.0.0.go | 2988 ++++---
+ vendor/golang.org/x/net/idna/tables15.0.0.go | 5145 +++++++++++
+ vendor/golang.org/x/net/idna/trie.go | 21 -
+ vendor/golang.org/x/net/idna/trie12.0.0.go | 31 +
+ vendor/golang.org/x/net/idna/trie13.0.0.go | 31 +
+ vendor/golang.org/x/oauth2/AUTHORS | 3 -
+ vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 -
+ vendor/golang.org/x/oauth2/README.md | 12 +-
+ vendor/golang.org/x/oauth2/internal/token.go | 4 +-
+ vendor/golang.org/x/oauth2/oauth2.go | 33 +-
+ vendor/golang.org/x/oauth2/token.go | 14 +-
+ .../sys/internal/unsafeheader/unsafeheader.go | 30 -
+ vendor/golang.org/x/sys/unix/ioctl_signed.go | 70 +
+ .../sys/unix/{ioctl.go => ioctl_unsigned.go} | 21 +-
+ vendor/golang.org/x/sys/unix/ioctl_zos.go | 20 +-
+ vendor/golang.org/x/sys/unix/mkall.sh | 2 +-
+ vendor/golang.org/x/sys/unix/mkerrors.sh | 14 +-
+ vendor/golang.org/x/sys/unix/mmap_nomremap.go | 14 +
+ vendor/golang.org/x/sys/unix/mremap.go | 53 +
+ vendor/golang.org/x/sys/unix/syscall_aix.go | 24 +-
+ .../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 -
+ .../x/sys/unix/syscall_aix_ppc64.go | 1 -
+ vendor/golang.org/x/sys/unix/syscall_bsd.go | 17
+- + .../golang.org/x/sys/unix/syscall_darwin.go | 251 +- + .../x/sys/unix/syscall_dragonfly.go | 200 +- + .../golang.org/x/sys/unix/syscall_freebsd.go | 236 +- + .../x/sys/unix/syscall_freebsd_386.go | 17 +- + .../x/sys/unix/syscall_freebsd_amd64.go | 17 +- + .../x/sys/unix/syscall_freebsd_arm.go | 15 +- + .../x/sys/unix/syscall_freebsd_arm64.go | 15 +- + .../x/sys/unix/syscall_freebsd_riscv64.go | 15 +- + vendor/golang.org/x/sys/unix/syscall_hurd.go | 8 + + vendor/golang.org/x/sys/unix/syscall_linux.go | 266 +- + .../x/sys/unix/syscall_linux_386.go | 27 - + .../x/sys/unix/syscall_linux_amd64.go | 3 +- + .../x/sys/unix/syscall_linux_arm.go | 27 - + .../x/sys/unix/syscall_linux_arm64.go | 12 +- + .../x/sys/unix/syscall_linux_loong64.go | 7 +- + .../x/sys/unix/syscall_linux_mips64x.go | 3 +- + .../x/sys/unix/syscall_linux_mipsx.go | 27 - + .../x/sys/unix/syscall_linux_ppc.go | 27 - + .../x/sys/unix/syscall_linux_ppc64x.go | 1 - + .../x/sys/unix/syscall_linux_riscv64.go | 14 +- + .../x/sys/unix/syscall_linux_s390x.go | 1 - + .../x/sys/unix/syscall_linux_sparc64.go | 1 - + .../golang.org/x/sys/unix/syscall_netbsd.go | 279 +- + .../golang.org/x/sys/unix/syscall_openbsd.go | 93 +- + .../golang.org/x/sys/unix/syscall_solaris.go | 68 +- + vendor/golang.org/x/sys/unix/syscall_unix.go | 18 + + .../x/sys/unix/syscall_zos_s390x.go | 21 +- + .../x/sys/unix/zerrors_darwin_amd64.go | 19 + + .../x/sys/unix/zerrors_darwin_arm64.go | 19 + + vendor/golang.org/x/sys/unix/zerrors_linux.go | 76 +- + .../x/sys/unix/zerrors_linux_386.go | 11 + + .../x/sys/unix/zerrors_linux_amd64.go | 11 + + .../x/sys/unix/zerrors_linux_arm.go | 11 + + .../x/sys/unix/zerrors_linux_arm64.go | 13 + + .../x/sys/unix/zerrors_linux_loong64.go | 13 + + .../x/sys/unix/zerrors_linux_mips.go | 11 + + .../x/sys/unix/zerrors_linux_mips64.go | 11 + + .../x/sys/unix/zerrors_linux_mips64le.go | 11 + + .../x/sys/unix/zerrors_linux_mipsle.go | 11 + + .../x/sys/unix/zerrors_linux_ppc.go | 11 + + .../x/sys/unix/zerrors_linux_ppc64.go | 11 + + .../x/sys/unix/zerrors_linux_ppc64le.go | 11 + + .../x/sys/unix/zerrors_linux_riscv64.go | 11 + + .../x/sys/unix/zerrors_linux_s390x.go | 11 + + .../x/sys/unix/zerrors_linux_sparc64.go | 59 + + .../x/sys/unix/zptrace_armnn_linux.go | 8 +- + .../x/sys/unix/zptrace_linux_arm64.go | 4 +- + .../x/sys/unix/zptrace_mipsnn_linux.go | 8 +- + .../x/sys/unix/zptrace_mipsnnle_linux.go | 8 +- + .../x/sys/unix/zptrace_x86_linux.go | 8 +- + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 45 +- + .../x/sys/unix/zsyscall_aix_ppc64.go | 46 +- + .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 17 +- + .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 18 +- + .../x/sys/unix/zsyscall_darwin_amd64.go | 71 +- + .../x/sys/unix/zsyscall_darwin_amd64.s | 160 +- + .../x/sys/unix/zsyscall_darwin_arm64.go | 71 +- + .../x/sys/unix/zsyscall_darwin_arm64.s | 160 +- + .../x/sys/unix/zsyscall_dragonfly_amd64.go | 42 +- + .../x/sys/unix/zsyscall_freebsd_386.go | 52 +- + .../x/sys/unix/zsyscall_freebsd_amd64.go | 52 +- + .../x/sys/unix/zsyscall_freebsd_arm.go | 52 +- + .../x/sys/unix/zsyscall_freebsd_arm64.go | 52 +- + .../x/sys/unix/zsyscall_freebsd_riscv64.go | 52 +- + .../x/sys/unix/zsyscall_illumos_amd64.go | 10 +- + .../golang.org/x/sys/unix/zsyscall_linux.go | 89 +- + .../x/sys/unix/zsyscall_linux_386.go | 10 - + .../x/sys/unix/zsyscall_linux_amd64.go | 10 - + .../x/sys/unix/zsyscall_linux_arm.go | 10 - + .../x/sys/unix/zsyscall_linux_arm64.go | 10 - + .../x/sys/unix/zsyscall_linux_mips.go | 10 - + .../x/sys/unix/zsyscall_linux_mips64.go | 10 - + 
.../x/sys/unix/zsyscall_linux_mips64le.go | 10 - + .../x/sys/unix/zsyscall_linux_mipsle.go | 10 - + .../x/sys/unix/zsyscall_linux_ppc.go | 10 - + .../x/sys/unix/zsyscall_linux_ppc64.go | 10 - + .../x/sys/unix/zsyscall_linux_ppc64le.go | 10 - + .../x/sys/unix/zsyscall_linux_riscv64.go | 26 +- + .../x/sys/unix/zsyscall_linux_s390x.go | 10 - + .../x/sys/unix/zsyscall_linux_sparc64.go | 10 - + .../x/sys/unix/zsyscall_netbsd_386.go | 49 +- + .../x/sys/unix/zsyscall_netbsd_amd64.go | 49 +- + .../x/sys/unix/zsyscall_netbsd_arm.go | 49 +- + .../x/sys/unix/zsyscall_netbsd_arm64.go | 49 +- + .../x/sys/unix/zsyscall_openbsd_386.go | 68 +- + .../x/sys/unix/zsyscall_openbsd_386.s | 15 +- + .../x/sys/unix/zsyscall_openbsd_amd64.go | 68 +- + .../x/sys/unix/zsyscall_openbsd_amd64.s | 15 +- + .../x/sys/unix/zsyscall_openbsd_arm.go | 68 +- + .../x/sys/unix/zsyscall_openbsd_arm.s | 15 +- + .../x/sys/unix/zsyscall_openbsd_arm64.go | 68 +- + .../x/sys/unix/zsyscall_openbsd_arm64.s | 15 +- + .../x/sys/unix/zsyscall_openbsd_mips64.go | 68 +- + .../x/sys/unix/zsyscall_openbsd_mips64.s | 15 +- + .../x/sys/unix/zsyscall_openbsd_ppc64.go | 68 +- + .../x/sys/unix/zsyscall_openbsd_ppc64.s | 18 +- + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 68 +- + .../x/sys/unix/zsyscall_openbsd_riscv64.s | 15 +- + .../x/sys/unix/zsyscall_solaris_amd64.go | 280 +- + .../x/sys/unix/zsyscall_zos_s390x.go | 23 +- + .../x/sys/unix/zsysnum_linux_386.go | 1 + + .../x/sys/unix/zsysnum_linux_amd64.go | 1 + + .../x/sys/unix/zsysnum_linux_arm.go | 1 + + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + + .../x/sys/unix/zsysnum_linux_loong64.go | 1 + + .../x/sys/unix/zsysnum_linux_mips.go | 1 + + .../x/sys/unix/zsysnum_linux_mips64.go | 1 + + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + + .../x/sys/unix/zsysnum_linux_ppc.go | 1 + + .../x/sys/unix/zsysnum_linux_ppc64.go | 1 + + .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 + + .../x/sys/unix/zsysnum_linux_riscv64.go | 3 + + .../x/sys/unix/zsysnum_linux_s390x.go | 2 + + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + + .../x/sys/unix/ztypes_darwin_amd64.go | 11 + + .../x/sys/unix/ztypes_darwin_arm64.go | 11 + + .../x/sys/unix/ztypes_freebsd_386.go | 2 +- + .../x/sys/unix/ztypes_freebsd_amd64.go | 2 +- + .../x/sys/unix/ztypes_freebsd_arm.go | 2 +- + .../x/sys/unix/ztypes_freebsd_arm64.go | 2 +- + .../x/sys/unix/ztypes_freebsd_riscv64.go | 2 +- + vendor/golang.org/x/sys/unix/ztypes_linux.go | 225 +- + .../golang.org/x/sys/unix/ztypes_linux_386.go | 4 +- + .../x/sys/unix/ztypes_linux_amd64.go | 4 +- + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 4 +- + .../x/sys/unix/ztypes_linux_arm64.go | 4 +- + .../x/sys/unix/ztypes_linux_loong64.go | 4 +- + .../x/sys/unix/ztypes_linux_mips.go | 4 +- + .../x/sys/unix/ztypes_linux_mips64.go | 4 +- + .../x/sys/unix/ztypes_linux_mips64le.go | 4 +- + .../x/sys/unix/ztypes_linux_mipsle.go | 4 +- + .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 4 +- + .../x/sys/unix/ztypes_linux_ppc64.go | 4 +- + .../x/sys/unix/ztypes_linux_ppc64le.go | 4 +- + .../x/sys/unix/ztypes_linux_riscv64.go | 31 +- + .../x/sys/unix/ztypes_linux_s390x.go | 4 +- + .../x/sys/unix/ztypes_linux_sparc64.go | 4 +- + .../golang.org/x/sys/windows/env_windows.go | 6 +- + .../golang.org/x/sys/windows/exec_windows.go | 92 +- + .../x/sys/windows/security_windows.go | 21 +- + vendor/golang.org/x/sys/windows/service.go | 11 + + .../x/sys/windows/syscall_windows.go | 76 +- + .../golang.org/x/sys/windows/types_windows.go | 102 +- + 
.../x/sys/windows/zsyscall_windows.go | 98 +- + vendor/golang.org/x/term/term_unix.go | 2 +- + .../x/text/encoding/internal/internal.go | 2 +- + .../text/internal/language/compact/tables.go | 356 +- + .../x/text/internal/language/tables.go | 4686 +++++----- + vendor/golang.org/x/text/language/match.go | 2 +- + vendor/golang.org/x/text/language/tables.go | 138 +- + .../x/text/unicode/bidi/tables13.0.0.go | 4 +- + .../x/text/unicode/bidi/tables15.0.0.go | 2043 +++++ + .../x/text/unicode/norm/forminfo.go | 2 +- + .../x/text/unicode/norm/tables13.0.0.go | 4 +- + .../x/text/unicode/norm/tables15.0.0.go | 7908 +++++++++++++++++ + vendor/golang.org/x/text/unicode/norm/trie.go | 2 +- + .../googleapis/rpc/status/status.pb.go | 10 +- + vendor/google.golang.org/grpc/CONTRIBUTING.md | 32 +- + vendor/google.golang.org/grpc/MAINTAINERS.md | 5 +- + vendor/google.golang.org/grpc/Makefile | 2 - + vendor/google.golang.org/grpc/NOTICE.txt | 13 + + .../grpc/attributes/attributes.go | 109 +- + vendor/google.golang.org/grpc/backoff.go | 2 +- + .../grpc/balancer/balancer.go | 142 +- + .../grpc/balancer/base/balancer.go | 88 +- + .../grpc/balancer/conn_state_evaluator.go | 74 + + .../grpc/balancer/grpclb/state/state.go | 2 +- + .../grpc/balancer/roundrobin/roundrobin.go | 20 +- + .../grpc/balancer_conn_wrappers.go | 488 +- + .../grpc_binarylog_v1/binarylog.pb.go | 22 +- + vendor/google.golang.org/grpc/call.go | 5 + + .../grpc/channelz/channelz.go | 36 + + vendor/google.golang.org/grpc/clientconn.go | 1327 +-- + .../grpc/codes/code_string.go | 51 +- + .../grpc/connectivity/connectivity.go | 35 +- + .../grpc/credentials/credentials.go | 45 +- + .../grpc/credentials/go12.go | 30 - + .../grpc/credentials/insecure/insecure.go | 34 +- + .../google.golang.org/grpc/credentials/tls.go | 9 +- + vendor/google.golang.org/grpc/dialoptions.go | 212 +- + .../grpc/encoding/encoding.go | 9 +- + .../grpc/grpclog/loggerv2.go | 103 +- + vendor/google.golang.org/grpc/idle.go | 287 + + vendor/google.golang.org/grpc/install_gae.sh | 6 - + vendor/google.golang.org/grpc/interceptor.go | 9 +- + .../balancer/gracefulswitch/gracefulswitch.go | 384 + + .../grpc/internal/binarylog/binarylog.go | 118 +- + .../grpc/internal/binarylog/env_config.go | 26 +- + .../grpc/internal/binarylog/method_logger.go | 161 +- + .../grpc/internal/binarylog/sink.go | 12 +- + .../grpc/internal/buffer/unbounded.go | 26 +- + .../grpc/internal/channelz/funcs.go | 240 +- + .../grpc/internal/channelz/id.go | 75 + + .../grpc/internal/channelz/logging.go | 91 +- + .../grpc/internal/channelz/types.go | 39 +- + .../grpc/internal/channelz/types_linux.go | 2 - + .../grpc/internal/channelz/types_nonlinux.go | 5 +- + .../grpc/internal/channelz/util_linux.go | 2 - + .../grpc/internal/channelz/util_nonlinux.go | 3 +- + .../grpc/internal/credentials/spiffe.go | 2 - + .../grpc/internal/credentials/syscallconn.go | 2 - + .../grpc/internal/credentials/util.go | 4 +- + .../grpc/internal/envconfig/envconfig.go | 46 +- + .../grpc/internal/envconfig/observability.go | 42 + + .../grpc/internal/envconfig/xds.go | 95 + + .../grpc/internal/grpclog/grpclog.go | 10 +- + .../grpc/internal/grpclog/prefixLogger.go | 12 + + .../grpc/internal/grpcrand/grpcrand.go | 21 + + .../internal/grpcsync/callback_serializer.go | 119 + + .../oncefunc.go} | 19 +- + .../grpc/internal/grpcutil/compressor.go | 47 + + .../grpcutil.go} | 16 +- + .../grpc/internal/grpcutil/method.go | 6 +- + .../dns/go113.go => grpcutil/regex.go} | 22 +- + .../grpc/internal/grpcutil/target.go | 89 - + .../grpc/internal/internal.go 
| 114 +- + .../grpc/internal/metadata/metadata.go | 88 +- + .../grpc/internal/pretty/pretty.go | 82 + + .../grpc/internal/resolver/config_selector.go | 9 +- + .../internal/resolver/dns/dns_resolver.go | 15 +- + .../resolver/passthrough/passthrough.go | 11 +- + .../grpc/internal/resolver/unix/unix.go | 21 +- + .../grpc/internal/serviceconfig/duration.go | 130 + + .../internal/serviceconfig/serviceconfig.go | 12 +- + .../grpc/internal/status/status.go | 10 + + .../grpc/internal/syscall/syscall_linux.go | 2 - + .../grpc/internal/syscall/syscall_nonlinux.go | 21 +- + .../grpc/internal/transport/controlbuf.go | 145 +- + .../grpc/internal/transport/defaults.go | 6 + + .../grpc/internal/transport/flowcontrol.go | 4 +- + .../grpc/internal/transport/handler_server.go | 80 +- + .../grpc/internal/transport/http2_client.go | 500 +- + .../grpc/internal/transport/http2_server.go | 477 +- + .../grpc/internal/transport/http_util.go | 60 +- + .../grpc/internal/transport/logging.go | 40 + + .../transport/networktype/networktype.go | 2 +- + .../grpc/internal/transport/proxy.go | 4 +- + .../grpc/internal/transport/transport.go | 62 +- + .../grpc/internal/xds_handshake_cluster.go | 2 +- + .../grpc/metadata/metadata.go | 82 +- + .../google.golang.org/grpc/picker_wrapper.go | 77 +- + vendor/google.golang.org/grpc/pickfirst.go | 181 +- + vendor/google.golang.org/grpc/preloader.go | 2 +- + vendor/google.golang.org/grpc/regenerate.sh | 44 +- + vendor/google.golang.org/grpc/resolver/map.go | 138 + + .../grpc/resolver/resolver.go | 110 +- + .../grpc/resolver_conn_wrapper.go | 238 +- + vendor/google.golang.org/grpc/rpc_util.go | 121 +- + vendor/google.golang.org/grpc/server.go | 658 +- + .../google.golang.org/grpc/service_config.go | 95 +- + .../grpc/serviceconfig/serviceconfig.go | 2 +- + vendor/google.golang.org/grpc/stats/stats.go | 29 +- + .../google.golang.org/grpc/status/status.go | 83 +- + vendor/google.golang.org/grpc/stream.go | 617 +- + vendor/google.golang.org/grpc/tap/tap.go | 2 +- + vendor/google.golang.org/grpc/version.go | 2 +- + vendor/google.golang.org/grpc/vet.sh | 47 +- + vendor/google.golang.org/protobuf/AUTHORS | 3 - + .../google.golang.org/protobuf/CONTRIBUTORS | 3 - + .../protobuf/encoding/protojson/decode.go | 665 ++ + .../protobuf/encoding/protojson/doc.go | 11 + + .../protobuf/encoding/protojson/encode.go | 343 + + .../encoding/protojson/well_known_types.go | 895 ++ + .../protobuf/encoding/prototext/decode.go | 116 +- + .../protobuf/encoding/prototext/encode.go | 39 +- + .../protobuf/encoding/protowire/wire.go | 12 +- + .../protobuf/internal/descfmt/stringer.go | 66 +- + .../internal/encoding/defval/default.go | 78 +- + .../protobuf/internal/encoding/json/decode.go | 340 + + .../internal/encoding/json/decode_number.go | 254 + + .../internal/encoding/json/decode_string.go | 91 + + .../internal/encoding/json/decode_token.go | 192 + + .../protobuf/internal/encoding/json/encode.go | 276 + + .../encoding/messageset/messageset.go | 7 +- + .../protobuf/internal/encoding/tag/tag.go | 96 +- + .../protobuf/internal/encoding/text/decode.go | 35 +- + .../internal/encoding/text/decode_number.go | 49 +- + .../protobuf/internal/encoding/text/doc.go | 4 +- + .../protobuf/internal/filedesc/build.go | 19 +- + .../protobuf/internal/filedesc/desc.go | 380 +- + .../protobuf/internal/filedesc/desc_init.go | 36 +- + .../protobuf/internal/filedesc/desc_lazy.go | 80 +- + .../protobuf/internal/filedesc/desc_list.go | 167 +- + .../protobuf/internal/filedesc/placeholder.go | 136 +- + 
.../protobuf/internal/filetype/build.go | 87 +- + .../protobuf/internal/genid/descriptor_gen.go | 90 +- + .../protobuf/internal/impl/api_export.go | 42 +- + .../protobuf/internal/impl/checkinit.go | 12 +- + .../protobuf/internal/impl/codec_extension.go | 36 +- + .../protobuf/internal/impl/codec_field.go | 90 +- + .../protobuf/internal/impl/codec_map.go | 20 +- + .../protobuf/internal/impl/codec_message.go | 30 +- + .../protobuf/internal/impl/codec_tables.go | 290 +- + .../protobuf/internal/impl/convert.go | 229 +- + .../protobuf/internal/impl/convert_list.go | 42 +- + .../protobuf/internal/impl/convert_map.go | 32 +- + .../protobuf/internal/impl/decode.go | 21 +- + .../protobuf/internal/impl/enum.go | 10 +- + .../protobuf/internal/impl/extension.go | 26 +- + .../protobuf/internal/impl/legacy_enum.go | 57 +- + .../protobuf/internal/impl/legacy_export.go | 18 +- + .../internal/impl/legacy_extension.go | 100 +- + .../protobuf/internal/impl/legacy_message.go | 122 +- + .../protobuf/internal/impl/merge.go | 32 +- + .../protobuf/internal/impl/message.go | 41 +- + .../protobuf/internal/impl/message_reflect.go | 74 +- + .../internal/impl/message_reflect_field.go | 118 +- + .../protobuf/internal/impl/validate.go | 50 +- + .../protobuf/internal/impl/weak.go | 16 +- + .../protobuf/internal/order/order.go | 16 +- + .../protobuf/internal/order/range.go | 22 +- + .../protobuf/internal/strs/strings_unsafe.go | 8 +- + .../protobuf/internal/version/version.go | 54 +- + .../protobuf/proto/decode.go | 3 +- + .../google.golang.org/protobuf/proto/doc.go | 24 +- + .../protobuf/proto/encode.go | 5 +- + .../google.golang.org/protobuf/proto/equal.go | 178 +- + .../reflect/protodesc/desc_resolve.go | 6 +- + .../protobuf/reflect/protoreflect/proto.go | 32 +- + .../protobuf/reflect/protoreflect/source.go | 1 + + .../reflect/protoreflect/source_gen.go | 14 + + .../protobuf/reflect/protoreflect/type.go | 1 + + .../protobuf/reflect/protoreflect/value.go | 2 +- + .../reflect/protoreflect/value_equal.go | 168 + + .../reflect/protoreflect/value_union.go | 6 +- + .../reflect/protoregistry/registry.go | 4 +- + .../protobuf/runtime/protoimpl/version.go | 8 +- + .../types/descriptorpb/descriptor.pb.go | 1547 ++-- + .../protobuf/types/known/anypb/any.pb.go | 135 +- + .../types/known/durationpb/duration.pb.go | 63 +- + .../types/known/timestamppb/timestamp.pb.go | 61 +- + vendor/k8s.io/client-go/pkg/version/base.go | 4 +- + vendor/modules.txt | 34 +- + 384 files changed, 36193 insertions(+), 12803 deletions(-) + create mode 100644 vendor/github.com/cespare/xxhash/v2/testall.sh + create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s + rename vendor/github.com/cespare/xxhash/v2/{xxhash_amd64.go => xxhash_asm.go} (73%) + create mode 100644 vendor/github.com/golang/protobuf/jsonpb/decode.go + create mode 100644 vendor/github.com/golang/protobuf/jsonpb/encode.go + create mode 100644 vendor/github.com/golang/protobuf/jsonpb/json.go + delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go + delete mode 100644 vendor/golang.org/x/net/http2/Dockerfile + delete mode 100644 vendor/golang.org/x/net/http2/Makefile + create mode 100644 vendor/golang.org/x/net/http2/writesched_roundrobin.go + create mode 100644 vendor/golang.org/x/net/idna/tables15.0.0.go + create mode 100644 vendor/golang.org/x/net/idna/trie12.0.0.go + create mode 100644 vendor/golang.org/x/net/idna/trie13.0.0.go + delete mode 100644 vendor/golang.org/x/oauth2/AUTHORS + delete mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS + delete mode 
100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go + create mode 100644 vendor/golang.org/x/sys/unix/ioctl_signed.go + rename vendor/golang.org/x/sys/unix/{ioctl.go => ioctl_unsigned.go} (76%) + create mode 100644 vendor/golang.org/x/sys/unix/mmap_nomremap.go + create mode 100644 vendor/golang.org/x/sys/unix/mremap.go + create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go + create mode 100644 vendor/golang.org/x/text/unicode/norm/tables15.0.0.go + create mode 100644 vendor/google.golang.org/grpc/NOTICE.txt + create mode 100644 vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go + create mode 100644 vendor/google.golang.org/grpc/channelz/channelz.go + delete mode 100644 vendor/google.golang.org/grpc/credentials/go12.go + create mode 100644 vendor/google.golang.org/grpc/idle.go + delete mode 100644 vendor/google.golang.org/grpc/install_gae.sh + create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go + create mode 100644 vendor/google.golang.org/grpc/internal/channelz/id.go + create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/observability.go + create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/xds.go + create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go + rename vendor/google.golang.org/grpc/internal/{credentials/spiffe_appengine.go => grpcsync/oncefunc.go} (67%) + create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/compressor.go + rename vendor/google.golang.org/grpc/internal/{credentials/syscallconn_appengine.go => grpcutil/grpcutil.go} (72%) + rename vendor/google.golang.org/grpc/internal/{resolver/dns/go113.go => grpcutil/regex.go} (63%) + delete mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/target.go + create mode 100644 vendor/google.golang.org/grpc/internal/pretty/pretty.go + create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/duration.go + create mode 100644 vendor/google.golang.org/grpc/internal/transport/logging.go + create mode 100644 vendor/google.golang.org/grpc/resolver/map.go + delete mode 100644 vendor/google.golang.org/protobuf/AUTHORS + delete mode 100644 vendor/google.golang.org/protobuf/CONTRIBUTORS + create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/decode.go + create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/doc.go + create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/encode.go + create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go + create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode.go + create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go + create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go + create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go + create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/encode.go + create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go + +diff --git a/go.mod b/go.mod +index c731104..192b65c 100644 +--- a/go.mod ++++ b/go.mod +@@ -13,9 +13,9 @@ require ( + github.com/onsi/gomega v1.24.0 + github.com/pkg/errors v0.9.1 // indirect + github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 +- golang.org/x/net v0.7.0 +- golang.org/x/sys v0.5.0 +- google.golang.org/grpc v1.40.0 ++ golang.org/x/net v0.17.0 ++ 
golang.org/x/sys v0.13.0 ++ google.golang.org/grpc v1.56.3 + gopkg.in/natefinch/lumberjack.v2 v2.0.0 + k8s.io/api v0.22.8 + k8s.io/apimachinery v0.22.8 +@@ -27,16 +27,19 @@ require ( + sigs.k8s.io/yaml v1.3.0 // indirect + ) + +-require github.com/prometheus/client_golang v1.12.2 ++require ( ++ github.com/prometheus/client_golang v1.12.2 ++ github.com/spf13/pflag v1.0.5 ++) + + require ( + github.com/beorn7/perks v1.0.1 // indirect +- github.com/cespare/xxhash/v2 v2.1.2 // indirect ++ github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect +- github.com/golang/protobuf v1.5.2 // indirect ++ github.com/golang/protobuf v1.5.3 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/googleapis/gnostic v0.5.5 // indirect +@@ -48,15 +51,14 @@ require ( + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect +- github.com/spf13/pflag v1.0.5 // indirect + github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect +- golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect +- golang.org/x/term v0.5.0 // indirect +- golang.org/x/text v0.7.0 // indirect ++ golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/term v0.13.0 // indirect ++ golang.org/x/text v0.13.0 // indirect + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect +- google.golang.org/protobuf v1.28.0 // indirect ++ google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect ++ google.golang.org/protobuf v1.30.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +diff --git a/go.sum b/go.sum +index 1b9825d..0dabda6 100644 +--- a/go.sum ++++ b/go.sum +@@ -70,15 +70,15 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= + github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= ++github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= ++github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= + github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= + github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= + github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= + github.com/containernetworking/plugins v1.1.0 h1:kTIldaDo9SlbQsjhUKvDx0v9q7zyIFJH/Rm9F4xRBro= +@@ -104,7 +104,6 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= + github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= + github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= + github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +@@ -168,8 +167,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD + github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= + github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= + github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= + github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= ++github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= ++github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= + github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= + github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= + github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +@@ -501,16 +501,16 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b + golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= + golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +-golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +-golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= ++golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= ++golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= + golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= +-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= ++golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= ++golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -576,13 +576,13 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc + golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +-golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= ++golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= + golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= + golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +-golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +-golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= ++golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= ++golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= + golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +@@ -590,8 +590,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= + golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +-golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= ++golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= ++golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= + golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= + golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= + golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +@@ -704,8 +704,8 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +-google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 h1:Et6SkiuvnBn+SgrSYXs/BrUpGB4mbdwt4R3vaPIlicA= +-google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= ++google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= ++google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -722,8 +722,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp + google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= + google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= + google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +-google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= ++google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= ++google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -736,9 +736,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj + google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +-google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= ++google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= ++google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +diff 
--git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md +index 792b4a6..8bf0e5b 100644 +--- a/vendor/github.com/cespare/xxhash/v2/README.md ++++ b/vendor/github.com/cespare/xxhash/v2/README.md +@@ -3,8 +3,7 @@ + [![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) + [![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) + +-xxhash is a Go implementation of the 64-bit +-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a ++xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a + high-quality hashing algorithm that is much faster than anything in the Go + standard library. + +@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) + func (*Digest) Sum64() uint64 + ``` + +-This implementation provides a fast pure-Go implementation and an even faster +-assembly implementation for amd64. ++The package is written with optimized pure Go and also contains even faster ++assembly implementations for amd64 and arm64. If desired, the `purego` build tag ++opts into using the Go code even on those architectures. ++ ++[xxHash]: http://cyan4973.github.io/xxHash/ + + ## Compatibility + +@@ -45,19 +47,20 @@ I recommend using the latest release of Go. + Here are some quick benchmarks comparing the pure-Go and assembly + implementations of Sum64. + +-| input size | purego | asm | +-| --- | --- | --- | +-| 5 B | 979.66 MB/s | 1291.17 MB/s | +-| 100 B | 7475.26 MB/s | 7973.40 MB/s | +-| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +-| 10 MB | 17131.46 MB/s | 17142.16 MB/s | ++| input size | purego | asm | ++| ---------- | --------- | --------- | ++| 4 B | 1.3 GB/s | 1.2 GB/s | ++| 16 B | 2.9 GB/s | 3.5 GB/s | ++| 100 B | 6.9 GB/s | 8.1 GB/s | ++| 4 KB | 11.7 GB/s | 16.7 GB/s | ++| 10 MB | 12.0 GB/s | 17.3 GB/s | + +-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +-the following commands under Go 1.11.2: ++These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C ++CPU using the following commands under Go 1.19.2: + + ``` +-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +-$ go test -benchtime 10s -bench '/xxhash,direct,bytes' ++benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') ++benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') + ``` + + ## Projects using this package +diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh +new file mode 100644 +index 0000000..94b9c44 +--- /dev/null ++++ b/vendor/github.com/cespare/xxhash/v2/testall.sh +@@ -0,0 +1,10 @@ ++#!/bin/bash ++set -eu -o pipefail ++ ++# Small convenience script for running the tests with various combinations of ++# arch/tags. This assumes we're running on amd64 and have qemu available. ++ ++go test ./... ++go test -tags purego ./... ++GOARCH=arm64 go test ++GOARCH=arm64 go test -tags purego +diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go +index 15c835d..a9e0d45 100644 +--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go ++++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go +@@ -16,19 +16,11 @@ const ( + prime5 uint64 = 2870177450012600261 + ) + +-// NOTE(caleb): I'm using both consts and vars of the primes. 
Using consts where +-// possible in the Go code is worth a small (but measurable) performance boost +-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +-// convenience in the Go code in a few places where we need to intentionally +-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +-// result overflows a uint64). +-var ( +- prime1v = prime1 +- prime2v = prime2 +- prime3v = prime3 +- prime4v = prime4 +- prime5v = prime5 +-) ++// Store the primes in an array as well. ++// ++// The consts are used when possible in Go code to avoid MOVs but we need a ++// contiguous array of the assembly code. ++var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + + // Digest implements hash.Hash64. + type Digest struct { +@@ -50,10 +42,10 @@ func New() *Digest { + + // Reset clears the Digest's state so that it can be reused. + func (d *Digest) Reset() { +- d.v1 = prime1v + prime2 ++ d.v1 = primes[0] + prime2 + d.v2 = prime2 + d.v3 = 0 +- d.v4 = -prime1v ++ d.v4 = -primes[0] + d.total = 0 + d.n = 0 + } +@@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + ++ memleft := d.mem[d.n&(len(d.mem)-1):] ++ + if d.n+n < 32 { + // This new data doesn't even fill the current block. +- copy(d.mem[d.n:], b) ++ copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. +- copy(d.mem[d.n:], b) ++ c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) +- b = b[32-d.n:] ++ b = b[c:] + d.n = 0 + } + +@@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 { + + h += d.total + +- i, end := 0, d.n +- for ; i+8 <= end; i += 8 { +- k1 := round(0, u64(d.mem[i:i+8])) ++ b := d.mem[:d.n&(len(d.mem)-1)] ++ for ; len(b) >= 8; b = b[8:] { ++ k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } +- if i+4 <= end { +- h ^= uint64(u32(d.mem[i:i+4])) * prime1 ++ if len(b) >= 4 { ++ h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 +- i += 4 ++ b = b[4:] + } +- for i < end { +- h ^= uint64(d.mem[i]) * prime5 ++ for ; len(b) > 0; b = b[1:] { ++ h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 +- i++ + } + + h ^= h >> 33 +diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +index be8db5b..3e8b132 100644 +--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s ++++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +@@ -1,215 +1,209 @@ ++//go:build !appengine && gc && !purego + // +build !appengine + // +build gc + // +build !purego + + #include "textflag.h" + +-// Register allocation: +-// AX h +-// SI pointer to advance through b +-// DX n +-// BX loop end +-// R8 v1, k1 +-// R9 v2 +-// R10 v3 +-// R11 v4 +-// R12 tmp +-// R13 prime1v +-// R14 prime2v +-// DI prime4v +- +-// round reads from and advances the buffer pointer in SI. +-// It assumes that R13 has prime1v and R14 has prime2v. +-#define round(r) \ +- MOVQ (SI), R12 \ +- ADDQ $8, SI \ +- IMULQ R14, R12 \ +- ADDQ R12, r \ +- ROLQ $31, r \ +- IMULQ R13, r +- +-// mergeRound applies a merge round on the two registers acc and val. +-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. 
+-#define mergeRound(acc, val) \ +- IMULQ R14, val \ +- ROLQ $31, val \ +- IMULQ R13, val \ +- XORQ val, acc \ +- IMULQ R13, acc \ +- ADDQ DI, acc ++// Registers: ++#define h AX ++#define d AX ++#define p SI // pointer to advance through b ++#define n DX ++#define end BX // loop end ++#define v1 R8 ++#define v2 R9 ++#define v3 R10 ++#define v4 R11 ++#define x R12 ++#define prime1 R13 ++#define prime2 R14 ++#define prime4 DI ++ ++#define round(acc, x) \ ++ IMULQ prime2, x \ ++ ADDQ x, acc \ ++ ROLQ $31, acc \ ++ IMULQ prime1, acc ++ ++// round0 performs the operation x = round(0, x). ++#define round0(x) \ ++ IMULQ prime2, x \ ++ ROLQ $31, x \ ++ IMULQ prime1, x ++ ++// mergeRound applies a merge round on the two registers acc and x. ++// It assumes that prime1, prime2, and prime4 have been loaded. ++#define mergeRound(acc, x) \ ++ round0(x) \ ++ XORQ x, acc \ ++ IMULQ prime1, acc \ ++ ADDQ prime4, acc ++ ++// blockLoop processes as many 32-byte blocks as possible, ++// updating v1, v2, v3, and v4. It assumes that there is at least one block ++// to process. ++#define blockLoop() \ ++loop: \ ++ MOVQ +0(p), x \ ++ round(v1, x) \ ++ MOVQ +8(p), x \ ++ round(v2, x) \ ++ MOVQ +16(p), x \ ++ round(v3, x) \ ++ MOVQ +24(p), x \ ++ round(v4, x) \ ++ ADDQ $32, p \ ++ CMPQ p, end \ ++ JLE loop + + // func Sum64(b []byte) uint64 +-TEXT ·Sum64(SB), NOSPLIT, $0-32 ++TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. +- MOVQ ·prime1v(SB), R13 +- MOVQ ·prime2v(SB), R14 +- MOVQ ·prime4v(SB), DI ++ MOVQ ·primes+0(SB), prime1 ++ MOVQ ·primes+8(SB), prime2 ++ MOVQ ·primes+24(SB), prime4 + + // Load slice. +- MOVQ b_base+0(FP), SI +- MOVQ b_len+8(FP), DX +- LEAQ (SI)(DX*1), BX ++ MOVQ b_base+0(FP), p ++ MOVQ b_len+8(FP), n ++ LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. +- SUBQ $32, BX ++ SUBQ $32, end + + // Check whether we have at least one block. +- CMPQ DX, $32 ++ CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). +- MOVQ R13, R8 +- ADDQ R14, R8 +- MOVQ R14, R9 +- XORQ R10, R10 +- XORQ R11, R11 +- SUBQ R13, R11 +- +- // Loop until SI > BX. +-blockLoop: +- round(R8) +- round(R9) +- round(R10) +- round(R11) +- +- CMPQ SI, BX +- JLE blockLoop +- +- MOVQ R8, AX +- ROLQ $1, AX +- MOVQ R9, R12 +- ROLQ $7, R12 +- ADDQ R12, AX +- MOVQ R10, R12 +- ROLQ $12, R12 +- ADDQ R12, AX +- MOVQ R11, R12 +- ROLQ $18, R12 +- ADDQ R12, AX +- +- mergeRound(AX, R8) +- mergeRound(AX, R9) +- mergeRound(AX, R10) +- mergeRound(AX, R11) ++ MOVQ prime1, v1 ++ ADDQ prime2, v1 ++ MOVQ prime2, v2 ++ XORQ v3, v3 ++ XORQ v4, v4 ++ SUBQ prime1, v4 ++ ++ blockLoop() ++ ++ MOVQ v1, h ++ ROLQ $1, h ++ MOVQ v2, x ++ ROLQ $7, x ++ ADDQ x, h ++ MOVQ v3, x ++ ROLQ $12, x ++ ADDQ x, h ++ MOVQ v4, x ++ ROLQ $18, x ++ ADDQ x, h ++ ++ mergeRound(h, v1) ++ mergeRound(h, v2) ++ mergeRound(h, v3) ++ mergeRound(h, v4) + + JMP afterBlocks + + noBlocks: +- MOVQ ·prime5v(SB), AX ++ MOVQ ·primes+32(SB), h + + afterBlocks: +- ADDQ DX, AX +- +- // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. +- ADDQ $24, BX +- +- CMPQ SI, BX +- JG fourByte +- +-wordLoop: +- // Calculate k1. 
+- MOVQ (SI), R8 +- ADDQ $8, SI +- IMULQ R14, R8 +- ROLQ $31, R8 +- IMULQ R13, R8 +- +- XORQ R8, AX +- ROLQ $27, AX +- IMULQ R13, AX +- ADDQ DI, AX +- +- CMPQ SI, BX +- JLE wordLoop +- +-fourByte: +- ADDQ $4, BX +- CMPQ SI, BX +- JG singles +- +- MOVL (SI), R8 +- ADDQ $4, SI +- IMULQ R13, R8 +- XORQ R8, AX +- +- ROLQ $23, AX +- IMULQ R14, AX +- ADDQ ·prime3v(SB), AX +- +-singles: +- ADDQ $4, BX +- CMPQ SI, BX ++ ADDQ n, h ++ ++ ADDQ $24, end ++ CMPQ p, end ++ JG try4 ++ ++loop8: ++ MOVQ (p), x ++ ADDQ $8, p ++ round0(x) ++ XORQ x, h ++ ROLQ $27, h ++ IMULQ prime1, h ++ ADDQ prime4, h ++ ++ CMPQ p, end ++ JLE loop8 ++ ++try4: ++ ADDQ $4, end ++ CMPQ p, end ++ JG try1 ++ ++ MOVL (p), x ++ ADDQ $4, p ++ IMULQ prime1, x ++ XORQ x, h ++ ++ ROLQ $23, h ++ IMULQ prime2, h ++ ADDQ ·primes+16(SB), h ++ ++try1: ++ ADDQ $4, end ++ CMPQ p, end + JGE finalize + +-singlesLoop: +- MOVBQZX (SI), R12 +- ADDQ $1, SI +- IMULQ ·prime5v(SB), R12 +- XORQ R12, AX ++loop1: ++ MOVBQZX (p), x ++ ADDQ $1, p ++ IMULQ ·primes+32(SB), x ++ XORQ x, h ++ ROLQ $11, h ++ IMULQ prime1, h + +- ROLQ $11, AX +- IMULQ R13, AX +- +- CMPQ SI, BX +- JL singlesLoop ++ CMPQ p, end ++ JL loop1 + + finalize: +- MOVQ AX, R12 +- SHRQ $33, R12 +- XORQ R12, AX +- IMULQ R14, AX +- MOVQ AX, R12 +- SHRQ $29, R12 +- XORQ R12, AX +- IMULQ ·prime3v(SB), AX +- MOVQ AX, R12 +- SHRQ $32, R12 +- XORQ R12, AX +- +- MOVQ AX, ret+24(FP) ++ MOVQ h, x ++ SHRQ $33, x ++ XORQ x, h ++ IMULQ prime2, h ++ MOVQ h, x ++ SHRQ $29, x ++ XORQ x, h ++ IMULQ ·primes+16(SB), h ++ MOVQ h, x ++ SHRQ $32, x ++ XORQ x, h ++ ++ MOVQ h, ret+24(FP) + RET + +-// writeBlocks uses the same registers as above except that it uses AX to store +-// the d pointer. +- + // func writeBlocks(d *Digest, b []byte) int +-TEXT ·writeBlocks(SB), NOSPLIT, $0-40 ++TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. +- MOVQ ·prime1v(SB), R13 +- MOVQ ·prime2v(SB), R14 ++ MOVQ ·primes+0(SB), prime1 ++ MOVQ ·primes+8(SB), prime2 + + // Load slice. +- MOVQ b_base+8(FP), SI +- MOVQ b_len+16(FP), DX +- LEAQ (SI)(DX*1), BX +- SUBQ $32, BX ++ MOVQ b_base+8(FP), p ++ MOVQ b_len+16(FP), n ++ LEAQ (p)(n*1), end ++ SUBQ $32, end + + // Load vN from d. +- MOVQ d+0(FP), AX +- MOVQ 0(AX), R8 // v1 +- MOVQ 8(AX), R9 // v2 +- MOVQ 16(AX), R10 // v3 +- MOVQ 24(AX), R11 // v4 ++ MOVQ s+0(FP), d ++ MOVQ 0(d), v1 ++ MOVQ 8(d), v2 ++ MOVQ 16(d), v3 ++ MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +-blockLoop: +- round(R8) +- round(R9) +- round(R10) +- round(R11) +- +- CMPQ SI, BX +- JLE blockLoop ++ blockLoop() + + // Copy vN back to d. +- MOVQ R8, 0(AX) +- MOVQ R9, 8(AX) +- MOVQ R10, 16(AX) +- MOVQ R11, 24(AX) +- +- // The number of bytes written is SI minus the old base pointer. +- SUBQ b_base+8(FP), SI +- MOVQ SI, ret+32(FP) ++ MOVQ v1, 0(d) ++ MOVQ v2, 8(d) ++ MOVQ v3, 16(d) ++ MOVQ v4, 24(d) ++ ++ // The number of bytes written is p minus the old base pointer. 
++ SUBQ b_base+8(FP), p ++ MOVQ p, ret+32(FP) + + RET +diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s +new file mode 100644 +index 0000000..7e3145a +--- /dev/null ++++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s +@@ -0,0 +1,183 @@ ++//go:build !appengine && gc && !purego ++// +build !appengine ++// +build gc ++// +build !purego ++ ++#include "textflag.h" ++ ++// Registers: ++#define digest R1 ++#define h R2 // return value ++#define p R3 // input pointer ++#define n R4 // input length ++#define nblocks R5 // n / 32 ++#define prime1 R7 ++#define prime2 R8 ++#define prime3 R9 ++#define prime4 R10 ++#define prime5 R11 ++#define v1 R12 ++#define v2 R13 ++#define v3 R14 ++#define v4 R15 ++#define x1 R20 ++#define x2 R21 ++#define x3 R22 ++#define x4 R23 ++ ++#define round(acc, x) \ ++ MADD prime2, acc, x, acc \ ++ ROR $64-31, acc \ ++ MUL prime1, acc ++ ++// round0 performs the operation x = round(0, x). ++#define round0(x) \ ++ MUL prime2, x \ ++ ROR $64-31, x \ ++ MUL prime1, x ++ ++#define mergeRound(acc, x) \ ++ round0(x) \ ++ EOR x, acc \ ++ MADD acc, prime4, prime1, acc ++ ++// blockLoop processes as many 32-byte blocks as possible, ++// updating v1, v2, v3, and v4. It assumes that n >= 32. ++#define blockLoop() \ ++ LSR $5, n, nblocks \ ++ PCALIGN $16 \ ++ loop: \ ++ LDP.P 16(p), (x1, x2) \ ++ LDP.P 16(p), (x3, x4) \ ++ round(v1, x1) \ ++ round(v2, x2) \ ++ round(v3, x3) \ ++ round(v4, x4) \ ++ SUB $1, nblocks \ ++ CBNZ nblocks, loop ++ ++// func Sum64(b []byte) uint64 ++TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 ++ LDP b_base+0(FP), (p, n) ++ ++ LDP ·primes+0(SB), (prime1, prime2) ++ LDP ·primes+16(SB), (prime3, prime4) ++ MOVD ·primes+32(SB), prime5 ++ ++ CMP $32, n ++ CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } ++ BLT afterLoop ++ ++ ADD prime1, prime2, v1 ++ MOVD prime2, v2 ++ MOVD $0, v3 ++ NEG prime1, v4 ++ ++ blockLoop() ++ ++ ROR $64-1, v1, x1 ++ ROR $64-7, v2, x2 ++ ADD x1, x2 ++ ROR $64-12, v3, x3 ++ ROR $64-18, v4, x4 ++ ADD x3, x4 ++ ADD x2, x4, h ++ ++ mergeRound(h, v1) ++ mergeRound(h, v2) ++ mergeRound(h, v3) ++ mergeRound(h, v4) ++ ++afterLoop: ++ ADD n, h ++ ++ TBZ $4, n, try8 ++ LDP.P 16(p), (x1, x2) ++ ++ round0(x1) ++ ++ // NOTE: here and below, sequencing the EOR after the ROR (using a ++ // rotated register) is worth a small but measurable speedup for small ++ // inputs. ++ ROR $64-27, h ++ EOR x1 @> 64-27, h, h ++ MADD h, prime4, prime1, h ++ ++ round0(x2) ++ ROR $64-27, h ++ EOR x2 @> 64-27, h, h ++ MADD h, prime4, prime1, h ++ ++try8: ++ TBZ $3, n, try4 ++ MOVD.P 8(p), x1 ++ ++ round0(x1) ++ ROR $64-27, h ++ EOR x1 @> 64-27, h, h ++ MADD h, prime4, prime1, h ++ ++try4: ++ TBZ $2, n, try2 ++ MOVWU.P 4(p), x2 ++ ++ MUL prime1, x2 ++ ROR $64-23, h ++ EOR x2 @> 64-23, h, h ++ MADD h, prime3, prime2, h ++ ++try2: ++ TBZ $1, n, try1 ++ MOVHU.P 2(p), x3 ++ AND $255, x3, x1 ++ LSR $8, x3, x2 ++ ++ MUL prime5, x1 ++ ROR $64-11, h ++ EOR x1 @> 64-11, h, h ++ MUL prime1, h ++ ++ MUL prime5, x2 ++ ROR $64-11, h ++ EOR x2 @> 64-11, h, h ++ MUL prime1, h ++ ++try1: ++ TBZ $0, n, finalize ++ MOVBU (p), x4 ++ ++ MUL prime5, x4 ++ ROR $64-11, h ++ EOR x4 @> 64-11, h, h ++ MUL prime1, h ++ ++finalize: ++ EOR h >> 33, h ++ MUL prime2, h ++ EOR h >> 29, h ++ MUL prime3, h ++ EOR h >> 32, h ++ ++ MOVD h, ret+24(FP) ++ RET ++ ++// func writeBlocks(d *Digest, b []byte) int ++TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 ++ LDP ·primes+0(SB), (prime1, prime2) ++ ++ // Load state. 
Assume v[1-4] are stored contiguously. ++ MOVD d+0(FP), digest ++ LDP 0(digest), (v1, v2) ++ LDP 16(digest), (v3, v4) ++ ++ LDP b_base+8(FP), (p, n) ++ ++ blockLoop() ++ ++ // Store updated state. ++ STP (v1, v2), 0(digest) ++ STP (v3, v4), 16(digest) ++ ++ BIC $31, n ++ MOVD n, ret+32(FP) ++ RET +diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +similarity index 73% +rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +index ad14b80..9216e0a 100644 +--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go ++++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +@@ -1,3 +1,5 @@ ++//go:build (amd64 || arm64) && !appengine && gc && !purego ++// +build amd64 arm64 + // +build !appengine + // +build gc + // +build !purego +diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +index 4a5a821..26df13b 100644 +--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go ++++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +@@ -1,4 +1,5 @@ +-// +build !amd64 appengine !gc purego ++//go:build (!amd64 && !arm64) || appengine || !gc || purego ++// +build !amd64,!arm64 appengine !gc purego + + package xxhash + +@@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { + var h uint64 + + if n >= 32 { +- v1 := prime1v + prime2 ++ v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) +- v4 := -prime1v ++ v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) +@@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { + + h += uint64(n) + +- i, end := 0, len(b) +- for ; i+8 <= end; i += 8 { +- k1 := round(0, u64(b[i:i+8:len(b)])) ++ for ; len(b) >= 8; b = b[8:] { ++ k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } +- if i+4 <= end { +- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 ++ if len(b) >= 4 { ++ h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 +- i += 4 ++ b = b[4:] + } +- for ; i < end; i++ { +- h ^= uint64(b[i]) * prime5 ++ for ; len(b) > 0; b = b[1:] { ++ h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + +diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +index fc9bea7..e86f1b5 100644 +--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go ++++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +@@ -1,3 +1,4 @@ ++//go:build appengine + // +build appengine + + // This file contains the safe implementations of otherwise unsafe-using code. +diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +index 376e0ca..1c1638f 100644 +--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go ++++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +@@ -1,3 +1,4 @@ ++//go:build !appengine + // +build !appengine + + // This file encapsulates usage of unsafe. +@@ -11,7 +12,7 @@ import ( + + // In the future it's possible that compiler optimizations will make these + // XxxString functions unnecessary by realizing that calls such as +-// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. ++// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. + // If that happens, even if we keep these functions they can be replaced with + // the trivial safe code. 
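(Illustrative aside, not part of the vendored diff above.) The xxhash changes replace the old per-prime globals with a shared ·primes table in the amd64 assembly, add a new arm64 assembly backend, and move the build tags into xxhash_asm.go so that an assembly implementation is selected on amd64/arm64 unless purego is set, with xxhash_other.go as the pure-Go fallback. A minimal usage sketch of the package's exported API (assuming the usual github.com/cespare/xxhash/v2 import path) looks like this:

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing: Sum64 uses the assembly implementation on
	// amd64/arm64 builds and the pure-Go fallback elsewhere.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, world")))

	// Streaming hashing: the Digest feeds full 32-byte blocks to
	// writeBlocks, which updates the v1..v4 state shown in the assembly.
	d := xxhash.New()
	d.Write([]byte("hello, "))
	d.Write([]byte("world"))
	fmt.Printf("%016x\n", d.Sum64())
}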
+ +diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go +new file mode 100644 +index 0000000..6c16c25 +--- /dev/null ++++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go +@@ -0,0 +1,530 @@ ++// Copyright 2015 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package jsonpb ++ ++import ( ++ "encoding/json" ++ "errors" ++ "fmt" ++ "io" ++ "math" ++ "reflect" ++ "strconv" ++ "strings" ++ "time" ++ ++ "github.com/golang/protobuf/proto" ++ "google.golang.org/protobuf/encoding/protojson" ++ protoV2 "google.golang.org/protobuf/proto" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoregistry" ++) ++ ++const wrapJSONUnmarshalV2 = false ++ ++// UnmarshalNext unmarshals the next JSON object from d into m. ++func UnmarshalNext(d *json.Decoder, m proto.Message) error { ++ return new(Unmarshaler).UnmarshalNext(d, m) ++} ++ ++// Unmarshal unmarshals a JSON object from r into m. ++func Unmarshal(r io.Reader, m proto.Message) error { ++ return new(Unmarshaler).Unmarshal(r, m) ++} ++ ++// UnmarshalString unmarshals a JSON object from s into m. ++func UnmarshalString(s string, m proto.Message) error { ++ return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) ++} ++ ++// Unmarshaler is a configurable object for converting from a JSON ++// representation to a protocol buffer object. ++type Unmarshaler struct { ++ // AllowUnknownFields specifies whether to allow messages to contain ++ // unknown JSON fields, as opposed to failing to unmarshal. ++ AllowUnknownFields bool ++ ++ // AnyResolver is used to resolve the google.protobuf.Any well-known type. ++ // If unset, the global registry is used by default. ++ AnyResolver AnyResolver ++} ++ ++// JSONPBUnmarshaler is implemented by protobuf messages that customize the way ++// they are unmarshaled from JSON. Messages that implement this should also ++// implement JSONPBMarshaler so that the custom format can be produced. ++// ++// The JSON unmarshaling must follow the JSON to proto specification: ++// https://developers.google.com/protocol-buffers/docs/proto3#json ++// ++// Deprecated: Custom types should implement protobuf reflection instead. ++type JSONPBUnmarshaler interface { ++ UnmarshalJSONPB(*Unmarshaler, []byte) error ++} ++ ++// Unmarshal unmarshals a JSON object from r into m. ++func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { ++ return u.UnmarshalNext(json.NewDecoder(r), m) ++} ++ ++// UnmarshalNext unmarshals the next JSON object from d into m. ++func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { ++ if m == nil { ++ return errors.New("invalid nil message") ++ } ++ ++ // Parse the next JSON object from the stream. ++ raw := json.RawMessage{} ++ if err := d.Decode(&raw); err != nil { ++ return err ++ } ++ ++ // Check for custom unmarshalers first since they may not properly ++ // implement protobuf reflection that the logic below relies on. ++ if jsu, ok := m.(JSONPBUnmarshaler); ok { ++ return jsu.UnmarshalJSONPB(u, raw) ++ } ++ ++ mr := proto.MessageReflect(m) ++ ++ // NOTE: For historical reasons, a top-level null is treated as a noop. ++ // This is incorrect, but kept for compatibility. 
++ if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { ++ return nil ++ } ++ ++ if wrapJSONUnmarshalV2 { ++ // NOTE: If input message is non-empty, we need to preserve merge semantics ++ // of the old jsonpb implementation. These semantics are not supported by ++ // the protobuf JSON specification. ++ isEmpty := true ++ mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { ++ isEmpty = false // at least one iteration implies non-empty ++ return false ++ }) ++ if !isEmpty { ++ // Perform unmarshaling into a newly allocated, empty message. ++ mr = mr.New() ++ ++ // Use a defer to copy all unmarshaled fields into the original message. ++ dst := proto.MessageReflect(m) ++ defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { ++ dst.Set(fd, v) ++ return true ++ }) ++ } ++ ++ // Unmarshal using the v2 JSON unmarshaler. ++ opts := protojson.UnmarshalOptions{ ++ DiscardUnknown: u.AllowUnknownFields, ++ } ++ if u.AnyResolver != nil { ++ opts.Resolver = anyResolver{u.AnyResolver} ++ } ++ return opts.Unmarshal(raw, mr.Interface()) ++ } else { ++ if err := u.unmarshalMessage(mr, raw); err != nil { ++ return err ++ } ++ return protoV2.CheckInitialized(mr.Interface()) ++ } ++} ++ ++func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { ++ md := m.Descriptor() ++ fds := md.Fields() ++ ++ if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { ++ return jsu.UnmarshalJSONPB(u, in) ++ } ++ ++ if string(in) == "null" && md.FullName() != "google.protobuf.Value" { ++ return nil ++ } ++ ++ switch wellKnownType(md.FullName()) { ++ case "Any": ++ var jsonObject map[string]json.RawMessage ++ if err := json.Unmarshal(in, &jsonObject); err != nil { ++ return err ++ } ++ ++ rawTypeURL, ok := jsonObject["@type"] ++ if !ok { ++ return errors.New("Any JSON doesn't have '@type'") ++ } ++ typeURL, err := unquoteString(string(rawTypeURL)) ++ if err != nil { ++ return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) ++ } ++ m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) ++ ++ var m2 protoreflect.Message ++ if u.AnyResolver != nil { ++ mi, err := u.AnyResolver.Resolve(typeURL) ++ if err != nil { ++ return err ++ } ++ m2 = proto.MessageReflect(mi) ++ } else { ++ mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) ++ if err != nil { ++ if err == protoregistry.NotFound { ++ return fmt.Errorf("could not resolve Any message type: %v", typeURL) ++ } ++ return err ++ } ++ m2 = mt.New() ++ } ++ ++ if wellKnownType(m2.Descriptor().FullName()) != "" { ++ rawValue, ok := jsonObject["value"] ++ if !ok { ++ return errors.New("Any JSON doesn't have 'value'") ++ } ++ if err := u.unmarshalMessage(m2, rawValue); err != nil { ++ return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) ++ } ++ } else { ++ delete(jsonObject, "@type") ++ rawJSON, err := json.Marshal(jsonObject) ++ if err != nil { ++ return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) ++ } ++ if err = u.unmarshalMessage(m2, rawJSON); err != nil { ++ return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) ++ } ++ } ++ ++ rawWire, err := protoV2.Marshal(m2.Interface()) ++ if err != nil { ++ return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) ++ } ++ m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) ++ return nil ++ case "BoolValue", "BytesValue", "StringValue", ++ "Int32Value", "UInt32Value", "FloatValue", ++ "Int64Value", 
"UInt64Value", "DoubleValue": ++ fd := fds.ByNumber(1) ++ v, err := u.unmarshalValue(m.NewField(fd), in, fd) ++ if err != nil { ++ return err ++ } ++ m.Set(fd, v) ++ return nil ++ case "Duration": ++ v, err := unquoteString(string(in)) ++ if err != nil { ++ return err ++ } ++ d, err := time.ParseDuration(v) ++ if err != nil { ++ return fmt.Errorf("bad Duration: %v", err) ++ } ++ ++ sec := d.Nanoseconds() / 1e9 ++ nsec := d.Nanoseconds() % 1e9 ++ m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) ++ m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) ++ return nil ++ case "Timestamp": ++ v, err := unquoteString(string(in)) ++ if err != nil { ++ return err ++ } ++ t, err := time.Parse(time.RFC3339Nano, v) ++ if err != nil { ++ return fmt.Errorf("bad Timestamp: %v", err) ++ } ++ ++ sec := t.Unix() ++ nsec := t.Nanosecond() ++ m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) ++ m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) ++ return nil ++ case "Value": ++ switch { ++ case string(in) == "null": ++ m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) ++ case string(in) == "true": ++ m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) ++ case string(in) == "false": ++ m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) ++ case hasPrefixAndSuffix('"', in, '"'): ++ s, err := unquoteString(string(in)) ++ if err != nil { ++ return fmt.Errorf("unrecognized type for Value %q", in) ++ } ++ m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) ++ case hasPrefixAndSuffix('[', in, ']'): ++ v := m.Mutable(fds.ByNumber(6)) ++ return u.unmarshalMessage(v.Message(), in) ++ case hasPrefixAndSuffix('{', in, '}'): ++ v := m.Mutable(fds.ByNumber(5)) ++ return u.unmarshalMessage(v.Message(), in) ++ default: ++ f, err := strconv.ParseFloat(string(in), 0) ++ if err != nil { ++ return fmt.Errorf("unrecognized type for Value %q", in) ++ } ++ m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) ++ } ++ return nil ++ case "ListValue": ++ var jsonArray []json.RawMessage ++ if err := json.Unmarshal(in, &jsonArray); err != nil { ++ return fmt.Errorf("bad ListValue: %v", err) ++ } ++ ++ lv := m.Mutable(fds.ByNumber(1)).List() ++ for _, raw := range jsonArray { ++ ve := lv.NewElement() ++ if err := u.unmarshalMessage(ve.Message(), raw); err != nil { ++ return err ++ } ++ lv.Append(ve) ++ } ++ return nil ++ case "Struct": ++ var jsonObject map[string]json.RawMessage ++ if err := json.Unmarshal(in, &jsonObject); err != nil { ++ return fmt.Errorf("bad StructValue: %v", err) ++ } ++ ++ mv := m.Mutable(fds.ByNumber(1)).Map() ++ for key, raw := range jsonObject { ++ kv := protoreflect.ValueOf(key).MapKey() ++ vv := mv.NewValue() ++ if err := u.unmarshalMessage(vv.Message(), raw); err != nil { ++ return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) ++ } ++ mv.Set(kv, vv) ++ } ++ return nil ++ } ++ ++ var jsonObject map[string]json.RawMessage ++ if err := json.Unmarshal(in, &jsonObject); err != nil { ++ return err ++ } ++ ++ // Handle known fields. ++ for i := 0; i < fds.Len(); i++ { ++ fd := fds.Get(i) ++ if fd.IsWeak() && fd.Message().IsPlaceholder() { ++ continue // weak reference is not linked in ++ } ++ ++ // Search for any raw JSON value associated with this field. 
++ var raw json.RawMessage ++ name := string(fd.Name()) ++ if fd.Kind() == protoreflect.GroupKind { ++ name = string(fd.Message().Name()) ++ } ++ if v, ok := jsonObject[name]; ok { ++ delete(jsonObject, name) ++ raw = v ++ } ++ name = string(fd.JSONName()) ++ if v, ok := jsonObject[name]; ok { ++ delete(jsonObject, name) ++ raw = v ++ } ++ ++ field := m.NewField(fd) ++ // Unmarshal the field value. ++ if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { ++ continue ++ } ++ v, err := u.unmarshalValue(field, raw, fd) ++ if err != nil { ++ return err ++ } ++ m.Set(fd, v) ++ } ++ ++ // Handle extension fields. ++ for name, raw := range jsonObject { ++ if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { ++ continue ++ } ++ ++ // Resolve the extension field by name. ++ xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) ++ xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) ++ if xt == nil && isMessageSet(md) { ++ xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) ++ } ++ if xt == nil { ++ continue ++ } ++ delete(jsonObject, name) ++ fd := xt.TypeDescriptor() ++ if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { ++ return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) ++ } ++ ++ field := m.NewField(fd) ++ // Unmarshal the field value. ++ if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { ++ continue ++ } ++ v, err := u.unmarshalValue(field, raw, fd) ++ if err != nil { ++ return err ++ } ++ m.Set(fd, v) ++ } ++ ++ if !u.AllowUnknownFields && len(jsonObject) > 0 { ++ for name := range jsonObject { ++ return fmt.Errorf("unknown field %q in %v", name, md.FullName()) ++ } ++ } ++ return nil ++} ++ ++func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { ++ if fd.Cardinality() == protoreflect.Repeated { ++ return false ++ } ++ if md := fd.Message(); md != nil { ++ return md.FullName() == "google.protobuf.Value" ++ } ++ if ed := fd.Enum(); ed != nil { ++ return ed.FullName() == "google.protobuf.NullValue" ++ } ++ return false ++} ++ ++func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { ++ if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { ++ _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) ++ return ok ++ } ++ return false ++} ++ ++func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { ++ switch { ++ case fd.IsList(): ++ var jsonArray []json.RawMessage ++ if err := json.Unmarshal(in, &jsonArray); err != nil { ++ return v, err ++ } ++ lv := v.List() ++ for _, raw := range jsonArray { ++ ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) ++ if err != nil { ++ return v, err ++ } ++ lv.Append(ve) ++ } ++ return v, nil ++ case fd.IsMap(): ++ var jsonObject map[string]json.RawMessage ++ if err := json.Unmarshal(in, &jsonObject); err != nil { ++ return v, err ++ } ++ kfd := fd.MapKey() ++ vfd := fd.MapValue() ++ mv := v.Map() ++ for key, raw := range jsonObject { ++ var kv protoreflect.MapKey ++ if kfd.Kind() == protoreflect.StringKind { ++ kv = protoreflect.ValueOf(key).MapKey() ++ } else { ++ v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) ++ if err != nil { ++ return v, err ++ } ++ kv = v.MapKey() ++ } ++ ++ vv, err := 
u.unmarshalSingularValue(mv.NewValue(), raw, vfd) ++ if err != nil { ++ return v, err ++ } ++ mv.Set(kv, vv) ++ } ++ return v, nil ++ default: ++ return u.unmarshalSingularValue(v, in, fd) ++ } ++} ++ ++var nonFinite = map[string]float64{ ++ `"NaN"`: math.NaN(), ++ `"Infinity"`: math.Inf(+1), ++ `"-Infinity"`: math.Inf(-1), ++} ++ ++func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { ++ switch fd.Kind() { ++ case protoreflect.BoolKind: ++ return unmarshalValue(in, new(bool)) ++ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: ++ return unmarshalValue(trimQuote(in), new(int32)) ++ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: ++ return unmarshalValue(trimQuote(in), new(int64)) ++ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: ++ return unmarshalValue(trimQuote(in), new(uint32)) ++ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: ++ return unmarshalValue(trimQuote(in), new(uint64)) ++ case protoreflect.FloatKind: ++ if f, ok := nonFinite[string(in)]; ok { ++ return protoreflect.ValueOfFloat32(float32(f)), nil ++ } ++ return unmarshalValue(trimQuote(in), new(float32)) ++ case protoreflect.DoubleKind: ++ if f, ok := nonFinite[string(in)]; ok { ++ return protoreflect.ValueOfFloat64(float64(f)), nil ++ } ++ return unmarshalValue(trimQuote(in), new(float64)) ++ case protoreflect.StringKind: ++ return unmarshalValue(in, new(string)) ++ case protoreflect.BytesKind: ++ return unmarshalValue(in, new([]byte)) ++ case protoreflect.EnumKind: ++ if hasPrefixAndSuffix('"', in, '"') { ++ vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) ++ if vd == nil { ++ return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) ++ } ++ return protoreflect.ValueOfEnum(vd.Number()), nil ++ } ++ return unmarshalValue(in, new(protoreflect.EnumNumber)) ++ case protoreflect.MessageKind, protoreflect.GroupKind: ++ err := u.unmarshalMessage(v.Message(), in) ++ return v, err ++ default: ++ panic(fmt.Sprintf("invalid kind %v", fd.Kind())) ++ } ++} ++ ++func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { ++ err := json.Unmarshal(in, v) ++ return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err ++} ++ ++func unquoteString(in string) (out string, err error) { ++ err = json.Unmarshal([]byte(in), &out) ++ return out, err ++} ++ ++func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { ++ if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { ++ return true ++ } ++ return false ++} ++ ++// trimQuote is like unquoteString but simply strips surrounding quotes. ++// This is incorrect, but is behavior done by the legacy implementation. ++func trimQuote(in []byte) []byte { ++ if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { ++ in = in[1 : len(in)-1] ++ } ++ return in ++} +diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go +new file mode 100644 +index 0000000..685c80a +--- /dev/null ++++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go +@@ -0,0 +1,559 @@ ++// Copyright 2015 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package jsonpb ++ ++import ( ++ "encoding/json" ++ "errors" ++ "fmt" ++ "io" ++ "math" ++ "reflect" ++ "sort" ++ "strconv" ++ "strings" ++ "time" ++ ++ "github.com/golang/protobuf/proto" ++ "google.golang.org/protobuf/encoding/protojson" ++ protoV2 "google.golang.org/protobuf/proto" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoregistry" ++) ++ ++const wrapJSONMarshalV2 = false ++ ++// Marshaler is a configurable object for marshaling protocol buffer messages ++// to the specified JSON representation. ++type Marshaler struct { ++ // OrigName specifies whether to use the original protobuf name for fields. ++ OrigName bool ++ ++ // EnumsAsInts specifies whether to render enum values as integers, ++ // as opposed to string values. ++ EnumsAsInts bool ++ ++ // EmitDefaults specifies whether to render fields with zero values. ++ EmitDefaults bool ++ ++ // Indent controls whether the output is compact or not. ++ // If empty, the output is compact JSON. Otherwise, every JSON object ++ // entry and JSON array value will be on its own line. ++ // Each line will be preceded by repeated copies of Indent, where the ++ // number of copies is the current indentation depth. ++ Indent string ++ ++ // AnyResolver is used to resolve the google.protobuf.Any well-known type. ++ // If unset, the global registry is used by default. ++ AnyResolver AnyResolver ++} ++ ++// JSONPBMarshaler is implemented by protobuf messages that customize the ++// way they are marshaled to JSON. Messages that implement this should also ++// implement JSONPBUnmarshaler so that the custom format can be parsed. ++// ++// The JSON marshaling must follow the proto to JSON specification: ++// https://developers.google.com/protocol-buffers/docs/proto3#json ++// ++// Deprecated: Custom types should implement protobuf reflection instead. ++type JSONPBMarshaler interface { ++ MarshalJSONPB(*Marshaler) ([]byte, error) ++} ++ ++// Marshal serializes a protobuf message as JSON into w. ++func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { ++ b, err := jm.marshal(m) ++ if len(b) > 0 { ++ if _, err := w.Write(b); err != nil { ++ return err ++ } ++ } ++ return err ++} ++ ++// MarshalToString serializes a protobuf message as JSON in string form. ++func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { ++ b, err := jm.marshal(m) ++ if err != nil { ++ return "", err ++ } ++ return string(b), nil ++} ++ ++func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { ++ v := reflect.ValueOf(m) ++ if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { ++ return nil, errors.New("Marshal called with nil") ++ } ++ ++ // Check for custom marshalers first since they may not properly ++ // implement protobuf reflection that the logic below relies on. ++ if jsm, ok := m.(JSONPBMarshaler); ok { ++ return jsm.MarshalJSONPB(jm) ++ } ++ ++ if wrapJSONMarshalV2 { ++ opts := protojson.MarshalOptions{ ++ UseProtoNames: jm.OrigName, ++ UseEnumNumbers: jm.EnumsAsInts, ++ EmitUnpopulated: jm.EmitDefaults, ++ Indent: jm.Indent, ++ } ++ if jm.AnyResolver != nil { ++ opts.Resolver = anyResolver{jm.AnyResolver} ++ } ++ return opts.Marshal(proto.MessageReflect(m).Interface()) ++ } else { ++ // Check for unpopulated required fields first. 
++ m2 := proto.MessageReflect(m) ++ if err := protoV2.CheckInitialized(m2.Interface()); err != nil { ++ return nil, err ++ } ++ ++ w := jsonWriter{Marshaler: jm} ++ err := w.marshalMessage(m2, "", "") ++ return w.buf, err ++ } ++} ++ ++type jsonWriter struct { ++ *Marshaler ++ buf []byte ++} ++ ++func (w *jsonWriter) write(s string) { ++ w.buf = append(w.buf, s...) ++} ++ ++func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { ++ if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { ++ b, err := jsm.MarshalJSONPB(w.Marshaler) ++ if err != nil { ++ return err ++ } ++ if typeURL != "" { ++ // we are marshaling this object to an Any type ++ var js map[string]*json.RawMessage ++ if err = json.Unmarshal(b, &js); err != nil { ++ return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) ++ } ++ turl, err := json.Marshal(typeURL) ++ if err != nil { ++ return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) ++ } ++ js["@type"] = (*json.RawMessage)(&turl) ++ if b, err = json.Marshal(js); err != nil { ++ return err ++ } ++ } ++ w.write(string(b)) ++ return nil ++ } ++ ++ md := m.Descriptor() ++ fds := md.Fields() ++ ++ // Handle well-known types. ++ const secondInNanos = int64(time.Second / time.Nanosecond) ++ switch wellKnownType(md.FullName()) { ++ case "Any": ++ return w.marshalAny(m, indent) ++ case "BoolValue", "BytesValue", "StringValue", ++ "Int32Value", "UInt32Value", "FloatValue", ++ "Int64Value", "UInt64Value", "DoubleValue": ++ fd := fds.ByNumber(1) ++ return w.marshalValue(fd, m.Get(fd), indent) ++ case "Duration": ++ const maxSecondsInDuration = 315576000000 ++ // "Generated output always contains 0, 3, 6, or 9 fractional digits, ++ // depending on required precision." ++ s := m.Get(fds.ByNumber(1)).Int() ++ ns := m.Get(fds.ByNumber(2)).Int() ++ if s < -maxSecondsInDuration || s > maxSecondsInDuration { ++ return fmt.Errorf("seconds out of range %v", s) ++ } ++ if ns <= -secondInNanos || ns >= secondInNanos { ++ return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) ++ } ++ if (s > 0 && ns < 0) || (s < 0 && ns > 0) { ++ return errors.New("signs of seconds and nanos do not match") ++ } ++ var sign string ++ if s < 0 || ns < 0 { ++ sign, s, ns = "-", -1*s, -1*ns ++ } ++ x := fmt.Sprintf("%s%d.%09d", sign, s, ns) ++ x = strings.TrimSuffix(x, "000") ++ x = strings.TrimSuffix(x, "000") ++ x = strings.TrimSuffix(x, ".000") ++ w.write(fmt.Sprintf(`"%vs"`, x)) ++ return nil ++ case "Timestamp": ++ // "RFC 3339, where generated output will always be Z-normalized ++ // and uses 0, 3, 6 or 9 fractional digits." ++ s := m.Get(fds.ByNumber(1)).Int() ++ ns := m.Get(fds.ByNumber(2)).Int() ++ if ns < 0 || ns >= secondInNanos { ++ return fmt.Errorf("ns out of range [0, %v)", secondInNanos) ++ } ++ t := time.Unix(s, ns).UTC() ++ // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). ++ x := t.Format("2006-01-02T15:04:05.000000000") ++ x = strings.TrimSuffix(x, "000") ++ x = strings.TrimSuffix(x, "000") ++ x = strings.TrimSuffix(x, ".000") ++ w.write(fmt.Sprintf(`"%vZ"`, x)) ++ return nil ++ case "Value": ++ // JSON value; which is a null, number, string, bool, object, or array. ++ od := md.Oneofs().Get(0) ++ fd := m.WhichOneof(od) ++ if fd == nil { ++ return errors.New("nil Value") ++ } ++ return w.marshalValue(fd, m.Get(fd), indent) ++ case "Struct", "ListValue": ++ // JSON object or array. 
++ fd := fds.ByNumber(1) ++ return w.marshalValue(fd, m.Get(fd), indent) ++ } ++ ++ w.write("{") ++ if w.Indent != "" { ++ w.write("\n") ++ } ++ ++ firstField := true ++ if typeURL != "" { ++ if err := w.marshalTypeURL(indent, typeURL); err != nil { ++ return err ++ } ++ firstField = false ++ } ++ ++ for i := 0; i < fds.Len(); { ++ fd := fds.Get(i) ++ if od := fd.ContainingOneof(); od != nil { ++ fd = m.WhichOneof(od) ++ i += od.Fields().Len() ++ if fd == nil { ++ continue ++ } ++ } else { ++ i++ ++ } ++ ++ v := m.Get(fd) ++ ++ if !m.Has(fd) { ++ if !w.EmitDefaults || fd.ContainingOneof() != nil { ++ continue ++ } ++ if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { ++ v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars ++ } ++ } ++ ++ if !firstField { ++ w.writeComma() ++ } ++ if err := w.marshalField(fd, v, indent); err != nil { ++ return err ++ } ++ firstField = false ++ } ++ ++ // Handle proto2 extensions. ++ if md.ExtensionRanges().Len() > 0 { ++ // Collect a sorted list of all extension descriptor and values. ++ type ext struct { ++ desc protoreflect.FieldDescriptor ++ val protoreflect.Value ++ } ++ var exts []ext ++ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { ++ if fd.IsExtension() { ++ exts = append(exts, ext{fd, v}) ++ } ++ return true ++ }) ++ sort.Slice(exts, func(i, j int) bool { ++ return exts[i].desc.Number() < exts[j].desc.Number() ++ }) ++ ++ for _, ext := range exts { ++ if !firstField { ++ w.writeComma() ++ } ++ if err := w.marshalField(ext.desc, ext.val, indent); err != nil { ++ return err ++ } ++ firstField = false ++ } ++ } ++ ++ if w.Indent != "" { ++ w.write("\n") ++ w.write(indent) ++ } ++ w.write("}") ++ return nil ++} ++ ++func (w *jsonWriter) writeComma() { ++ if w.Indent != "" { ++ w.write(",\n") ++ } else { ++ w.write(",") ++ } ++} ++ ++func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { ++ // "If the Any contains a value that has a special JSON mapping, ++ // it will be converted as follows: {"@type": xxx, "value": yyy}. ++ // Otherwise, the value will be converted into a JSON object, ++ // and the "@type" field will be inserted to indicate the actual data type." 
++ md := m.Descriptor() ++ typeURL := m.Get(md.Fields().ByNumber(1)).String() ++ rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() ++ ++ var m2 protoreflect.Message ++ if w.AnyResolver != nil { ++ mi, err := w.AnyResolver.Resolve(typeURL) ++ if err != nil { ++ return err ++ } ++ m2 = proto.MessageReflect(mi) ++ } else { ++ mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) ++ if err != nil { ++ return err ++ } ++ m2 = mt.New() ++ } ++ ++ if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { ++ return err ++ } ++ ++ if wellKnownType(m2.Descriptor().FullName()) == "" { ++ return w.marshalMessage(m2, indent, typeURL) ++ } ++ ++ w.write("{") ++ if w.Indent != "" { ++ w.write("\n") ++ } ++ if err := w.marshalTypeURL(indent, typeURL); err != nil { ++ return err ++ } ++ w.writeComma() ++ if w.Indent != "" { ++ w.write(indent) ++ w.write(w.Indent) ++ w.write(`"value": `) ++ } else { ++ w.write(`"value":`) ++ } ++ if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { ++ return err ++ } ++ if w.Indent != "" { ++ w.write("\n") ++ w.write(indent) ++ } ++ w.write("}") ++ return nil ++} ++ ++func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { ++ if w.Indent != "" { ++ w.write(indent) ++ w.write(w.Indent) ++ } ++ w.write(`"@type":`) ++ if w.Indent != "" { ++ w.write(" ") ++ } ++ b, err := json.Marshal(typeURL) ++ if err != nil { ++ return err ++ } ++ w.write(string(b)) ++ return nil ++} ++ ++// marshalField writes field description and value to the Writer. ++func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { ++ if w.Indent != "" { ++ w.write(indent) ++ w.write(w.Indent) ++ } ++ w.write(`"`) ++ switch { ++ case fd.IsExtension(): ++ // For message set, use the fname of the message as the extension name. ++ name := string(fd.FullName()) ++ if isMessageSet(fd.ContainingMessage()) { ++ name = strings.TrimSuffix(name, ".message_set_extension") ++ } ++ ++ w.write("[" + name + "]") ++ case w.OrigName: ++ name := string(fd.Name()) ++ if fd.Kind() == protoreflect.GroupKind { ++ name = string(fd.Message().Name()) ++ } ++ w.write(name) ++ default: ++ w.write(string(fd.JSONName())) ++ } ++ w.write(`":`) ++ if w.Indent != "" { ++ w.write(" ") ++ } ++ return w.marshalValue(fd, v, indent) ++} ++ ++func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { ++ switch { ++ case fd.IsList(): ++ w.write("[") ++ comma := "" ++ lv := v.List() ++ for i := 0; i < lv.Len(); i++ { ++ w.write(comma) ++ if w.Indent != "" { ++ w.write("\n") ++ w.write(indent) ++ w.write(w.Indent) ++ w.write(w.Indent) ++ } ++ if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { ++ return err ++ } ++ comma = "," ++ } ++ if w.Indent != "" { ++ w.write("\n") ++ w.write(indent) ++ w.write(w.Indent) ++ } ++ w.write("]") ++ return nil ++ case fd.IsMap(): ++ kfd := fd.MapKey() ++ vfd := fd.MapValue() ++ mv := v.Map() ++ ++ // Collect a sorted list of all map keys and values. 
++ type entry struct{ key, val protoreflect.Value } ++ var entries []entry ++ mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { ++ entries = append(entries, entry{k.Value(), v}) ++ return true ++ }) ++ sort.Slice(entries, func(i, j int) bool { ++ switch kfd.Kind() { ++ case protoreflect.BoolKind: ++ return !entries[i].key.Bool() && entries[j].key.Bool() ++ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: ++ return entries[i].key.Int() < entries[j].key.Int() ++ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: ++ return entries[i].key.Uint() < entries[j].key.Uint() ++ case protoreflect.StringKind: ++ return entries[i].key.String() < entries[j].key.String() ++ default: ++ panic("invalid kind") ++ } ++ }) ++ ++ w.write(`{`) ++ comma := "" ++ for _, entry := range entries { ++ w.write(comma) ++ if w.Indent != "" { ++ w.write("\n") ++ w.write(indent) ++ w.write(w.Indent) ++ w.write(w.Indent) ++ } ++ ++ s := fmt.Sprint(entry.key.Interface()) ++ b, err := json.Marshal(s) ++ if err != nil { ++ return err ++ } ++ w.write(string(b)) ++ ++ w.write(`:`) ++ if w.Indent != "" { ++ w.write(` `) ++ } ++ ++ if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { ++ return err ++ } ++ comma = "," ++ } ++ if w.Indent != "" { ++ w.write("\n") ++ w.write(indent) ++ w.write(w.Indent) ++ } ++ w.write(`}`) ++ return nil ++ default: ++ return w.marshalSingularValue(fd, v, indent) ++ } ++} ++ ++func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { ++ switch { ++ case !v.IsValid(): ++ w.write("null") ++ return nil ++ case fd.Message() != nil: ++ return w.marshalMessage(v.Message(), indent+w.Indent, "") ++ case fd.Enum() != nil: ++ if fd.Enum().FullName() == "google.protobuf.NullValue" { ++ w.write("null") ++ return nil ++ } ++ ++ vd := fd.Enum().Values().ByNumber(v.Enum()) ++ if vd == nil || w.EnumsAsInts { ++ w.write(strconv.Itoa(int(v.Enum()))) ++ } else { ++ w.write(`"` + string(vd.Name()) + `"`) ++ } ++ return nil ++ default: ++ switch v.Interface().(type) { ++ case float32, float64: ++ switch { ++ case math.IsInf(v.Float(), +1): ++ w.write(`"Infinity"`) ++ return nil ++ case math.IsInf(v.Float(), -1): ++ w.write(`"-Infinity"`) ++ return nil ++ case math.IsNaN(v.Float()): ++ w.write(`"NaN"`) ++ return nil ++ } ++ case int64, uint64: ++ w.write(fmt.Sprintf(`"%d"`, v.Interface())) ++ return nil ++ } ++ ++ b, err := json.Marshal(v.Interface()) ++ if err != nil { ++ return err ++ } ++ w.write(string(b)) ++ return nil ++ } ++} +diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go +new file mode 100644 +index 0000000..480e244 +--- /dev/null ++++ b/vendor/github.com/golang/protobuf/jsonpb/json.go +@@ -0,0 +1,69 @@ ++// Copyright 2015 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// Package jsonpb provides functionality to marshal and unmarshal between a ++// protocol buffer message and JSON. It follows the specification at ++// https://developers.google.com/protocol-buffers/docs/proto3#json. ++// ++// Do not rely on the default behavior of the standard encoding/json package ++// when called on generated message types as it does not operate correctly. 
++// ++// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" ++// package instead. ++package jsonpb ++ ++import ( ++ "github.com/golang/protobuf/proto" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoregistry" ++ "google.golang.org/protobuf/runtime/protoimpl" ++) ++ ++// AnyResolver takes a type URL, present in an Any message, ++// and resolves it into an instance of the associated message. ++type AnyResolver interface { ++ Resolve(typeURL string) (proto.Message, error) ++} ++ ++type anyResolver struct{ AnyResolver } ++ ++func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { ++ return r.FindMessageByURL(string(message)) ++} ++ ++func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { ++ m, err := r.Resolve(url) ++ if err != nil { ++ return nil, err ++ } ++ return protoimpl.X.MessageTypeOf(m), nil ++} ++ ++func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { ++ return protoregistry.GlobalTypes.FindExtensionByName(field) ++} ++ ++func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { ++ return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) ++} ++ ++func wellKnownType(s protoreflect.FullName) string { ++ if s.Parent() == "google.protobuf" { ++ switch s.Name() { ++ case "Empty", "Any", ++ "BoolValue", "BytesValue", "StringValue", ++ "Int32Value", "UInt32Value", "FloatValue", ++ "Int64Value", "UInt64Value", "DoubleValue", ++ "Duration", "Timestamp", ++ "NullValue", "Struct", "Value", "ListValue": ++ return string(s.Name()) ++ } ++ } ++ return "" ++} ++ ++func isMessageSet(md protoreflect.MessageDescriptor) bool { ++ ms, ok := md.(interface{ IsMessageSet() bool }) ++ return ok && ms.IsMessageSet() ++} +diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +deleted file mode 100644 +index 37dc0cf..0000000 +--- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go ++++ /dev/null +@@ -1,71 +0,0 @@ +-// Copyright 2016 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +-package ctxhttp // import "golang.org/x/net/context/ctxhttp" +- +-import ( +- "context" +- "io" +- "net/http" +- "net/url" +- "strings" +-) +- +-// Do sends an HTTP request with the provided http.Client and returns +-// an HTTP response. +-// +-// If the client is nil, http.DefaultClient is used. +-// +-// The provided ctx must be non-nil. If it is canceled or times out, +-// ctx.Err() will be returned. +-func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { +- if client == nil { +- client = http.DefaultClient +- } +- resp, err := client.Do(req.WithContext(ctx)) +- // If we got an error, and the context has been canceled, +- // the context's error is probably more useful. +- if err != nil { +- select { +- case <-ctx.Done(): +- err = ctx.Err() +- default: +- } +- } +- return resp, err +-} +- +-// Get issues a GET request via the Do function. 
+-func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { +- req, err := http.NewRequest("GET", url, nil) +- if err != nil { +- return nil, err +- } +- return Do(ctx, client, req) +-} +- +-// Head issues a HEAD request via the Do function. +-func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { +- req, err := http.NewRequest("HEAD", url, nil) +- if err != nil { +- return nil, err +- } +- return Do(ctx, client, req) +-} +- +-// Post issues a POST request via the Do function. +-func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { +- req, err := http.NewRequest("POST", url, body) +- if err != nil { +- return nil, err +- } +- req.Header.Set("Content-Type", bodyType) +- return Do(ctx, client, req) +-} +- +-// PostForm issues a POST request via the Do function. +-func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { +- return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +-} +diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go +index 822ed42..2466ae3 100644 +--- a/vendor/golang.org/x/net/html/doc.go ++++ b/vendor/golang.org/x/net/html/doc.go +@@ -92,6 +92,27 @@ example, to process each anchor node in depth-first order: + The relevant specifications include: + https://html.spec.whatwg.org/multipage/syntax.html and + https://html.spec.whatwg.org/multipage/syntax.html#tokenization ++ ++# Security Considerations ++ ++Care should be taken when parsing and interpreting HTML, whether full documents ++or fragments, within the framework of the HTML specification, especially with ++regard to untrusted inputs. ++ ++This package provides both a tokenizer and a parser, which implement the ++tokenization, and tokenization and tree construction stages of the WHATWG HTML ++parsing specification respectively. While the tokenizer parses and normalizes ++individual HTML tokens, only the parser constructs the DOM tree from the ++tokenized HTML, as described in the tree construction stage of the ++specification, dynamically modifying or extending the docuemnt's DOM tree. ++ ++If your use case requires semantically well-formed HTML documents, as defined by ++the WHATWG specification, the parser should be used rather than the tokenizer. ++ ++In security contexts, if trust decisions are being made using the tokenized or ++parsed content, the input must be re-serialized (for instance by using Render or ++Token.String) in order for those trust decisions to hold, as the process of ++tokenization or parsing may alter the content. + */ + package html // import "golang.org/x/net/html" + +diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go +index d856139..04c6bec 100644 +--- a/vendor/golang.org/x/net/html/escape.go ++++ b/vendor/golang.org/x/net/html/escape.go +@@ -193,6 +193,87 @@ func lower(b []byte) []byte { + return b + } + ++// escapeComment is like func escape but escapes its input bytes less often. ++// Per https://github.com/golang/go/issues/58246 some HTML comments are (1) ++// meaningful and (2) contain angle brackets that we'd like to avoid escaping ++// unless we have to. ++// ++// "We have to" includes the '&' byte, since that introduces other escapes. ++// ++// It also includes those bytes (not including EOF) that would otherwise end ++// the comment. 
Per the summary table at the bottom of comment_test.go, this is ++// the '>' byte that, per above, we'd like to avoid escaping unless we have to. ++// ++// Studying the summary table (and T actions in its '>' column) closely, we ++// only need to escape in states 43, 44, 49, 51 and 52. State 43 is at the ++// start of the comment data. State 52 is after a '!'. The other three states ++// are after a '-'. ++// ++// Our algorithm is thus to escape every '&' and to escape '>' if and only if: ++// - The '>' is after a '!' or '-' (in the unescaped data) or ++// - The '>' is at the start of the comment data (after the opening ""); err != nil { +diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go +index 50f7c6a..de67f93 100644 +--- a/vendor/golang.org/x/net/html/token.go ++++ b/vendor/golang.org/x/net/html/token.go +@@ -110,7 +110,7 @@ func (t Token) String() string { + case SelfClosingTagToken: + return "<" + t.tagString() + "/>" + case CommentToken: +- return "" ++ return "" + case DoctypeToken: + return "" + } +@@ -598,10 +598,10 @@ scriptDataDoubleEscapeEnd: + // readComment reads the next comment token starting with " balancer) calls are guaranteed to execute in a ++ // mutually exclusive manner as they are scheduled in the serializer. Fields ++ // accessed *only* in these serializer callbacks, can therefore be accessed ++ // without a mutex. ++ balancer *gracefulswitch.Balancer ++ curBalancerName string ++ ++ // mu guards access to the below fields. Access to the serializer and its ++ // cancel function needs to be mutex protected because they are overwritten ++ // when the wrapper exits idle mode. ++ mu sync.Mutex ++ serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. ++ serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. ++ mode ccbMode // Tracks the current mode of the wrapper. + } + +-func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { ++// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer ++// is not created until the switchTo() method is invoked. ++func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { ++ ctx, cancel := context.WithCancel(context.Background()) + ccb := &ccBalancerWrapper{ +- cc: cc, +- updateCh: buffer.NewUnbounded(), +- closed: grpcsync.NewEvent(), +- done: grpcsync.NewEvent(), +- subConns: make(map[*acBalancerWrapper]struct{}), ++ cc: cc, ++ opts: bopts, ++ serializer: grpcsync.NewCallbackSerializer(ctx), ++ serializerCancel: cancel, + } +- go ccb.watcher() +- ccb.balancer = b.Build(ccb, bopts) ++ ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) + return ccb + } + +-// watcher balancer functions sequentially, so the balancer can be implemented +-// lock-free. +-func (ccb *ccBalancerWrapper) watcher() { +- for { +- select { +- case t := <-ccb.updateCh.Get(): +- ccb.updateCh.Load() +- if ccb.closed.HasFired() { +- break +- } +- switch u := t.(type) { +- case *scStateUpdate: +- ccb.balancerMu.Lock() +- ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) +- ccb.balancerMu.Unlock() +- case *acBalancerWrapper: +- ccb.mu.Lock() +- if ccb.subConns != nil { +- delete(ccb.subConns, u) +- ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) ++// updateClientConnState is invoked by grpc to push a ClientConnState update to ++// the underlying balancer. 
++func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { ++ ccb.mu.Lock() ++ errCh := make(chan error, 1) ++ // Here and everywhere else where Schedule() is called, it is done with the ++ // lock held. But the lock guards only the scheduling part. The actual ++ // callback is called asynchronously without the lock being held. ++ ok := ccb.serializer.Schedule(func(_ context.Context) { ++ // If the addresses specified in the update contain addresses of type ++ // "grpclb" and the selected LB policy is not "grpclb", these addresses ++ // will be filtered out and ccs will be modified with the updated ++ // address list. ++ if ccb.curBalancerName != grpclbName { ++ var addrs []resolver.Address ++ for _, addr := range ccs.ResolverState.Addresses { ++ if addr.Type == resolver.GRPCLB { ++ continue + } +- ccb.mu.Unlock() +- default: +- logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) ++ addrs = append(addrs, addr) + } +- case <-ccb.closed.Done(): ++ ccs.ResolverState.Addresses = addrs + } ++ errCh <- ccb.balancer.UpdateClientConnState(*ccs) ++ }) ++ if !ok { ++ // If we are unable to schedule a function with the serializer, it ++ // indicates that it has been closed. A serializer is only closed when ++ // the wrapper is closed or is in idle. ++ ccb.mu.Unlock() ++ return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") ++ } ++ ccb.mu.Unlock() + +- if ccb.closed.HasFired() { +- ccb.balancerMu.Lock() +- ccb.balancer.Close() +- ccb.balancerMu.Unlock() +- ccb.mu.Lock() +- scs := ccb.subConns +- ccb.subConns = nil +- ccb.mu.Unlock() +- ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) +- ccb.done.Fire() +- // Fire done before removing the addr conns. We can safely unblock +- // ccb.close and allow the removeAddrConns to happen +- // asynchronously. +- for acbw := range scs { +- ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +- } ++ // We get here only if the above call to Schedule succeeds, in which case it ++ // is guaranteed that the scheduled function will run. Therefore it is safe ++ // to block on this channel. ++ err := <-errCh ++ if logger.V(2) && err != nil { ++ logger.Infof("error from balancer.UpdateClientConnState: %v", err) ++ } ++ return err ++} ++ ++// updateSubConnState is invoked by grpc to push a subConn state update to the ++// underlying balancer. ++func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { ++ ccb.mu.Lock() ++ ccb.serializer.Schedule(func(_ context.Context) { ++ ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) ++ }) ++ ccb.mu.Unlock() ++} ++ ++func (ccb *ccBalancerWrapper) resolverError(err error) { ++ ccb.mu.Lock() ++ ccb.serializer.Schedule(func(_ context.Context) { ++ ccb.balancer.ResolverError(err) ++ }) ++ ccb.mu.Unlock() ++} ++ ++// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the ++// LB policy identified by name. ++// ++// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the ++// first good update from the name resolver, it determines the LB policy to use ++// and invokes the switchTo() method. Upon receipt of every subsequent update ++// from the name resolver, it invokes this method. ++// ++// the ccBalancerWrapper keeps track of the current LB policy name, and skips ++// the graceful balancer switching process if the name does not change. 
++func (ccb *ccBalancerWrapper) switchTo(name string) { ++ ccb.mu.Lock() ++ ccb.serializer.Schedule(func(_ context.Context) { ++ // TODO: Other languages use case-sensitive balancer registries. We should ++ // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. ++ if strings.EqualFold(ccb.curBalancerName, name) { + return + } ++ ccb.buildLoadBalancingPolicy(name) ++ }) ++ ccb.mu.Unlock() ++} ++ ++// buildLoadBalancingPolicy performs the following: ++// - retrieve a balancer builder for the given name. Use the default LB ++// policy, pick_first, if no LB policy with name is found in the registry. ++// - instruct the gracefulswitch balancer to switch to the above builder. This ++// will actually build the new balancer. ++// - update the `curBalancerName` field ++// ++// Must be called from a serializer callback. ++func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { ++ builder := balancer.Get(name) ++ if builder == nil { ++ channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) ++ builder = newPickfirstBuilder() ++ } else { ++ channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } ++ ++ if err := ccb.balancer.SwitchTo(builder); err != nil { ++ channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) ++ return ++ } ++ ccb.curBalancerName = builder.Name() + } + + func (ccb *ccBalancerWrapper) close() { +- ccb.closed.Fire() +- <-ccb.done.Done() ++ channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") ++ ccb.closeBalancer(ccbModeClosed) + } + +-func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { +- // When updating addresses for a SubConn, if the address in use is not in +- // the new addresses, the old ac will be tearDown() and a new ac will be +- // created. tearDown() generates a state change with Shutdown state, we +- // don't want the balancer to receive this state change. So before +- // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and +- // this function will be called with (nil, Shutdown). We don't need to call +- // balancer method in this case. +- if sc == nil { ++// enterIdleMode is invoked by grpc when the channel enters idle mode upon ++// expiry of idle_timeout. This call blocks until the balancer is closed. ++func (ccb *ccBalancerWrapper) enterIdleMode() { ++ channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") ++ ccb.closeBalancer(ccbModeIdle) ++} ++ ++// closeBalancer is invoked when the channel is being closed or when it enters ++// idle mode upon expiry of idle_timeout. ++func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { ++ ccb.mu.Lock() ++ if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { ++ ccb.mu.Unlock() + return + } +- ccb.updateCh.Put(&scStateUpdate{ +- sc: sc, +- state: s, +- err: err, ++ ++ ccb.mode = m ++ done := ccb.serializer.Done ++ b := ccb.balancer ++ ok := ccb.serializer.Schedule(func(_ context.Context) { ++ // Close the serializer to ensure that no more calls from gRPC are sent ++ // to the balancer. ++ ccb.serializerCancel() ++ // Empty the current balancer name because we don't have a balancer ++ // anymore and also so that we act on the next call to switchTo by ++ // creating a new balancer specified by the new resolver. 
++ ccb.curBalancerName = "" + }) ++ if !ok { ++ ccb.mu.Unlock() ++ return ++ } ++ ccb.mu.Unlock() ++ ++ // Give enqueued callbacks a chance to finish. ++ <-done ++ // Spawn a goroutine to close the balancer (since it may block trying to ++ // cleanup all allocated resources) and return early. ++ go b.Close() + } + +-func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { +- ccb.balancerMu.Lock() +- defer ccb.balancerMu.Unlock() +- return ccb.balancer.UpdateClientConnState(*ccs) ++// exitIdleMode is invoked by grpc when the channel exits idle mode either ++// because of an RPC or because of an invocation of the Connect() API. This ++// recreates the balancer that was closed previously when entering idle mode. ++// ++// If the channel is not in idle mode, we know for a fact that we are here as a ++// result of the user calling the Connect() method on the ClientConn. In this ++// case, we can simply forward the call to the underlying balancer, instructing ++// it to reconnect to the backends. ++func (ccb *ccBalancerWrapper) exitIdleMode() { ++ ccb.mu.Lock() ++ if ccb.mode == ccbModeClosed { ++ // Request to exit idle is a no-op when wrapper is already closed. ++ ccb.mu.Unlock() ++ return ++ } ++ ++ if ccb.mode == ccbModeIdle { ++ // Recreate the serializer which was closed when we entered idle. ++ ctx, cancel := context.WithCancel(context.Background()) ++ ccb.serializer = grpcsync.NewCallbackSerializer(ctx) ++ ccb.serializerCancel = cancel ++ } ++ ++ // The ClientConn guarantees that mutual exclusion between close() and ++ // exitIdleMode(), and since we just created a new serializer, we can be ++ // sure that the below function will be scheduled. ++ done := make(chan struct{}) ++ ccb.serializer.Schedule(func(_ context.Context) { ++ defer close(done) ++ ++ ccb.mu.Lock() ++ defer ccb.mu.Unlock() ++ ++ if ccb.mode != ccbModeIdle { ++ ccb.balancer.ExitIdle() ++ return ++ } ++ ++ // Gracefulswitch balancer does not support a switchTo operation after ++ // being closed. Hence we need to create a new one here. 
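Once the wrapper has entered idle or been closed, every later callback is rejected by a cheap mode check under the mutex, which is what the isIdleOrClosed guards further down rely on. Below is a compressed sketch of that guard; modeWrapper and doWork are made-up names standing in for the wrapper and its callbacks.

package main

import (
	"errors"
	"fmt"
	"sync"
)

type mode int

const (
	modeActive mode = iota
	modeIdle
	modeClosed
)

// modeWrapper guards calls the same way the balancer wrapper guards
// NewSubConn, UpdateState, ResolveNow, etc.: a quick mode check under a mutex.
type modeWrapper struct {
	mu sync.Mutex
	m  mode
}

func (w *modeWrapper) isIdleOrClosed() bool {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.m == modeIdle || w.m == modeClosed
}

// doWork stands in for any callback coming from the LB policy.
func (w *modeWrapper) doWork() error {
	if w.isIdleOrClosed() {
		return errors.New("wrapper is closed or idle")
	}
	return nil
}

func (w *modeWrapper) enterIdle() {
	w.mu.Lock()
	w.m = modeIdle
	w.mu.Unlock()
}

func main() {
	w := &modeWrapper{}
	fmt.Println(w.doWork()) // <nil>: still active
	w.enterIdle()
	fmt.Println(w.doWork()) // error: closed or idle
}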
++ ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) ++ ccb.mode = ccbModeActive ++ channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") ++ ++ }) ++ ccb.mu.Unlock() ++ ++ <-done + } + +-func (ccb *ccBalancerWrapper) resolverError(err error) { +- ccb.balancerMu.Lock() +- ccb.balancer.ResolverError(err) +- ccb.balancerMu.Unlock() ++func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { ++ ccb.mu.Lock() ++ defer ccb.mu.Unlock() ++ return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed + } + + func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { +- if len(addrs) <= 0 { +- return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") ++ if ccb.isIdleOrClosed() { ++ return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + } +- ccb.mu.Lock() +- defer ccb.mu.Unlock() +- if ccb.subConns == nil { +- return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") ++ ++ if len(addrs) == 0 { ++ return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ac, err := ccb.cc.newAddrConn(addrs, opts) + if err != nil { ++ channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) + return nil, err + } +- acbw := &acBalancerWrapper{ac: ac} +- acbw.ac.mu.Lock() ++ acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} + ac.acbw = acbw +- acbw.ac.mu.Unlock() +- ccb.subConns[acbw] = struct{}{} + return acbw, nil + } + + func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +- // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock +- // during switchBalancer() if the old balancer calls RemoveSubConn() in its +- // Close(). +- ccb.updateCh.Put(sc) ++ if ccb.isIdleOrClosed() { ++ // It it safe to ignore this call when the balancer is closed or in idle ++ // because the ClientConn takes care of closing the connections. ++ // ++ // Not returning early from here when the balancer is closed or in idle ++ // leads to a deadlock though, because of the following sequence of ++ // calls when holding cc.mu: ++ // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> ++ // ccb.RemoveAddrConn --> cc.removeAddrConn ++ return ++ } ++ ++ acbw, ok := sc.(*acBalancerWrapper) ++ if !ok { ++ return ++ } ++ ccb.cc.removeAddrConn(acbw.ac, errConnDrain) + } + + func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { ++ if ccb.isIdleOrClosed() { ++ return ++ } ++ + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return +@@ -185,11 +352,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol + } + + func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { +- ccb.mu.Lock() +- defer ccb.mu.Unlock() +- if ccb.subConns == nil { ++ if ccb.isIdleOrClosed() { + return + } ++ + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. 
If the picker is +@@ -200,6 +366,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + } + + func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { ++ if ccb.isIdleOrClosed() { ++ return ++ } ++ + ccb.cc.resolveNow(o) + } + +@@ -210,58 +380,80 @@ func (ccb *ccBalancerWrapper) Target() string { + // acBalancerWrapper is a wrapper on top of ac for balancers. + // It implements balancer.SubConn interface. + type acBalancerWrapper struct { +- mu sync.Mutex +- ac *addrConn ++ ac *addrConn // read-only ++ ++ mu sync.Mutex ++ producers map[balancer.ProducerBuilder]*refCountedProducer ++} ++ ++func (acbw *acBalancerWrapper) String() string { ++ return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) + } + + func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { +- acbw.mu.Lock() +- defer acbw.mu.Unlock() +- if len(addrs) <= 0 { +- acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) +- return ++ acbw.ac.updateAddrs(addrs) ++} ++ ++func (acbw *acBalancerWrapper) Connect() { ++ go acbw.ac.connect() ++} ++ ++// NewStream begins a streaming RPC on the addrConn. If the addrConn is not ++// ready, blocks until it is or ctx expires. Returns an error when the context ++// expires or the addrConn is shut down. ++func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { ++ transport, err := acbw.ac.getTransport(ctx) ++ if err != nil { ++ return nil, err + } +- if !acbw.ac.tryUpdateAddrs(addrs) { +- cc := acbw.ac.cc +- opts := acbw.ac.scopts +- acbw.ac.mu.Lock() +- // Set old ac.acbw to nil so the Shutdown state update will be ignored +- // by balancer. +- // +- // TODO(bar) the state transition could be wrong when tearDown() old ac +- // and creating new ac, fix the transition. +- acbw.ac.acbw = nil +- acbw.ac.mu.Unlock() +- acState := acbw.ac.getState() +- acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) +- +- if acState == connectivity.Shutdown { +- return +- } ++ return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) ++} + +- ac, err := cc.newAddrConn(addrs, opts) +- if err != nil { +- channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) +- return +- } +- acbw.ac = ac +- ac.mu.Lock() +- ac.acbw = acbw +- ac.mu.Unlock() +- if acState != connectivity.Idle { +- ac.connect() +- } ++// Invoke performs a unary RPC. If the addrConn is not ready, returns ++// errSubConnNotReady. ++func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { ++ cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) ++ if err != nil { ++ return err + } ++ if err := cs.SendMsg(args); err != nil { ++ return err ++ } ++ return cs.RecvMsg(reply) + } + +-func (acbw *acBalancerWrapper) Connect() { +- acbw.mu.Lock() +- defer acbw.mu.Unlock() +- acbw.ac.connect() ++type refCountedProducer struct { ++ producer balancer.Producer ++ refs int // number of current refs to the producer ++ close func() // underlying producer's close function + } + +-func (acbw *acBalancerWrapper) getAddrConn() *addrConn { ++func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() +- return acbw.ac ++ ++ // Look up existing producer from this builder. 
++ pData := acbw.producers[pb] ++ if pData == nil { ++ // Not found; create a new one and add it to the producers map. ++ p, close := pb.Build(acbw) ++ pData = &refCountedProducer{producer: p, close: close} ++ acbw.producers[pb] = pData ++ } ++ // Account for this new reference. ++ pData.refs++ ++ ++ // Return a cleanup function wrapped in a OnceFunc to remove this reference ++ // and delete the refCountedProducer from the map if the total reference ++ // count goes to zero. ++ unref := func() { ++ acbw.mu.Lock() ++ pData.refs-- ++ if pData.refs == 0 { ++ defer pData.close() // Run outside the acbw mutex ++ delete(acbw.producers, pb) ++ } ++ acbw.mu.Unlock() ++ } ++ return pData.producer, grpcsync.OnceFunc(unref) + } +diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +index ed75290..ec2c2fa 100644 +--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go ++++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +@@ -18,14 +18,13 @@ + + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: +-// protoc-gen-go v1.25.0 +-// protoc v3.14.0 ++// protoc-gen-go v1.30.0 ++// protoc v4.22.0 + // source: grpc/binlog/v1/binarylog.proto + + package grpc_binarylog_v1 + + import ( +- proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +@@ -41,10 +40,6 @@ const ( + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) + ) + +-// This is a compile-time assertion that a sufficiently up-to-date version +-// of the legacy proto package is being used. +-const _ = proto.ProtoPackageIsVersion4 +- + // Enumerates the type of event + // Note the terminology is different from the RPC semantics + // definition, but the same meaning is expressed here. +@@ -261,6 +256,7 @@ type GrpcLogEntry struct { + // according to the type of the log entry. + // + // Types that are assignable to Payload: ++ // + // *GrpcLogEntry_ClientHeader + // *GrpcLogEntry_ServerHeader + // *GrpcLogEntry_Message +@@ -694,12 +690,12 @@ func (x *Message) GetData() []byte { + // Header keys added by gRPC are omitted. To be more specific, + // implementations will not log the following entries, and this is + // not to be treated as a truncation: +-// - entries handled by grpc that are not user visible, such as those +-// that begin with 'grpc-' (with exception of grpc-trace-bin) +-// or keys like 'lb-token' +-// - transport specific entries, including but not limited to: +-// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +-// - entries added for call credentials ++// - entries handled by grpc that are not user visible, such as those ++// that begin with 'grpc-' (with exception of grpc-trace-bin) ++// or keys like 'lb-token' ++// - transport specific entries, including but not limited to: ++// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc ++// - entries added for call credentials + // + // Implementations must always log grpc-trace-bin if it is present. 
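GetOrBuildProducer above hands out one shared producer per builder and tears it down only when the last reference is released, with the cleanup closure wrapped so that calling it twice is harmless. The sketch below shows the same ref-counting with sync.Once in place of the internal grpcsync.OnceFunc helper; the cache type and its names are illustrative only.

package main

import (
	"fmt"
	"sync"
)

type producer struct{ name string }

type refCounted struct {
	p    *producer
	refs int
}

type cache struct {
	mu    sync.Mutex
	items map[string]*refCounted
}

// acquire returns the shared producer for key, building it on first use, plus
// a release function that is safe to call more than once (guarded by sync.Once).
func (c *cache) acquire(key string) (*producer, func()) {
	c.mu.Lock()
	defer c.mu.Unlock()
	rc := c.items[key]
	if rc == nil {
		rc = &refCounted{p: &producer{name: key}}
		c.items[key] = rc
	}
	rc.refs++

	var once sync.Once
	release := func() {
		once.Do(func() {
			c.mu.Lock()
			defer c.mu.Unlock()
			rc.refs--
			if rc.refs == 0 {
				delete(c.items, key) // last reference gone: drop the producer
			}
		})
	}
	return rc.p, release
}

func main() {
	c := &cache{items: make(map[string]*refCounted)}
	p1, rel1 := c.acquire("health")
	p2, rel2 := c.acquire("health")
	fmt.Println(p1 == p2) // true: same shared producer
	rel1()
	rel1() // no-op: second call is swallowed by sync.Once
	rel2()
	fmt.Println(len(c.items)) // 0: producer removed after the last release
}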
+ // Practically speaking it will only be visible on server side because +diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go +index 9e20e4d..e6a1dc5 100644 +--- a/vendor/google.golang.org/grpc/call.go ++++ b/vendor/google.golang.org/grpc/call.go +@@ -27,6 +27,11 @@ import ( + // + // All errors returned by Invoke are compatible with the status package. + func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { ++ if err := cc.idlenessMgr.onCallBegin(); err != nil { ++ return err ++ } ++ defer cc.idlenessMgr.onCallEnd() ++ + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) +diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go +new file mode 100644 +index 0000000..32b7fa5 +--- /dev/null ++++ b/vendor/google.golang.org/grpc/channelz/channelz.go +@@ -0,0 +1,36 @@ ++/* ++ * ++ * Copyright 2020 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package channelz exports internals of the channelz implementation as required ++// by other gRPC packages. ++// ++// The implementation of the channelz spec as defined in ++// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by ++// the `internal/channelz` package. ++// ++// # Experimental ++// ++// Notice: All APIs in this package are experimental and may be removed in a ++// later release. ++package channelz ++ ++import "google.golang.org/grpc/internal/channelz" ++ ++// Identifier is an opaque identifier which uniquely identifies an entity in the ++// channelz database. ++type Identifier = channelz.Identifier +diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go +index b2bccfe..95a7459 100644 +--- a/vendor/google.golang.org/grpc/clientconn.go ++++ b/vendor/google.golang.org/grpc/clientconn.go +@@ -23,7 +23,7 @@ import ( + "errors" + "fmt" + "math" +- "reflect" ++ "net/url" + "strings" + "sync" + "sync/atomic" +@@ -37,7 +37,6 @@ import ( + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" +- "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" +@@ -69,6 +68,9 @@ var ( + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") ++ // errConnIdling indicates the the connection is being closed as the channel ++ // is moving to an idle mode due to inactivity. 
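The new onCallBegin/onCallEnd hooks in Invoke exist so the idleness manager can tell whether the channel has had any recent activity. A rough sketch of such a tracker is below; activityTracker and its behaviour are assumptions for illustration and do not mirror the real idlenessManager beyond the begin/end bookkeeping.

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// activityTracker counts in-flight calls and remembers when the last one ended.
type activityTracker struct {
	mu         sync.Mutex
	closed     bool
	activeRPCs int
	lastActive time.Time
	timeout    time.Duration
}

func (t *activityTracker) onCallBegin() error {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.closed {
		return errors.New("channel is closed")
	}
	t.activeRPCs++
	return nil
}

func (t *activityTracker) onCallEnd() {
	t.mu.Lock()
	t.activeRPCs--
	t.lastActive = time.Now()
	t.mu.Unlock()
}

// shouldEnterIdle is what a periodic timer callback would consult: idle only
// when nothing is in flight and the last activity is older than the timeout.
func (t *activityTracker) shouldEnterIdle() bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.activeRPCs == 0 && time.Since(t.lastActive) >= t.timeout
}

func main() {
	t := &activityTracker{timeout: 50 * time.Millisecond, lastActive: time.Now()}
	_ = t.onCallBegin()
	t.onCallEnd()
	time.Sleep(60 * time.Millisecond)
	fmt.Println(t.shouldEnterIdle()) // true: no calls for longer than the timeout
}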
++ errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") + // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default + // service config. + invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" +@@ -79,17 +81,17 @@ var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. +- errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") ++ errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") + // errTransportCredsAndBundle indicates that creds bundle is used together + // with other individual Transport Credentials. + errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") +- // errTransportCredentialsMissing indicates that users want to transmit security +- // information (e.g., OAuth2 token) which requires secure connection on an insecure +- // connection. ++ // errNoTransportCredsInBundle indicated that the configured creds bundle ++ // returned a transport credentials which was nil. ++ errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials") ++ // errTransportCredentialsMissing indicates that users want to transmit ++ // security information (e.g., OAuth2 token) which requires secure ++ // connection on an insecure connection. + errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") +- // errCredentialsConflict indicates that grpc.WithTransportCredentials() +- // and grpc.WithInsecure() are both called for a connection. +- errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") + ) + + const ( +@@ -134,17 +136,43 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires + // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. + func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ +- target: target, +- csMgr: &connectivityStateManager{}, +- conns: make(map[*addrConn]struct{}), +- dopts: defaultDialOptions(), +- blockingpicker: newPickerWrapper(), +- czData: new(channelzData), +- firstResolveEvent: grpcsync.NewEvent(), +- } ++ target: target, ++ csMgr: &connectivityStateManager{}, ++ conns: make(map[*addrConn]struct{}), ++ dopts: defaultDialOptions(), ++ czData: new(channelzData), ++ } ++ ++ // We start the channel off in idle mode, but kick it out of idle at the end ++ // of this method, instead of waiting for the first RPC. Other gRPC ++ // implementations do wait for the first RPC to kick the channel out of ++ // idle. But doing so would be a major behavior change for our users who are ++ // used to seeing the channel active after Dial. ++ // ++ // Taking this approach of kicking it out of idle at the end of this method ++ // allows us to share the code between channel creation and exiting idle ++ // mode. 
This will also make it easy for us to switch to starting the ++ // channel off in idle, if at all we ever get to do that. ++ cc.idlenessState = ccIdlenessStateIdle ++ + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) ++ cc.exitIdleCond = sync.NewCond(&cc.mu) ++ ++ disableGlobalOpts := false ++ for _, opt := range opts { ++ if _, ok := opt.(*disableGlobalDialOptions); ok { ++ disableGlobalOpts = true ++ break ++ } ++ } ++ ++ if !disableGlobalOpts { ++ for _, opt := range globalDialOptions { ++ opt.apply(&cc.dopts) ++ } ++ } + + for _, opt := range opts { + opt.apply(&cc.dopts) +@@ -159,40 +187,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * + } + }() + +- if channelz.IsOn() { +- if cc.dopts.channelzParentID != 0 { +- cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) +- channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ +- Desc: "Channel Created", +- Severity: channelz.CtInfo, +- Parent: &channelz.TraceEventDesc{ +- Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), +- Severity: channelz.CtInfo, +- }, +- }) +- } else { +- cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) +- channelz.Info(logger, cc.channelzID, "Channel Created") +- } +- cc.csMgr.channelzID = cc.channelzID +- } ++ // Register ClientConn with channelz. ++ cc.channelzRegistration(target) + +- if !cc.dopts.insecure { +- if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { +- return nil, errNoTransportSecurity +- } +- if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { +- return nil, errTransportCredsAndBundle +- } +- } else { +- if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { +- return nil, errCredentialsConflict +- } +- for _, cd := range cc.dopts.copts.PerRPCCredentials { +- if cd.RequireTransportSecurity() { +- return nil, errTransportCredentialsMissing +- } +- } ++ if err := cc.validateTransportCredentials(); err != nil { ++ return nil, err + } + + if cc.dopts.defaultServiceConfigRawJSON != nil { +@@ -230,58 +229,19 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * + } + }() + +- scSet := false +- if cc.dopts.scChan != nil { +- // Try to get an initial service config. +- select { +- case sc, ok := <-cc.dopts.scChan: +- if ok { +- cc.sc = &sc +- cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) +- scSet = true +- } +- default: +- } +- } + if cc.dopts.bs == nil { + cc.dopts.bs = backoff.DefaultExponential + } + + // Determine the resolver to use. +- cc.parsedTarget = grpcutil.ParseTarget(cc.target, cc.dopts.copts.Dialer != nil) +- channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) +- resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) +- if resolverBuilder == nil { +- // If resolver builder is still nil, the parsed target's scheme is +- // not registered. Fallback to default resolver and set Endpoint to +- // the original target. 
+- channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) +- cc.parsedTarget = resolver.Target{ +- Scheme: resolver.GetDefaultScheme(), +- Endpoint: target, +- } +- resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) +- if resolverBuilder == nil { +- return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) +- } ++ if err := cc.parseTargetAndFindResolver(); err != nil { ++ return nil, err + } +- +- creds := cc.dopts.copts.TransportCredentials +- if creds != nil && creds.Info().ServerName != "" { +- cc.authority = creds.Info().ServerName +- } else if cc.dopts.insecure && cc.dopts.authority != "" { +- cc.authority = cc.dopts.authority +- } else if strings.HasPrefix(cc.target, "unix:") || strings.HasPrefix(cc.target, "unix-abstract:") { +- cc.authority = "localhost" +- } else if strings.HasPrefix(cc.parsedTarget.Endpoint, ":") { +- cc.authority = "localhost" + cc.parsedTarget.Endpoint +- } else { +- // Use endpoint from "scheme://authority/endpoint" as the default +- // authority for ClientConn. +- cc.authority = cc.parsedTarget.Endpoint ++ if err = cc.determineAuthority(); err != nil { ++ return nil, err + } + +- if cc.dopts.scChan != nil && !scSet { ++ if cc.dopts.scChan != nil { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: +@@ -297,55 +257,224 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * + go cc.scWatcher() + } + ++ // This creates the name resolver, load balancer, blocking picker etc. ++ if err := cc.exitIdleMode(); err != nil { ++ return nil, err ++ } ++ ++ // Configure idleness support with configured idle timeout or default idle ++ // timeout duration. Idleness can be explicitly disabled by the user, by ++ // setting the dial option to 0. ++ cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) ++ ++ // Return early for non-blocking dials. ++ if !cc.dopts.block { ++ return cc, nil ++ } ++ ++ // A blocking dial blocks until the clientConn is ready. ++ for { ++ s := cc.GetState() ++ if s == connectivity.Idle { ++ cc.Connect() ++ } ++ if s == connectivity.Ready { ++ return cc, nil ++ } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { ++ if err = cc.connectionError(); err != nil { ++ terr, ok := err.(interface { ++ Temporary() bool ++ }) ++ if ok && !terr.Temporary() { ++ return nil, err ++ } ++ } ++ } ++ if !cc.WaitForStateChange(ctx, s) { ++ // ctx got timeout or canceled. ++ if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { ++ return nil, err ++ } ++ return nil, ctx.Err() ++ } ++ } ++} ++ ++// addTraceEvent is a helper method to add a trace event on the channel. If the ++// channel is a nested one, the same event is also added on the parent channel. ++func (cc *ClientConn) addTraceEvent(msg string) { ++ ted := &channelz.TraceEventDesc{ ++ Desc: fmt.Sprintf("Channel %s", msg), ++ Severity: channelz.CtInfo, ++ } ++ if cc.dopts.channelzParentID != nil { ++ ted.Parent = &channelz.TraceEventDesc{ ++ Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), ++ Severity: channelz.CtInfo, ++ } ++ } ++ channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) ++} ++ ++// exitIdleMode moves the channel out of idle mode by recreating the name ++// resolver and load balancer. 
++func (cc *ClientConn) exitIdleMode() error { ++ cc.mu.Lock() ++ if cc.conns == nil { ++ cc.mu.Unlock() ++ return errConnClosing ++ } ++ if cc.idlenessState != ccIdlenessStateIdle { ++ cc.mu.Unlock() ++ logger.Info("ClientConn asked to exit idle mode when not in idle mode") ++ return nil ++ } ++ ++ defer func() { ++ // When Close() and exitIdleMode() race against each other, one of the ++ // following two can happen: ++ // - Close() wins the race and runs first. exitIdleMode() runs after, and ++ // sees that the ClientConn is already closed and hence returns early. ++ // - exitIdleMode() wins the race and runs first and recreates the balancer ++ // and releases the lock before recreating the resolver. If Close() runs ++ // in this window, it will wait for exitIdleMode to complete. ++ // ++ // We achieve this synchronization using the below condition variable. ++ cc.mu.Lock() ++ cc.idlenessState = ccIdlenessStateActive ++ cc.exitIdleCond.Signal() ++ cc.mu.Unlock() ++ }() ++ ++ cc.idlenessState = ccIdlenessStateExitingIdle ++ exitedIdle := false ++ if cc.blockingpicker == nil { ++ cc.blockingpicker = newPickerWrapper() ++ } else { ++ cc.blockingpicker.exitIdleMode() ++ exitedIdle = true ++ } ++ + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } +- cc.balancerBuildOpts = balancer.BuildOptions{ +- DialCreds: credsClone, +- CredsBundle: cc.dopts.copts.CredsBundle, +- Dialer: cc.dopts.copts.Dialer, +- CustomUserAgent: cc.dopts.copts.UserAgent, +- ChannelzParentID: cc.channelzID, +- Target: cc.parsedTarget, ++ if cc.balancerWrapper == nil { ++ cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ ++ DialCreds: credsClone, ++ CredsBundle: cc.dopts.copts.CredsBundle, ++ Dialer: cc.dopts.copts.Dialer, ++ Authority: cc.authority, ++ CustomUserAgent: cc.dopts.copts.UserAgent, ++ ChannelzParentID: cc.channelzID, ++ Target: cc.parsedTarget, ++ }) ++ } else { ++ cc.balancerWrapper.exitIdleMode() + } ++ cc.firstResolveEvent = grpcsync.NewEvent() ++ cc.mu.Unlock() + +- // Build the resolver. +- rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) +- if err != nil { +- return nil, fmt.Errorf("failed to build resolver: %v", err) ++ // This needs to be called without cc.mu because this builds a new resolver ++ // which might update state or report error inline which needs to be handled ++ // by cc.updateResolverState() which also grabs cc.mu. ++ if err := cc.initResolverWrapper(credsClone); err != nil { ++ return err ++ } ++ ++ if exitedIdle { ++ cc.addTraceEvent("exiting idle mode") + } ++ return nil ++} ++ ++// enterIdleMode puts the channel in idle mode, and as part of it shuts down the ++// name resolver, load balancer and any subchannels. ++func (cc *ClientConn) enterIdleMode() error { + cc.mu.Lock() +- cc.resolverWrapper = rWrapper ++ if cc.conns == nil { ++ cc.mu.Unlock() ++ return ErrClientConnClosing ++ } ++ if cc.idlenessState != ccIdlenessStateActive { ++ logger.Error("ClientConn asked to enter idle mode when not active") ++ return nil ++ } ++ ++ // cc.conns == nil is a proxy for the ClientConn being closed. So, instead ++ // of setting it to nil here, we recreate the map. This also means that we ++ // don't have to do this when exiting idle mode. ++ conns := cc.conns ++ cc.conns = make(map[*addrConn]struct{}) ++ ++ // TODO: Currently, we close the resolver wrapper upon entering idle mode ++ // and create a new one upon exiting idle mode. 
This means that the ++ // `cc.resolverWrapper` field would be overwritten everytime we exit idle ++ // mode. While this means that we need to hold `cc.mu` when accessing ++ // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should ++ // try to do the same for the balancer and picker wrappers too. ++ cc.resolverWrapper.close() ++ cc.blockingpicker.enterIdleMode() ++ cc.balancerWrapper.enterIdleMode() ++ cc.csMgr.updateState(connectivity.Idle) ++ cc.idlenessState = ccIdlenessStateIdle + cc.mu.Unlock() + +- // A blocking dial blocks until the clientConn is ready. +- if cc.dopts.block { +- for { +- s := cc.GetState() +- if s == connectivity.Ready { +- break +- } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { +- if err = cc.connectionError(); err != nil { +- terr, ok := err.(interface { +- Temporary() bool +- }) +- if ok && !terr.Temporary() { +- return nil, err +- } +- } +- } +- if !cc.WaitForStateChange(ctx, s) { +- // ctx got timeout or canceled. +- if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { +- return nil, err +- } +- return nil, ctx.Err() ++ go func() { ++ cc.addTraceEvent("entering idle mode") ++ for ac := range conns { ++ ac.tearDown(errConnIdling) ++ } ++ }() ++ return nil ++} ++ ++// validateTransportCredentials performs a series of checks on the configured ++// transport credentials. It returns a non-nil error if any of these conditions ++// are met: ++// - no transport creds and no creds bundle is configured ++// - both transport creds and creds bundle are configured ++// - creds bundle is configured, but it lacks a transport credentials ++// - insecure transport creds configured alongside call creds that require ++// transport level security ++// ++// If none of the above conditions are met, the configured credentials are ++// deemed valid and a nil error is returned. ++func (cc *ClientConn) validateTransportCredentials() error { ++ if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { ++ return errNoTransportSecurity ++ } ++ if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { ++ return errTransportCredsAndBundle ++ } ++ if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { ++ return errNoTransportCredsInBundle ++ } ++ transportCreds := cc.dopts.copts.TransportCredentials ++ if transportCreds == nil { ++ transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() ++ } ++ if transportCreds.Info().SecurityProtocol == "insecure" { ++ for _, cd := range cc.dopts.copts.PerRPCCredentials { ++ if cd.RequireTransportSecurity() { ++ return errTransportCredentialsMissing + } + } + } ++ return nil ++} + +- return cc, nil ++// channelzRegistration registers the newly created ClientConn with channelz and ++// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. ++// A channelz trace event is emitted for ClientConn creation. If the newly ++// created ClientConn is a nested one, i.e a valid parent ClientConn ID is ++// specified via a dial option, the trace event is also added to the parent. ++// ++// Doesn't grab cc.mu as this method is expected to be called only at Dial time. 
++func (cc *ClientConn) channelzRegistration(target string) { ++ cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) ++ cc.addTraceEvent("created") ++ cc.csMgr.channelzID = cc.channelzID + } + + // chainUnaryClientInterceptors chains all unary client interceptors into one. +@@ -416,7 +545,7 @@ type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} +- channelzID int64 ++ channelzID *channelz.Identifier + } + + // updateState updates the connectivity.State of ClientConn. +@@ -482,43 +611,67 @@ var _ ClientConnInterface = (*ClientConn)(nil) + // handshakes. It also handles errors on established connections by + // re-resolving the name and reconnecting. + type ClientConn struct { +- ctx context.Context +- cancel context.CancelFunc +- +- target string +- parsedTarget resolver.Target +- authority string +- dopts dialOptions +- csMgr *connectivityStateManager +- +- balancerBuildOpts balancer.BuildOptions +- blockingpicker *pickerWrapper +- ++ ctx context.Context // Initialized using the background context at dial time. ++ cancel context.CancelFunc // Cancelled on close. ++ ++ // The following are initialized at dial time, and are read-only after that. ++ target string // User's dial target. ++ parsedTarget resolver.Target // See parseTargetAndFindResolver(). ++ authority string // See determineAuthority(). ++ dopts dialOptions // Default and user specified dial options. ++ channelzID *channelz.Identifier // Channelz identifier for the channel. ++ resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). ++ balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. ++ idlenessMgr idlenessManager ++ ++ // The following provide their own synchronization, and therefore don't ++ // require cc.mu to be held to access them. ++ csMgr *connectivityStateManager ++ blockingpicker *pickerWrapper + safeConfigSelector iresolver.SafeConfigSelector ++ czData *channelzData ++ retryThrottler atomic.Value // Updated from service config. + +- mu sync.RWMutex +- resolverWrapper *ccResolverWrapper +- sc *ServiceConfig +- conns map[*addrConn]struct{} +- // Keepalive parameter can be updated if a GoAway is received. +- mkp keepalive.ClientParameters +- curBalancerName string +- balancerWrapper *ccBalancerWrapper +- retryThrottler atomic.Value +- ++ // firstResolveEvent is used to track whether the name resolver sent us at ++ // least one update. RPCs block on this event. + firstResolveEvent *grpcsync.Event + +- channelzID int64 // channelz unique identification number +- czData *channelzData ++ // mu protects the following fields. ++ // TODO: split mu so the same mutex isn't used for everything. ++ mu sync.RWMutex ++ resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. ++ sc *ServiceConfig // Latest service config received from the resolver. ++ conns map[*addrConn]struct{} // Set to nil on close. ++ mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. ++ idlenessState ccIdlenessState // Tracks idleness state of the channel. ++ exitIdleCond *sync.Cond // Signalled when channel exits idle. + + lceMu sync.Mutex // protects lastConnectionError + lastConnectionError error + } + ++// ccIdlenessState tracks the idleness state of the channel. ++// ++// Channels start off in `active` and move to `idle` after a period of ++// inactivity. When moving back to `active` upon an incoming RPC, they ++// transition through `exiting_idle`. 
This state is useful for synchronization ++// with Close(). ++// ++// This state tracking is mostly for self-protection. The idlenessManager is ++// expected to keep track of the state as well, and is expected not to call into ++// the ClientConn unnecessarily. ++type ccIdlenessState int8 ++ ++const ( ++ ccIdlenessStateActive ccIdlenessState = iota ++ ccIdlenessStateIdle ++ ccIdlenessStateExitingIdle ++) ++ + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or + // ctx expires. A true value is returned in former case and false in latter. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -537,14 +690,29 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec + + // GetState returns the connectivity.State of ClientConn. + // +-// Experimental ++// # Experimental + // +-// Notice: This API is EXPERIMENTAL and may be changed or removed in a +-// later release. ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a later ++// release. + func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() + } + ++// Connect causes all subchannels in the ClientConn to attempt to connect if ++// the channel is idle. Does not wait for the connection attempts to begin ++// before returning. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a later ++// release. ++func (cc *ClientConn) Connect() { ++ cc.exitIdleMode() ++ // If the ClientConn was not in idle mode, we need to call ExitIdle on the ++ // LB policy so that connections can be created. ++ cc.balancerWrapper.exitIdleMode() ++} ++ + func (cc *ClientConn) scWatcher() { + for { + select { +@@ -622,9 +790,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { + // with the new addresses. + cc.maybeApplyDefaultServiceConfig(nil) + +- if cc.balancerWrapper != nil { +- cc.balancerWrapper.resolverError(err) +- } ++ cc.balancerWrapper.resolverError(err) + + // No addresses are valid with err set; return early. + cc.mu.Unlock() +@@ -632,7 +798,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { + } + + var ret error +- if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { ++ if cc.dopts.disableServiceConfig { ++ channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) ++ cc.maybeApplyDefaultServiceConfig(s.Addresses) ++ } else if s.ServiceConfig == nil { + cc.maybeApplyDefaultServiceConfig(s.Addresses) + // TODO: do we need to apply a failing LB policy if there is no + // default, per the error handling design? 
+@@ -649,16 +818,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { + cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) + } else { + ret = balancer.ErrBadResolverState +- if cc.balancerWrapper == nil { +- var err error +- if s.ServiceConfig.Err != nil { +- err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) +- } else { +- err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) +- } +- cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) +- cc.blockingpicker.updatePicker(base.NewErrPicker(err)) +- cc.csMgr.updateState(connectivity.TransientFailure) ++ if cc.sc == nil { ++ // Apply the failing LB only if we haven't received valid service config ++ // from the name resolver in the past. ++ cc.applyFailingLB(s.ServiceConfig) + cc.mu.Unlock() + return ret + } +@@ -666,24 +829,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { + } + + var balCfg serviceconfig.LoadBalancingConfig +- if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { ++ if cc.sc != nil && cc.sc.lbConfig != nil { + balCfg = cc.sc.lbConfig.cfg + } +- +- cbn := cc.curBalancerName + bw := cc.balancerWrapper + cc.mu.Unlock() +- if cbn != grpclbName { +- // Filter any grpclb addresses since we don't have the grpclb balancer. +- for i := 0; i < len(s.Addresses); { +- if s.Addresses[i].Type == resolver.GRPCLB { +- copy(s.Addresses[i:], s.Addresses[i+1:]) +- s.Addresses = s.Addresses[:len(s.Addresses)-1] +- continue +- } +- i++ +- } +- } ++ + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) + if ret == nil { + ret = uccsErr // prefer ErrBadResolver state since any other error is +@@ -692,56 +843,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { + return ret + } + +-// switchBalancer starts the switching from current balancer to the balancer +-// with the given name. +-// +-// It will NOT send the current address list to the new balancer. If needed, +-// caller of this function should send address list to the new balancer after +-// this function returns. ++// applyFailingLB is akin to configuring an LB policy on the channel which ++// always fails RPCs. Here, an actual LB policy is not configured, but an always ++// erroring picker is configured, which returns errors with information about ++// what was invalid in the received service config. A config selector with no ++// service config is configured, and the connectivity state of the channel is ++// set to TransientFailure. + // + // Caller must hold cc.mu. +-func (cc *ClientConn) switchBalancer(name string) { +- if strings.EqualFold(cc.curBalancerName, name) { +- return +- } +- +- channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) +- if cc.dopts.balancerBuilder != nil { +- channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") +- return +- } +- if cc.balancerWrapper != nil { +- // Don't hold cc.mu while closing the balancers. The balancers may call +- // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex +- // would cause a deadlock in that case. 
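applyFailingLB does not install a real LB policy; it installs a picker that fails every RPC with an error describing the invalid service config and flips the channel to TransientFailure. The shape of such a picker, sketched against a local interface rather than the real balancer.Picker:

package main

import (
	"errors"
	"fmt"
)

// picker is a stand-in for balancer.Picker: choose a connection for one RPC.
type picker interface {
	Pick() (conn string, err error)
}

// errPicker fails every pick with the same error, which is exactly what the
// channel wants while it has no usable service config.
type errPicker struct{ err error }

func (p *errPicker) Pick() (string, error) { return "", p.err }

func newFailingPicker(scErr error) picker {
	return &errPicker{err: fmt.Errorf("bad service config: %w", scErr)}
}

func main() {
	p := newFailingPicker(errors.New("error parsing service config"))
	if _, err := p.Pick(); err != nil {
		fmt.Println(err) // every RPC would fail with this error
	}
}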
+- cc.mu.Unlock() +- cc.balancerWrapper.close() +- cc.mu.Lock() +- } +- +- builder := balancer.Get(name) +- if builder == nil { +- channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) +- channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) +- builder = newPickfirstBuilder() ++func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { ++ var err error ++ if sc.Err != nil { ++ err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) + } else { +- channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) ++ err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) + } +- +- cc.curBalancerName = builder.Name() +- cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) ++ cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) ++ cc.blockingpicker.updatePicker(base.NewErrPicker(err)) ++ cc.csMgr.updateState(connectivity.TransientFailure) + } + + func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { +- cc.mu.Lock() +- if cc.conns == nil { +- cc.mu.Unlock() +- return +- } +- // TODO(bar switching) send updates to all balancer wrappers when balancer +- // gracefully switching is supported. +- cc.balancerWrapper.handleSubConnStateChange(sc, s, err) +- cc.mu.Unlock() ++ cc.balancerWrapper.updateSubConnState(sc, s, err) + } + + // newAddrConn creates an addrConn for addrs and adds it to cc.conns. +@@ -756,27 +879,31 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub + dopts: cc.dopts, + czData: new(channelzData), + resetBackoff: make(chan struct{}), ++ stateChan: make(chan struct{}), + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Track ac in cc. This needs to be done before any getTransport(...) is called. + cc.mu.Lock() ++ defer cc.mu.Unlock() + if cc.conns == nil { +- cc.mu.Unlock() + return nil, ErrClientConnClosing + } +- if channelz.IsOn() { +- ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") +- channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ +- Desc: "Subchannel Created", +- Severity: channelz.CtInfo, +- Parent: &channelz.TraceEventDesc{ +- Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), +- Severity: channelz.CtInfo, +- }, +- }) ++ ++ var err error ++ ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") ++ if err != nil { ++ return nil, err + } ++ channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ ++ Desc: "Subchannel created", ++ Severity: channelz.CtInfo, ++ Parent: &channelz.TraceEventDesc{ ++ Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), ++ Severity: channelz.CtInfo, ++ }, ++ }) ++ + cc.conns[ac] = struct{}{} +- cc.mu.Unlock() + return ac, nil + } + +@@ -806,7 +933,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { + + // Target returns the target string of the ClientConn. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. 
+@@ -833,67 +960,113 @@ func (cc *ClientConn) incrCallsFailed() { + func (ac *addrConn) connect() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { ++ if logger.V(2) { ++ logger.Infof("connect called on shutdown addrConn; ignoring.") ++ } + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { ++ if logger.V(2) { ++ logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) ++ } + ac.mu.Unlock() + return nil + } +- // Update connectivity state within the lock to prevent subsequent or +- // concurrent calls from resetting the transport more than once. +- ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + +- // Start a goroutine connecting to the server asynchronously. +- go ac.resetTransport() ++ ac.resetTransport() + return nil + } + +-// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +-// +-// If ac is Connecting, it returns false. The caller should tear down the ac and +-// create a new one. Note that the backoff will be reset when this happens. +-// +-// If ac is TransientFailure, it updates ac.addrs and returns true. The updated +-// addresses will be picked up by retry in the next iteration after backoff. +-// +-// If ac is Shutdown or Idle, it updates ac.addrs and returns true. +-// +-// If ac is Ready, it checks whether current connected address of ac is in the +-// new addrs list. +-// - If true, it updates ac.addrs and returns true. The ac will keep using +-// the existing connection. +-// - If false, it does nothing and returns false. +-func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ++func equalAddresses(a, b []resolver.Address) bool { ++ if len(a) != len(b) { ++ return false ++ } ++ for i, v := range a { ++ if !v.Equal(b[i]) { ++ return false ++ } ++ } ++ return true ++} ++ ++// updateAddrs updates ac.addrs with the new addresses list and handles active ++// connections or connection attempts. ++func (ac *addrConn) updateAddrs(addrs []resolver.Address) { + ac.mu.Lock() +- defer ac.mu.Unlock() +- channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) ++ channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) ++ ++ if equalAddresses(ac.addrs, addrs) { ++ ac.mu.Unlock() ++ return ++ } ++ ++ ac.addrs = addrs ++ + if ac.state == connectivity.Shutdown || + ac.state == connectivity.TransientFailure || + ac.state == connectivity.Idle { +- ac.addrs = addrs +- return true ++ // We were not connecting, so do nothing but update the addresses. ++ ac.mu.Unlock() ++ return + } + +- if ac.state == connectivity.Connecting { +- return false ++ if ac.state == connectivity.Ready { ++ // Try to find the connected address. ++ for _, a := range addrs { ++ a.ServerName = ac.cc.getServerName(a) ++ if a.Equal(ac.curAddr) { ++ // We are connected to a valid address, so do nothing but ++ // update the addresses. ++ ac.mu.Unlock() ++ return ++ } ++ } + } + +- // ac.state is Ready, try to find the connected address. +- var curAddrFound bool +- for _, a := range addrs { +- if reflect.DeepEqual(ac.curAddr, a) { +- curAddrFound = true +- break +- } ++ // We are either connected to the wrong address or currently connecting. ++ // Stop the current iteration and restart. ++ ++ ac.cancel() ++ ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) ++ ++ // We have to defer here because GracefulClose => Close => onClose, which ++ // requires locking ac.mu. 
++ if ac.transport != nil { ++ defer ac.transport.GracefulClose() ++ ac.transport = nil + } +- channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) +- if curAddrFound { +- ac.addrs = addrs ++ ++ if len(addrs) == 0 { ++ ac.updateConnectivityState(connectivity.Idle, nil) + } + +- return curAddrFound ++ ac.mu.Unlock() ++ ++ // Since we were connecting/connected, we should start a new connection ++ // attempt. ++ go ac.resetTransport() ++} ++ ++// getServerName determines the serverName to be used in the connection ++// handshake. The default value for the serverName is the authority on the ++// ClientConn, which either comes from the user's dial target or through an ++// authority override specified using the WithAuthority dial option. Name ++// resolvers can specify a per-address override for the serverName through the ++// resolver.Address.ServerName field which is used only if the WithAuthority ++// dial option was not used. The rationale is that per-address authority ++// overrides specified by the name resolver can represent a security risk, while ++// an override specified by the user is more dependable since they probably know ++// what they are doing. ++func (cc *ClientConn) getServerName(addr resolver.Address) string { ++ if cc.dopts.authority != "" { ++ return cc.dopts.authority ++ } ++ if addr.ServerName != "" { ++ return addr.ServerName ++ } ++ return cc.authority + } + + func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { +@@ -934,15 +1107,11 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { + return cc.sc.healthCheckConfig + } + +-func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { +- t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ ++func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { ++ return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + Ctx: ctx, + FullMethodName: method, + }) +- if err != nil { +- return nil, nil, toRPCErr(err) +- } +- return t, done, nil + } + + func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { +@@ -967,35 +1136,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel + cc.retryThrottler.Store((*retryThrottler)(nil)) + } + +- if cc.dopts.balancerBuilder == nil { +- // Only look at balancer types and switch balancer if balancer dial +- // option is not set. +- var newBalancerName string +- if cc.sc != nil && cc.sc.lbConfig != nil { +- newBalancerName = cc.sc.lbConfig.name +- } else { +- var isGRPCLB bool +- for _, a := range addrs { +- if a.Type == resolver.GRPCLB { +- isGRPCLB = true +- break +- } +- } +- if isGRPCLB { +- newBalancerName = grpclbName +- } else if cc.sc != nil && cc.sc.LB != nil { +- newBalancerName = *cc.sc.LB +- } else { +- newBalancerName = PickFirstBalancerName ++ var newBalancerName string ++ if cc.sc != nil && cc.sc.lbConfig != nil { ++ newBalancerName = cc.sc.lbConfig.name ++ } else { ++ var isGRPCLB bool ++ for _, a := range addrs { ++ if a.Type == resolver.GRPCLB { ++ isGRPCLB = true ++ break + } + } +- cc.switchBalancer(newBalancerName) +- } else if cc.balancerWrapper == nil { +- // Balancer dial option was set, and this is the first time handling +- // resolved addresses. 
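getServerName resolves the handshake server name with a fixed precedence: a user-supplied authority override wins, then a per-address ServerName from the resolver, then the channel's own authority. A tiny standalone version of that precedence, with a toy address type:

package main

import "fmt"

type address struct {
	Addr       string
	ServerName string
}

// serverName mirrors the precedence in ClientConn.getServerName: user-supplied
// authority override > resolver-provided per-address name > channel authority.
func serverName(authorityOverride, channelAuthority string, addr address) string {
	if authorityOverride != "" {
		return authorityOverride
	}
	if addr.ServerName != "" {
		return addr.ServerName
	}
	return channelAuthority
}

func main() {
	addr := address{Addr: "10.0.0.1:443", ServerName: "backend.internal"}
	fmt.Println(serverName("", "api.example.com", addr))                      // backend.internal
	fmt.Println(serverName("override.example.com", "api.example.com", addr)) // override wins
}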
Build a balancer with dopts.balancerBuilder. +- cc.curBalancerName = cc.dopts.balancerBuilder.Name() +- cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) ++ if isGRPCLB { ++ newBalancerName = grpclbName ++ } else if cc.sc != nil && cc.sc.LB != nil { ++ newBalancerName = *cc.sc.LB ++ } else { ++ newBalancerName = PickFirstBalancerName ++ } + } ++ cc.balancerWrapper.switchTo(newBalancerName) + } + + func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { +@@ -1017,7 +1177,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { + // However, if a previously unavailable network becomes available, this may be + // used to trigger an immediate reconnect. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -1039,44 +1199,45 @@ func (cc *ClientConn) Close() error { + cc.mu.Unlock() + return ErrClientConnClosing + } ++ ++ for cc.idlenessState == ccIdlenessStateExitingIdle { ++ cc.exitIdleCond.Wait() ++ } ++ + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + ++ pWrapper := cc.blockingpicker + rWrapper := cc.resolverWrapper +- cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper +- cc.balancerWrapper = nil ++ idlenessMgr := cc.idlenessMgr + cc.mu.Unlock() + +- cc.blockingpicker.close() +- ++ // The order of closing matters here since the balancer wrapper assumes the ++ // picker is closed before it is closed. ++ if pWrapper != nil { ++ pWrapper.close() ++ } + if bWrapper != nil { + bWrapper.close() + } + if rWrapper != nil { + rWrapper.close() + } ++ if idlenessMgr != nil { ++ idlenessMgr.close() ++ } + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } +- if channelz.IsOn() { +- ted := &channelz.TraceEventDesc{ +- Desc: "Channel Deleted", +- Severity: channelz.CtInfo, +- } +- if cc.dopts.channelzParentID != 0 { +- ted.Parent = &channelz.TraceEventDesc{ +- Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), +- Severity: channelz.CtInfo, +- } +- } +- channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +- // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to +- // the entity being deleted, and thus prevent it from being deleted right away. +- channelz.RemoveEntry(cc.channelzID) +- } ++ cc.addTraceEvent("deleted") ++ // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add ++ // trace reference to the entity being deleted, and thus prevent it from being ++ // deleted right away. ++ channelz.RemoveEntry(cc.channelzID) ++ + return nil + } + +@@ -1101,12 +1262,13 @@ type addrConn struct { + addrs []resolver.Address // All addresses that the resolver resolved to. + + // Use updateConnectivityState for updating addrConn's connectivity state. +- state connectivity.State ++ state connectivity.State ++ stateChan chan struct{} // closed and recreated on every state change. + + backoffIdx int // Needs to be stateful for resetConnectBackoff. + resetBackoff chan struct{} + +- channelzID int64 // channelz unique identification number. ++ channelzID *channelz.Identifier + czData *channelzData + } + +@@ -1115,8 +1277,15 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) + if ac.state == s { + return + } ++ // When changing states, reset the state change channel. 
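With the balancer DialOption gone, the chosen policy name always flows through switchTo; the name comes from the service config's LB config if present, else grpclb when any grpclb address was resolved, else the deprecated LB field, else pick_first. The decision tree in isolation (toy types, not the real ServiceConfig):

package main

import "fmt"

type addr struct {
	isGRPCLB bool
}

type svcConfig struct {
	lbConfigName string  // from the loadBalancingConfig field
	lbPolicy     *string // from the deprecated loadBalancingPolicy field
}

// chooseBalancer mirrors the selection order used before calling switchTo.
func chooseBalancer(sc *svcConfig, addrs []addr) string {
	if sc != nil && sc.lbConfigName != "" {
		return sc.lbConfigName
	}
	for _, a := range addrs {
		if a.isGRPCLB {
			return "grpclb"
		}
	}
	if sc != nil && sc.lbPolicy != nil {
		return *sc.lbPolicy
	}
	return "pick_first"
}

func main() {
	rr := "round_robin"
	fmt.Println(chooseBalancer(&svcConfig{lbPolicy: &rr}, nil)) // round_robin
	fmt.Println(chooseBalancer(nil, []addr{{isGRPCLB: true}}))  // grpclb
	fmt.Println(chooseBalancer(nil, []addr{{}}))                // pick_first
}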
++ close(ac.stateChan) ++ ac.stateChan = make(chan struct{}) + ac.state = s +- channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) ++ if lastErr == nil { ++ channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) ++ } else { ++ channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) ++ } + ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) + } + +@@ -1135,113 +1304,86 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + } + + func (ac *addrConn) resetTransport() { +- for i := 0; ; i++ { +- if i > 0 { +- ac.cc.resolveNow(resolver.ResolveNowOptions{}) +- } ++ ac.mu.Lock() ++ acCtx := ac.ctx ++ if acCtx.Err() != nil { ++ ac.mu.Unlock() ++ return ++ } + +- ac.mu.Lock() +- if ac.state == connectivity.Shutdown { +- ac.mu.Unlock() +- return +- } ++ addrs := ac.addrs ++ backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) ++ // This will be the duration that dial gets to finish. ++ dialDuration := minConnectTimeout ++ if ac.dopts.minConnectTimeout != nil { ++ dialDuration = ac.dopts.minConnectTimeout() ++ } + +- addrs := ac.addrs +- backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) +- // This will be the duration that dial gets to finish. +- dialDuration := minConnectTimeout +- if ac.dopts.minConnectTimeout != nil { +- dialDuration = ac.dopts.minConnectTimeout() +- } ++ if dialDuration < backoffFor { ++ // Give dial more time as we keep failing to connect. ++ dialDuration = backoffFor ++ } ++ // We can potentially spend all the time trying the first address, and ++ // if the server accepts the connection and then hangs, the following ++ // addresses will never be tried. ++ // ++ // The spec doesn't mention what should be done for multiple addresses. ++ // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm ++ connectDeadline := time.Now().Add(dialDuration) + +- if dialDuration < backoffFor { +- // Give dial more time as we keep failing to connect. +- dialDuration = backoffFor ++ ac.updateConnectivityState(connectivity.Connecting, nil) ++ ac.mu.Unlock() ++ ++ if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { ++ ac.cc.resolveNow(resolver.ResolveNowOptions{}) ++ // After exhausting all addresses, the addrConn enters ++ // TRANSIENT_FAILURE. ++ if acCtx.Err() != nil { ++ return + } +- // We can potentially spend all the time trying the first address, and +- // if the server accepts the connection and then hangs, the following +- // addresses will never be tried. +- // +- // The spec doesn't mention what should be done for multiple addresses. +- // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm +- connectDeadline := time.Now().Add(dialDuration) ++ ac.mu.Lock() ++ ac.updateConnectivityState(connectivity.TransientFailure, err) + +- ac.updateConnectivityState(connectivity.Connecting, nil) +- ac.transport = nil ++ // Backoff. ++ b := ac.resetBackoff + ac.mu.Unlock() + +- newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) +- if err != nil { +- // After exhausting all addresses, the addrConn enters +- // TRANSIENT_FAILURE. ++ timer := time.NewTimer(backoffFor) ++ select { ++ case <-timer.C: + ac.mu.Lock() +- if ac.state == connectivity.Shutdown { +- ac.mu.Unlock() +- return +- } +- ac.updateConnectivityState(connectivity.TransientFailure, err) +- +- // Backoff. 
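updateConnectivityState closes stateChan and immediately recreates it on every transition, so any number of waiters can block on the old channel and all wake exactly once. The same broadcast-by-closing idiom in miniature, with invented names:

package main

import (
	"fmt"
	"sync"
)

type stateTracker struct {
	mu        sync.Mutex
	state     string
	stateChan chan struct{} // closed and recreated on every state change
}

func newStateTracker() *stateTracker {
	return &stateTracker{state: "IDLE", stateChan: make(chan struct{})}
}

func (t *stateTracker) set(s string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.state == s {
		return
	}
	close(t.stateChan) // wake everyone waiting on the old channel
	t.stateChan = make(chan struct{})
	t.state = s
}

// waitForChange returns the channel a caller should block on to observe the
// next transition away from the current state.
func (t *stateTracker) waitForChange() <-chan struct{} {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.stateChan
}

func main() {
	t := newStateTracker()
	ch := t.waitForChange()
	go t.set("CONNECTING")
	<-ch // unblocks once the state changes
	fmt.Println("observed a state change")
}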
+- b := ac.resetBackoff ++ ac.backoffIdx++ + ac.mu.Unlock() +- +- timer := time.NewTimer(backoffFor) +- select { +- case <-timer.C: +- ac.mu.Lock() +- ac.backoffIdx++ +- ac.mu.Unlock() +- case <-b: +- timer.Stop() +- case <-ac.ctx.Done(): +- timer.Stop() +- return +- } +- continue ++ case <-b: ++ timer.Stop() ++ case <-acCtx.Done(): ++ timer.Stop() ++ return + } + + ac.mu.Lock() +- if ac.state == connectivity.Shutdown { +- ac.mu.Unlock() +- newTr.Close(fmt.Errorf("reached connectivity state: SHUTDOWN")) +- return ++ if acCtx.Err() == nil { ++ ac.updateConnectivityState(connectivity.Idle, err) + } +- ac.curAddr = addr +- ac.transport = newTr +- ac.backoffIdx = 0 +- +- hctx, hcancel := context.WithCancel(ac.ctx) +- ac.startHealthCheck(hctx) + ac.mu.Unlock() +- +- // Block until the created transport is down. And when this happens, +- // we restart from the top of the addr list. +- <-reconnect.Done() +- hcancel() +- // restart connecting - the top of the loop will set state to +- // CONNECTING. This is against the current connectivity semantics doc, +- // however it allows for graceful behavior for RPCs not yet dispatched +- // - unfortunate timing would otherwise lead to the RPC failing even +- // though the TRANSIENT_FAILURE state (called for by the doc) would be +- // instantaneous. +- // +- // Ideally we should transition to Idle here and block until there is +- // RPC activity that leads to the balancer requesting a reconnect of +- // the associated SubConn. ++ return + } ++ // Success; reset backoff. ++ ac.mu.Lock() ++ ac.backoffIdx = 0 ++ ac.mu.Unlock() + } + +-// tryAllAddrs tries to creates a connection to the addresses, and stop when at the +-// first successful one. It returns the transport, the address and a Event in +-// the successful case. The Event fires when the returned transport disconnects. +-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { ++// tryAllAddrs tries to creates a connection to the addresses, and stop when at ++// the first successful one. It returns an error if no address was successfully ++// connected, or updates ac appropriately with the new transport. ++func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { + var firstConnErr error + for _, addr := range addrs { +- ac.mu.Lock() +- if ac.state == connectivity.Shutdown { +- ac.mu.Unlock() +- return nil, resolver.Address{}, nil, errConnClosing ++ if ctx.Err() != nil { ++ return errConnClosing + } ++ ac.mu.Lock() + + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp +@@ -1255,9 +1397,9 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T + + channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) + +- newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) ++ err := ac.createTransport(ctx, addr, copts, connectDeadline) + if err == nil { +- return newTr, addr, reconnect, nil ++ return nil + } + if firstConnErr == nil { + firstConnErr = err +@@ -1266,86 +1408,90 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T + } + + // Couldn't connect to any address. +- return nil, resolver.Address{}, nil, firstConnErr ++ return firstConnErr + } + +-// createTransport creates a connection to addr. It returns the transport and a +-// Event in the successful case. The Event fires when the returned transport +-// disconnects. 
+-func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { +- prefaceReceived := make(chan struct{}) +- onCloseCalled := make(chan struct{}) +- reconnect := grpcsync.NewEvent() +- +- // addr.ServerName takes precedent over ClientConn authority, if present. +- if addr.ServerName == "" { +- addr.ServerName = ac.cc.authority +- } ++// createTransport creates a connection to addr. It returns an error if the ++// address was not successfully connected, or updates ac appropriately with the ++// new transport. ++func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { ++ addr.ServerName = ac.cc.getServerName(addr) ++ hctx, hcancel := context.WithCancel(ctx) + +- once := sync.Once{} +- onGoAway := func(r transport.GoAwayReason) { ++ onClose := func(r transport.GoAwayReason) { + ac.mu.Lock() ++ defer ac.mu.Unlock() ++ // adjust params based on GoAwayReason + ac.adjustParams(r) +- once.Do(func() { +- if ac.state == connectivity.Ready { +- // Prevent this SubConn from being used for new RPCs by setting its +- // state to Connecting. +- // +- // TODO: this should be Idle when grpc-go properly supports it. +- ac.updateConnectivityState(connectivity.Connecting, nil) +- } +- }) +- ac.mu.Unlock() +- reconnect.Fire() +- } +- +- onClose := func() { +- ac.mu.Lock() +- once.Do(func() { +- if ac.state == connectivity.Ready { +- // Prevent this SubConn from being used for new RPCs by setting its +- // state to Connecting. +- // +- // TODO: this should be Idle when grpc-go properly supports it. +- ac.updateConnectivityState(connectivity.Connecting, nil) +- } +- }) +- ac.mu.Unlock() +- close(onCloseCalled) +- reconnect.Fire() +- } +- +- onPrefaceReceipt := func() { +- close(prefaceReceived) ++ if ctx.Err() != nil { ++ // Already shut down or connection attempt canceled. tearDown() or ++ // updateAddrs() already cleared the transport and canceled hctx ++ // via ac.ctx, and we expected this connection to be closed, so do ++ // nothing here. ++ return ++ } ++ hcancel() ++ if ac.transport == nil { ++ // We're still connecting to this address, which could error. Do ++ // not update the connectivity state or resolve; these will happen ++ // at the end of the tryAllAddrs connection loop in the event of an ++ // error. ++ return ++ } ++ ac.transport = nil ++ // Refresh the name resolver on any connection loss. ++ ac.cc.resolveNow(resolver.ResolveNowOptions{}) ++ // Always go idle and wait for the LB policy to initiate a new ++ // connection attempt. ++ ac.updateConnectivityState(connectivity.Idle, nil) + } + +- connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) ++ connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) + defer cancel() +- if channelz.IsOn() { +- copts.ChannelzParentID = ac.channelzID +- } ++ copts.ChannelzParentID = ac.channelzID + +- newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) ++ newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + if err != nil { ++ if logger.V(2) { ++ logger.Infof("Creating new client transport to %q: %v", addr, err) ++ } + // newTr is either nil, or closed. +- channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. 
Reconnecting...", addr, err) +- return nil, nil, err ++ hcancel() ++ channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) ++ return err + } + +- select { +- case <-time.After(time.Until(connectDeadline)): +- // We didn't get the preface in time. +- newTr.Close(fmt.Errorf("failed to receive server preface within timeout")) +- channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) +- return nil, nil, errors.New("timed out waiting for server handshake") +- case <-prefaceReceived: +- // We got the preface - huzzah! things are good. +- case <-onCloseCalled: +- // The transport has already closed - noop. +- return nil, nil, errors.New("connection closed") +- // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. ++ ac.mu.Lock() ++ defer ac.mu.Unlock() ++ if ctx.Err() != nil { ++ // This can happen if the subConn was removed while in `Connecting` ++ // state. tearDown() would have set the state to `Shutdown`, but ++ // would not have closed the transport since ac.transport would not ++ // have been set at that point. ++ // ++ // We run this in a goroutine because newTr.Close() calls onClose() ++ // inline, which requires locking ac.mu. ++ // ++ // The error we pass to Close() is immaterial since there are no open ++ // streams at this point, so no trailers with error details will be sent ++ // out. We just need to pass a non-nil error. ++ // ++ // This can also happen when updateAddrs is called during a connection ++ // attempt. ++ go newTr.Close(transport.ErrConnClosing) ++ return nil + } +- return newTr, reconnect, nil ++ if hctx.Err() != nil { ++ // onClose was already called for this connection, but the connection ++ // was successfully established first. Consider it a success and set ++ // the new state to Idle. ++ ac.updateConnectivityState(connectivity.Idle, nil) ++ return nil ++ } ++ ac.curAddr = addr ++ ac.transport = newTr ++ ac.startHealthCheck(hctx) // Will set state to READY if appropriate. ++ return nil + } + + // startHealthCheck starts the health checking stream (RPC) to watch the health +@@ -1415,7 +1561,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { + if status.Code(err) == codes.Unimplemented { + channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") + } else { +- channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) ++ channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) + } + } + }() +@@ -1439,6 +1585,29 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { + return nil + } + ++// getTransport waits until the addrconn is ready and returns the transport. ++// If the context expires first, returns an appropriate status. If the ++// addrConn is stopped first, returns an Unavailable status error. 
++func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { ++ for ctx.Err() == nil { ++ ac.mu.Lock() ++ t, state, sc := ac.transport, ac.state, ac.stateChan ++ ac.mu.Unlock() ++ if state == connectivity.Ready { ++ return t, nil ++ } ++ if state == connectivity.Shutdown { ++ return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") ++ } ++ ++ select { ++ case <-ctx.Done(): ++ case <-sc: ++ } ++ } ++ return nil, status.FromContextError(ctx.Err()).Err() ++} ++ + // tearDown starts to tear down the addrConn. + // + // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +@@ -1466,19 +1635,18 @@ func (ac *addrConn) tearDown(err error) { + curTr.GracefulClose() + ac.mu.Lock() + } +- if channelz.IsOn() { +- channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ +- Desc: "Subchannel Deleted", ++ channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ ++ Desc: "Subchannel deleted", ++ Severity: channelz.CtInfo, ++ Parent: &channelz.TraceEventDesc{ ++ Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), + Severity: channelz.CtInfo, +- Parent: &channelz.TraceEventDesc{ +- Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), +- Severity: channelz.CtInfo, +- }, +- }) +- // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to +- // the entity being deleted, and thus prevent it from being deleted right away. +- channelz.RemoveEntry(ac.channelzID) +- } ++ }, ++ }) ++ // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add ++ // trace reference to the entity being deleted, and thus prevent it from ++ // being deleted right away. ++ channelz.RemoveEntry(ac.channelzID) + ac.mu.Unlock() + } + +@@ -1567,6 +1735,9 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { + // referenced by users. + var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + ++// getResolver finds the scheme in the cc's resolvers or the global registry. ++// scheme should always be lowercase (typically by virtue of url.Parse() ++// performing proper RFC3986 behavior). + func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if scheme == rb.Scheme() { +@@ -1587,3 +1758,151 @@ func (cc *ClientConn) connectionError() error { + defer cc.lceMu.Unlock() + return cc.lastConnectionError + } ++ ++// parseTargetAndFindResolver parses the user's dial target and stores the ++// parsed target in `cc.parsedTarget`. ++// ++// The resolver to use is determined based on the scheme in the parsed target ++// and the same is stored in `cc.resolverBuilder`. ++// ++// Doesn't grab cc.mu as this method is expected to be called only at Dial time. ++func (cc *ClientConn) parseTargetAndFindResolver() error { ++ channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) ++ ++ var rb resolver.Builder ++ parsedTarget, err := parseTarget(cc.target) ++ if err != nil { ++ channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) ++ } else { ++ channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) ++ rb = cc.getResolver(parsedTarget.URL.Scheme) ++ if rb != nil { ++ cc.parsedTarget = parsedTarget ++ cc.resolverBuilder = rb ++ return nil ++ } ++ } ++ ++ // We are here because the user's dial target did not contain a scheme or ++ // specified an unregistered scheme. 
We should fallback to the default ++ // scheme, except when a custom dialer is specified in which case, we should ++ // always use passthrough scheme. ++ defScheme := resolver.GetDefaultScheme() ++ channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) ++ canonicalTarget := defScheme + ":///" + cc.target ++ ++ parsedTarget, err = parseTarget(canonicalTarget) ++ if err != nil { ++ channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) ++ return err ++ } ++ channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) ++ rb = cc.getResolver(parsedTarget.URL.Scheme) ++ if rb == nil { ++ return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) ++ } ++ cc.parsedTarget = parsedTarget ++ cc.resolverBuilder = rb ++ return nil ++} ++ ++// parseTarget uses RFC 3986 semantics to parse the given target into a ++// resolver.Target struct containing scheme, authority and url. Query ++// params are stripped from the endpoint. ++func parseTarget(target string) (resolver.Target, error) { ++ u, err := url.Parse(target) ++ if err != nil { ++ return resolver.Target{}, err ++ } ++ ++ return resolver.Target{ ++ Scheme: u.Scheme, ++ Authority: u.Host, ++ URL: *u, ++ }, nil ++} ++ ++// Determine channel authority. The order of precedence is as follows: ++// - user specified authority override using `WithAuthority` dial option ++// - creds' notion of server name for the authentication handshake ++// - endpoint from dial target of the form "scheme://[authority]/endpoint" ++// ++// Stores the determined authority in `cc.authority`. ++// ++// Returns a non-nil error if the authority returned by the transport ++// credentials do not match the authority configured through the dial option. ++// ++// Doesn't grab cc.mu as this method is expected to be called only at Dial time. ++func (cc *ClientConn) determineAuthority() error { ++ dopts := cc.dopts ++ // Historically, we had two options for users to specify the serverName or ++ // authority for a channel. One was through the transport credentials ++ // (either in its constructor, or through the OverrideServerName() method). ++ // The other option (for cases where WithInsecure() dial option was used) ++ // was to use the WithAuthority() dial option. ++ // ++ // A few things have changed since: ++ // - `insecure` package with an implementation of the `TransportCredentials` ++ // interface for the insecure case ++ // - WithAuthority() dial option support for secure credentials ++ authorityFromCreds := "" ++ if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { ++ authorityFromCreds = creds.Info().ServerName ++ } ++ authorityFromDialOption := dopts.authority ++ if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { ++ return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) ++ } ++ ++ endpoint := cc.parsedTarget.Endpoint() ++ target := cc.target ++ switch { ++ case authorityFromDialOption != "": ++ cc.authority = authorityFromDialOption ++ case authorityFromCreds != "": ++ cc.authority = authorityFromCreds ++ case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): ++ // TODO: remove when the unix resolver implements optional interface to ++ // return channel authority. 
++ cc.authority = "localhost" ++ case strings.HasPrefix(endpoint, ":"): ++ cc.authority = "localhost" + endpoint ++ default: ++ // TODO: Define an optional interface on the resolver builder to return ++ // the channel authority given the user's dial target. For resolvers ++ // which don't implement this interface, we will use the endpoint from ++ // "scheme://authority/endpoint" as the default authority. ++ cc.authority = endpoint ++ } ++ channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) ++ return nil ++} ++ ++// initResolverWrapper creates a ccResolverWrapper, which builds the name ++// resolver. This method grabs the lock to assign the newly built resolver ++// wrapper to the cc.resolverWrapper field. ++func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { ++ rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ ++ target: cc.parsedTarget, ++ builder: cc.resolverBuilder, ++ bOpts: resolver.BuildOptions{ ++ DisableServiceConfig: cc.dopts.disableServiceConfig, ++ DialCreds: creds, ++ CredsBundle: cc.dopts.copts.CredsBundle, ++ Dialer: cc.dopts.copts.Dialer, ++ }, ++ channelzID: cc.channelzID, ++ }) ++ if err != nil { ++ return fmt.Errorf("failed to build resolver: %v", err) ++ } ++ // Resolver implementations may report state update or error inline when ++ // built (or right after), and this is handled in cc.updateResolverState. ++ // Also, an error from the resolver might lead to a re-resolution request ++ // from the balancer, which is handled in resolveNow() where ++ // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. ++ cc.mu.Lock() ++ cc.resolverWrapper = rw ++ cc.mu.Unlock() ++ return nil ++} +diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go +index 0b206a5..934fac2 100644 +--- a/vendor/google.golang.org/grpc/codes/code_string.go ++++ b/vendor/google.golang.org/grpc/codes/code_string.go +@@ -18,7 +18,15 @@ + + package codes + +-import "strconv" ++import ( ++ "strconv" ++ ++ "google.golang.org/grpc/internal" ++) ++ ++func init() { ++ internal.CanonicalString = canonicalString ++} + + func (c Code) String() string { + switch c { +@@ -60,3 +68,44 @@ func (c Code) String() string { + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } + } ++ ++func canonicalString(c Code) string { ++ switch c { ++ case OK: ++ return "OK" ++ case Canceled: ++ return "CANCELLED" ++ case Unknown: ++ return "UNKNOWN" ++ case InvalidArgument: ++ return "INVALID_ARGUMENT" ++ case DeadlineExceeded: ++ return "DEADLINE_EXCEEDED" ++ case NotFound: ++ return "NOT_FOUND" ++ case AlreadyExists: ++ return "ALREADY_EXISTS" ++ case PermissionDenied: ++ return "PERMISSION_DENIED" ++ case ResourceExhausted: ++ return "RESOURCE_EXHAUSTED" ++ case FailedPrecondition: ++ return "FAILED_PRECONDITION" ++ case Aborted: ++ return "ABORTED" ++ case OutOfRange: ++ return "OUT_OF_RANGE" ++ case Unimplemented: ++ return "UNIMPLEMENTED" ++ case Internal: ++ return "INTERNAL" ++ case Unavailable: ++ return "UNAVAILABLE" ++ case DataLoss: ++ return "DATA_LOSS" ++ case Unauthenticated: ++ return "UNAUTHENTICATED" ++ default: ++ return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" ++ } ++} +diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go +index 0101562..4a89926 100644 +--- a/vendor/google.golang.org/grpc/connectivity/connectivity.go ++++ 
b/vendor/google.golang.org/grpc/connectivity/connectivity.go +@@ -18,7 +18,6 @@ + + // Package connectivity defines connectivity semantics. + // For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +-// All APIs in this package are experimental. + package connectivity + + import ( +@@ -45,7 +44,7 @@ func (s State) String() string { + return "SHUTDOWN" + default: + logger.Errorf("unknown connectivity state: %d", s) +- return "Invalid-State" ++ return "INVALID_STATE" + } + } + +@@ -61,3 +60,35 @@ const ( + // Shutdown indicates the ClientConn has started shutting down. + Shutdown + ) ++ ++// ServingMode indicates the current mode of operation of the server. ++// ++// Only xDS enabled gRPC servers currently report their serving mode. ++type ServingMode int ++ ++const ( ++ // ServingModeStarting indicates that the server is starting up. ++ ServingModeStarting ServingMode = iota ++ // ServingModeServing indicates that the server contains all required ++ // configuration and is serving RPCs. ++ ServingModeServing ++ // ServingModeNotServing indicates that the server is not accepting new ++ // connections. Existing connections will be closed gracefully, allowing ++ // in-progress RPCs to complete. A server enters this mode when it does not ++ // contain the required configuration to serve RPCs. ++ ServingModeNotServing ++) ++ ++func (s ServingMode) String() string { ++ switch s { ++ case ServingModeStarting: ++ return "STARTING" ++ case ServingModeServing: ++ return "SERVING" ++ case ServingModeNotServing: ++ return "NOT_SERVING" ++ default: ++ logger.Errorf("unknown serving mode: %d", s) ++ return "INVALID_MODE" ++ } ++} +diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go +index 7eee7e4..5feac3a 100644 +--- a/vendor/google.golang.org/grpc/credentials/credentials.go ++++ b/vendor/google.golang.org/grpc/credentials/credentials.go +@@ -36,16 +36,16 @@ import ( + // PerRPCCredentials defines the common interface for the credentials which need to + // attach security information to every RPC (e.g., oauth2). + type PerRPCCredentials interface { +- // GetRequestMetadata gets the current request metadata, refreshing +- // tokens if required. This should be called by the transport layer on +- // each request, and the data should be populated in headers or other +- // context. If a status code is returned, it will be used as the status +- // for the RPC. uri is the URI of the entry point for the request. +- // When supported by the underlying implementation, ctx can be used for +- // timeout and cancellation. Additionally, RequestInfo data will be +- // available via ctx to this call. +- // TODO(zhaoq): Define the set of the qualified keys instead of leaving +- // it as an arbitrary string. ++ // GetRequestMetadata gets the current request metadata, refreshing tokens ++ // if required. This should be called by the transport layer on each ++ // request, and the data should be populated in headers or other ++ // context. If a status code is returned, it will be used as the status for ++ // the RPC (restricted to an allowable set of codes as defined by gRFC ++ // A54). uri is the URI of the entry point for the request. When supported ++ // by the underlying implementation, ctx can be used for timeout and ++ // cancellation. Additionally, RequestInfo data will be available via ctx ++ // to this call. 
TODO(zhaoq): Define the set of the qualified keys instead ++ // of leaving it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. +@@ -140,6 +140,11 @@ type TransportCredentials interface { + // Additionally, ClientHandshakeInfo data will be available via the context + // passed to this call. + // ++ // The second argument to this method is the `:authority` header value used ++ // while creating new streams on this connection after authentication ++ // succeeds. Implementations must use this as the server name during the ++ // authentication handshake. ++ // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns +@@ -153,9 +158,13 @@ type TransportCredentials interface { + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials +- // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. +- // gRPC internals also use it to override the virtual hosting name if it is set. +- // It must be called before dialing. Currently, this is only used by grpclb. ++ // OverrideServerName specifies the value used for the following: ++ // - verifying the hostname on the returned certificates ++ // - as SNI in the client's handshake to support virtual hosting ++ // - as the value for `:authority` header at stream creation time ++ // ++ // Deprecated: use grpc.WithAuthority instead. Will be supported ++ // throughout 1.x. + OverrideServerName(string) error + } + +@@ -169,8 +178,18 @@ type TransportCredentials interface { + // + // This API is experimental. + type Bundle interface { ++ // TransportCredentials returns the transport credentials from the Bundle. ++ // ++ // Implementations must return non-nil transport credentials. If transport ++ // security is not needed by the Bundle, implementations may choose to ++ // return insecure.NewCredentials(). + TransportCredentials() TransportCredentials ++ ++ // PerRPCCredentials returns the per-RPC credentials from the Bundle. ++ // ++ // May be nil if per-RPC credentials are not needed. + PerRPCCredentials() PerRPCCredentials ++ + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the + // existing Bundle may cause races. + // +diff --git a/vendor/google.golang.org/grpc/credentials/go12.go b/vendor/google.golang.org/grpc/credentials/go12.go +deleted file mode 100644 +index ccbf35b..0000000 +--- a/vendor/google.golang.org/grpc/credentials/go12.go ++++ /dev/null +@@ -1,30 +0,0 @@ +-// +build go1.12 +- +-/* +- * +- * Copyright 2019 gRPC authors. +- * +- * Licensed under the Apache License, Version 2.0 (the "License"); +- * you may not use this file except in compliance with the License. +- * You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- * +- */ +- +-package credentials +- +-import "crypto/tls" +- +-// This init function adds cipher suite constants only defined in Go 1.12. +-func init() { +- cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" +- cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" +- cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" +-} +diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +index c4fa27c..82bee14 100644 +--- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go ++++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +@@ -18,11 +18,6 @@ + + // Package insecure provides an implementation of the + // credentials.TransportCredentials interface which disables transport security. +-// +-// Experimental +-// +-// Notice: This package is EXPERIMENTAL and may be changed or removed in a +-// later release. + package insecure + + import ( +@@ -33,6 +28,9 @@ import ( + ) + + // NewCredentials returns a credentials which disables transport security. ++// ++// Note that using this credentials with per-RPC credentials which require ++// transport security is incompatible and will cause grpc.Dial() to fail. + func NewCredentials() credentials.TransportCredentials { + return insecureTC{} + } +@@ -72,3 +70,29 @@ type info struct { + func (info) AuthType() string { + return "insecure" + } ++ ++// insecureBundle implements an insecure bundle. ++// An insecure bundle provides a thin wrapper around insecureTC to support ++// the credentials.Bundle interface. ++type insecureBundle struct{} ++ ++// NewBundle returns a bundle with disabled transport security and no per rpc credential. ++func NewBundle() credentials.Bundle { ++ return insecureBundle{} ++} ++ ++// NewWithMode returns a new insecure Bundle. The mode is ignored. ++func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { ++ return insecureBundle{}, nil ++} ++ ++// PerRPCCredentials returns an nil implementation as insecure ++// bundle does not support a per rpc credential. ++func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { ++ return nil ++} ++ ++// TransportCredentials returns the underlying insecure transport credential. ++func (insecureBundle) TransportCredentials() credentials.TransportCredentials { ++ return NewCredentials() ++} +diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go +index 8ee7124..877b7cd 100644 +--- a/vendor/google.golang.org/grpc/credentials/tls.go ++++ b/vendor/google.golang.org/grpc/credentials/tls.go +@@ -23,9 +23,9 @@ import ( + "crypto/tls" + "crypto/x509" + "fmt" +- "io/ioutil" + "net" + "net/url" ++ "os" + + credinternal "google.golang.org/grpc/internal/credentials" + ) +@@ -166,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor + // it will override the virtual host name of authority (e.g. :authority header + // field) in requests. 
+ func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { +- b, err := ioutil.ReadFile(certFile) ++ b, err := os.ReadFile(certFile) + if err != nil { + return nil, err + } +@@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error + // TLSChannelzSecurityValue defines the struct that TLS protocol should return + // from GetSecurityValue(), containing security info like cipher and certificate used. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -230,4 +230,7 @@ var cipherSuiteLookup = map[uint16]string{ + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", ++ tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", ++ tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", ++ tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", + } +diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go +index 7a49723..15a3d51 100644 +--- a/vendor/google.golang.org/grpc/dialoptions.go ++++ b/vendor/google.golang.org/grpc/dialoptions.go +@@ -20,22 +20,34 @@ package grpc + + import ( + "context" +- "fmt" + "net" + "time" + + "google.golang.org/grpc/backoff" +- "google.golang.org/grpc/balancer" ++ "google.golang.org/grpc/channelz" + "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + internalbackoff "google.golang.org/grpc/internal/backoff" +- "google.golang.org/grpc/internal/envconfig" ++ "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + ) + ++func init() { ++ internal.AddGlobalDialOptions = func(opt ...DialOption) { ++ globalDialOptions = append(globalDialOptions, opt...) ++ } ++ internal.ClearGlobalDialOptions = func() { ++ globalDialOptions = nil ++ } ++ internal.WithBinaryLogger = withBinaryLogger ++ internal.JoinDialOptions = newJoinDialOption ++ internal.DisableGlobalDialOptions = newDisableGlobalDialOptions ++} ++ + // dialOptions configure a Dial call. dialOptions are set by the DialOption + // values passed to Dial. + type dialOptions struct { +@@ -45,20 +57,18 @@ type dialOptions struct { + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + +- cp Compressor +- dc Decompressor +- bs internalbackoff.Strategy +- block bool +- returnLastError bool +- insecure bool +- timeout time.Duration +- scChan <-chan ServiceConfig +- authority string +- copts transport.ConnectOptions +- callOptions []CallOption +- // This is used by WithBalancerName dial option. 
+- balancerBuilder balancer.Builder +- channelzParentID int64 ++ cp Compressor ++ dc Decompressor ++ bs internalbackoff.Strategy ++ block bool ++ returnLastError bool ++ timeout time.Duration ++ scChan <-chan ServiceConfig ++ authority string ++ binaryLogger binarylog.Logger ++ copts transport.ConnectOptions ++ callOptions []CallOption ++ channelzParentID *channelz.Identifier + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool +@@ -67,6 +77,7 @@ type dialOptions struct { + defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. + defaultServiceConfigRawJSON *string + resolvers []resolver.Builder ++ idleTimeout time.Duration + } + + // DialOption configures how we set up the connection. +@@ -74,10 +85,12 @@ type DialOption interface { + apply(*dialOptions) + } + ++var globalDialOptions []DialOption ++ + // EmptyDialOption does not alter the dial configuration. It can be embedded in + // another structure to build custom dial options. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -85,6 +98,16 @@ type EmptyDialOption struct{} + + func (EmptyDialOption) apply(*dialOptions) {} + ++type disableGlobalDialOptions struct{} ++ ++func (disableGlobalDialOptions) apply(*dialOptions) {} ++ ++// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn ++// from applying the global DialOptions (set via AddGlobalDialOptions). ++func newDisableGlobalDialOptions() DialOption { ++ return &disableGlobalDialOptions{} ++} ++ + // funcDialOption wraps a function that modifies dialOptions into an + // implementation of the DialOption interface. + type funcDialOption struct { +@@ -101,13 +124,28 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + } + } + ++type joinDialOption struct { ++ opts []DialOption ++} ++ ++func (jdo *joinDialOption) apply(do *dialOptions) { ++ for _, opt := range jdo.opts { ++ opt.apply(do) ++ } ++} ++ ++func newJoinDialOption(opts ...DialOption) DialOption { ++ return &joinDialOption{opts: opts} ++} ++ + // WithWriteBufferSize determines how much data can be batched before doing a + // write on the wire. The corresponding memory allocation for this buffer will + // be twice the size to keep syscalls low. The default value for this buffer is + // 32KB. + // +-// Zero will disable the write buffer such that each write will be on underlying +-// connection. Note: A Send call may not directly translate to a write. ++// Zero or negative values will disable the write buffer such that each write ++// will be on underlying connection. Note: A Send call may not directly ++// translate to a write. + func WithWriteBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.WriteBufferSize = s +@@ -117,8 +155,9 @@ func WithWriteBufferSize(s int) DialOption { + // WithReadBufferSize lets you set the size of read buffer, this determines how + // much data can be read at most for each read syscall. + // +-// The default value for this buffer is 32KB. Zero will disable read buffer for +-// a connection so data framer can access the underlying conn directly. ++// The default value for this buffer is 32KB. Zero or negative values will ++// disable read buffer for a connection so data framer can access the ++// underlying conn directly. 
+ func WithReadBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.ReadBufferSize = s +@@ -196,25 +235,6 @@ func WithDecompressor(dc Decompressor) DialOption { + }) + } + +-// WithBalancerName sets the balancer that the ClientConn will be initialized +-// with. Balancer registered with balancerName will be used. This function +-// panics if no balancer was registered by balancerName. +-// +-// The balancer cannot be overridden by balancer option specified by service +-// config. +-// +-// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig +-// instead. Will be removed in a future 1.x release. +-func WithBalancerName(balancerName string) DialOption { +- builder := balancer.Get(balancerName) +- if builder == nil { +- panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) +- } +- return newFuncDialOption(func(o *dialOptions) { +- o.balancerBuilder = builder +- }) +-} +- + // WithServiceConfig returns a DialOption which has a channel to read the + // service configuration. + // +@@ -228,18 +248,14 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption { + }) + } + +-// WithConnectParams configures the dialer to use the provided ConnectParams. ++// WithConnectParams configures the ClientConn to use the provided ConnectParams ++// for creating and maintaining connections to servers. + // + // The backoff configuration specified as part of the ConnectParams overrides + // all defaults specified in + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider + // using the backoff.DefaultConfig as a base, in cases where you want to + // override only a subset of the backoff configuration. +-// +-// Experimental +-// +-// Notice: This API is EXPERIMENTAL and may be changed or removed in a +-// later release. + func WithConnectParams(p ConnectParams) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = internalbackoff.Exponential{Config: p.Backoff} +@@ -277,9 +293,12 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { + }) + } + +-// WithBlock returns a DialOption which makes caller of Dial blocks until the ++// WithBlock returns a DialOption which makes callers of Dial block until the + // underlying connection is up. Without this, Dial returns immediately and + // connecting the server happens in background. ++// ++// Use of this feature is not recommended. For more information, please see: ++// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md + func WithBlock() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true +@@ -291,7 +310,10 @@ func WithBlock() DialOption { + // the context.DeadlineExceeded error. + // Implies WithBlock() + // +-// Experimental ++// Use of this feature is not recommended. For more information, please see: ++// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md ++// ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -303,18 +325,24 @@ func WithReturnConnectionError() DialOption { + } + + // WithInsecure returns a DialOption which disables transport security for this +-// ClientConn. Note that transport security is required unless WithInsecure is +-// set. ++// ClientConn. Under the hood, it uses insecure.NewCredentials(). 
++// ++// Note that using this DialOption with per-RPC credentials (through ++// WithCredentialsBundle or WithPerRPCCredentials) which require transport ++// security is incompatible and will cause grpc.Dial() to fail. ++// ++// Deprecated: use WithTransportCredentials and insecure.NewCredentials() ++// instead. Will be supported throughout 1.x. + func WithInsecure() DialOption { + return newFuncDialOption(func(o *dialOptions) { +- o.insecure = true ++ o.copts.TransportCredentials = insecure.NewCredentials() + }) + } + + // WithNoProxy returns a DialOption which disables the use of proxies for this + // ClientConn. This is ignored if WithDialer or WithContextDialer are used. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -345,7 +373,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + // the ClientConn.WithCreds. This should not be used together with + // WithTransportCredentials. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -401,7 +429,21 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + // all the RPCs and underlying network connections in this ClientConn. + func WithStatsHandler(h stats.Handler) DialOption { + return newFuncDialOption(func(o *dialOptions) { +- o.copts.StatsHandler = h ++ if h == nil { ++ logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") ++ // Do not allow a nil stats handler, which would otherwise cause ++ // panics. ++ return ++ } ++ o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) ++ }) ++} ++ ++// withBinaryLogger returns a DialOption that specifies the binary logger for ++// this ClientConn. ++func withBinaryLogger(bl binarylog.Logger) DialOption { ++ return newFuncDialOption(func(o *dialOptions) { ++ o.binaryLogger = bl + }) + } + +@@ -413,7 +455,10 @@ func WithStatsHandler(h stats.Handler) DialOption { + // FailOnNonTempDialError only affects the initial dial, and does not do + // anything useful unless you are also using WithBlock(). + // +-// Experimental ++// Use of this feature is not recommended. For more information, please see: ++// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md ++// ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -482,8 +527,7 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt + } + + // WithAuthority returns a DialOption that specifies the value to be used as the +-// :authority pseudo-header. This value only works with WithInsecure and has no +-// effect if TransportCredentials are present. ++// :authority pseudo-header and as the server name in authentication handshake. + func WithAuthority(a string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.authority = a +@@ -494,11 +538,11 @@ func WithAuthority(a string) DialOption { + // current ClientConn's parent. This function is used in nested channel creation + // (e.g. grpclb dial). + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. 
+-func WithChannelzParentID(id int64) DialOption { ++func WithChannelzParentID(id *channelz.Identifier) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.channelzParentID = id + }) +@@ -519,14 +563,16 @@ func WithDisableServiceConfig() DialOption { + // WithDefaultServiceConfig returns a DialOption that configures the default + // service config, which will be used in cases where: + // +-// 1. WithDisableServiceConfig is also used. +-// 2. Resolver does not return a service config or if the resolver returns an +-// invalid service config. ++// 1. WithDisableServiceConfig is also used, or + // +-// Experimental ++// 2. The name resolver does not provide a service config or provides an ++// invalid service config. + // +-// Notice: This API is EXPERIMENTAL and may be changed or removed in a +-// later release. ++// The parameter s is the JSON representation of the default service config. ++// For more information about service configs, see: ++// https://github.com/grpc/grpc/blob/master/doc/service_config.md ++// For a simple example of usage, see: ++// examples/features/load_balancing/client/main.go + func WithDefaultServiceConfig(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultServiceConfigRawJSON = &s +@@ -537,15 +583,6 @@ func WithDefaultServiceConfig(s string) DialOption { + // service config enables them. This does not impact transparent retries, which + // will happen automatically if no data is written to the wire or if the RPC is + // unprocessed by the remote server. +-// +-// Retry support is currently disabled by default, but will be enabled by +-// default in the future. Until then, it may be enabled by setting the +-// environment variable "GRPC_GO_RETRY" to "on". +-// +-// Experimental +-// +-// Notice: This API is EXPERIMENTAL and may be changed or removed in a +-// later release. + func WithDisableRetry() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableRetry = true +@@ -563,7 +600,7 @@ func WithMaxHeaderListSize(s uint32) DialOption { + // WithDisableHealthCheck disables the LB channel health checking for all + // SubConns of this ClientConn. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -585,7 +622,6 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { + + func defaultDialOptions() dialOptions { + return dialOptions{ +- disableRetry: !envconfig.Retry, + healthCheckFunc: internal.HealthCheckFunc, + copts: transport.ConnectOptions{ + WriteBufferSize: defaultWriteBufSize, +@@ -611,7 +647,7 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { + // resolver.Register. They will be matched against the scheme used for the + // current Dial only, and will take precedence over the global registry. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -620,3 +656,23 @@ func WithResolvers(rs ...resolver.Builder) DialOption { + o.resolvers = append(o.resolvers, rs...) + }) + } ++ ++// WithIdleTimeout returns a DialOption that configures an idle timeout for the ++// channel. If the channel is idle for the configured timeout, i.e there are no ++// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode ++// and as a result the name resolver and load balancer will be shut down. 
The ++// channel will exit idle mode when the Connect() method is called or when an ++// RPC is initiated. ++// ++// By default this feature is disabled, which can also be explicitly configured ++// by passing zero to this function. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. ++func WithIdleTimeout(d time.Duration) DialOption { ++ return newFuncDialOption(func(o *dialOptions) { ++ o.idleTimeout = d ++ }) ++} +diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go +index 6d84f74..07a5861 100644 +--- a/vendor/google.golang.org/grpc/encoding/encoding.go ++++ b/vendor/google.golang.org/grpc/encoding/encoding.go +@@ -19,7 +19,7 @@ + // Package encoding defines the interface for the compressor and codec, and + // functions to register and retrieve compressors and codecs. + // +-// Experimental ++// # Experimental + // + // Notice: This package is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -28,6 +28,8 @@ package encoding + import ( + "io" + "strings" ++ ++ "google.golang.org/grpc/internal/grpcutil" + ) + + // Identity specifies the optional encoding for uncompressed streams. +@@ -73,6 +75,9 @@ var registeredCompressor = make(map[string]Compressor) + // registered with the same name, the one registered last will take effect. + func RegisterCompressor(c Compressor) { + registeredCompressor[c.Name()] = c ++ if !grpcutil.IsCompressorNameRegistered(c.Name()) { ++ grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) ++ } + } + + // GetCompressor returns Compressor for the given compressor name. +@@ -108,7 +113,7 @@ var registeredCodecs = make(map[string]Codec) + // more details. + // + // NOTE: this function must only be called during initialization time (i.e. in +-// an init() function), and is not thread-safe. If multiple Compressors are ++// an init() function), and is not thread-safe. If multiple Codecs are + // registered with the same name, the one registered last will take effect. + func RegisterCodec(codec Codec) { + if codec == nil { +diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go +index 4ee3317..5de66e4 100644 +--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go ++++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go +@@ -19,11 +19,13 @@ + package grpclog + + import ( ++ "encoding/json" ++ "fmt" + "io" +- "io/ioutil" + "log" + "os" + "strconv" ++ "strings" + + "google.golang.org/grpc/internal/grpclog" + ) +@@ -95,8 +97,9 @@ var severityName = []string{ + + // loggerT is the default logger used by grpclog. + type loggerT struct { +- m []*log.Logger +- v int ++ m []*log.Logger ++ v int ++ jsonFormat bool + } + + // NewLoggerV2 creates a loggerV2 with the provided writers. +@@ -105,27 +108,40 @@ type loggerT struct { + // Warning logs will be written to warningW and infoW. + // Info logs will be written to infoW. + func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { +- return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) ++ return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) + } + + // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and + // verbosity level. 
+ func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { ++ return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) ++} ++ ++type loggerV2Config struct { ++ verbose int ++ jsonFormat bool ++} ++ ++func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { + var m []*log.Logger +- m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) +- m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) ++ flag := log.LstdFlags ++ if c.jsonFormat { ++ flag = 0 ++ } ++ m = append(m, log.New(infoW, "", flag)) ++ m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. +- m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) +- m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) +- return &loggerT{m: m, v: v} ++ m = append(m, log.New(ew, "", flag)) ++ m = append(m, log.New(ew, "", flag)) ++ return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} + } + + // newLoggerV2 creates a loggerV2 to be used as default logger. + // All logs are written to stderr. + func newLoggerV2() LoggerV2 { +- errorW := ioutil.Discard +- warningW := ioutil.Discard +- infoW := ioutil.Discard ++ errorW := io.Discard ++ warningW := io.Discard ++ infoW := io.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { +@@ -142,58 +158,79 @@ func newLoggerV2() LoggerV2 { + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } +- return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) ++ ++ jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") ++ ++ return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ ++ verbose: v, ++ jsonFormat: jsonFormat, ++ }) ++} ++ ++func (g *loggerT) output(severity int, s string) { ++ sevStr := severityName[severity] ++ if !g.jsonFormat { ++ g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) ++ return ++ } ++ // TODO: we can also include the logging component, but that needs more ++ // (API) changes. ++ b, _ := json.Marshal(map[string]string{ ++ "severity": sevStr, ++ "message": s, ++ }) ++ g.m[severity].Output(2, string(b)) + } + + func (g *loggerT) Info(args ...interface{}) { +- g.m[infoLog].Print(args...) ++ g.output(infoLog, fmt.Sprint(args...)) + } + + func (g *loggerT) Infoln(args ...interface{}) { +- g.m[infoLog].Println(args...) ++ g.output(infoLog, fmt.Sprintln(args...)) + } + + func (g *loggerT) Infof(format string, args ...interface{}) { +- g.m[infoLog].Printf(format, args...) ++ g.output(infoLog, fmt.Sprintf(format, args...)) + } + + func (g *loggerT) Warning(args ...interface{}) { +- g.m[warningLog].Print(args...) ++ g.output(warningLog, fmt.Sprint(args...)) + } + + func (g *loggerT) Warningln(args ...interface{}) { +- g.m[warningLog].Println(args...) ++ g.output(warningLog, fmt.Sprintln(args...)) + } + + func (g *loggerT) Warningf(format string, args ...interface{}) { +- g.m[warningLog].Printf(format, args...) ++ g.output(warningLog, fmt.Sprintf(format, args...)) + } + + func (g *loggerT) Error(args ...interface{}) { +- g.m[errorLog].Print(args...) ++ g.output(errorLog, fmt.Sprint(args...)) + } + + func (g *loggerT) Errorln(args ...interface{}) { +- g.m[errorLog].Println(args...) 
++ g.output(errorLog, fmt.Sprintln(args...)) + } + + func (g *loggerT) Errorf(format string, args ...interface{}) { +- g.m[errorLog].Printf(format, args...) ++ g.output(errorLog, fmt.Sprintf(format, args...)) + } + + func (g *loggerT) Fatal(args ...interface{}) { +- g.m[fatalLog].Fatal(args...) +- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). ++ g.output(fatalLog, fmt.Sprint(args...)) ++ os.Exit(1) + } + + func (g *loggerT) Fatalln(args ...interface{}) { +- g.m[fatalLog].Fatalln(args...) +- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). ++ g.output(fatalLog, fmt.Sprintln(args...)) ++ os.Exit(1) + } + + func (g *loggerT) Fatalf(format string, args ...interface{}) { +- g.m[fatalLog].Fatalf(format, args...) +- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). ++ g.output(fatalLog, fmt.Sprintf(format, args...)) ++ os.Exit(1) + } + + func (g *loggerT) V(l int) bool { +@@ -204,18 +241,18 @@ func (g *loggerT) V(l int) bool { + // DepthLoggerV2, the below functions will be called with the appropriate stack + // depth set for trivial functions the logger may ignore. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. + type DepthLoggerV2 interface { + LoggerV2 +- // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. ++ // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. + InfoDepth(depth int, args ...interface{}) +- // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. ++ // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. + WarningDepth(depth int, args ...interface{}) +- // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. ++ // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. + ErrorDepth(depth int, args ...interface{}) +- // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. ++ // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. + FatalDepth(depth int, args ...interface{}) + } +diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/idle.go +new file mode 100644 +index 0000000..dc3dc72 +--- /dev/null ++++ b/vendor/google.golang.org/grpc/idle.go +@@ -0,0 +1,287 @@ ++/* ++ * ++ * Copyright 2023 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package grpc ++ ++import ( ++ "fmt" ++ "math" ++ "sync" ++ "sync/atomic" ++ "time" ++) ++ ++// For overriding in unit tests. 
++var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { ++ return time.AfterFunc(d, f) ++} ++ ++// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter ++// and exit from idle mode. ++type idlenessEnforcer interface { ++ exitIdleMode() error ++ enterIdleMode() error ++} ++ ++// idlenessManager defines the functionality required to track RPC activity on a ++// channel. ++type idlenessManager interface { ++ onCallBegin() error ++ onCallEnd() ++ close() ++} ++ ++type noopIdlenessManager struct{} ++ ++func (noopIdlenessManager) onCallBegin() error { return nil } ++func (noopIdlenessManager) onCallEnd() {} ++func (noopIdlenessManager) close() {} ++ ++// idlenessManagerImpl implements the idlenessManager interface. It uses atomic ++// operations to synchronize access to shared state and a mutex to guarantee ++// mutual exclusion in a critical section. ++type idlenessManagerImpl struct { ++ // State accessed atomically. ++ lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. ++ activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. ++ activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. ++ closed int32 // Boolean; True when the manager is closed. ++ ++ // Can be accessed without atomics or mutex since these are set at creation ++ // time and read-only after that. ++ enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. ++ timeout int64 // Idle timeout duration nanos stored as an int64. ++ ++ // idleMu is used to guarantee mutual exclusion in two scenarios: ++ // - Opposing intentions: ++ // - a: Idle timeout has fired and handleIdleTimeout() is trying to put ++ // the channel in idle mode because the channel has been inactive. ++ // - b: At the same time an RPC is made on the channel, and onCallBegin() ++ // is trying to prevent the channel from going idle. ++ // - Competing intentions: ++ // - The channel is in idle mode and there are multiple RPCs starting at ++ // the same time, all trying to move the channel out of idle. Only one ++ // of them should succeed in doing so, while the other RPCs should ++ // piggyback on the first one and be successfully handled. ++ idleMu sync.RWMutex ++ actuallyIdle bool ++ timer *time.Timer ++} ++ ++// newIdlenessManager creates a new idleness manager implementation for the ++// given idle timeout. ++func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { ++ if idleTimeout == 0 { ++ return noopIdlenessManager{} ++ } ++ ++ i := &idlenessManagerImpl{ ++ enforcer: enforcer, ++ timeout: int64(idleTimeout), ++ } ++ i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) ++ return i ++} ++ ++// resetIdleTimer resets the idle timer to the given duration. This method ++// should only be called from the timer callback. ++func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { ++ i.idleMu.Lock() ++ defer i.idleMu.Unlock() ++ ++ if i.timer == nil { ++ // Only close sets timer to nil. We are done. ++ return ++ } ++ ++ // It is safe to ignore the return value from Reset() because this method is ++ // only ever called from the timer callback, which means the timer has ++ // already fired. ++ i.timer.Reset(d) ++} ++ ++// handleIdleTimeout is the timer callback that is invoked upon expiry of the ++// configured idle timeout. 
The channel is considered inactive if there are no ++// ongoing calls and no RPC activity since the last time the timer fired. ++func (i *idlenessManagerImpl) handleIdleTimeout() { ++ if i.isClosed() { ++ return ++ } ++ ++ if atomic.LoadInt32(&i.activeCallsCount) > 0 { ++ i.resetIdleTimer(time.Duration(i.timeout)) ++ return ++ } ++ ++ // There has been activity on the channel since we last got here. Reset the ++ // timer and return. ++ if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { ++ // Set the timer to fire after a duration of idle timeout, calculated ++ // from the time the most recent RPC completed. ++ atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) ++ i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) ++ return ++ } ++ ++ // This CAS operation is extremely likely to succeed given that there has ++ // been no activity since the last time we were here. Setting the ++ // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the ++ // channel is either in idle mode or is trying to get there. ++ if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { ++ // This CAS operation can fail if an RPC started after we checked for ++ // activity at the top of this method, or one was ongoing from before ++ // the last time we were here. In both cases, reset the timer and return. ++ i.resetIdleTimer(time.Duration(i.timeout)) ++ return ++ } ++ ++ // Now that we've set the active calls count to -math.MaxInt32, it's time to ++ // actually move to idle mode. ++ if i.tryEnterIdleMode() { ++ // Successfully entered idle mode. No timer needed until we exit idle. ++ return ++ } ++ ++ // Failed to enter idle mode due to a concurrent RPC that kept the channel ++ // active, or because of an error from the channel. Undo the attempt to ++ // enter idle, and reset the timer to try again later. ++ atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) ++ i.resetIdleTimer(time.Duration(i.timeout)) ++} ++ ++// tryEnterIdleMode instructs the channel to enter idle mode. But before ++// that, it performs a last minute check to ensure that no new RPC has come in, ++// making the channel active. ++// ++// Return value indicates whether or not the channel moved to idle mode. ++// ++// Holds idleMu which ensures mutual exclusion with exitIdleMode. ++func (i *idlenessManagerImpl) tryEnterIdleMode() bool { ++ i.idleMu.Lock() ++ defer i.idleMu.Unlock() ++ ++ if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { ++ // We raced and lost to a new RPC. Very rare, but stop entering idle. ++ return false ++ } ++ if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { ++ // A very short RPC could have come in (and also finished) after we ++ // checked for calls count and activity in handleIdleTimeout(), but ++ // before the CAS operation. So, we need to check for activity again. ++ return false ++ } ++ ++ // No new RPCs have come in since we last set the active calls count value ++ // to -math.MaxInt32 in the timer callback. And since we have the lock, it is ++ // safe to enter idle mode now. ++ if err := i.enforcer.enterIdleMode(); err != nil { ++ logger.Errorf("Failed to enter idle mode: %v", err) ++ return false ++ } ++ ++ // Successfully entered idle mode. ++ i.actuallyIdle = true ++ return true ++} ++ ++// onCallBegin is invoked at the start of every RPC.
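The calls counter described in the comments above does double duty: positive values count in-flight RPCs, while the sentinel -math.MaxInt32 marks the channel as idle or entering idle. A self-contained sketch of that counter trick in isolation; the names are hypothetical and only the arithmetic mirrors handleIdleTimeout/onCallBegin:

```go
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// activeCalls plays the role of activeCallsCount: positive values count
// in-flight RPCs, -math.MaxInt32 means "idle or entering idle".
var activeCalls int32

// tryEnterIdle succeeds only when no RPC is in flight, like the CAS in
// handleIdleTimeout.
func tryEnterIdle() bool {
	return atomic.CompareAndSwapInt32(&activeCalls, 0, -math.MaxInt32)
}

// onCallBegin increments the counter; a non-positive result tells the caller
// the channel is idle and must be woken up before the RPC can proceed.
func onCallBegin() bool {
	return atomic.AddInt32(&activeCalls, 1) > 0
}

// exitIdle undoes the sentinel, as exitIdleMode does with AddInt32(+MaxInt32).
func exitIdle() {
	atomic.AddInt32(&activeCalls, math.MaxInt32)
}

func onCallEnd() {
	atomic.AddInt32(&activeCalls, -1)
}

func main() {
	fmt.Println(tryEnterIdle()) // true: no calls in flight, channel goes idle
	fmt.Println(onCallBegin())  // false: sentinel is in place, caller must exit idle first
	exitIdle()
	fmt.Println(onCallBegin()) // true: channel is active again
	onCallEnd()
	onCallEnd()
}
```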
++func (i *idlenessManagerImpl) onCallBegin() error { ++ if i.isClosed() { ++ return nil ++ } ++ ++ if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { ++ // Channel is not idle now. Set the activity bit and allow the call. ++ atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) ++ return nil ++ } ++ ++ // Channel is either in idle mode or is in the process of moving to idle ++ // mode. Attempt to exit idle mode to allow this RPC. ++ if err := i.exitIdleMode(); err != nil { ++ // Undo the increment to calls count, and return an error causing the ++ // RPC to fail. ++ atomic.AddInt32(&i.activeCallsCount, -1) ++ return err ++ } ++ ++ atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) ++ return nil ++} ++ ++// exitIdleMode instructs the channel to exit idle mode. ++// ++// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. ++func (i *idlenessManagerImpl) exitIdleMode() error { ++ i.idleMu.Lock() ++ defer i.idleMu.Unlock() ++ ++ if !i.actuallyIdle { ++ // This can happen in two scenarios: ++ // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called ++ // tryEnterIdleMode(). But before the latter could grab the lock, an RPC ++ // came in and onCallBegin() noticed that the calls count is negative. ++ // - Channel is in idle mode, and multiple new RPCs come in at the same ++ // time, all of them notice a negative calls count in onCallBegin and get ++ // here. The first one to get the lock would get the channel to exit idle. ++ // ++ // Either way, nothing to do here. ++ return nil ++ } ++ ++ if err := i.enforcer.exitIdleMode(); err != nil { ++ return fmt.Errorf("channel failed to exit idle mode: %v", err) ++ } ++ ++ // Undo the idle entry process. This also respects any new RPC attempts. ++ atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) ++ i.actuallyIdle = false ++ ++ // Start a new timer to fire after the configured idle timeout. ++ i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) ++ return nil ++} ++ ++// onCallEnd is invoked at the end of every RPC. ++func (i *idlenessManagerImpl) onCallEnd() { ++ if i.isClosed() { ++ return ++ } ++ ++ // Record the time at which the most recent call finished. ++ atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) ++ ++ // Decrement the active calls count. This count can temporarily go negative ++ // when the timer callback is in the process of moving the channel to idle ++ // mode, but one or more RPCs come in and complete before the timer callback ++ // can get done with the process of moving to idle mode.
++ atomic.AddInt32(&i.activeCallsCount, -1) ++} ++ ++func (i *idlenessManagerImpl) isClosed() bool { ++ return atomic.LoadInt32(&i.closed) == 1 ++} ++ ++func (i *idlenessManagerImpl) close() { ++ atomic.StoreInt32(&i.closed, 1) ++ ++ i.idleMu.Lock() ++ i.timer.Stop() ++ i.timer = nil ++ i.idleMu.Unlock() ++} +diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh +deleted file mode 100644 +index 15ff9fa..0000000 +--- a/vendor/google.golang.org/grpc/install_gae.sh ++++ /dev/null +@@ -1,6 +0,0 @@ +-#!/bin/bash +- +-TMP=$(mktemp -d /tmp/sdk.XXX) \ +-&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ +-&& unzip -q $TMP.zip -d $TMP \ +-&& export PATH="$PATH:$TMP/go_appengine" +\ No newline at end of file +diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go +index 668e0ad..bb96ef5 100644 +--- a/vendor/google.golang.org/grpc/interceptor.go ++++ b/vendor/google.golang.org/grpc/interceptor.go +@@ -72,9 +72,12 @@ type UnaryServerInfo struct { + } + + // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +-// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the +-// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as +-// the status message of the RPC. ++// execution of a unary RPC. ++// ++// If a UnaryHandler returns an error, it should either be produced by the ++// status package, or be one of the context errors. Otherwise, gRPC will use ++// codes.Unknown as the status code and err.Error() as the status message of the ++// RPC. + type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + + // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +new file mode 100644 +index 0000000..08666f6 +--- /dev/null ++++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +@@ -0,0 +1,384 @@ ++/* ++ * ++ * Copyright 2022 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package gracefulswitch implements a graceful switch load balancer. ++package gracefulswitch ++ ++import ( ++ "errors" ++ "fmt" ++ "sync" ++ ++ "google.golang.org/grpc/balancer" ++ "google.golang.org/grpc/balancer/base" ++ "google.golang.org/grpc/connectivity" ++ "google.golang.org/grpc/resolver" ++) ++ ++var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") ++var _ balancer.Balancer = (*Balancer)(nil) ++ ++// NewBalancer returns a graceful switch Balancer. 
++func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { ++ return &Balancer{ ++ cc: cc, ++ bOpts: opts, ++ } ++} ++ ++// Balancer is a utility to gracefully switch from one balancer to ++// a new balancer. It implements the balancer.Balancer interface. ++type Balancer struct { ++ bOpts balancer.BuildOptions ++ cc balancer.ClientConn ++ ++ // mu protects the following fields and all fields within balancerCurrent ++ // and balancerPending. mu does not need to be held when calling into the ++ // child balancers, as all calls into these children happen only as a direct ++ // result of a call into the gracefulSwitchBalancer, which are also ++ // guaranteed to be synchronous. There is one exception: an UpdateState call ++ // from a child balancer when current and pending are populated can lead to ++ // calling Close() on the current. To prevent that racing with an ++ // UpdateSubConnState from the channel, we hold currentMu during Close and ++ // UpdateSubConnState calls. ++ mu sync.Mutex ++ balancerCurrent *balancerWrapper ++ balancerPending *balancerWrapper ++ closed bool // set to true when this balancer is closed ++ ++ // currentMu must be locked before mu. This mutex guards against this ++ // sequence of events: UpdateSubConnState() called, finds the ++ // balancerCurrent, gives up lock, updateState comes in, causes Close() on ++ // balancerCurrent before the UpdateSubConnState is called on the ++ // balancerCurrent. ++ currentMu sync.Mutex ++} ++ ++// swap swaps out the current lb with the pending lb and updates the ClientConn. ++// The caller must hold gsb.mu. ++func (gsb *Balancer) swap() { ++ gsb.cc.UpdateState(gsb.balancerPending.lastState) ++ cur := gsb.balancerCurrent ++ gsb.balancerCurrent = gsb.balancerPending ++ gsb.balancerPending = nil ++ go func() { ++ gsb.currentMu.Lock() ++ defer gsb.currentMu.Unlock() ++ cur.Close() ++ }() ++} ++ ++// Helper function that checks if the balancer passed in is current or pending. ++// The caller must hold gsb.mu. ++func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { ++ return bw == gsb.balancerCurrent || bw == gsb.balancerPending ++} ++ ++// SwitchTo initializes the graceful switch process, which completes based on ++// connectivity state changes on the current/pending balancer. Thus, the switch ++// process is not complete when this method returns. This method must be called ++// synchronously alongside the rest of the balancer.Balancer methods this ++// Graceful Switch Balancer implements. ++func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { ++ gsb.mu.Lock() ++ if gsb.closed { ++ gsb.mu.Unlock() ++ return errBalancerClosed ++ } ++ bw := &balancerWrapper{ ++ gsb: gsb, ++ lastState: balancer.State{ ++ ConnectivityState: connectivity.Connecting, ++ Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), ++ }, ++ subconns: make(map[balancer.SubConn]bool), ++ } ++ balToClose := gsb.balancerPending // nil if there is no pending balancer ++ if gsb.balancerCurrent == nil { ++ gsb.balancerCurrent = bw ++ } else { ++ gsb.balancerPending = bw ++ } ++ gsb.mu.Unlock() ++ balToClose.Close() ++ // This function takes a builder instead of a balancer because builder.Build ++ // can call back inline, and this utility needs to handle the callbacks. ++ newBalancer := builder.Build(bw, gsb.bOpts) ++ if newBalancer == nil { ++ // This is illegal and should never happen; we clear the balancerWrapper ++ // we were constructing if it happens to avoid a potential panic. 
++ gsb.mu.Lock() ++ if gsb.balancerPending != nil { ++ gsb.balancerPending = nil ++ } else { ++ gsb.balancerCurrent = nil ++ } ++ gsb.mu.Unlock() ++ return balancer.ErrBadResolverState ++ } ++ ++ // This write doesn't need to take gsb.mu because this field never gets read ++ // or written to on any calls from the current or pending. Calls from grpc ++ // to this balancer are guaranteed to be called synchronously, so this ++ // bw.Balancer field will never be forwarded to until this SwitchTo() ++ // function returns. ++ bw.Balancer = newBalancer ++ return nil ++} ++ ++// Returns nil if the graceful switch balancer is closed. ++func (gsb *Balancer) latestBalancer() *balancerWrapper { ++ gsb.mu.Lock() ++ defer gsb.mu.Unlock() ++ if gsb.balancerPending != nil { ++ return gsb.balancerPending ++ } ++ return gsb.balancerCurrent ++} ++ ++// UpdateClientConnState forwards the update to the latest balancer created. ++func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { ++ // The resolver data is only relevant to the most recent LB Policy. ++ balToUpdate := gsb.latestBalancer() ++ if balToUpdate == nil { ++ return errBalancerClosed ++ } ++ // Perform this call without gsb.mu to prevent deadlocks if the child calls ++ // back into the channel. The latest balancer can never be closed during a ++ // call from the channel, even without gsb.mu held. ++ return balToUpdate.UpdateClientConnState(state) ++} ++ ++// ResolverError forwards the error to the latest balancer created. ++func (gsb *Balancer) ResolverError(err error) { ++ // The resolver data is only relevant to the most recent LB Policy. ++ balToUpdate := gsb.latestBalancer() ++ if balToUpdate == nil { ++ return ++ } ++ // Perform this call without gsb.mu to prevent deadlocks if the child calls ++ // back into the channel. The latest balancer can never be closed during a ++ // call from the channel, even without gsb.mu held. ++ balToUpdate.ResolverError(err) ++} ++ ++// ExitIdle forwards the call to the latest balancer created. ++// ++// If the latest balancer does not support ExitIdle, the subConns are ++// re-connected to manually. ++func (gsb *Balancer) ExitIdle() { ++ balToUpdate := gsb.latestBalancer() ++ if balToUpdate == nil { ++ return ++ } ++ // There is no need to protect this read with a mutex, as the write to the ++ // Balancer field happens in SwitchTo, which completes before this can be ++ // called. ++ if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { ++ ei.ExitIdle() ++ return ++ } ++ gsb.mu.Lock() ++ defer gsb.mu.Unlock() ++ for sc := range balToUpdate.subconns { ++ sc.Connect() ++ } ++} ++ ++// UpdateSubConnState forwards the update to the appropriate child. ++func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { ++ gsb.currentMu.Lock() ++ defer gsb.currentMu.Unlock() ++ gsb.mu.Lock() ++ // Forward update to the appropriate child. Even if there is a pending ++ // balancer, the current balancer should continue to get SubConn updates to ++ // maintain the proper state while the pending is still connecting. ++ var balToUpdate *balancerWrapper ++ if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { ++ balToUpdate = gsb.balancerCurrent ++ } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { ++ balToUpdate = gsb.balancerPending ++ } ++ gsb.mu.Unlock() ++ if balToUpdate == nil { ++ // SubConn belonged to a stale lb policy that has not yet fully closed, ++ // or the balancer was already closed. 
++ return ++ } ++ balToUpdate.UpdateSubConnState(sc, state) ++} ++ ++// Close closes any active child balancers. ++func (gsb *Balancer) Close() { ++ gsb.mu.Lock() ++ gsb.closed = true ++ currentBalancerToClose := gsb.balancerCurrent ++ gsb.balancerCurrent = nil ++ pendingBalancerToClose := gsb.balancerPending ++ gsb.balancerPending = nil ++ gsb.mu.Unlock() ++ ++ currentBalancerToClose.Close() ++ pendingBalancerToClose.Close() ++} ++ ++// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer ++// methods to help cleanup SubConns created by the wrapped balancer. ++// ++// It implements the balancer.ClientConn interface and is passed down in that ++// capacity to the wrapped balancer. It maintains a set of subConns created by ++// the wrapped balancer and calls from the latter to create/update/remove ++// SubConns update this set before being forwarded to the parent ClientConn. ++// State updates from the wrapped balancer can result in invocation of the ++// graceful switch logic. ++type balancerWrapper struct { ++ balancer.Balancer ++ gsb *Balancer ++ ++ lastState balancer.State ++ subconns map[balancer.SubConn]bool // subconns created by this balancer ++} ++ ++func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { ++ if state.ConnectivityState == connectivity.Shutdown { ++ bw.gsb.mu.Lock() ++ delete(bw.subconns, sc) ++ bw.gsb.mu.Unlock() ++ } ++ // There is no need to protect this read with a mutex, as the write to the ++ // Balancer field happens in SwitchTo, which completes before this can be ++ // called. ++ bw.Balancer.UpdateSubConnState(sc, state) ++} ++ ++// Close closes the underlying LB policy and removes the subconns it created. bw ++// must not be referenced via balancerCurrent or balancerPending in gsb when ++// called. gsb.mu must not be held. Does not panic with a nil receiver. ++func (bw *balancerWrapper) Close() { ++ // before Close is called. ++ if bw == nil { ++ return ++ } ++ // There is no need to protect this read with a mutex, as Close() is ++ // impossible to be called concurrently with the write in SwitchTo(). The ++ // callsites of Close() for this balancer in Graceful Switch Balancer will ++ // never be called until SwitchTo() returns. ++ bw.Balancer.Close() ++ bw.gsb.mu.Lock() ++ for sc := range bw.subconns { ++ bw.gsb.cc.RemoveSubConn(sc) ++ } ++ bw.gsb.mu.Unlock() ++} ++ ++func (bw *balancerWrapper) UpdateState(state balancer.State) { ++ // Hold the mutex for this entire call to ensure it cannot occur ++ // concurrently with other updateState() calls. This causes updates to ++ // lastState and calls to cc.UpdateState to happen atomically. ++ bw.gsb.mu.Lock() ++ defer bw.gsb.mu.Unlock() ++ bw.lastState = state ++ ++ if !bw.gsb.balancerCurrentOrPending(bw) { ++ return ++ } ++ ++ if bw == bw.gsb.balancerCurrent { ++ // In the case that the current balancer exits READY, and there is a pending ++ // balancer, you can forward the pending balancer's cached State up to ++ // ClientConn and swap the pending into the current. This is because there ++ // is no reason to gracefully switch from and keep using the old policy as ++ // the ClientConn is not connected to any backends. ++ if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { ++ bw.gsb.swap() ++ return ++ } ++ // Even if there is a pending balancer waiting to be gracefully switched to, ++ // continue to forward current balancer updates to the Client Conn. 
Ignoring ++ // state + picker from the current would cause undefined behavior/cause the ++ // system to behave incorrectly from the current LB policies perspective. ++ // Also, the current LB is still being used by grpc to choose SubConns per ++ // RPC, and thus should use the most updated form of the current balancer. ++ bw.gsb.cc.UpdateState(state) ++ return ++ } ++ // This method is now dealing with a state update from the pending balancer. ++ // If the current balancer is currently in a state other than READY, the new ++ // policy can be swapped into place immediately. This is because there is no ++ // reason to gracefully switch from and keep using the old policy as the ++ // ClientConn is not connected to any backends. ++ if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { ++ bw.gsb.swap() ++ } ++} ++ ++func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { ++ bw.gsb.mu.Lock() ++ if !bw.gsb.balancerCurrentOrPending(bw) { ++ bw.gsb.mu.Unlock() ++ return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) ++ } ++ bw.gsb.mu.Unlock() ++ ++ sc, err := bw.gsb.cc.NewSubConn(addrs, opts) ++ if err != nil { ++ return nil, err ++ } ++ bw.gsb.mu.Lock() ++ if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call ++ bw.gsb.cc.RemoveSubConn(sc) ++ bw.gsb.mu.Unlock() ++ return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) ++ } ++ bw.subconns[sc] = true ++ bw.gsb.mu.Unlock() ++ return sc, nil ++} ++ ++func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { ++ // Ignore ResolveNow requests from anything other than the most recent ++ // balancer, because older balancers were already removed from the config. ++ if bw != bw.gsb.latestBalancer() { ++ return ++ } ++ bw.gsb.cc.ResolveNow(opts) ++} ++ ++func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { ++ bw.gsb.mu.Lock() ++ if !bw.gsb.balancerCurrentOrPending(bw) { ++ bw.gsb.mu.Unlock() ++ return ++ } ++ bw.gsb.mu.Unlock() ++ bw.gsb.cc.RemoveSubConn(sc) ++} ++ ++func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { ++ bw.gsb.mu.Lock() ++ if !bw.gsb.balancerCurrentOrPending(bw) { ++ bw.gsb.mu.Unlock() ++ return ++ } ++ bw.gsb.mu.Unlock() ++ bw.gsb.cc.UpdateAddresses(sc, addrs) ++} ++ ++func (bw *balancerWrapper) Target() string { ++ return bw.gsb.cc.Target() ++} +diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +index 5cc3aed..755fdeb 100644 +--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go ++++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +@@ -28,38 +28,48 @@ import ( + "google.golang.org/grpc/internal/grpcutil" + ) + +-// Logger is the global binary logger. It can be used to get binary logger for +-// each method. ++var grpclogLogger = grpclog.Component("binarylog") ++ ++// Logger specifies MethodLoggers for method names with a Log call that ++// takes a context. ++// ++// This is used in the 1.0 release of gcp/observability, and thus must not be ++// deleted or changed. + type Logger interface { +- getMethodLogger(methodName string) *MethodLogger ++ GetMethodLogger(methodName string) MethodLogger + } + + // binLogger is the global binary logger for the binary. 
One of this should be + // built at init time from the configuration (environment variable or flags). + // +-// It is used to get a methodLogger for each individual method. ++// It is used to get a MethodLogger for each individual method. + var binLogger Logger + +-var grpclogLogger = grpclog.Component("binarylog") +- +-// SetLogger sets the binarg logger. ++// SetLogger sets the binary logger. + // + // Only call this at init time. + func SetLogger(l Logger) { + binLogger = l + } + +-// GetMethodLogger returns the methodLogger for the given methodName. ++// GetLogger gets the binary logger. ++// ++// Only call this at init time. ++func GetLogger() Logger { ++ return binLogger ++} ++ ++// GetMethodLogger returns the MethodLogger for the given methodName. + // + // methodName should be in the format of "/service/method". + // +-// Each methodLogger returned by this method is a new instance. This is to ++// Each MethodLogger returned by this method is a new instance. This is to + // generate sequence id within the call. +-func GetMethodLogger(methodName string) *MethodLogger { ++func GetMethodLogger(methodName string) MethodLogger { + if binLogger == nil { + return nil + } +- return binLogger.getMethodLogger(methodName) ++ return binLogger.GetMethodLogger(methodName) + } + + func init() { +@@ -68,17 +78,29 @@ func init() { + binLogger = NewLoggerFromConfigString(configStr) + } + +-type methodLoggerConfig struct { ++// MethodLoggerConfig contains the setting for logging behavior of a method ++// logger. Currently, it contains the max length of header and message. ++type MethodLoggerConfig struct { + // Max length of header and message. +- hdr, msg uint64 ++ Header, Message uint64 ++} ++ ++// LoggerConfig contains the config for loggers to create method loggers. ++type LoggerConfig struct { ++ All *MethodLoggerConfig ++ Services map[string]*MethodLoggerConfig ++ Methods map[string]*MethodLoggerConfig ++ ++ Blacklist map[string]struct{} + } + + type logger struct { +- all *methodLoggerConfig +- services map[string]*methodLoggerConfig +- methods map[string]*methodLoggerConfig ++ config LoggerConfig ++} + +- blacklist map[string]struct{} ++// NewLoggerFromConfig builds a logger with the given LoggerConfig. ++func NewLoggerFromConfig(config LoggerConfig) Logger { ++ return &logger{config: config} + } + + // newEmptyLogger creates an empty logger. The map fields need to be filled in +@@ -88,83 +110,83 @@ func newEmptyLogger() *logger { + } + + // Set method logger for "*". +-func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { +- if l.all != nil { ++func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { ++ if l.config.All != nil { + return fmt.Errorf("conflicting global rules found") + } +- l.all = ml ++ l.config.All = ml + return nil + } + + // Set method logger for "service/*". + // +-// New methodLogger with same service overrides the old one. +-func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { +- if _, ok := l.services[service]; ok { ++// New MethodLogger with same service overrides the old one. 
++func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { ++ if _, ok := l.config.Services[service]; ok { + return fmt.Errorf("conflicting service rules for service %v found", service) + } +- if l.services == nil { +- l.services = make(map[string]*methodLoggerConfig) ++ if l.config.Services == nil { ++ l.config.Services = make(map[string]*MethodLoggerConfig) + } +- l.services[service] = ml ++ l.config.Services[service] = ml + return nil + } + + // Set method logger for "service/method". + // +-// New methodLogger with same method overrides the old one. +-func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { +- if _, ok := l.blacklist[method]; ok { ++// New MethodLogger with same method overrides the old one. ++func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { ++ if _, ok := l.config.Blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } +- if _, ok := l.methods[method]; ok { ++ if _, ok := l.config.Methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } +- if l.methods == nil { +- l.methods = make(map[string]*methodLoggerConfig) ++ if l.config.Methods == nil { ++ l.config.Methods = make(map[string]*MethodLoggerConfig) + } +- l.methods[method] = ml ++ l.config.Methods[method] = ml + return nil + } + + // Set blacklist method for "-service/method". + func (l *logger) setBlacklist(method string) error { +- if _, ok := l.blacklist[method]; ok { ++ if _, ok := l.config.Blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } +- if _, ok := l.methods[method]; ok { ++ if _, ok := l.config.Methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } +- if l.blacklist == nil { +- l.blacklist = make(map[string]struct{}) ++ if l.config.Blacklist == nil { ++ l.config.Blacklist = make(map[string]struct{}) + } +- l.blacklist[method] = struct{}{} ++ l.config.Blacklist[method] = struct{}{} + return nil + } + +-// getMethodLogger returns the methodLogger for the given methodName. ++// getMethodLogger returns the MethodLogger for the given methodName. + // + // methodName should be in the format of "/service/method". + // +-// Each methodLogger returned by this method is a new instance. This is to ++// Each MethodLogger returned by this method is a new instance. This is to + // generate sequence id within the call. 
+-func (l *logger) getMethodLogger(methodName string) *MethodLogger { ++func (l *logger) GetMethodLogger(methodName string) MethodLogger { + s, m, err := grpcutil.ParseMethod(methodName) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) + return nil + } +- if ml, ok := l.methods[s+"/"+m]; ok { +- return newMethodLogger(ml.hdr, ml.msg) ++ if ml, ok := l.config.Methods[s+"/"+m]; ok { ++ return NewTruncatingMethodLogger(ml.Header, ml.Message) + } +- if _, ok := l.blacklist[s+"/"+m]; ok { ++ if _, ok := l.config.Blacklist[s+"/"+m]; ok { + return nil + } +- if ml, ok := l.services[s]; ok { +- return newMethodLogger(ml.hdr, ml.msg) ++ if ml, ok := l.config.Services[s]; ok { ++ return NewTruncatingMethodLogger(ml.Header, ml.Message) + } +- if l.all == nil { ++ if l.config.All == nil { + return nil + } +- return newMethodLogger(l.all.hdr, l.all.msg) ++ return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) + } +diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +index d8f4e76..f9e80e2 100644 +--- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go ++++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +@@ -30,15 +30,15 @@ import ( + // to build a new logger and assign it to binarylog.Logger. + // + // Example filter config strings: +-// - "" Nothing will be logged +-// - "*" All headers and messages will be fully logged. +-// - "*{h}" Only headers will be logged. +-// - "*{m:256}" Only the first 256 bytes of each message will be logged. +-// - "Foo/*" Logs every method in service Foo +-// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +-// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +-// /Foo/Bar, logs all headers and messages in every other method in service +-// Foo. ++// - "" Nothing will be logged ++// - "*" All headers and messages will be fully logged. ++// - "*{h}" Only headers will be logged. ++// - "*{m:256}" Only the first 256 bytes of each message will be logged. ++// - "Foo/*" Logs every method in service Foo ++// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar ++// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method ++// /Foo/Bar, logs all headers and messages in every other method in service ++// Foo. + // + // If two configs exist for one certain method or service, the one specified + // later overrides the previous config. +@@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger { + return l + } + +-// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds ++// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds + // it to the right map in the logger. + func (l *logger) fillMethodLoggerWithConfigString(config string) error { + // "" is invalid. 
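The rules encoded by the filter strings above can now also be expressed directly with the LoggerConfig and MethodLoggerConfig types exported by this change. A sketch of a configuration equivalent to the filter string "Foo/*,Foo/Bar{m:256}", assuming it lives inside the grpc module itself (the binarylog package is internal, so it cannot be imported from outside):

```go
package binarylog_test // hypothetical placement next to the internal package

import (
	"math"

	"google.golang.org/grpc/internal/binarylog"
)

func ExampleNewLoggerFromConfig() {
	binarylog.SetLogger(binarylog.NewLoggerFromConfig(binarylog.LoggerConfig{
		Services: map[string]*binarylog.MethodLoggerConfig{
			// Every method in service Foo: no truncation of headers or messages.
			"Foo": {Header: math.MaxUint64, Message: math.MaxUint64},
		},
		Methods: map[string]*binarylog.MethodLoggerConfig{
			// /Foo/Bar: keep full headers, but only the first 256 bytes of each message.
			"Foo/Bar": {Header: math.MaxUint64, Message: 256},
		},
	}))
}
```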
+@@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } +- if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { ++ if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil +@@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { + return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) + } + if m == "*" { +- if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { ++ if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } else { +- if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { ++ if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } +diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +index 0cdb418..6c3f632 100644 +--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go ++++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +@@ -19,6 +19,7 @@ + package binarylog + + import ( ++ "context" + "net" + "strings" + "sync/atomic" +@@ -26,7 +27,7 @@ import ( + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" +- pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" ++ binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + ) +@@ -48,7 +49,16 @@ func (g *callIDGenerator) reset() { + var idGen callIDGenerator + + // MethodLogger is the sub-logger for each method. +-type MethodLogger struct { ++// ++// This is used in the 1.0 release of gcp/observability, and thus must not be ++// deleted or changed. ++type MethodLogger interface { ++ Log(context.Context, LogEntryConfig) ++} ++ ++// TruncatingMethodLogger is a method logger that truncates headers and messages ++// based on configured fields. ++type TruncatingMethodLogger struct { + headerMaxLen, messageMaxLen uint64 + + callID uint64 +@@ -57,8 +67,12 @@ type MethodLogger struct { + sink Sink // TODO(blog): make this plugable. + } + +-func newMethodLogger(h, m uint64) *MethodLogger { +- return &MethodLogger{ ++// NewTruncatingMethodLogger returns a new truncating method logger. ++// ++// This is used in the 1.0 release of gcp/observability, and thus must not be ++// deleted or changed. ++func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { ++ return &TruncatingMethodLogger{ + headerMaxLen: h, + messageMaxLen: m, + +@@ -69,8 +83,10 @@ func newMethodLogger(h, m uint64) *MethodLogger { + } + } + +-// Log creates a proto binary log entry, and logs it to the sink. +-func (ml *MethodLogger) Log(c LogEntryConfig) { ++// Build is an internal only method for building the proto message out of the ++// input event. It's made public to enable other library to reuse as much logic ++// in TruncatingMethodLogger as possible. 
++func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { + m := c.toProto() + timestamp, _ := ptypes.TimestampProto(time.Now()) + m.Timestamp = timestamp +@@ -78,18 +94,22 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { + m.SequenceIdWithinCall = ml.idWithinCallGen.next() + + switch pay := m.Payload.(type) { +- case *pb.GrpcLogEntry_ClientHeader: ++ case *binlogpb.GrpcLogEntry_ClientHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) +- case *pb.GrpcLogEntry_ServerHeader: ++ case *binlogpb.GrpcLogEntry_ServerHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) +- case *pb.GrpcLogEntry_Message: ++ case *binlogpb.GrpcLogEntry_Message: + m.PayloadTruncated = ml.truncateMessage(pay.Message) + } ++ return m ++} + +- ml.sink.Write(m) ++// Log creates a proto binary log entry, and logs it to the sink. ++func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { ++ ml.sink.Write(ml.Build(c)) + } + +-func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { ++func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { + if ml.headerMaxLen == maxUInt { + return false + } +@@ -108,7 +128,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { + // but not counted towards the size limit. + continue + } +- currentEntryLen := uint64(len(entry.Value)) ++ currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) + if currentEntryLen > bytesLimit { + break + } +@@ -119,7 +139,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { + return truncated + } + +-func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { ++func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { + if ml.messageMaxLen == maxUInt { + return false + } +@@ -131,8 +151,11 @@ func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { + } + + // LogEntryConfig represents the configuration for binary log entry. ++// ++// This is used in the 1.0 release of gcp/observability, and thus must not be ++// deleted or changed. + type LogEntryConfig interface { +- toProto() *pb.GrpcLogEntry ++ toProto() *binlogpb.GrpcLogEntry + } + + // ClientHeader configs the binary log entry to be a ClientHeader entry. +@@ -146,10 +169,10 @@ type ClientHeader struct { + PeerAddr net.Addr + } + +-func (c *ClientHeader) toProto() *pb.GrpcLogEntry { ++func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { + // This function doesn't need to set all the fields (e.g. seq ID). The Log + // function will set the fields when necessary. 
+- clientHeader := &pb.ClientHeader{ ++ clientHeader := &binlogpb.ClientHeader{ + Metadata: mdToMetadataProto(c.Header), + MethodName: c.MethodName, + Authority: c.Authority, +@@ -157,16 +180,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry { + if c.Timeout > 0 { + clientHeader.Timeout = ptypes.DurationProto(c.Timeout) + } +- ret := &pb.GrpcLogEntry{ +- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, +- Payload: &pb.GrpcLogEntry_ClientHeader{ ++ ret := &binlogpb.GrpcLogEntry{ ++ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, ++ Payload: &binlogpb.GrpcLogEntry_ClientHeader{ + ClientHeader: clientHeader, + }, + } + if c.OnClientSide { +- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT ++ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { +- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER ++ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) +@@ -182,19 +205,19 @@ type ServerHeader struct { + PeerAddr net.Addr + } + +-func (c *ServerHeader) toProto() *pb.GrpcLogEntry { +- ret := &pb.GrpcLogEntry{ +- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, +- Payload: &pb.GrpcLogEntry_ServerHeader{ +- ServerHeader: &pb.ServerHeader{ ++func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { ++ ret := &binlogpb.GrpcLogEntry{ ++ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, ++ Payload: &binlogpb.GrpcLogEntry_ServerHeader{ ++ ServerHeader: &binlogpb.ServerHeader{ + Metadata: mdToMetadataProto(c.Header), + }, + }, + } + if c.OnClientSide { +- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT ++ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { +- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER ++ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) +@@ -210,7 +233,7 @@ type ClientMessage struct { + Message interface{} + } + +-func (c *ClientMessage) toProto() *pb.GrpcLogEntry { ++func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { + var ( + data []byte + err error +@@ -225,19 +248,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry { + } else { + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") + } +- ret := &pb.GrpcLogEntry{ +- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, +- Payload: &pb.GrpcLogEntry_Message{ +- Message: &pb.Message{ ++ ret := &binlogpb.GrpcLogEntry{ ++ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, ++ Payload: &binlogpb.GrpcLogEntry_Message{ ++ Message: &binlogpb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { +- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT ++ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { +- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER ++ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + return ret + } +@@ -250,7 +273,7 @@ type ServerMessage struct { + Message interface{} + } + +-func (c *ServerMessage) toProto() *pb.GrpcLogEntry { ++func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { + var ( + data []byte + err error +@@ -265,19 +288,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { + } else { + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") + } +- ret := &pb.GrpcLogEntry{ +- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, +- Payload: &pb.GrpcLogEntry_Message{ +- Message: &pb.Message{ ++ ret := &binlogpb.GrpcLogEntry{ ++ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, ++ Payload: &binlogpb.GrpcLogEntry_Message{ ++ Message: &binlogpb.Message{ + 
Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { +- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT ++ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { +- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER ++ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + return ret + } +@@ -287,15 +310,15 @@ type ClientHalfClose struct { + OnClientSide bool + } + +-func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { +- ret := &pb.GrpcLogEntry{ +- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, ++func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { ++ ret := &binlogpb.GrpcLogEntry{ ++ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, + Payload: nil, // No payload here. + } + if c.OnClientSide { +- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT ++ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { +- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER ++ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + return ret + } +@@ -311,7 +334,7 @@ type ServerTrailer struct { + PeerAddr net.Addr + } + +-func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { ++func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { + st, ok := status.FromError(c.Err) + if !ok { + grpclogLogger.Info("binarylogging: error in trailer is not a status error") +@@ -327,10 +350,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { + grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) + } + } +- ret := &pb.GrpcLogEntry{ +- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, +- Payload: &pb.GrpcLogEntry_Trailer{ +- Trailer: &pb.Trailer{ ++ ret := &binlogpb.GrpcLogEntry{ ++ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, ++ Payload: &binlogpb.GrpcLogEntry_Trailer{ ++ Trailer: &binlogpb.Trailer{ + Metadata: mdToMetadataProto(c.Trailer), + StatusCode: uint32(st.Code()), + StatusMessage: st.Message(), +@@ -339,9 +362,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { + }, + } + if c.OnClientSide { +- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT ++ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { +- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER ++ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) +@@ -354,15 +377,15 @@ type Cancel struct { + OnClientSide bool + } + +-func (c *Cancel) toProto() *pb.GrpcLogEntry { +- ret := &pb.GrpcLogEntry{ +- Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, ++func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { ++ ret := &binlogpb.GrpcLogEntry{ ++ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, + Payload: nil, + } + if c.OnClientSide { +- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT ++ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { +- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER ++ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + return ret + } +@@ -379,15 +402,15 @@ func metadataKeyOmit(key string) bool { + return strings.HasPrefix(key, "grpc-") + } + +-func mdToMetadataProto(md metadata.MD) *pb.Metadata { +- ret := &pb.Metadata{} ++func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { ++ ret := &binlogpb.Metadata{} + for k, vv := range md { + if metadataKeyOmit(k) { + continue + } + for _, v := range vv { + ret.Entry = append(ret.Entry, +- &pb.MetadataEntry{ ++ &binlogpb.MetadataEntry{ + Key: k, + Value: []byte(v), + }, +@@ -397,26 +420,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata { + return ret + } + +-func addrToProto(addr net.Addr) *pb.Address { +- ret := &pb.Address{} ++func addrToProto(addr net.Addr) 
*binlogpb.Address { ++ ret := &binlogpb.Address{} + switch a := addr.(type) { + case *net.TCPAddr: + if a.IP.To4() != nil { +- ret.Type = pb.Address_TYPE_IPV4 ++ ret.Type = binlogpb.Address_TYPE_IPV4 + } else if a.IP.To16() != nil { +- ret.Type = pb.Address_TYPE_IPV6 ++ ret.Type = binlogpb.Address_TYPE_IPV6 + } else { +- ret.Type = pb.Address_TYPE_UNKNOWN ++ ret.Type = binlogpb.Address_TYPE_UNKNOWN + // Do not set address and port fields. + break + } + ret.Address = a.IP.String() + ret.IpPort = uint32(a.Port) + case *net.UnixAddr: +- ret.Type = pb.Address_TYPE_UNIX ++ ret.Type = binlogpb.Address_TYPE_UNIX + ret.Address = a.String() + default: +- ret.Type = pb.Address_TYPE_UNKNOWN ++ ret.Type = binlogpb.Address_TYPE_UNKNOWN + } + return ret + } +diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go +index c2fdd58..264de38 100644 +--- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go ++++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go +@@ -26,7 +26,7 @@ import ( + "time" + + "github.com/golang/protobuf/proto" +- pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" ++ binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + ) + + var ( +@@ -42,15 +42,15 @@ type Sink interface { + // Write will be called to write the log entry into the sink. + // + // It should be thread-safe so it can be called in parallel. +- Write(*pb.GrpcLogEntry) error ++ Write(*binlogpb.GrpcLogEntry) error + // Close will be called when the Sink is replaced by a new Sink. + Close() error + } + + type noopSink struct{} + +-func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } +-func (ns *noopSink) Close() error { return nil } ++func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } ++func (ns *noopSink) Close() error { return nil } + + // newWriterSink creates a binary log sink with the given writer. + // +@@ -66,7 +66,7 @@ type writerSink struct { + out io.Writer + } + +-func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { ++func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { + b, err := proto.Marshal(e) + if err != nil { + grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) +@@ -96,7 +96,7 @@ type bufferedSink struct { + done chan struct{} + } + +-func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { ++func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { + fs.mu.Lock() + defer fs.mu.Unlock() + if !fs.flusherStarted { +diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +index 9f6a0c1..81c2f5f 100644 +--- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go ++++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +@@ -35,6 +35,7 @@ import "sync" + // internal/transport/transport.go for an example of this. + type Unbounded struct { + c chan interface{} ++ closed bool + mu sync.Mutex + backlog []interface{} + } +@@ -47,16 +48,18 @@ func NewUnbounded() *Unbounded { + // Put adds t to the unbounded buffer. + func (b *Unbounded) Put(t interface{}) { + b.mu.Lock() ++ defer b.mu.Unlock() ++ if b.closed { ++ return ++ } + if len(b.backlog) == 0 { + select { + case b.c <- t: +- b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, t) +- b.mu.Unlock() + } + + // Load sends the earliest buffered data, if any, onto the read channel +@@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) { + // value from the read channel. 
+ func (b *Unbounded) Load() { + b.mu.Lock() ++ defer b.mu.Unlock() ++ if b.closed { ++ return ++ } + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: +@@ -72,7 +79,6 @@ func (b *Unbounded) Load() { + default: + } + } +- b.mu.Unlock() + } + + // Get returns a read channel on which values added to the buffer, via Put(), +@@ -80,6 +86,20 @@ func (b *Unbounded) Load() { + // + // Upon reading a value from this channel, users are expected to call Load() to + // send the next buffered value onto the channel if there is any. ++// ++// If the unbounded buffer is closed, the read channel returned by this method ++// is closed. + func (b *Unbounded) Get() <-chan interface{} { + return b.c + } ++ ++// Close closes the unbounded buffer. ++func (b *Unbounded) Close() { ++ b.mu.Lock() ++ defer b.mu.Unlock() ++ if b.closed { ++ return ++ } ++ b.closed = true ++ close(b.c) ++} +diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go +index f731413..777cbcd 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go +@@ -24,6 +24,8 @@ + package channelz + + import ( ++ "context" ++ "errors" + "fmt" + "sort" + "sync" +@@ -49,7 +51,8 @@ var ( + // TurnOn turns on channelz data collection. + func TurnOn() { + if !IsOn() { +- NewChannelzStorage() ++ db.set(newChannelMap()) ++ idGen.reset() + atomic.StoreInt32(&curState, 1) + } + } +@@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap { + return d.DB + } + +-// NewChannelzStorage initializes channelz data storage and id generator. ++// NewChannelzStorageForTesting initializes channelz data storage and id ++// generator for testing purposes. + // +-// This function returns a cleanup function to wait for all channelz state to be reset by the +-// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests +-// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen +-// to remove some entity just register by the new test, since the id space is the same. +-// +-// Note: This function is exported for testing purpose only. User should not call +-// it in most cases. +-func NewChannelzStorage() (cleanup func() error) { +- db.set(&channelMap{ +- topLevelChannels: make(map[int64]struct{}), +- channels: make(map[int64]*channel), +- listenSockets: make(map[int64]*listenSocket), +- normalSockets: make(map[int64]*normalSocket), +- servers: make(map[int64]*server), +- subChannels: make(map[int64]*subChannel), +- }) ++// Returns a cleanup function to be invoked by the test, which waits for up to ++// 10s for all channelz state to be reset by the grpc goroutines when those ++// entities get closed. This cleanup function helps with ensuring that tests ++// don't mess up each other. ++func NewChannelzStorageForTesting() (cleanup func() error) { ++ db.set(newChannelMap()) + idGen.reset() ++ + return func() error { +- var err error + cm := db.get() + if cm == nil { + return nil + } +- for i := 0; i < 1000; i++ { +- cm.mu.Lock() +- if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { +- cm.mu.Unlock() +- // all things stored in the channelz map have been cleared. 
++ ++ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) ++ defer cancel() ++ ticker := time.NewTicker(10 * time.Millisecond) ++ defer ticker.Stop() ++ for { ++ cm.mu.RLock() ++ topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) ++ cm.mu.RUnlock() ++ ++ if err := ctx.Err(); err != nil { ++ return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) ++ } ++ if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { + return nil + } +- cm.mu.Unlock() +- time.Sleep(10 * time.Millisecond) ++ <-ticker.C + } +- +- cm.mu.Lock() +- err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) +- cm.mu.Unlock() +- return err + } + } + +@@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric { + return db.get().GetServer(id) + } + +-// RegisterChannel registers the given channel c in channelz database with ref +-// as its reference name, and add it to the child list of its parent (identified +-// by pid). pid = 0 means no parent. It returns the unique channelz tracking id +-// assigned to this channel. +-func RegisterChannel(c Channel, pid int64, ref string) int64 { ++// RegisterChannel registers the given channel c in the channelz database with ++// ref as its reference name, and adds it to the child list of its parent ++// (identified by pid). pid == nil means no parent. ++// ++// Returns a unique channelz identifier assigned to this channel. ++// ++// If channelz is not turned ON, the channelz database is not mutated. ++func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { + id := idGen.genID() ++ var parent int64 ++ isTopChannel := true ++ if pid != nil { ++ isTopChannel = false ++ parent = pid.Int() ++ } ++ ++ if !IsOn() { ++ return newIdentifer(RefChannel, id, pid) ++ } ++ + cn := &channel{ + refName: ref, + c: c, + subChans: make(map[int64]string), + nestedChans: make(map[int64]string), + id: id, +- pid: pid, ++ pid: parent, + trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + } +- if pid == 0 { +- db.get().addChannel(id, cn, true, pid, ref) +- } else { +- db.get().addChannel(id, cn, false, pid, ref) +- } +- return id ++ db.get().addChannel(id, cn, isTopChannel, parent) ++ return newIdentifer(RefChannel, id, pid) + } + +-// RegisterSubChannel registers the given channel c in channelz database with ref +-// as its reference name, and add it to the child list of its parent (identified +-// by pid). It returns the unique channelz tracking id assigned to this subchannel. +-func RegisterSubChannel(c Channel, pid int64, ref string) int64 { +- if pid == 0 { +- logger.Error("a SubChannel's parent id cannot be 0") +- return 0 ++// RegisterSubChannel registers the given subChannel c in the channelz database ++// with ref as its reference name, and adds it to the child list of its parent ++// (identified by pid). 
++// ++// Returns a unique channelz identifier assigned to this subChannel. ++// ++// If channelz is not turned ON, the channelz database is not mutated. ++func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { ++ if pid == nil { ++ return nil, errors.New("a SubChannel's parent id cannot be nil") + } + id := idGen.genID() ++ if !IsOn() { ++ return newIdentifer(RefSubChannel, id, pid), nil ++ } ++ + sc := &subChannel{ + refName: ref, + c: c, + sockets: make(map[int64]string), + id: id, +- pid: pid, ++ pid: pid.Int(), + trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + } +- db.get().addSubChannel(id, sc, pid, ref) +- return id ++ db.get().addSubChannel(id, sc, pid.Int()) ++ return newIdentifer(RefSubChannel, id, pid), nil + } + + // RegisterServer registers the given server s in channelz database. It returns + // the unique channelz tracking id assigned to this server. +-func RegisterServer(s Server, ref string) int64 { ++// ++// If channelz is not turned ON, the channelz database is not mutated. ++func RegisterServer(s Server, ref string) *Identifier { + id := idGen.genID() ++ if !IsOn() { ++ return newIdentifer(RefServer, id, nil) ++ } ++ + svr := &server{ + refName: ref, + s: s, +@@ -244,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 { + id: id, + } + db.get().addServer(id, svr) +- return id ++ return newIdentifer(RefServer, id, nil) + } + + // RegisterListenSocket registers the given listen socket s in channelz database + // with ref as its reference name, and add it to the child list of its parent + // (identified by pid). It returns the unique channelz tracking id assigned to + // this listen socket. +-func RegisterListenSocket(s Socket, pid int64, ref string) int64 { +- if pid == 0 { +- logger.Error("a ListenSocket's parent id cannot be 0") +- return 0 ++// ++// If channelz is not turned ON, the channelz database is not mutated. ++func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { ++ if pid == nil { ++ return nil, errors.New("a ListenSocket's parent id cannot be 0") + } + id := idGen.genID() +- ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} +- db.get().addListenSocket(id, ls, pid, ref) +- return id ++ if !IsOn() { ++ return newIdentifer(RefListenSocket, id, pid), nil ++ } ++ ++ ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} ++ db.get().addListenSocket(id, ls, pid.Int()) ++ return newIdentifer(RefListenSocket, id, pid), nil + } + + // RegisterNormalSocket registers the given normal socket s in channelz database +-// with ref as its reference name, and add it to the child list of its parent ++// with ref as its reference name, and adds it to the child list of its parent + // (identified by pid). It returns the unique channelz tracking id assigned to + // this normal socket. +-func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { +- if pid == 0 { +- logger.Error("a NormalSocket's parent id cannot be 0") +- return 0 ++// ++// If channelz is not turned ON, the channelz database is not mutated. 
++func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { ++ if pid == nil { ++ return nil, errors.New("a NormalSocket's parent id cannot be 0") + } + id := idGen.genID() +- ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} +- db.get().addNormalSocket(id, ns, pid, ref) +- return id ++ if !IsOn() { ++ return newIdentifer(RefNormalSocket, id, pid), nil ++ } ++ ++ ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} ++ db.get().addNormalSocket(id, ns, pid.Int()) ++ return newIdentifer(RefNormalSocket, id, pid), nil + } + +-// RemoveEntry removes an entry with unique channelz trakcing id to be id from ++// RemoveEntry removes an entry with unique channelz tracking id to be id from + // channelz database. +-func RemoveEntry(id int64) { +- db.get().removeEntry(id) ++// ++// If channelz is not turned ON, this function is a no-op. ++func RemoveEntry(id *Identifier) { ++ if !IsOn() { ++ return ++ } ++ db.get().removeEntry(id.Int()) + } + +-// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added +-// to the channel trace. +-// The Parent field is optional. It is used for event that will be recorded in the entity's parent +-// trace also. ++// TraceEventDesc is what the caller of AddTraceEvent should provide to describe ++// the event to be added to the channel trace. ++// ++// The Parent field is optional. It is used for an event that will be recorded ++// in the entity's parent trace. + type TraceEventDesc struct { + Desc string + Severity Severity + Parent *TraceEventDesc + } + +-// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. +-func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { +- for d := desc; d != nil; d = d.Parent { +- switch d.Severity { +- case CtUnknown, CtInfo: +- l.InfoDepth(depth+1, d.Desc) +- case CtWarning: +- l.WarningDepth(depth+1, d.Desc) +- case CtError: +- l.ErrorDepth(depth+1, d.Desc) +- } ++// AddTraceEvent adds trace related to the entity with specified id, using the ++// provided TraceEventDesc. ++// ++// If channelz is not turned ON, this will simply log the event descriptions. ++func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { ++ // Log only the trace description associated with the bottom most entity. ++ switch desc.Severity { ++ case CtUnknown, CtInfo: ++ l.InfoDepth(depth+1, withParens(id)+desc.Desc) ++ case CtWarning: ++ l.WarningDepth(depth+1, withParens(id)+desc.Desc) ++ case CtError: ++ l.ErrorDepth(depth+1, withParens(id)+desc.Desc) + } ++ + if getMaxTraceEntry() == 0 { + return + } +- db.get().traceEvent(id, desc) ++ if IsOn() { ++ db.get().traceEvent(id.Int(), desc) ++ } + } + + // channelMap is the storage data structure for channelz. 
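As a side note on the hunks above (illustration only, not part of the vendored patch): every channelz Register* helper now hands back an opaque *Identifier and leaves the channelz database untouched while channelz is off. The self-contained Go sketch below mirrors that calling pattern with hypothetical local names, since the real google.golang.org/grpc/internal/channelz package is internal and cannot be imported from outside the gRPC module.

// chanz_sketch.go - minimal stand-in for the "always return an identifier,
// only mutate the database when channelz is on" pattern shown above.
// All names here (identifier, registerServer, db, ...) are hypothetical.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// identifier is a cut-down stand-in for channelz.Identifier.
type identifier struct {
	id  int64
	str string
}

func (id *identifier) Int() int64     { return id.id }
func (id *identifier) String() string { return id.str }

var (
	curState int32 // 1 when collection is on, mirroring the atomic curState above
	idCount  int64 // mirrors the id generator

	mu sync.Mutex
	db = map[int64]string{} // toy stand-in for channelMap
)

func isOn() bool   { return atomic.LoadInt32(&curState) == 1 }
func genID() int64 { return atomic.AddInt64(&idCount, 1) }

// registerServer follows the shape of the patched RegisterServer: an
// identifier is always minted, but the map is only mutated when collection
// is turned on.
func registerServer(ref string) *identifier {
	id := genID()
	ident := &identifier{id: id, str: fmt.Sprintf("Server #%d", id)}
	if !isOn() {
		return ident
	}
	mu.Lock()
	db[id] = ref
	mu.Unlock()
	return ident
}

func main() {
	off := registerServer("warmup") // collection off: no database mutation
	atomic.StoreInt32(&curState, 1)
	on := registerServer("grpc-server") // collection on: entry is recorded

	fmt.Println(off, on)      // Server #1 Server #2
	fmt.Println(len(db) == 1) // true: only the second call touched the map
}

The same shape explains why callers in the diff now check for a nil parent and an error instead of a sentinel 0 id: the identifier itself carries the parent chain, and a no-op registration still yields a usable handle.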
+@@ -326,6 +367,17 @@ type channelMap struct { + normalSockets map[int64]*normalSocket + } + ++func newChannelMap() *channelMap { ++ return &channelMap{ ++ topLevelChannels: make(map[int64]struct{}), ++ channels: make(map[int64]*channel), ++ listenSockets: make(map[int64]*listenSocket), ++ normalSockets: make(map[int64]*normalSocket), ++ servers: make(map[int64]*server), ++ subChannels: make(map[int64]*subChannel), ++ } ++} ++ + func (c *channelMap) addServer(id int64, s *server) { + c.mu.Lock() + s.cm = c +@@ -333,7 +385,7 @@ func (c *channelMap) addServer(id int64, s *server) { + c.mu.Unlock() + } + +-func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { ++func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { + c.mu.Lock() + cn.cm = c + cn.trace.cm = c +@@ -346,7 +398,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in + c.mu.Unlock() + } + +-func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { ++func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { + c.mu.Lock() + sc.cm = c + sc.trace.cm = c +@@ -355,7 +407,7 @@ func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref stri + c.mu.Unlock() + } + +-func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { ++func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { + c.mu.Lock() + ls.cm = c + c.listenSockets[id] = ls +@@ -363,7 +415,7 @@ func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref + c.mu.Unlock() + } + +-func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { ++func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { + c.mu.Lock() + ns.cm = c + c.normalSockets[id] = ns +@@ -630,7 +682,7 @@ func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) + if count == 0 { + end = true + } +- var s []*SocketMetric ++ s := make([]*SocketMetric, 0, len(sks)) + for _, ns := range sks { + sm := &SocketMetric{} + sm.SocketData = ns.s.ChannelzMetric() +diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go +new file mode 100644 +index 0000000..c9a27ac +--- /dev/null ++++ b/vendor/google.golang.org/grpc/internal/channelz/id.go +@@ -0,0 +1,75 @@ ++/* ++ * ++ * Copyright 2022 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package channelz ++ ++import "fmt" ++ ++// Identifier is an opaque identifier which uniquely identifies an entity in the ++// channelz database. ++type Identifier struct { ++ typ RefChannelType ++ id int64 ++ str string ++ pid *Identifier ++} ++ ++// Type returns the entity type corresponding to id. ++func (id *Identifier) Type() RefChannelType { ++ return id.typ ++} ++ ++// Int returns the integer identifier corresponding to id. 
++func (id *Identifier) Int() int64 { ++ return id.id ++} ++ ++// String returns a string representation of the entity corresponding to id. ++// ++// This includes some information about the parent as well. Examples: ++// Top-level channel: [Channel #channel-number] ++// Nested channel: [Channel #parent-channel-number Channel #channel-number] ++// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] ++func (id *Identifier) String() string { ++ return id.str ++} ++ ++// Equal returns true if other is the same as id. ++func (id *Identifier) Equal(other *Identifier) bool { ++ if (id != nil) != (other != nil) { ++ return false ++ } ++ if id == nil && other == nil { ++ return true ++ } ++ return id.typ == other.typ && id.id == other.id && id.pid == other.pid ++} ++ ++// NewIdentifierForTesting returns a new opaque identifier to be used only for ++// testing purposes. ++func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { ++ return newIdentifer(typ, id, pid) ++} ++ ++func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { ++ str := fmt.Sprintf("%s #%d", typ, id) ++ if pid != nil { ++ str = fmt.Sprintf("%s %s", pid, str) ++ } ++ return &Identifier{typ: typ, id: id, str: str, pid: pid} ++} +diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go +index b0013f9..8e13a3d 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/logging.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go +@@ -26,77 +26,54 @@ import ( + + var logger = grpclog.Component("channelz") + ++func withParens(id *Identifier) string { ++ return "[" + id.String() + "] " ++} ++ + // Info logs and adds a trace event if channelz is on. +-func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { +- if IsOn() { +- AddTraceEvent(l, id, 1, &TraceEventDesc{ +- Desc: fmt.Sprint(args...), +- Severity: CtInfo, +- }) +- } else { +- l.InfoDepth(1, args...) +- } ++func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { ++ AddTraceEvent(l, id, 1, &TraceEventDesc{ ++ Desc: fmt.Sprint(args...), ++ Severity: CtInfo, ++ }) + } + + // Infof logs and adds a trace event if channelz is on. +-func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { +- msg := fmt.Sprintf(format, args...) +- if IsOn() { +- AddTraceEvent(l, id, 1, &TraceEventDesc{ +- Desc: msg, +- Severity: CtInfo, +- }) +- } else { +- l.InfoDepth(1, msg) +- } ++func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { ++ AddTraceEvent(l, id, 1, &TraceEventDesc{ ++ Desc: fmt.Sprintf(format, args...), ++ Severity: CtInfo, ++ }) + } + + // Warning logs and adds a trace event if channelz is on. +-func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { +- if IsOn() { +- AddTraceEvent(l, id, 1, &TraceEventDesc{ +- Desc: fmt.Sprint(args...), +- Severity: CtWarning, +- }) +- } else { +- l.WarningDepth(1, args...) +- } ++func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { ++ AddTraceEvent(l, id, 1, &TraceEventDesc{ ++ Desc: fmt.Sprint(args...), ++ Severity: CtWarning, ++ }) + } + + // Warningf logs and adds a trace event if channelz is on. +-func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { +- msg := fmt.Sprintf(format, args...) 
+- if IsOn() { +- AddTraceEvent(l, id, 1, &TraceEventDesc{ +- Desc: msg, +- Severity: CtWarning, +- }) +- } else { +- l.WarningDepth(1, msg) +- } ++func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { ++ AddTraceEvent(l, id, 1, &TraceEventDesc{ ++ Desc: fmt.Sprintf(format, args...), ++ Severity: CtWarning, ++ }) + } + + // Error logs and adds a trace event if channelz is on. +-func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { +- if IsOn() { +- AddTraceEvent(l, id, 1, &TraceEventDesc{ +- Desc: fmt.Sprint(args...), +- Severity: CtError, +- }) +- } else { +- l.ErrorDepth(1, args...) +- } ++func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { ++ AddTraceEvent(l, id, 1, &TraceEventDesc{ ++ Desc: fmt.Sprint(args...), ++ Severity: CtError, ++ }) + } + + // Errorf logs and adds a trace event if channelz is on. +-func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { +- msg := fmt.Sprintf(format, args...) +- if IsOn() { +- AddTraceEvent(l, id, 1, &TraceEventDesc{ +- Desc: msg, +- Severity: CtError, +- }) +- } else { +- l.ErrorDepth(1, msg) +- } ++func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { ++ AddTraceEvent(l, id, 1, &TraceEventDesc{ ++ Desc: fmt.Sprintf(format, args...), ++ Severity: CtError, ++ }) + } +diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go +index 3c595d1..7b2f350 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/types.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/types.go +@@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) { + + // deleteSelfIfReady tries to delete the channel itself from the channelz database. + // The delete process includes two steps: +-// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +-// parent's child list. +-// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +-// will return entry not found error. ++// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its ++// parent's child list. ++// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id ++// will return entry not found error. + func (c *channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return +@@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) { + + // deleteSelfIfReady tries to delete the subchannel itself from the channelz database. + // The delete process includes two steps: +-// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +-// its parent's child list. +-// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +-// by id will return entry not found error. ++// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from ++// its parent's child list. ++// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup ++// by id will return entry not found error. + func (sc *subChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return +@@ -686,12 +686,33 @@ const ( + type RefChannelType int + + const ( ++ // RefUnknown indicates an unknown entity type, the zero value for this type. 
++ RefUnknown RefChannelType = iota + // RefChannel indicates the referenced entity is a Channel. +- RefChannel RefChannelType = iota ++ RefChannel + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel ++ // RefServer indicates the referenced entity is a Server. ++ RefServer ++ // RefListenSocket indicates the referenced entity is a ListenSocket. ++ RefListenSocket ++ // RefNormalSocket indicates the referenced entity is a NormalSocket. ++ RefNormalSocket + ) + ++var refChannelTypeToString = map[RefChannelType]string{ ++ RefUnknown: "Unknown", ++ RefChannel: "Channel", ++ RefSubChannel: "SubChannel", ++ RefServer: "Server", ++ RefListenSocket: "ListenSocket", ++ RefNormalSocket: "NormalSocket", ++} ++ ++func (r RefChannelType) String() string { ++ return refChannelTypeToString[r] ++} ++ + func (c *channelTrace) dumpData() *ChannelTrace { + c.mu.Lock() + ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} +diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +index 692dd61..1b1c4cc 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +@@ -1,5 +1,3 @@ +-// +build !appengine +- + /* + * + * Copyright 2018 gRPC authors. +diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +index 19c2fc5..8b06eed 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +@@ -1,4 +1,5 @@ +-// +build !linux appengine ++//go:build !linux ++// +build !linux + + /* + * +@@ -37,6 +38,6 @@ type SocketOptionData struct { + // Windows OS doesn't support Socket Option + func (s *SocketOptionData) Getsockopt(fd uintptr) { + once.Do(func() { +- logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.") ++ logger.Warning("Channelz: socket options are not supported on non-linux environments") + }) + } +diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +index fdf409d..8d194e4 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +@@ -1,5 +1,3 @@ +-// +build linux,!appengine +- + /* + * + * Copyright 2018 gRPC authors. +diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +index 8864a08..837ddc4 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +@@ -1,4 +1,5 @@ +-// +build !linux appengine ++//go:build !linux ++// +build !linux + + /* + * +diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go +index be70b6c..25ade62 100644 +--- a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go ++++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go +@@ -1,5 +1,3 @@ +-// +build !appengine +- + /* + * + * Copyright 2020 gRPC authors. 
+diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go +index f499a61..2919632 100644 +--- a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go ++++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go +@@ -1,5 +1,3 @@ +-// +build !appengine +- + /* + * + * Copyright 2018 gRPC authors. +diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go +index 55664fa..f792fd2 100644 +--- a/vendor/google.golang.org/grpc/internal/credentials/util.go ++++ b/vendor/google.golang.org/grpc/internal/credentials/util.go +@@ -18,7 +18,9 @@ + + package credentials + +-import "crypto/tls" ++import ( ++ "crypto/tls" ++) + + const alpnProtoStrH2 = "h2" + +diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +index 73931a9..80fd5c7 100644 +--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go ++++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +@@ -21,18 +21,46 @@ package envconfig + + import ( + "os" ++ "strconv" + "strings" + ) + +-const ( +- prefix = "GRPC_GO_" +- retryStr = prefix + "RETRY" +- txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" +-) +- + var ( +- // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". +- Retry = strings.EqualFold(os.Getenv(retryStr), "on") + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). +- TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ++ TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) ++ // AdvertiseCompressors is set if registered compressor should be advertised ++ // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). ++ AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) ++ // RingHashCap indicates the maximum ring size which defaults to 4096 ++ // entries but may be overridden by setting the environment variable ++ // "GRPC_RING_HASH_CAP". This does not override the default bounds ++ // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). ++ RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) ++ // PickFirstLBConfig is set if we should support configuration of the ++ // pick_first LB policy, which can be enabled by setting the environment ++ // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". ++ PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) + ) ++ ++func boolFromEnv(envVar string, def bool) bool { ++ if def { ++ // The default is true; return true unless the variable is "false". ++ return !strings.EqualFold(os.Getenv(envVar), "false") ++ } ++ // The default is false; return false unless the variable is "true". ++ return strings.EqualFold(os.Getenv(envVar), "true") ++} ++ ++func uint64FromEnv(envVar string, def, min, max uint64) uint64 { ++ v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) ++ if err != nil { ++ return def ++ } ++ if v < min { ++ return min ++ } ++ if v > max { ++ return max ++ } ++ return v ++} +diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go +new file mode 100644 +index 0000000..dd314cf +--- /dev/null ++++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go +@@ -0,0 +1,42 @@ ++/* ++ * ++ * Copyright 2022 gRPC authors. 
++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package envconfig ++ ++import "os" ++ ++const ( ++ envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" ++ envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" ++) ++ ++var ( ++ // ObservabilityConfig is the json configuration for the gcp/observability ++ // package specified directly in the envObservabilityConfig env var. ++ // ++ // This is used in the 1.0 release of gcp/observability, and thus must not be ++ // deleted or changed. ++ ObservabilityConfig = os.Getenv(envObservabilityConfig) ++ // ObservabilityConfigFile is the json configuration for the ++ // gcp/observability specified in a file with the location specified in ++ // envObservabilityConfigFile env var. ++ // ++ // This is used in the 1.0 release of gcp/observability, and thus must not be ++ // deleted or changed. ++ ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) ++) +diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go +new file mode 100644 +index 0000000..02b4b6a +--- /dev/null ++++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go +@@ -0,0 +1,95 @@ ++/* ++ * ++ * Copyright 2020 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package envconfig ++ ++import ( ++ "os" ++) ++ ++const ( ++ // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. ++ // Do not use this and read from env directly. Its value is read and kept in ++ // variable XDSBootstrapFileName. ++ // ++ // When both bootstrap FileName and FileContent are set, FileName is used. ++ XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" ++ // XDSBootstrapFileContentEnv is the env variable to set bootstrap file ++ // content. Do not use this and read from env directly. Its value is read ++ // and kept in variable XDSBootstrapFileContent. ++ // ++ // When both bootstrap FileName and FileContent are set, FileName is used. ++ XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" ++) ++ ++var ( ++ // XDSBootstrapFileName holds the name of the file which contains xDS ++ // bootstrap configuration. Users can specify the location of the bootstrap ++ // file by setting the environment variable "GRPC_XDS_BOOTSTRAP". ++ // ++ // When both bootstrap FileName and FileContent are set, FileName is used. 
++ XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) ++ // XDSBootstrapFileContent holds the content of the xDS bootstrap ++ // configuration. Users can specify the bootstrap config by setting the ++ // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". ++ // ++ // When both bootstrap FileName and FileContent are set, FileName is used. ++ XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) ++ // XDSRingHash indicates whether ring hash support is enabled, which can be ++ // disabled by setting the environment variable ++ // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". ++ XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) ++ // XDSClientSideSecurity is used to control processing of security ++ // configuration on the client-side. ++ // ++ // Note that there is no env var protection for the server-side because we ++ // have a brand new API on the server-side and users explicitly need to use ++ // the new API to get security integration on the server. ++ XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) ++ // XDSAggregateAndDNS indicates whether processing of aggregated cluster and ++ // DNS cluster is enabled, which can be disabled by setting the environment ++ // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" ++ // to "false". ++ XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) ++ ++ // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, ++ // which can be disabled by setting the environment variable ++ // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". ++ XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) ++ // XDSOutlierDetection indicates whether outlier detection support is ++ // enabled, which can be disabled by setting the environment variable ++ // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". ++ XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) ++ // XDSFederation indicates whether federation support is enabled, which can ++ // be enabled by setting the environment variable ++ // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". ++ XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) ++ ++ // XDSRLS indicates whether processing of Cluster Specifier plugins and ++ // support for the RLS CLuster Specifier is enabled, which can be disabled by ++ // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to ++ // "false". ++ XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) ++ ++ // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. ++ C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") ++ // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which ++ // can be disabled by setting the environment variable ++ // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". ++ XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ++) +diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +index e6f975c..b68e26a 100644 +--- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go ++++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +@@ -110,17 +110,17 @@ type LoggerV2 interface { + // This is a copy of the DepthLoggerV2 defined in the external grpclog package. + // It is defined here to avoid a circular dependency. 
+ // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. + type DepthLoggerV2 interface { +- // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. ++ // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. + InfoDepth(depth int, args ...interface{}) +- // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. ++ // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. + WarningDepth(depth int, args ...interface{}) +- // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. ++ // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. + ErrorDepth(depth int, args ...interface{}) +- // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. ++ // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. + FatalDepth(depth int, args ...interface{}) + } +diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +index 82af70e..02224b4 100644 +--- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go ++++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +@@ -63,6 +63,9 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { + + // Debugf does info logging at verbose level 2. + func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { ++ // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe ++ // rewrite PrefixLogger a little to ensure that we don't use the global ++ // `Logger` here, and instead use the `logger` field. + if !Logger.V(2) { + return + } +@@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) ++ ++} ++ ++// V reports whether verbosity level l is at least the requested verbose level. ++func (pl *PrefixLogger) V(l int) bool { ++ // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe ++ // rewrite PrefixLogger a little to ensure that we don't use the global ++ // `Logger` here, and instead use the `logger` field. ++ return Logger.V(l) + } + + // NewPrefixLogger creates a prefix logger with the given prefix. +diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +index 740f83c..d08e3e9 100644 +--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go ++++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +@@ -52,6 +52,13 @@ func Intn(n int) int { + return r.Intn(n) + } + ++// Int31n implements rand.Int31n on the grpcrand global source. ++func Int31n(n int32) int32 { ++ mu.Lock() ++ defer mu.Unlock() ++ return r.Int31n(n) ++} ++ + // Float64 implements rand.Float64 on the grpcrand global source. + func Float64() float64 { + mu.Lock() +@@ -65,3 +72,17 @@ func Uint64() uint64 { + defer mu.Unlock() + return r.Uint64() + } ++ ++// Uint32 implements rand.Uint32 on the grpcrand global source. ++func Uint32() uint32 { ++ mu.Lock() ++ defer mu.Unlock() ++ return r.Uint32() ++} ++ ++// Shuffle implements rand.Shuffle on the grpcrand global source. 
++var Shuffle = func(n int, f func(int, int)) { ++ mu.Lock() ++ defer mu.Unlock() ++ r.Shuffle(n, f) ++} +diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +new file mode 100644 +index 0000000..37b8d41 +--- /dev/null ++++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +@@ -0,0 +1,119 @@ ++/* ++ * ++ * Copyright 2022 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package grpcsync ++ ++import ( ++ "context" ++ "sync" ++ ++ "google.golang.org/grpc/internal/buffer" ++) ++ ++// CallbackSerializer provides a mechanism to schedule callbacks in a ++// synchronized manner. It provides a FIFO guarantee on the order of execution ++// of scheduled callbacks. New callbacks can be scheduled by invoking the ++// Schedule() method. ++// ++// This type is safe for concurrent access. ++type CallbackSerializer struct { ++ // Done is closed once the serializer is shut down completely, i.e all ++ // scheduled callbacks are executed and the serializer has deallocated all ++ // its resources. ++ Done chan struct{} ++ ++ callbacks *buffer.Unbounded ++ closedMu sync.Mutex ++ closed bool ++} ++ ++// NewCallbackSerializer returns a new CallbackSerializer instance. The provided ++// context will be passed to the scheduled callbacks. Users should cancel the ++// provided context to shutdown the CallbackSerializer. It is guaranteed that no ++// callbacks will be added once this context is canceled, and any pending un-run ++// callbacks will be executed before the serializer is shut down. ++func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { ++ t := &CallbackSerializer{ ++ Done: make(chan struct{}), ++ callbacks: buffer.NewUnbounded(), ++ } ++ go t.run(ctx) ++ return t ++} ++ ++// Schedule adds a callback to be scheduled after existing callbacks are run. ++// ++// Callbacks are expected to honor the context when performing any blocking ++// operations, and should return early when the context is canceled. ++// ++// Return value indicates if the callback was successfully added to the list of ++// callbacks to be executed by the serializer. It is not possible to add ++// callbacks once the context passed to NewCallbackSerializer is cancelled. ++func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { ++ t.closedMu.Lock() ++ defer t.closedMu.Unlock() ++ ++ if t.closed { ++ return false ++ } ++ t.callbacks.Put(f) ++ return true ++} ++ ++func (t *CallbackSerializer) run(ctx context.Context) { ++ var backlog []func(context.Context) ++ ++ defer close(t.Done) ++ for ctx.Err() == nil { ++ select { ++ case <-ctx.Done(): ++ // Do nothing here. Next iteration of the for loop will not happen, ++ // since ctx.Err() would be non-nil. 
++ case callback, ok := <-t.callbacks.Get(): ++ if !ok { ++ return ++ } ++ t.callbacks.Load() ++ callback.(func(ctx context.Context))(ctx) ++ } ++ } ++ ++ // Fetch pending callbacks if any, and execute them before returning from ++ // this method and closing t.Done. ++ t.closedMu.Lock() ++ t.closed = true ++ backlog = t.fetchPendingCallbacks() ++ t.callbacks.Close() ++ t.closedMu.Unlock() ++ for _, b := range backlog { ++ b(ctx) ++ } ++} ++ ++func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { ++ var backlog []func(context.Context) ++ for { ++ select { ++ case b := <-t.callbacks.Get(): ++ backlog = append(backlog, b.(func(context.Context))) ++ t.callbacks.Load() ++ default: ++ return backlog ++ } ++ } ++} +diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go +similarity index 67% +rename from vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go +rename to vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go +index af6f577..6635f7b 100644 +--- a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go ++++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go +@@ -1,8 +1,6 @@ +-// +build appengine +- + /* + * +- * Copyright 2020 gRPC authors. ++ * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. +@@ -18,14 +16,17 @@ + * + */ + +-package credentials ++package grpcsync + + import ( +- "crypto/tls" +- "net/url" ++ "sync" + ) + +-// SPIFFEIDFromState is a no-op for appengine builds. +-func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { +- return nil ++// OnceFunc returns a function wrapping f which ensures f is only executed ++// once even if the returned function is executed multiple times. ++func OnceFunc(f func()) func() { ++ var once sync.Once ++ return func() { ++ once.Do(f) ++ } + } +diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go +new file mode 100644 +index 0000000..9f40909 +--- /dev/null ++++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go +@@ -0,0 +1,47 @@ ++/* ++ * ++ * Copyright 2022 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package grpcutil ++ ++import ( ++ "strings" ++ ++ "google.golang.org/grpc/internal/envconfig" ++) ++ ++// RegisteredCompressorNames holds names of the registered compressors. ++var RegisteredCompressorNames []string ++ ++// IsCompressorNameRegistered returns true when name is available in registry. ++func IsCompressorNameRegistered(name string) bool { ++ for _, compressor := range RegisteredCompressorNames { ++ if compressor == name { ++ return true ++ } ++ } ++ return false ++} ++ ++// RegisteredCompressors returns a string of registered compressor names ++// separated by comma. 
++func RegisteredCompressors() string { ++ if !envconfig.AdvertiseCompressors { ++ return "" ++ } ++ return strings.Join(RegisteredCompressorNames, ",") ++} +diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go +similarity index 72% +rename from vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go +rename to vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go +index a6144cd..e2f948e 100644 +--- a/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go ++++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go +@@ -1,8 +1,6 @@ +-// +build appengine +- + /* + * +- * Copyright 2018 gRPC authors. ++ * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. +@@ -18,13 +16,5 @@ + * + */ + +-package credentials +- +-import ( +- "net" +-) +- +-// WrapSyscallConn returns newConn on appengine. +-func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { +- return newConn +-} ++// Package grpcutil provides utility functions used across the gRPC codebase. ++package grpcutil +diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go +index 4e74750..ec62b47 100644 +--- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go ++++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go +@@ -25,7 +25,6 @@ import ( + + // ParseMethod splits service and method from the input. It expects format + // "/service/method". +-// + func ParseMethod(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") +@@ -39,6 +38,11 @@ func ParseMethod(methodName string) (service, method string, _ error) { + return methodName[:pos], methodName[pos+1:], nil + } + ++// baseContentType is the base content-type for gRPC. This is a valid ++// content-type on it's own, but can also include a content-subtype such as ++// "proto" as a suffix after "+" or ";". See ++// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests ++// for more details. + const baseContentType = "application/grpc" + + // ContentSubtype returns the content-subtype for the given content-type. The +diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go +similarity index 63% +rename from vendor/google.golang.org/grpc/internal/resolver/dns/go113.go +rename to vendor/google.golang.org/grpc/internal/grpcutil/regex.go +index 8783a8c..7a092b2 100644 +--- a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go ++++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go +@@ -1,8 +1,6 @@ +-// +build go1.13 +- + /* + * +- * Copyright 2019 gRPC authors. ++ * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. +@@ -18,16 +16,16 @@ + * + */ + +-package dns ++package grpcutil + +-import "net" ++import "regexp" + +-func init() { +- filterError = func(err error) error { +- if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { +- // The name does not exist; not an error. +- return nil +- } +- return err ++// FullMatchWithRegex returns whether the full text matches the regex provided. 
++func FullMatchWithRegex(re *regexp.Regexp, text string) bool { ++ if len(text) == 0 { ++ return re.MatchString(text) + } ++ re.Longest() ++ rem := re.FindString(text) ++ return len(rem) == len(text) + } +diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go +deleted file mode 100644 +index 8833021..0000000 +--- a/vendor/google.golang.org/grpc/internal/grpcutil/target.go ++++ /dev/null +@@ -1,89 +0,0 @@ +-/* +- * +- * Copyright 2020 gRPC authors. +- * +- * Licensed under the Apache License, Version 2.0 (the "License"); +- * you may not use this file except in compliance with the License. +- * You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. +- * +- */ +- +-// Package grpcutil provides a bunch of utility functions to be used across the +-// gRPC codebase. +-package grpcutil +- +-import ( +- "strings" +- +- "google.golang.org/grpc/resolver" +-) +- +-// split2 returns the values from strings.SplitN(s, sep, 2). +-// If sep is not found, it returns ("", "", false) instead. +-func split2(s, sep string) (string, string, bool) { +- spl := strings.SplitN(s, sep, 2) +- if len(spl) < 2 { +- return "", "", false +- } +- return spl[0], spl[1], true +-} +- +-// ParseTarget splits target into a resolver.Target struct containing scheme, +-// authority and endpoint. skipUnixColonParsing indicates that the parse should +-// not parse "unix:[path]" cases. This should be true in cases where a custom +-// dialer is present, to prevent a behavior change. +-// +-// If target is not a valid scheme://authority/endpoint as specified in +-// https://github.com/grpc/grpc/blob/master/doc/naming.md, +-// it returns {Endpoint: target}. +-func ParseTarget(target string, skipUnixColonParsing bool) (ret resolver.Target) { +- var ok bool +- if strings.HasPrefix(target, "unix-abstract:") { +- if strings.HasPrefix(target, "unix-abstract://") { +- // Maybe, with Authority specified, try to parse it +- var remain string +- ret.Scheme, remain, _ = split2(target, "://") +- ret.Authority, ret.Endpoint, ok = split2(remain, "/") +- if !ok { +- // No Authority, add the "//" back +- ret.Endpoint = "//" + remain +- } else { +- // Found Authority, add the "/" back +- ret.Endpoint = "/" + ret.Endpoint +- } +- } else { +- // Without Authority specified, split target on ":" +- ret.Scheme, ret.Endpoint, _ = split2(target, ":") +- } +- return ret +- } +- ret.Scheme, ret.Endpoint, ok = split2(target, "://") +- if !ok { +- if strings.HasPrefix(target, "unix:") && !skipUnixColonParsing { +- // Handle the "unix:[local/path]" and "unix:[/absolute/path]" cases, +- // because splitting on :// only handles the +- // "unix://[/absolute/path]" case. Only handle if the dialer is nil, +- // to avoid a behavior change with custom dialers. 
+- return resolver.Target{Scheme: "unix", Endpoint: target[len("unix:"):]} +- } +- return resolver.Target{Endpoint: target} +- } +- ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") +- if !ok { +- return resolver.Target{Endpoint: target} +- } +- if ret.Scheme == "unix" { +- // Add the "/" back in the unix case, so the unix resolver receives the +- // actual endpoint in the "unix://[/absolute/path]" case. +- ret.Endpoint = "/" + ret.Endpoint +- } +- return ret +-} +diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go +index 1b596bf..42ff39c 100644 +--- a/vendor/google.golang.org/grpc/internal/internal.go ++++ b/vendor/google.golang.org/grpc/internal/internal.go +@@ -38,11 +38,10 @@ var ( + // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by + // default, but tests may wish to set it lower for convenience. + KeepaliveMinPingTime = 10 * time.Second +- // ParseServiceConfigForTesting is for creating a fake +- // ClientConn for resolver testing only +- ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult ++ // ParseServiceConfig parses a JSON representation of the service config. ++ ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult + // EqualServiceConfigForTesting is for testing service config generation and +- // parsing. Both a and b should be returned by ParseServiceConfigForTesting. ++ // parsing. Both a and b should be returned by ParseServiceConfig. + // This function compares the config without rawJSON stripped, in case the + // there's difference in white space. + EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool +@@ -59,11 +58,112 @@ var ( + // gRPC server. An xDS-enabled server needs to know what type of credentials + // is configured on the underlying gRPC server. This is set by server.go. + GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials ++ // CanonicalString returns the canonical string of the code defined here: ++ // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. ++ // ++ // This is used in the 1.0 release of gcp/observability, and thus must not be ++ // deleted or changed. ++ CanonicalString interface{} // func (codes.Code) string + // DrainServerTransports initiates a graceful close of existing connections + // on a gRPC server accepted on the provided listener address. An + // xDS-enabled server invokes this method on a grpc.Server when a particular + // listener moves to "not-serving" mode. + DrainServerTransports interface{} // func(*grpc.Server, string) ++ // AddGlobalServerOptions adds an array of ServerOption that will be ++ // effective globally for newly created servers. The priority will be: 1. ++ // user-provided; 2. this method; 3. default values. ++ // ++ // This is used in the 1.0 release of gcp/observability, and thus must not be ++ // deleted or changed. ++ AddGlobalServerOptions interface{} // func(opt ...ServerOption) ++ // ClearGlobalServerOptions clears the array of extra ServerOption. This ++ // method is useful in testing and benchmarking. ++ // ++ // This is used in the 1.0 release of gcp/observability, and thus must not be ++ // deleted or changed. ++ ClearGlobalServerOptions func() ++ // AddGlobalDialOptions adds an array of DialOption that will be effective ++ // globally for newly created client channels. The priority will be: 1. ++ // user-provided; 2. this method; 3. default values. 
++ // ++ // This is used in the 1.0 release of gcp/observability, and thus must not be ++ // deleted or changed. ++ AddGlobalDialOptions interface{} // func(opt ...DialOption) ++ // DisableGlobalDialOptions returns a DialOption that prevents the ++ // ClientConn from applying the global DialOptions (set via ++ // AddGlobalDialOptions). ++ // ++ // This is used in the 1.0 release of gcp/observability, and thus must not be ++ // deleted or changed. ++ DisableGlobalDialOptions interface{} // func() grpc.DialOption ++ // ClearGlobalDialOptions clears the array of extra DialOption. This ++ // method is useful in testing and benchmarking. ++ // ++ // This is used in the 1.0 release of gcp/observability, and thus must not be ++ // deleted or changed. ++ ClearGlobalDialOptions func() ++ // JoinDialOptions combines the dial options passed as arguments into a ++ // single dial option. ++ JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption ++ // JoinServerOptions combines the server options passed as arguments into a ++ // single server option. ++ JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption ++ ++ // WithBinaryLogger returns a DialOption that specifies the binary logger ++ // for a ClientConn. ++ // ++ // This is used in the 1.0 release of gcp/observability, and thus must not be ++ // deleted or changed. ++ WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption ++ // BinaryLogger returns a ServerOption that can set the binary logger for a ++ // server. ++ // ++ // This is used in the 1.0 release of gcp/observability, and thus must not be ++ // deleted or changed. ++ BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption ++ ++ // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using ++ // the provided xds bootstrap config instead of the global configuration from ++ // the supported environment variables. The resolver.Builder is meant to be ++ // used in conjunction with the grpc.WithResolvers DialOption. ++ // ++ // Testing Only ++ // ++ // This function should ONLY be used for testing and may not work with some ++ // other features, including the CSDS service. ++ NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) ++ ++ // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster ++ // Specifier Plugin for testing purposes, regardless of the XDSRLS environment ++ // variable. ++ // ++ // TODO: Remove this function once the RLS env var is removed. ++ RegisterRLSClusterSpecifierPluginForTesting func() ++ ++ // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster ++ // Specifier Plugin for testing purposes. This is needed because there is no way ++ // to unregister the RLS Cluster Specifier Plugin after registering it solely ++ // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). ++ // ++ // TODO: Remove this function once the RLS env var is removed. ++ UnregisterRLSClusterSpecifierPluginForTesting func() ++ ++ // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing ++ // purposes, regardless of the RBAC environment variable. ++ // ++ // TODO: Remove this function once the RBAC env var is removed. ++ RegisterRBACHTTPFilterForTesting func() ++ ++ // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for ++ // testing purposes. 
This is needed because there is no way to unregister the ++ // HTTP Filter after registering it solely for testing purposes using ++ // RegisterRBACHTTPFilterForTesting(). ++ // ++ // TODO: Remove this function once the RBAC env var is removed. ++ UnregisterRBACHTTPFilterForTesting func() ++ ++ // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. ++ ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) + ) + + // HealthChecker defines the signature of the client-side LB channel health checking function. +@@ -86,3 +186,9 @@ const ( + // that supports backend returned by grpclb balancer. + CredsBundleModeBackendFromBalancer = "backend-from-balancer" + ) ++ ++// RLSLoadBalancingPolicyName is the name of the RLS LB policy. ++// ++// It currently has an experimental suffix which would be removed once ++// end-to-end testing of the policy is completed. ++const RLSLoadBalancingPolicyName = "rls_experimental" +diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go +index 3022626..c82e608 100644 +--- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go ++++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go +@@ -22,6 +22,9 @@ + package metadata + + import ( ++ "fmt" ++ "strings" ++ + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + ) +@@ -30,14 +33,38 @@ type mdKeyType string + + const mdKey = mdKeyType("grpc.internal.address.metadata") + ++type mdValue metadata.MD ++ ++func (m mdValue) Equal(o interface{}) bool { ++ om, ok := o.(mdValue) ++ if !ok { ++ return false ++ } ++ if len(m) != len(om) { ++ return false ++ } ++ for k, v := range m { ++ ov := om[k] ++ if len(ov) != len(v) { ++ return false ++ } ++ for i, ve := range v { ++ if ov[i] != ve { ++ return false ++ } ++ } ++ } ++ return true ++} ++ + // Get returns the metadata of addr. + func Get(addr resolver.Address) metadata.MD { + attrs := addr.Attributes + if attrs == nil { + return nil + } +- md, _ := attrs.Value(mdKey).(metadata.MD) +- return md ++ md, _ := attrs.Value(mdKey).(mdValue) ++ return metadata.MD(md) + } + + // Set sets (overrides) the metadata in addr. +@@ -45,6 +72,61 @@ func Get(addr resolver.Address) metadata.MD { + // When a SubConn is created with this address, the RPCs sent on it will all + // have this metadata. + func Set(addr resolver.Address, md metadata.MD) resolver.Address { +- addr.Attributes = addr.Attributes.WithValues(mdKey, md) ++ addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) + return addr + } ++ ++// Validate validates every pair in md with ValidatePair. ++func Validate(md metadata.MD) error { ++ for k, vals := range md { ++ if err := ValidatePair(k, vals...); err != nil { ++ return err ++ } ++ } ++ return nil ++} ++ ++// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E ++func hasNotPrintable(msg string) bool { ++ // for i that saving a conversion if not using for range ++ for i := 0; i < len(msg); i++ { ++ if msg[i] < 0x20 || msg[i] > 0x7E { ++ return true ++ } ++ } ++ return false ++} ++ ++// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : ++// ++// - key must contain one or more characters. ++// - the characters in the key must be contained in [0-9 a-z _ - .]. ++// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. ++// - the characters in the every value must be printable (in [%x20-%x7E]). 
++func ValidatePair(key string, vals ...string) error { ++ // key should not be empty ++ if key == "" { ++ return fmt.Errorf("there is an empty key in the header") ++ } ++ // pseudo-header will be ignored ++ if key[0] == ':' { ++ return nil ++ } ++ // check key, for i that saving a conversion if not using for range ++ for i := 0; i < len(key); i++ { ++ r := key[i] ++ if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { ++ return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) ++ } ++ } ++ if strings.HasSuffix(key, "-bin") { ++ return nil ++ } ++ // check value ++ for _, val := range vals { ++ if hasNotPrintable(val) { ++ return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key) ++ } ++ } ++ return nil ++} +diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go +new file mode 100644 +index 0000000..0177af4 +--- /dev/null ++++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go +@@ -0,0 +1,82 @@ ++/* ++ * ++ * Copyright 2021 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package pretty defines helper functions to pretty-print structs for logging. ++package pretty ++ ++import ( ++ "bytes" ++ "encoding/json" ++ "fmt" ++ ++ "github.com/golang/protobuf/jsonpb" ++ protov1 "github.com/golang/protobuf/proto" ++ "google.golang.org/protobuf/encoding/protojson" ++ protov2 "google.golang.org/protobuf/proto" ++) ++ ++const jsonIndent = " " ++ ++// ToJSON marshals the input into a json string. ++// ++// If marshal fails, it falls back to fmt.Sprintf("%+v"). ++func ToJSON(e interface{}) string { ++ switch ee := e.(type) { ++ case protov1.Message: ++ mm := jsonpb.Marshaler{Indent: jsonIndent} ++ ret, err := mm.MarshalToString(ee) ++ if err != nil { ++ // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 ++ // messages are not imported, and this will fail because the message ++ // is not found. ++ return fmt.Sprintf("%+v", ee) ++ } ++ return ret ++ case protov2.Message: ++ mm := protojson.MarshalOptions{ ++ Multiline: true, ++ Indent: jsonIndent, ++ } ++ ret, err := mm.Marshal(ee) ++ if err != nil { ++ // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 ++ // messages are not imported, and this will fail because the message ++ // is not found. ++ return fmt.Sprintf("%+v", ee) ++ } ++ return string(ret) ++ default: ++ ret, err := json.MarshalIndent(ee, "", jsonIndent) ++ if err != nil { ++ return fmt.Sprintf("%+v", ee) ++ } ++ return string(ret) ++ } ++} ++ ++// FormatJSON formats the input json bytes with indentation. ++// ++// If Indent fails, it returns the unchanged input as string. 
++func FormatJSON(b []byte) string { ++ var out bytes.Buffer ++ err := json.Indent(&out, b, "", jsonIndent) ++ if err != nil { ++ return string(b) ++ } ++ return out.String() ++} +diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +index 5e7f367..c7a18a9 100644 +--- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go ++++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +@@ -117,9 +117,12 @@ type ClientInterceptor interface { + NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) + } + +-// ServerInterceptor is unimplementable; do not use. ++// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side. + type ServerInterceptor interface { +- notDefined() ++ // AllowRPC checks if an incoming RPC is allowed to proceed based on ++ // information about connection RPC was received on, and HTTP Headers. This ++ // information will be piped into context. ++ AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. + } + + type csKeyType string +@@ -129,7 +132,7 @@ const csKey = csKeyType("grpc.internal.resolver.configSelector") + // SetConfigSelector sets the config selector in state and returns the new + // state. + func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { +- state.Attributes = state.Attributes.WithValues(csKey, cs) ++ state.Attributes = state.Attributes.WithValue(csKey, cs) + return state + } + +diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +index 03825bb..09a667f 100644 +--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go ++++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +@@ -116,7 +116,7 @@ type dnsBuilder struct{} + + // Build creates and starts a DNS resolver that watches the name resolution of the target. + func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { +- host, port, err := parseTarget(target.Endpoint, defaultPort) ++ host, port, err := parseTarget(target.Endpoint(), defaultPort) + if err != nil { + return nil, err + } +@@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts + disableServiceConfig: opts.DisableServiceConfig, + } + +- if target.Authority == "" { ++ if target.URL.Host == "" { + d.resolver = defaultResolver + } else { +- d.resolver, err = customAuthorityResolver(target.Authority) ++ d.resolver, err = customAuthorityResolver(target.URL.Host) + if err != nil { + return nil, err + } +@@ -277,18 +277,13 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { + return newAddrs, nil + } + +-var filterError = func(err error) error { ++func handleDNSError(err error, lookupType string) error { + if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). 
+ return nil + } +- return err +-} +- +-func handleDNSError(err error, lookupType string) error { +- err = filterError(err) + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + logger.Info(err) +@@ -323,12 +318,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { + } + + func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { +- var newAddrs []resolver.Address + addrs, err := d.resolver.LookupHost(d.ctx, d.host) + if err != nil { + err = handleDNSError(err, "A") + return nil, err + } ++ newAddrs := make([]resolver.Address, 0, len(addrs)) + for _, a := range addrs { + ip, ok := formatIP(a) + if !ok { +diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +index 520d922..afac565 100644 +--- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go ++++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +@@ -20,13 +20,20 @@ + // name without scheme back to gRPC as resolved address. + package passthrough + +-import "google.golang.org/grpc/resolver" ++import ( ++ "errors" ++ ++ "google.golang.org/grpc/resolver" ++) + + const scheme = "passthrough" + + type passthroughBuilder struct{} + + func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { ++ if target.Endpoint() == "" && opts.Dialer == nil { ++ return nil, errors.New("passthrough: received empty target in Build()") ++ } + r := &passthroughResolver{ + target: target, + cc: cc, +@@ -45,7 +52,7 @@ type passthroughResolver struct { + } + + func (r *passthroughResolver) start() { +- r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) ++ r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) + } + + func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} +diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +index 0d5a811..1609116 100644 +--- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go ++++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +@@ -34,13 +34,24 @@ type builder struct { + } + + func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { +- if target.Authority != "" { +- return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) ++ if target.URL.Host != "" { ++ return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) + } +- addr := resolver.Address{Addr: target.Endpoint} ++ ++ // gRPC was parsing the dial target manually before PR #4817, and we ++ // switched to using url.Parse() in that PR. To avoid breaking existing ++ // resolver implementations we ended up stripping the leading "/" from the ++ // endpoint. This obviously does not work for the "unix" scheme. Hence we ++ // end up using the parsed URL instead. ++ endpoint := target.URL.Path ++ if endpoint == "" { ++ endpoint = target.URL.Opaque ++ } ++ addr := resolver.Address{Addr: endpoint} + if b.scheme == unixAbstractScheme { +- // prepend "\x00" to address for unix-abstract +- addr.Addr = "\x00" + addr.Addr ++ // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do ++ // not want trailing \0 in address. 
++ addr.Addr = "@" + addr.Addr + } + cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) + return &nopResolver{}, nil +diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go +new file mode 100644 +index 0000000..11d82af +--- /dev/null ++++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go +@@ -0,0 +1,130 @@ ++/* ++ * ++ * Copyright 2023 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package serviceconfig ++ ++import ( ++ "encoding/json" ++ "fmt" ++ "math" ++ "strconv" ++ "strings" ++ "time" ++) ++ ++// Duration defines JSON marshal and unmarshal methods to conform to the ++// protobuf JSON spec defined [here]. ++// ++// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration ++type Duration time.Duration ++ ++func (d Duration) String() string { ++ return fmt.Sprint(time.Duration(d)) ++} ++ ++// MarshalJSON converts from d to a JSON string output. ++func (d Duration) MarshalJSON() ([]byte, error) { ++ ns := time.Duration(d).Nanoseconds() ++ sec := ns / int64(time.Second) ++ ns = ns % int64(time.Second) ++ ++ var sign string ++ if sec < 0 || ns < 0 { ++ sign, sec, ns = "-", -1*sec, -1*ns ++ } ++ ++ // Generated output always contains 0, 3, 6, or 9 fractional digits, ++ // depending on required precision. ++ str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) ++ str = strings.TrimSuffix(str, "000") ++ str = strings.TrimSuffix(str, "000") ++ str = strings.TrimSuffix(str, ".000") ++ return []byte(fmt.Sprintf("\"%ss\"", str)), nil ++} ++ ++// UnmarshalJSON unmarshals b as a duration JSON string into d. ++func (d *Duration) UnmarshalJSON(b []byte) error { ++ var s string ++ if err := json.Unmarshal(b, &s); err != nil { ++ return err ++ } ++ if !strings.HasSuffix(s, "s") { ++ return fmt.Errorf("malformed duration %q: missing seconds unit", s) ++ } ++ neg := false ++ if s[0] == '-' { ++ neg = true ++ s = s[1:] ++ } ++ ss := strings.SplitN(s[:len(s)-1], ".", 3) ++ if len(ss) > 2 { ++ return fmt.Errorf("malformed duration %q: too many decimals", s) ++ } ++ // hasDigits is set if either the whole or fractional part of the number is ++ // present, since both are optional but one is required. ++ hasDigits := false ++ var sec, ns int64 ++ if len(ss[0]) > 0 { ++ var err error ++ if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { ++ return fmt.Errorf("malformed duration %q: %v", s, err) ++ } ++ // Maximum seconds value per the durationpb spec. 
++ const maxProtoSeconds = 315_576_000_000 ++ if sec > maxProtoSeconds { ++ return fmt.Errorf("out of range: %q", s) ++ } ++ hasDigits = true ++ } ++ if len(ss) == 2 && len(ss[1]) > 0 { ++ if len(ss[1]) > 9 { ++ return fmt.Errorf("malformed duration %q: too many digits after decimal", s) ++ } ++ var err error ++ if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { ++ return fmt.Errorf("malformed duration %q: %v", s, err) ++ } ++ for i := 9; i > len(ss[1]); i-- { ++ ns *= 10 ++ } ++ hasDigits = true ++ } ++ if !hasDigits { ++ return fmt.Errorf("malformed duration %q: contains no numbers", s) ++ } ++ ++ if neg { ++ sec *= -1 ++ ns *= -1 ++ } ++ ++ // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. ++ const maxSeconds = math.MaxInt64 / int64(time.Second) ++ const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) ++ const minSeconds = math.MinInt64 / int64(time.Second) ++ const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) ++ ++ if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { ++ *d = Duration(math.MaxInt64) ++ } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { ++ *d = Duration(math.MinInt64) ++ } else { ++ *d = Duration(sec*int64(time.Second) + ns) ++ } ++ return nil ++} +diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +index c0634d1..51e733e 100644 +--- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go ++++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +@@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + // ServiceConfig contains a list of loadBalancingConfigs, each with a name and + // config. This method iterates through that list in order, and stops at the + // first policy that is supported. +-// - If the config for the first supported policy is invalid, the whole service +-// config is invalid. +-// - If the list doesn't contain any supported policy, the whole service config +-// is invalid. ++// - If the config for the first supported policy is invalid, the whole service ++// config is invalid. ++// - If the list doesn't contain any supported policy, the whole service config ++// is invalid. + func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + var ir intermediateBalancerConfig + err := json.Unmarshal(b, &ir) +@@ -78,6 +78,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + return err + } + ++ var names []string + for i, lbcfg := range ir { + if len(lbcfg) != 1 { + return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) +@@ -92,6 +93,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + for name, jsonCfg = range lbcfg { + } + ++ names = append(names, name) + builder := balancer.Get(name) + if builder == nil { + // If the balancer is not registered, move on to the next config. +@@ -120,7 +122,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + // return. This means we had a loadBalancingConfig slice but did not + // encounter a registered policy. The config is considered invalid in this + // case. 
+- return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") ++ return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) + } + + // MethodConfig defines the configuration recommended by the service providers for a +diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go +index e5c6513..b0ead4f 100644 +--- a/vendor/google.golang.org/grpc/internal/status/status.go ++++ b/vendor/google.golang.org/grpc/internal/status/status.go +@@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool { + } + return proto.Equal(e.s.s, tse.s.s) + } ++ ++// IsRestrictedControlPlaneCode returns whether the status includes a code ++// restricted for control plane usage as defined by gRFC A54. ++func IsRestrictedControlPlaneCode(s *Status) bool { ++ switch s.Code() { ++ case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: ++ return true ++ } ++ return false ++} +diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +index 4b2964f..b3a7227 100644 +--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go ++++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +@@ -1,5 +1,3 @@ +-// +build !appengine +- + /* + * + * Copyright 2018 gRPC authors. +diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +index 7913ef1..999f52c 100644 +--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go ++++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +@@ -1,4 +1,5 @@ +-// +build !linux appengine ++//go:build !linux ++// +build !linux + + /* + * +@@ -35,41 +36,41 @@ var logger = grpclog.Component("core") + + func log() { + once.Do(func() { +- logger.Info("CPU time info is unavailable on non-linux or appengine environment.") ++ logger.Info("CPU time info is unavailable on non-linux environments.") + }) + } + +-// GetCPUTime returns the how much CPU time has passed since the start of this process. +-// It always returns 0 under non-linux or appengine environment. ++// GetCPUTime returns the how much CPU time has passed since the start of this ++// process. It always returns 0 under non-linux environments. + func GetCPUTime() int64 { + log() + return 0 + } + +-// Rusage is an empty struct under non-linux or appengine environment. ++// Rusage is an empty struct under non-linux environments. + type Rusage struct{} + +-// GetRusage is a no-op function under non-linux or appengine environment. ++// GetRusage is a no-op function under non-linux environments. + func GetRusage() *Rusage { + log() + return nil + } + + // CPUTimeDiff returns the differences of user CPU time and system CPU time used +-// between two Rusage structs. It a no-op function for non-linux or appengine environment. ++// between two Rusage structs. It a no-op function for non-linux environments. + func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + log() + return 0, 0 + } + +-// SetTCPUserTimeout is a no-op function under non-linux or appengine environments ++// SetTCPUserTimeout is a no-op function under non-linux environments. 
+ func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + log() + return nil + } + +-// GetTCPUserTimeout is a no-op function under non-linux or appengine environments +-// a negative return value indicates the operation is not supported ++// GetTCPUserTimeout is a no-op function under non-linux environments. ++// A negative return value indicates the operation is not supported + func GetTCPUserTimeout(conn net.Conn) (int, error) { + log() + return -1, nil +diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +index 45532f8..be5a9c8 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go ++++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +@@ -22,6 +22,7 @@ import ( + "bytes" + "errors" + "fmt" ++ "net" + "runtime" + "strconv" + "sync" +@@ -29,6 +30,7 @@ import ( + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" ++ "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/status" + ) +@@ -133,9 +135,11 @@ type cleanupStream struct { + func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM + + type earlyAbortStream struct { ++ httpStatus uint32 + streamID uint32 + contentSubtype string + status *status.Status ++ rst bool + } + + func (*earlyAbortStream) isTransportResponseFrame() bool { return false } +@@ -189,7 +193,7 @@ type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool +- closeConn bool ++ closeConn error // if set, loopyWriter will exit, resulting in conn closure + } + + func (*goAway) isTransportResponseFrame() bool { return false } +@@ -207,6 +211,14 @@ type outFlowControlSizeRequest struct { + + func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + ++// closeConnection is an instruction to tell the loopy writer to flush the ++// framer and exit, which will cause the transport's connection to be closed ++// (by the client or server). The transport itself will close after the reader ++// encounters the EOF caused by the connection closure. ++type closeConnection struct{} ++ ++func (closeConnection) isTransportResponseFrame() bool { return false } ++ + type outStreamState int + + const ( +@@ -406,7 +418,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { + select { + case <-c.ch: + case <-c.done: +- return nil, ErrConnClosing ++ return nil, errors.New("transport closed by client") + } + } + } +@@ -476,12 +488,14 @@ type loopyWriter struct { + hEnc *hpack.Encoder // HPACK encoder. + bdpEst *bdpEstimator + draining bool ++ conn net.Conn ++ logger *grpclog.PrefixLogger + + // Side-specific handlers + ssGoAwayHandler func(*goAway) (bool, error) + } + +-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { ++func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { + var buf bytes.Buffer + l := &loopyWriter{ + side: s, +@@ -494,6 +508,8 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, ++ conn: conn, ++ logger: logger, + } + return l + } +@@ -511,23 +527,26 @@ const minBatchSize = 1000 + // 2. Stream level flow control quota available. 
+ // + // In each iteration of run loop, other than processing the incoming control +-// frame, loopy calls processData, which processes one node from the activeStreams linked-list. +-// This results in writing of HTTP2 frames into an underlying write buffer. +-// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. +-// As an optimization, to increase the batch size for each flush, loopy yields the processor, once +-// if the batch size is too low to give stream goroutines a chance to fill it up. ++// frame, loopy calls processData, which processes one node from the ++// activeStreams linked-list. This results in writing of HTTP2 frames into an ++// underlying write buffer. When there's no more control frames to read from ++// controlBuf, loopy flushes the write buffer. As an optimization, to increase ++// the batch size for each flush, loopy yields the processor, once if the batch ++// size is too low to give stream goroutines a chance to fill it up. ++// ++// Upon exiting, if the error causing the exit is not an I/O error, run() ++// flushes and closes the underlying connection. Otherwise, the connection is ++// left open to allow the I/O error to be encountered by the reader instead. + func (l *loopyWriter) run() (err error) { + defer func() { +- if err == ErrConnClosing { +- // Don't log ErrConnClosing as error since it happens +- // 1. When the connection is closed by some other known issue. +- // 2. User closed the connection. +- // 3. A graceful close of connection. +- if logger.V(logLevel) { +- logger.Infof("transport: loopyWriter.run returning. %v", err) +- } +- err = nil ++ if l.logger.V(logLevel) { ++ l.logger.Infof("loopyWriter exiting with error: %v", err) + } ++ if !isIOError(err) { ++ l.framer.writer.Flush() ++ l.conn.Close() ++ } ++ l.cbuf.finish() + }() + for { + it, err := l.cbuf.get(true) +@@ -572,7 +591,6 @@ func (l *loopyWriter) run() (err error) { + } + l.framer.writer.Flush() + break hasdata +- + } + } + } +@@ -581,11 +599,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) + } + +-func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { ++func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment +- return nil ++ return + } + // Find the stream and update it. 
+ if str, ok := l.estdStreams[w.streamID]; ok { +@@ -593,10 +611,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) +- return nil ++ return + } + } +- return nil + } + + func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { +@@ -604,13 +621,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + } + + func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { +- if err := l.applySettings(s.ss); err != nil { +- return err +- } ++ l.applySettings(s.ss) + return l.framer.fr.WriteSettingsAck() + } + +-func (l *loopyWriter) registerStreamHandler(h *registerStream) error { ++func (l *loopyWriter) registerStreamHandler(h *registerStream) { + str := &outStream{ + id: h.streamID, + state: empty, +@@ -618,15 +633,14 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error { + wq: h.wq, + } + l.estdStreams[h.streamID] = str +- return nil + } + + func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + str, ok := l.estdStreams[h.streamID] + if !ok { +- if logger.V(logLevel) { +- logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) ++ if l.logger.V(logLevel) { ++ l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) + } + return nil + } +@@ -653,19 +667,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { + itl: &itemList{}, + wq: h.wq, + } +- str.itl.enqueue(h) +- return l.originateStream(str) ++ return l.originateStream(str, h) + } + +-func (l *loopyWriter) originateStream(str *outStream) error { +- hdr := str.itl.dequeue().(*headerFrame) +- if err := hdr.initStream(str.id); err != nil { +- if err == ErrConnClosing { +- return err +- } +- // Other errors(errStreamDrain) need not close transport. ++func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { ++ // l.draining is set when handling GoAway. In which case, we want to avoid ++ // creating new streams. ++ if l.draining { ++ // TODO: provide a better error with the reason we are in draining. ++ hdr.onOrphaned(errStreamDrain) + return nil + } ++ if err := hdr.initStream(str.id); err != nil { ++ return err ++ } + if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + return err + } +@@ -680,8 +695,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He + l.hBuf.Reset() + for _, f := range hf { + if err := l.hEnc.WriteField(f); err != nil { +- if logger.V(logLevel) { +- logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) ++ if l.logger.V(logLevel) { ++ l.logger.Warningf("Encountered error while encoding headers: %v", err) + } + } + } +@@ -719,10 +734,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He + return nil + } + +-func (l *loopyWriter) preprocessData(df *dataFrame) error { ++func (l *loopyWriter) preprocessData(df *dataFrame) { + str, ok := l.estdStreams[df.streamID] + if !ok { +- return nil ++ return + } + // If we got data for a stream it means that + // stream was originated and the headers were sent out. 
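The loopyWriter changes in the hunks above establish an exit contract for the writer goroutine: when the loop stops for any reason other than an I/O failure, it flushes whatever is still buffered and closes the connection itself; on an I/O failure it leaves the connection open so the reader goroutine encounters the error instead. The following is a rough, standalone sketch of that pattern only, not part of the patch; every name in it (writeLoop, errIO, the item slice) is a hypothetical stand-in and not the transport's actual API.

package main

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"net"
)

// errIO is a hypothetical sentinel standing in for "the write path itself failed".
var errIO = errors.New("i/o failure")

// writeLoop drains items into a buffered writer. On exit it follows the
// contract described above: flush and close unless the failure was an I/O
// one, in which case the connection is left for the reader to tear down.
func writeLoop(conn net.Conn, items []string, failIO bool) (err error) {
	bw := bufio.NewWriter(conn)
	defer func() {
		if !errors.Is(err, errIO) {
			bw.Flush()
			conn.Close()
		}
	}()
	for _, it := range items {
		if _, err = bw.WriteString(it); err != nil {
			return err
		}
	}
	if failIO {
		return errIO
	}
	return errors.New("draining: no active streams left")
}

func main() {
	client, server := net.Pipe()
	go io.Copy(io.Discard, server) // reader side, keeps the pipe moving
	err := writeLoop(client, []string{"HEADERS", "DATA"}, false)
	fmt.Println("writer exited:", err)
}

Under this sketch's assumptions, a "draining" exit flushes the buffered frames and closes the pipe, while returning errIO would skip both steps, mirroring the behavior the comments above describe for run().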
+@@ -731,7 +746,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error { + str.state = active + l.activeStreams.enqueue(str) + } +- return nil + } + + func (l *loopyWriter) pingHandler(p *ping) error { +@@ -742,9 +756,8 @@ func (l *loopyWriter) pingHandler(p *ping) error { + + } + +-func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { ++func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { + o.resp <- l.sendQuota +- return nil + } + + func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { +@@ -761,8 +774,9 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + return err + } + } +- if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { +- return ErrConnClosing ++ if l.draining && len(l.estdStreams) == 0 { ++ // Flush and close the connection; we are done with it. ++ return errors.New("finished processing active streams while in draining mode") + } + return nil + } +@@ -771,9 +785,12 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } +- ++ // In case the caller forgets to set the http status, default to 200. ++ if eas.httpStatus == 0 { ++ eas.httpStatus = 200 ++ } + headerFields := []hpack.HeaderField{ +- {Name: ":status", Value: "200"}, ++ {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, + {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, + {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, + {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, +@@ -782,6 +799,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { + return err + } ++ if eas.rst { ++ if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { ++ return err ++ } ++ } + return nil + } + +@@ -789,7 +811,8 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { +- return ErrConnClosing ++ // Flush and close the connection; we are done with it. ++ return errors.New("received GOAWAY with no active streams") + } + } + return nil +@@ -810,7 +833,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { + func (l *loopyWriter) handle(i interface{}) error { + switch i := i.(type) { + case *incomingWindowUpdate: +- return l.incomingWindowUpdateHandler(i) ++ l.incomingWindowUpdateHandler(i) + case *outgoingWindowUpdate: + return l.outgoingWindowUpdateHandler(i) + case *incomingSettings: +@@ -820,7 +843,7 @@ func (l *loopyWriter) handle(i interface{}) error { + case *headerFrame: + return l.headerHandler(i) + case *registerStream: +- return l.registerStreamHandler(i) ++ l.registerStreamHandler(i) + case *cleanupStream: + return l.cleanupStreamHandler(i) + case *earlyAbortStream: +@@ -828,19 +851,24 @@ func (l *loopyWriter) handle(i interface{}) error { + case *incomingGoAway: + return l.incomingGoAwayHandler(i) + case *dataFrame: +- return l.preprocessData(i) ++ l.preprocessData(i) + case *ping: + return l.pingHandler(i) + case *goAway: + return l.goAwayHandler(i) + case *outFlowControlSizeRequest: +- return l.outFlowControlSizeRequestHandler(i) ++ l.outFlowControlSizeRequestHandler(i) ++ case closeConnection: ++ // Just return a non-I/O error and run() will flush and close the ++ // connection. 
++ return ErrConnClosing + default: + return fmt.Errorf("transport: unknown control message type %T", i) + } ++ return nil + } + +-func (l *loopyWriter) applySettings(ss []http2.Setting) error { ++func (l *loopyWriter) applySettings(ss []http2.Setting) { + for _, s := range ss { + switch s.ID { + case http2.SettingInitialWindowSize: +@@ -859,7 +887,6 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error { + updateHeaderTblSize(l.hEnc, s.Val) + } + } +- return nil + } + + // processData removes the first stream from active streams, writes out at most 16KB +@@ -876,9 +903,9 @@ func (l *loopyWriter) processData() (bool, error) { + dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. + // A data item is represented by a dataFrame, since it later translates into + // multiple HTTP2 data frames. +- // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. ++ // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. + // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the +- // maximum possilbe HTTP2 frame size. ++ // maximum possible HTTP2 frame size. + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + // Client sends out empty data frame with endStream = true +@@ -893,7 +920,7 @@ func (l *loopyWriter) processData() (bool, error) { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { +- return false, nil ++ return false, err + } + } else { + l.activeStreams.enqueue(str) +diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go +index 9fa306b..bc8ee07 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/defaults.go ++++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go +@@ -47,3 +47,9 @@ const ( + defaultClientMaxHeaderListSize = uint32(16 << 20) + defaultServerMaxHeaderListSize = uint32(16 << 20) + ) ++ ++// MaxStreamID is the upper bound for the stream ID before the current ++// transport gracefully closes and new transport is created for subsequent RPCs. ++// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit ++// integer. It's exported so that tests can override it. ++var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) +diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +index f262edd..97198c5 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go ++++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +@@ -136,12 +136,10 @@ type inFlow struct { + + // newLimit updates the inflow window to a new value n. + // It assumes that n is always greater than the old limit. 
+-func (f *inFlow) newLimit(n uint32) uint32 { ++func (f *inFlow) newLimit(n uint32) { + f.mu.Lock() +- d := n - f.limit + f.limit = n + f.mu.Unlock() +- return d + } + + func (f *inFlow) maybeAdjust(n uint32) uint32 { +diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go +index 1c3459c..98f80e3 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go ++++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go +@@ -39,6 +39,7 @@ import ( + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +@@ -46,24 +47,32 @@ import ( + "google.golang.org/grpc/status" + ) + +-// NewServerHandlerTransport returns a ServerTransport handling gRPC +-// from inside an http.Handler. It requires that the http Server +-// supports HTTP/2. +-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { ++// NewServerHandlerTransport returns a ServerTransport handling gRPC from ++// inside an http.Handler, or writes an HTTP error to w and returns an error. ++// It requires that the http Server supports HTTP/2. ++func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { + if r.ProtoMajor != 2 { +- return nil, errors.New("gRPC requires HTTP/2") ++ msg := "gRPC requires HTTP/2" ++ http.Error(w, msg, http.StatusBadRequest) ++ return nil, errors.New(msg) + } + if r.Method != "POST" { +- return nil, errors.New("invalid gRPC request method") ++ msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) ++ http.Error(w, msg, http.StatusBadRequest) ++ return nil, errors.New(msg) + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? 
we did before + contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) + if !validContentType { +- return nil, errors.New("invalid gRPC request content-type") ++ msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) ++ http.Error(w, msg, http.StatusUnsupportedMediaType) ++ return nil, errors.New(msg) + } + if _, ok := w.(http.Flusher); !ok { +- return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") ++ msg := "gRPC requires a ResponseWriter supporting http.Flusher" ++ http.Error(w, msg, http.StatusInternalServerError) ++ return nil, errors.New(msg) + } + + st := &serverHandlerTransport{ +@@ -75,11 +84,14 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta + contentSubtype: contentSubtype, + stats: stats, + } ++ st.logger = prefixLoggerForServerHandlerTransport(st) + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { +- return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) ++ msg := fmt.Sprintf("malformed grpc-timeout: %v", err) ++ http.Error(w, msg, http.StatusBadRequest) ++ return nil, status.Error(codes.Internal, msg) + } + st.timeoutSet = true + st.timeout = to +@@ -97,7 +109,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { +- return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) ++ msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) ++ http.Error(w, msg, http.StatusBadRequest) ++ return nil, status.Error(codes.Internal, msg) + } + metakv = append(metakv, k, v) + } +@@ -138,15 +152,19 @@ type serverHandlerTransport struct { + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + +- stats stats.Handler ++ stats []stats.Handler ++ logger *grpclog.PrefixLogger + } + +-func (ht *serverHandlerTransport) Close() { +- ht.closeOnce.Do(ht.closeCloseChanOnce) ++func (ht *serverHandlerTransport) Close(err error) { ++ ht.closeOnce.Do(func() { ++ if ht.logger.V(logLevel) { ++ ht.logger.Infof("Closing: %v", err) ++ } ++ close(ht.closedCh) ++ }) + } + +-func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } +- + func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + + // strAddr is a net.Addr backed by either a TCP "ip:port" string, or +@@ -228,15 +246,15 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro + }) + + if err == nil { // transport has not been closed +- if ht.stats != nil { +- // Note: The trailer fields are compressed with hpack after this call returns. +- // No WireLength field is set here. +- ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ ++ // Note: The trailer fields are compressed with hpack after this call returns. ++ // No WireLength field is set here. ++ for _, sh := range ht.stats { ++ sh.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + } +- ht.Close() ++ ht.Close(errors.New("finished writing status")) + return err + } + +@@ -314,10 +332,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + }) + + if err == nil { +- if ht.stats != nil { ++ for _, sh := range ht.stats { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
+- ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ ++ sh.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) +@@ -346,7 +364,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace + case <-ht.req.Context().Done(): + } + cancel() +- ht.Close() ++ ht.Close(errors.New("request is done processing")) + }() + + req := ht.req +@@ -369,14 +387,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + s.ctx = peer.NewContext(ctx, pr) +- if ht.stats != nil { +- s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) ++ for _, sh := range ht.stats { ++ s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: ht.RemoteAddr(), + Compression: s.recvCompress, + } +- ht.stats.HandleRPC(s.ctx, inHeader) ++ sh.HandleRPC(s.ctx, inHeader) + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, +@@ -435,17 +453,17 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} + + func (ht *serverHandlerTransport) IncrMsgRecv() {} + +-func (ht *serverHandlerTransport) Drain() { ++func (ht *serverHandlerTransport) Drain(debugData string) { + panic("Drain() is not implemented") + } + + // mapRecvMsgError returns the non-nil err into the appropriate + // error value as expected by callers of *grpc.parser.recvMsg. + // In particular, in can only be: +-// * io.EOF +-// * io.ErrUnexpectedEOF +-// * of type transport.ConnectionError +-// * an error from the status package ++// - io.EOF ++// - io.ErrUnexpectedEOF ++// - of type transport.ConnectionError ++// - an error from the status package + func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err +diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go +index 0cd6da1..326bf08 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go ++++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go +@@ -25,6 +25,7 @@ import ( + "math" + "net" + "net/http" ++ "path/filepath" + "strconv" + "strings" + "sync" +@@ -37,8 +38,11 @@ import ( + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + icredentials "google.golang.org/grpc/internal/credentials" ++ "google.golang.org/grpc/internal/grpclog" ++ "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" ++ istatus "google.golang.org/grpc/internal/status" + "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/keepalive" +@@ -56,11 +60,15 @@ var clientConnectionCounter uint64 + + // http2Client implements the ClientTransport interface with HTTP2. + type http2Client struct { +- lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. +- ctx context.Context +- cancel context.CancelFunc +- ctxDone <-chan struct{} // Cache the ctx.Done() chan. +- userAgent string ++ lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. ++ ctx context.Context ++ cancel context.CancelFunc ++ ctxDone <-chan struct{} // Cache the ctx.Done() chan. 
++ userAgent string ++ // address contains the resolver returned address for this transport. ++ // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` ++ // passed to `NewStream`, when determining the :authority header. ++ address resolver.Address + md metadata.MD + conn net.Conn // underlying communication channel + loopy *loopyWriter +@@ -77,6 +85,7 @@ type http2Client struct { + framer *framer + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. ++ // Do not access controlBuf with mu held. + controlBuf *controlBuffer + fc *trInFlow + // The scheme used: https if TLS is on, http otherwise. +@@ -89,7 +98,7 @@ type http2Client struct { + kp keepalive.ClientParameters + keepaliveEnabled bool + +- statsHandler stats.Handler ++ statsHandlers []stats.Handler + + initialWindowSize int32 + +@@ -97,17 +106,15 @@ type http2Client struct { + maxSendHeaderListSize *uint32 + + bdpEst *bdpEstimator +- // onPrefaceReceipt is a callback that client transport calls upon +- // receiving server preface to signal that a succefull HTTP2 +- // connection was established. +- onPrefaceReceipt func() + + maxConcurrentStreams uint32 + streamQuota int64 + streamsQuotaAvailable chan struct{} + waitingStreams uint32 + nextID uint32 ++ registeredCompressors string + ++ // Do not access controlBuf with mu held. + mu sync.Mutex // guard the following variables + state transportState + activeStreams map[uint32]*Stream +@@ -131,28 +138,35 @@ type http2Client struct { + kpDormant bool + + // Fields below are for channelz metric collection. +- channelzID int64 // channelz unique identification number ++ channelzID *channelz.Identifier + czData *channelzData + +- onGoAway func(GoAwayReason) +- onClose func() ++ onClose func(GoAwayReason) + + bufferPool *bufferPool + + connectionID uint64 ++ logger *grpclog.PrefixLogger + } + + func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { + address := addr.Addr + networkType, ok := networktype.Get(addr) + if fn != nil { ++ // Special handling for unix scheme with custom dialer. Back in the day, ++ // we did not have a unix resolver and therefore targets with a unix ++ // scheme would end up using the passthrough resolver. So, user's used a ++ // custom dialer in this case and expected the original dial target to ++ // be passed to the custom dialer. Now, we have a unix resolver. But if ++ // a custom dialer is specified, we want to retain the old behavior in ++ // terms of the address being passed to the custom dialer. + if networkType == "unix" && !strings.HasPrefix(address, "\x00") { +- // For backward compatibility, if the user dialed "unix:///path", +- // the passthrough resolver would be used and the user's custom +- // dialer would see "unix:///path". Since the unix resolver is used +- // and the address is now "/path", prepend "unix://" so the user's +- // custom dialer sees the same address. +- return fn(ctx, "unix://"+address) ++ // Supported unix targets are either "unix://absolute-path" or ++ // "unix:relative-path". ++ if filepath.IsAbs(address) { ++ return fn(ctx, "unix://"+address) ++ } ++ return fn(ctx, "unix:"+address) + } + return fn(ctx, address) + } +@@ -184,7 +198,7 @@ func isTemporary(err error) bool { + // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 + // and starts to receive messages on it. 
Non-nil error returns if construction + // fails. +-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { ++func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { +@@ -193,19 +207,51 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts + } + }() + ++ // gRPC, resolver, balancer etc. can specify arbitrary data in the ++ // Attributes field of resolver.Address, which is shoved into connectCtx ++ // and passed to the dialer and credential handshaker. This makes it possible for ++ // address specific arbitrary data to reach custom dialers and credential handshakers. ++ connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) ++ + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } +- return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) ++ return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) + } ++ + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) ++ ++ // The following defer and goroutine monitor the connectCtx for cancelation ++ // and deadline. On context expiration, the connection is hard closed and ++ // this function will naturally fail as a result. Otherwise, the defer ++ // waits for the goroutine to exit to prevent the context from being ++ // monitored (and to prevent the connection from ever being closed) after ++ // returning from this function. ++ ctxMonitorDone := grpcsync.NewEvent() ++ newClientCtx, newClientDone := context.WithCancel(connectCtx) ++ defer func() { ++ newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. ++ <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. ++ }() ++ go func(conn net.Conn) { ++ defer ctxMonitorDone.Fire() // Signal this goroutine has exited. ++ <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. ++ if err := connectCtx.Err(); err != nil { ++ // connectCtx expired before exiting the function. Hard close the connection. ++ if logger.V(logLevel) { ++ logger.Infof("Aborting due to connect deadline expiring: %v", err) ++ } ++ conn.Close() ++ } ++ }(conn) ++ + kp := opts.KeepaliveParams + // Validate keepalive parameters. + if kp.Time == 0 { +@@ -237,20 +283,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts + } + } + if transportCreds != nil { +- // gRPC, resolver, balancer etc. can specify arbitrary data in the +- // Attributes field of resolver.Address, which is shoved into connectCtx +- // and passed to the credential handshaker. This makes it possible for +- // address specific arbitrary data to reach the credential handshaker. +- connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) +- rawConn := conn +- // Pull the deadline from the connectCtx, which will be used for +- // timeouts in the authentication protocol handshake. 
Can ignore the +- // boolean as the deadline will return the zero value, which will make +- // the conn not timeout on I/O operations. +- deadline, _ := connectCtx.Deadline() +- rawConn.SetDeadline(deadline) +- conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) +- rawConn.SetDeadline(time.Time{}) ++ conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } +@@ -288,6 +321,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts + ctxDone: ctx.Done(), // Cache Done chan. + cancel: cancel, + userAgent: opts.UserAgent, ++ registeredCompressors: grpcutil.RegisteredCompressors(), ++ address: addr, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), +@@ -302,19 +337,20 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts + isSecure: isSecure, + perRPCCreds: perRPCCreds, + kp: kp, +- statsHandler: opts.StatsHandler, ++ statsHandlers: opts.StatsHandlers, + initialWindowSize: initialWindowSize, +- onPrefaceReceipt: onPrefaceReceipt, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + czData: new(channelzData), +- onGoAway: onGoAway, +- onClose: onClose, + keepaliveEnabled: keepaliveEnabled, + bufferPool: newBufferPool(), ++ onClose: onClose, + } ++ t.logger = prefixLoggerForClientTransport(t) ++ // Add peer information to the http2client context. ++ t.ctx = peer.NewContext(t.ctx, t.getPeer()) + + if md, ok := addr.Metadata.(*metadata.MD); ok { + t.md = *md +@@ -332,38 +368,50 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts + updateFlowControl: t.updateFlowControl, + } + } +- if t.statsHandler != nil { +- t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ ++ for _, sh := range t.statsHandlers { ++ t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } +- t.statsHandler.HandleConn(t.ctx, connBegin) ++ sh.HandleConn(t.ctx, connBegin) + } +- if channelz.IsOn() { +- t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) ++ t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) ++ if err != nil { ++ return nil, err + } + if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) + go t.keepalive() + } +- // Start the reader goroutine for incoming message. Each transport has +- // a dedicated goroutine which reads HTTP2 frame from network. Then it +- // dispatches the frame to the corresponding stream entity. +- go t.reader() ++ ++ // Start the reader goroutine for incoming messages. Each transport has a ++ // dedicated goroutine which reads HTTP2 frames from the network. Then it ++ // dispatches the frame to the corresponding stream entity. When the ++ // server preface is received, readerErrCh is closed. If an error occurs ++ // first, an error is pushed to the channel. This must be checked before ++ // returning from this function. 
++ readerErrCh := make(chan error, 1) ++ go t.reader(readerErrCh) ++ defer func() { ++ if err == nil { ++ err = <-readerErrCh ++ } ++ if err != nil { ++ t.Close(err) ++ } ++ }() + + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) +- t.Close(err) + return nil, err + } + if n != len(clientPreface) { + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) +- t.Close(err) + return nil, err + } + var ss []http2.Setting +@@ -383,14 +431,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) +- t.Close(err) + return nil, err + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) +- t.Close(err) + return nil, err + } + } +@@ -401,17 +447,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts + return nil, err + } + go func() { +- t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) +- err := t.loopy.run() +- if err != nil { +- if logger.V(logLevel) { +- logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) +- } +- } +- // Do not close the transport. Let reader goroutine handle it since +- // there might be data in the buffers. +- t.conn.Close() +- t.controlBuf.finish() ++ t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) ++ t.loopy.run() + close(t.writerDone) + }() + return t, nil +@@ -457,7 +494,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + func (t *http2Client) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, +- AuthInfo: t.authInfo, ++ AuthInfo: t.authInfo, // Can be nil + } + } + +@@ -493,9 +530,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) + } + ++ registeredCompressors := t.registeredCompressors + if callHdr.SendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) +- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) ++ // Include the outgoing compressor name when compressor is not registered ++ // via encoding.RegisterCompressor. This is possible when client uses ++ // WithCompressor dial option. ++ if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { ++ if registeredCompressors != "" { ++ registeredCompressors += "," ++ } ++ registeredCompressors += callHdr.SendCompress ++ } ++ } ++ ++ if registeredCompressors != "" { ++ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) + } + if dl, ok := ctx.Deadline(); ok { + // Send out timeout regardless its value. The server can detect timeout context by itself. 
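The createHeaderFields hunk above changes how the grpc-accept-encoding header is assembled: the list of compressors registered via encoding.RegisterCompressor is sent as-is, and the per-call compressor chosen with WithCompressor is appended only when it is not already registered. A minimal sketch of that string-building step follows; it is illustrative only and sits outside the patch, with the registered list and helper names assumed purely for this example.

package main

import (
	"fmt"
	"strings"
)

// registered mimics the compressors known via encoding.RegisterCompressor
// (hypothetical contents, assumed for this sketch only).
var registered = []string{"gzip"}

func isRegistered(name string) bool {
	for _, r := range registered {
		if r == name {
			return true
		}
	}
	return false
}

// acceptEncoding mirrors the header-building step described above: start from
// the registered list and append the per-call compressor only if unregistered.
func acceptEncoding(sendCompress string) string {
	out := strings.Join(registered, ",")
	if sendCompress != "" && !isRegistered(sendCompress) {
		if out != "" {
			out += ","
		}
		out += sendCompress
	}
	return out
}

func main() {
	fmt.Println(acceptEncoding("gzip"))   // "gzip"
	fmt.Println(acceptEncoding("custom")) // "gzip,custom"
}

With these assumptions, a registered compressor leaves the header unchanged, while an unregistered one chosen through WithCompressor is advertised alongside the registered set, which is the effect the hunk above introduces.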
+@@ -575,11 +625,15 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s + for _, c := range t.perRPCCreds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { +- if _, ok := status.FromError(err); ok { ++ if st, ok := status.FromError(err); ok { ++ // Restrict the code to the list allowed by gRFC A54. ++ if istatus.IsRestrictedControlPlaneCode(st) { ++ err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) ++ } + return nil, err + } + +- return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) ++ return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. +@@ -604,7 +658,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { +- return nil, status.Errorf(codes.Internal, "transport: %v", err) ++ if st, ok := status.FromError(err); ok { ++ // Restrict the code to the list allowed by gRFC A54. ++ if istatus.IsRestrictedControlPlaneCode(st) { ++ err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) ++ } ++ return nil, err ++ } ++ return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) + } + callAuthData = make(map[string]string, len(data)) + for k, v := range data { +@@ -616,12 +677,21 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call + return callAuthData, nil + } + +-// NewStreamError wraps an error and reports additional information. ++// NewStreamError wraps an error and reports additional information. Typically ++// NewStream errors result in transparent retry, as they mean nothing went onto ++// the wire. However, there are two notable exceptions: ++// ++// 1. If the stream headers violate the max header list size allowed by the ++// server. It's possible this could succeed on another transport, even if ++// it's unlikely, but do not transparently retry. ++// 2. If the credentials errored when requesting their headers. In this case, ++// it's possible a retry can fix the problem, but indefinitely transparently ++// retrying is not appropriate as it is likely the credentials, if they can ++// eventually succeed, would need I/O to do so. + type NewStreamError struct { + Err error + +- DoNotRetry bool +- PerformedIO bool ++ AllowTransparentRetry bool + } + + func (e NewStreamError) Error() string { +@@ -630,25 +700,23 @@ func (e NewStreamError) Error() string { + + // NewStream creates a stream and registers it into the transport as "active" + // streams. All non-nil errors returned will be *NewStreamError. +-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +- defer func() { +- if err != nil { +- nse, ok := err.(*NewStreamError) +- if !ok { +- nse = &NewStreamError{Err: err} +- } +- if len(t.perRPCCreds) > 0 || callHdr.Creds != nil { +- // We may have performed I/O in the per-RPC creds callback, so do not +- // allow transparent retry. +- nse.PerformedIO = true +- } +- err = nse +- } +- }() ++func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { + ctx = peer.NewContext(ctx, t.getPeer()) ++ ++ // ServerName field of the resolver returned address takes precedence over ++ // Host field of CallHdr to determine the :authority header. 
This is because, ++ // the ServerName field takes precedence for server authentication during ++ // TLS handshake, and the :authority header should match the value used ++ // for server authentication. ++ if t.address.ServerName != "" { ++ newCallHdr := *callHdr ++ newCallHdr.Host = t.address.ServerName ++ callHdr = &newCallHdr ++ } ++ + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { +- return nil, err ++ return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { +@@ -670,17 +738,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea + endStream: false, + initStream: func(id uint32) error { + t.mu.Lock() +- if state := t.state; state != reachable { ++ // TODO: handle transport closure in loopy instead and remove this ++ // initStream is never called when transport is draining. ++ if t.state == closing { + t.mu.Unlock() +- // Do a quick cleanup. +- err := error(errStreamDrain) +- if state == closing { +- err = ErrConnClosing +- } +- cleanup(err) +- return err ++ cleanup(ErrConnClosing) ++ return ErrConnClosing + } +- t.activeStreams[id] = s + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) +@@ -697,6 +761,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea + } + firstTry := true + var ch chan struct{} ++ transportDrainRequired := false + checkForStreamQuota := func(it interface{}) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. + if firstTry { +@@ -712,8 +777,20 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 ++ ++ // Drain client transport if nextID > MaxStreamID which signals gRPC that ++ // the connection is closed and a new one must be created for subsequent RPCs. ++ transportDrainRequired = t.nextID > MaxStreamID ++ + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} ++ t.mu.Lock() ++ if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). ++ t.mu.Unlock() ++ return false // Don't create a stream if the transport is already closed. ++ } ++ t.activeStreams[s.id] = s ++ t.mu.Unlock() + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: +@@ -739,52 +816,56 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea + } + for { + success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { +- if !checkForStreamQuota(it) { +- return false +- } +- if !checkForHeaderListSize(it) { +- return false +- } +- return true ++ return checkForHeaderListSize(it) && checkForStreamQuota(it) + }, hdr) + if err != nil { +- return nil, err ++ // Connection closed. 
++ return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} + } + if success { + break + } + if hdrListSizeErr != nil { +- return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} ++ return nil, &NewStreamError{Err: hdrListSizeErr} + } + firstTry = false + select { + case <-ch: +- case <-s.ctx.Done(): +- return nil, ContextErr(s.ctx.Err()) ++ case <-ctx.Done(): ++ return nil, &NewStreamError{Err: ContextErr(ctx.Err())} + case <-t.goAway: +- return nil, errStreamDrain ++ return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} + case <-t.ctx.Done(): +- return nil, ErrConnClosing ++ return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} + } + } +- if t.statsHandler != nil { ++ if len(t.statsHandlers) != 0 { + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } +- // Note: The header fields are compressed with hpack after this call returns. +- // No WireLength field is set here. +- outHeader := &stats.OutHeader{ +- Client: true, +- FullMethod: callHdr.Method, +- RemoteAddr: t.remoteAddr, +- LocalAddr: t.localAddr, +- Compression: callHdr.SendCompress, +- Header: header, ++ for _, sh := range t.statsHandlers { ++ // Note: The header fields are compressed with hpack after this call returns. ++ // No WireLength field is set here. ++ // Note: Creating a new stats object to prevent pollution. ++ outHeader := &stats.OutHeader{ ++ Client: true, ++ FullMethod: callHdr.Method, ++ RemoteAddr: t.remoteAddr, ++ LocalAddr: t.localAddr, ++ Compression: callHdr.SendCompress, ++ Header: header, ++ } ++ sh.HandleRPC(s.ctx, outHeader) ++ } ++ } ++ if transportDrainRequired { ++ if t.logger.V(logLevel) { ++ t.logger.Infof("Draining transport: t.nextID > MaxStreamID") + } +- t.statsHandler.HandleRPC(s.ctx, outHeader) ++ t.GracefulClose() + } + return s, nil + } +@@ -867,20 +948,21 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. + // Close kicks off the shutdown process of the transport. This should be called + // only once on a transport. Once it is called, the transport should not be + // accessed any more. +-// +-// This method blocks until the addrConn that initiated this transport is +-// re-connected. This happens because t.onClose() begins reconnect logic at the +-// addrConn level and blocks until the addrConn is successfully connected. + func (t *http2Client) Close(err error) { + t.mu.Lock() +- // Make sure we only Close once. ++ // Make sure we only close once. + if t.state == closing { + t.mu.Unlock() + return + } +- // Call t.onClose before setting the state to closing to prevent the client +- // from attempting to create new streams ASAP. +- t.onClose() ++ if t.logger.V(logLevel) { ++ t.logger.Infof("Closing: %v", err) ++ } ++ // Call t.onClose ASAP to prevent the client from attempting to create new ++ // streams. ++ if t.state != draining { ++ t.onClose(GoAwayInvalid) ++ } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil +@@ -893,9 +975,7 @@ func (t *http2Client) Close(err error) { + t.controlBuf.finish() + t.cancel() + t.conn.Close() +- if channelz.IsOn() { +- channelz.RemoveEntry(t.channelzID) +- } ++ channelz.RemoveEntry(t.channelzID) + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. 
+ _, goAwayDebugMessage := t.GetGoAwayReason() +@@ -912,11 +992,11 @@ func (t *http2Client) Close(err error) { + for _, s := range streams { + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) + } +- if t.statsHandler != nil { ++ for _, sh := range t.statsHandlers { + connEnd := &stats.ConnEnd{ + Client: true, + } +- t.statsHandler.HandleConn(t.ctx, connEnd) ++ sh.HandleConn(t.ctx, connEnd) + } + } + +@@ -932,11 +1012,15 @@ func (t *http2Client) GracefulClose() { + t.mu.Unlock() + return + } ++ if t.logger.V(logLevel) { ++ t.logger.Infof("GracefulClose called") ++ } ++ t.onClose(GoAwayInvalid) + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { +- t.Close(ErrConnClosing) ++ t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) + return + } + t.controlBuf.put(&incomingGoAway{}) +@@ -996,13 +1080,13 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { + // for the transport and the stream based on the current bdp + // estimation. + func (t *http2Client) updateFlowControl(n uint32) { +- t.mu.Lock() +- for _, s := range t.activeStreams { +- s.fc.newLimit(n) +- } +- t.mu.Unlock() + updateIWS := func(interface{}) bool { + t.initialWindowSize = int32(n) ++ t.mu.Lock() ++ for _, s := range t.activeStreams { ++ s.fc.newLimit(n) ++ } ++ t.mu.Unlock() + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) +@@ -1077,7 +1161,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) { + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. +- if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { ++ if f.StreamEnded() { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } + } +@@ -1093,8 +1177,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { +- if logger.V(logLevel) { +- logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) ++ if t.logger.V(logLevel) { ++ t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) + } + statusCode = codes.Unknown + } +@@ -1176,10 +1260,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + t.mu.Unlock() + return + } +- if f.ErrCode == http2.ErrCodeEnhanceYourCalm { +- if logger.V(logLevel) { +- logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") +- } ++ if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { ++ // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug ++ // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is ++ // enabled by default and double the configure KEEPALIVE_TIME used for new connections ++ // on that channel. ++ logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".") + } + id := f.LastStreamID + if id > 0 && id%2 == 0 { +@@ -1208,12 +1294,14 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + default: + t.setGoAwayReason(f) + close(t.goAway) +- t.controlBuf.put(&incomingGoAway{}) ++ defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held. 
+ // Notify the clientconn about the GOAWAY before we set the state to + // draining, to allow the client to stop attempting to create streams + // before disallowing new streams on this connection. +- t.onGoAway(t.goAwayReason) +- t.state = draining ++ if t.state != draining { ++ t.onClose(t.goAwayReason) ++ t.state = draining ++ } + } + // All streams with IDs greater than the GoAwayId + // and smaller than the previous GoAway ID should be killed. +@@ -1221,24 +1309,35 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + if upperLimit == 0 { // This is the first GoAway Frame. + upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. + } ++ ++ t.prevGoAwayID = id ++ if len(t.activeStreams) == 0 { ++ t.mu.Unlock() ++ t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) ++ return ++ } ++ ++ streamsToClose := make([]*Stream, 0) + for streamID, stream := range t.activeStreams { + if streamID > id && streamID <= upperLimit { + // The stream was unprocessed by the server. +- atomic.StoreUint32(&stream.unprocessed, 1) +- t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) ++ if streamID > id && streamID <= upperLimit { ++ atomic.StoreUint32(&stream.unprocessed, 1) ++ streamsToClose = append(streamsToClose, stream) ++ } + } + } +- t.prevGoAwayID = id +- active := len(t.activeStreams) + t.mu.Unlock() +- if active == 0 { +- t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) ++ // Called outside t.mu because closeStream can take controlBuf's mu, which ++ // could induce deadlock and is not allowed. ++ for _, stream := range streamsToClose { ++ t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + } + } + + // setGoAwayReason sets the value of t.goAwayReason based + // on the GoAway frame received. +-// It expects a lock on transport's mutext to be held by ++// It expects a lock on transport's mutex to be held by + // the caller. 
+ func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { + t.goAwayReason = GoAwayNoReason +@@ -1407,26 +1506,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + } + + isHeader := false +- defer func() { +- if t.statsHandler != nil { +- if isHeader { +- inHeader := &stats.InHeader{ +- Client: true, +- WireLength: int(frame.Header().Length), +- Header: s.header.Copy(), +- Compression: s.recvCompress, +- } +- t.statsHandler.HandleRPC(s.ctx, inHeader) +- } else { +- inTrailer := &stats.InTrailer{ +- Client: true, +- WireLength: int(frame.Header().Length), +- Trailer: s.trailer.Copy(), +- } +- t.statsHandler.HandleRPC(s.ctx, inTrailer) +- } +- } +- }() + + // If headerChan hasn't been closed yet + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { +@@ -1448,6 +1527,25 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + close(s.headerChan) + } + ++ for _, sh := range t.statsHandlers { ++ if isHeader { ++ inHeader := &stats.InHeader{ ++ Client: true, ++ WireLength: int(frame.Header().Length), ++ Header: metadata.MD(mdata).Copy(), ++ Compression: s.recvCompress, ++ } ++ sh.HandleRPC(s.ctx, inHeader) ++ } else { ++ inTrailer := &stats.InTrailer{ ++ Client: true, ++ WireLength: int(frame.Header().Length), ++ Trailer: metadata.MD(mdata).Copy(), ++ } ++ sh.HandleRPC(s.ctx, inTrailer) ++ } ++ } ++ + if !endStream { + return + } +@@ -1461,33 +1559,35 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) + } + +-// reader runs as a separate goroutine in charge of reading data from network +-// connection. +-// +-// TODO(zhaoq): currently one reader per transport. Investigate whether this is +-// optimal. +-// TODO(zhaoq): Check the validity of the incoming frame sequence. +-func (t *http2Client) reader() { +- defer close(t.readerDone) +- // Check the validity of server preface. ++// readServerPreface reads and handles the initial settings frame from the ++// server. ++func (t *http2Client) readServerPreface() error { + frame, err := t.framer.fr.ReadFrame() + if err != nil { +- err = connectionErrorf(true, err, "error reading server preface: %v", err) +- t.Close(err) // this kicks off resetTransport, so must be last before return +- return +- } +- t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) +- if t.keepaliveEnabled { +- atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) ++ return connectionErrorf(true, err, "error reading server preface: %v", err) + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { +- // this kicks off resetTransport, so must be last before return +- t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) +- return ++ return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) + } +- t.onPrefaceReceipt() + t.handleSettings(sf, true) ++ return nil ++} ++ ++// reader verifies the server preface and reads all subsequent data from ++// network connection. If the server preface is not read successfully, an ++// error is pushed to errCh; otherwise errCh is closed with no error. 
++func (t *http2Client) reader(errCh chan<- error) { ++ defer close(t.readerDone) ++ ++ if err := t.readServerPreface(); err != nil { ++ errCh <- err ++ return ++ } ++ close(errCh) ++ if t.keepaliveEnabled { ++ atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) ++ } + + // loop to keep reading incoming messages on this transport. + for { +@@ -1553,7 +1653,7 @@ func minTime(a, b time.Duration) time.Duration { + return b + } + +-// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. ++// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. + func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + // True iff a ping has been sent, and no data has been received since then. +@@ -1690,3 +1790,9 @@ func (t *http2Client) getOutFlowWindow() int64 { + return -2 + } + } ++ ++func (t *http2Client) stateForTesting() transportState { ++ t.mu.Lock() ++ defer t.mu.Unlock() ++ return t.state ++} +diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go +index e3799d5..ec4eef2 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go ++++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go +@@ -35,12 +35,16 @@ import ( + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" ++ "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" ++ "google.golang.org/grpc/internal/pretty" ++ "google.golang.org/grpc/internal/syscall" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" ++ "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +@@ -52,10 +56,10 @@ import ( + var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. +- ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") ++ ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. +- ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") ++ ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") + ) + + // serverConnectionCounter counts the number of connections a server has seen +@@ -73,7 +77,6 @@ type http2Server struct { + writerDone chan struct{} // sync point to enable testing. + remoteAddr net.Addr + localAddr net.Addr +- maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer +@@ -83,7 +86,7 @@ type http2Server struct { + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow +- stats stats.Handler ++ stats []stats.Handler + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + // Keepalive enforcement policy. 
+@@ -102,13 +105,13 @@ type http2Server struct { + + mu sync.Mutex // guard the following + +- // drainChan is initialized when Drain() is called the first time. +- // After which the server writes out the first GoAway(with ID 2^31-1) frame. +- // Then an independent goroutine will be launched to later send the second GoAway. +- // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. +- // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is +- // already underway. +- drainChan chan struct{} ++ // drainEvent is initialized when Drain() is called the first time. After ++ // which the server writes out the first GoAway(with ID 2^31-1) frame. Then ++ // an independent goroutine will be launched to later send the second ++ // GoAway. During this time we don't want to write another first GoAway(with ++ // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is ++ // already initialized since draining is already underway. ++ drainEvent *grpcsync.Event + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. +@@ -118,21 +121,44 @@ type http2Server struct { + idle time.Time + + // Fields below are for channelz metric collection. +- channelzID int64 // channelz unique identification number ++ channelzID *channelz.Identifier + czData *channelzData + bufferPool *bufferPool + + connectionID uint64 ++ ++ // maxStreamMu guards the maximum stream ID ++ // This lock may not be taken if mu is already held. ++ maxStreamMu sync.Mutex ++ maxStreamID uint32 // max stream ID ever seen ++ ++ logger *grpclog.PrefixLogger + } + + // NewServerTransport creates a http2 transport with conn and configuration + // options from config. + // + // It returns a non-nil transport and a nil error on success. On failure, it +-// returns a non-nil transport and a nil-error. For a special case where the ++// returns a nil transport and a non-nil error. For a special case where the + // underlying conn gets closed before the client preface could be read, it + // returns a nil transport and a nil error. + func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { ++ var authInfo credentials.AuthInfo ++ rawConn := conn ++ if config.Credentials != nil { ++ var err error ++ conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) ++ if err != nil { ++ // ErrConnDispatched means that the connection was dispatched away ++ // from gRPC; those connections should be left open. io.EOF means ++ // the connection was closed before handshaking completed, which can ++ // happen naturally from probers. Return these errors directly. ++ if err == credentials.ErrConnDispatched || err == io.EOF { ++ return nil, err ++ } ++ return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) ++ } ++ } + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize +@@ -145,15 +171,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} +- // TODO(zhaoq): Have a better way to signal "no limit" because 0 is +- // permitted in the HTTP2 spec. 
+- maxStreams := config.MaxStreams +- if maxStreams == 0 { +- maxStreams = math.MaxUint32 +- } else { ++ if config.MaxStreams != math.MaxUint32 { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, +- Val: maxStreams, ++ Val: config.MaxStreams, + }) + } + dynamicWindow := true +@@ -211,27 +232,33 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } ++ if kp.Time != infinity { ++ if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { ++ return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) ++ } ++ } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } ++ + done := make(chan struct{}) + t := &http2Server{ +- ctx: context.Background(), ++ ctx: setConnection(context.Background(), rawConn), + done: done, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), +- authInfo: config.AuthInfo, ++ authInfo: authInfo, + framer: framer, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), +- maxStreams: maxStreams, ++ maxStreams: config.MaxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), +- stats: config.StatsHandler, ++ stats: config.StatsHandlers, + kp: kp, + idle: time.Now(), + kep: kep, +@@ -239,6 +266,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + czData: new(channelzData), + bufferPool: newBufferPool(), + } ++ t.logger = prefixLoggerForServerTransport(t) ++ // Add peer information to the http2server context. ++ t.ctx = peer.NewContext(t.ctx, t.getPeer()) ++ + t.controlBuf = newControlBuffer(t.done) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ +@@ -246,25 +277,25 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + updateFlowControl: t.updateFlowControl, + } + } +- if t.stats != nil { +- t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ ++ for _, sh := range t.stats { ++ t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} +- t.stats.HandleConn(t.ctx, connBegin) ++ sh.HandleConn(t.ctx, connBegin) + } +- if channelz.IsOn() { +- t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) ++ t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) ++ if err != nil { ++ return nil, err + } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) +- + t.framer.writer.Flush() + + defer func() { + if err != nil { +- t.Close() ++ t.Close(err) + } + }() + +@@ -273,10 +304,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is +- // closed immediately by the latter. Skipping the error here will help +- // reduce log clutter. ++ // closed immediately by the latter. Returning io.EOF here allows the ++ // grpc server implementation to recognize this scenario and suppress ++ // logging to reduce spam. 
+ if err == io.EOF { +- return nil, nil ++ return nil, io.EOF + } + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } +@@ -299,23 +331,22 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + t.handleSettings(sf) + + go func() { +- t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) ++ t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler +- if err := t.loopy.run(); err != nil { +- if logger.V(logLevel) { +- logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) +- } +- } +- t.conn.Close() +- t.controlBuf.finish() ++ t.loopy.run() + close(t.writerDone) + }() + go t.keepalive() + return t, nil + } + +-// operateHeader takes action on the decoded headers. +-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { ++// operateHeaders takes action on the decoded headers. Returns an error if fatal ++// error encountered and transport needs to close, otherwise returns nil. ++func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { ++ // Acquire max stream ID lock for entire duration ++ t.maxStreamMu.Lock() ++ defer t.maxStreamMu.Unlock() ++ + streamID := frame.Header().StreamID + + // frame.Truncated is set to true when framer detects that the current header +@@ -327,9 +358,15 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) +- return false ++ return nil + } + ++ if streamID%2 != 1 || streamID <= t.maxStreamID { ++ // illegal gRPC stream id. ++ return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) ++ } ++ t.maxStreamID = streamID ++ + buf := newRecvBuffer() + s := &Stream{ + id: streamID, +@@ -337,15 +374,15 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + } +- + var ( +- // If a gRPC Response-Headers has already been received, then it means +- // that the peer is speaking gRPC and we are in gRPC mode. 
+- isGRPC = false +- mdata = make(map[string][]string) +- httpMethod string +- // headerError is set if an error is encountered while parsing the headers +- headerError bool ++ // if false, content-type was missing or invalid ++ isGRPC = false ++ contentType = "" ++ mdata = make(metadata.MD, len(frame.Fields)) ++ httpMethod string ++ // these are set if an error is encountered while parsing the headers ++ protocolError bool ++ headerError *status.Status + + timeoutSet bool + timeout time.Duration +@@ -356,11 +393,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { ++ contentType = hf.Value + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true ++ ++ case "grpc-accept-encoding": ++ mdata[hf.Name] = append(mdata[hf.Name], hf.Value) ++ if hf.Value == "" { ++ continue ++ } ++ compressors := hf.Value ++ if s.clientAdvertisedCompressors != "" { ++ compressors = s.clientAdvertisedCompressors + "," + compressors ++ } ++ s.clientAdvertisedCompressors = compressors + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": +@@ -371,30 +420,90 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { +- headerError = true ++ headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) + } ++ // "Transports must consider requests containing the Connection header ++ // as malformed." - A41 ++ case "connection": ++ if t.logger.V(logLevel) { ++ t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") ++ } ++ protocolError = true + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { +- headerError = true +- logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) ++ headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) ++ t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + +- if !isGRPC || headerError { ++ // "If multiple Host headers or multiple :authority headers are present, the ++ // request must be rejected with an HTTP status code 400 as required by Host ++ // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM ++ // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 ++ // error, this takes precedence over a client not speaking gRPC. 
++ if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { ++ errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) ++ if t.logger.V(logLevel) { ++ t.logger.Infof("Aborting the stream early: %v", errMsg) ++ } ++ t.controlBuf.put(&earlyAbortStream{ ++ httpStatus: http.StatusBadRequest, ++ streamID: streamID, ++ contentSubtype: s.contentSubtype, ++ status: status.New(codes.Internal, errMsg), ++ rst: !frame.StreamEnded(), ++ }) ++ return nil ++ } ++ ++ if protocolError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) +- return false ++ return nil ++ } ++ if !isGRPC { ++ t.controlBuf.put(&earlyAbortStream{ ++ httpStatus: http.StatusUnsupportedMediaType, ++ streamID: streamID, ++ contentSubtype: s.contentSubtype, ++ status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), ++ rst: !frame.StreamEnded(), ++ }) ++ return nil ++ } ++ if headerError != nil { ++ t.controlBuf.put(&earlyAbortStream{ ++ httpStatus: http.StatusBadRequest, ++ streamID: streamID, ++ contentSubtype: s.contentSubtype, ++ status: headerError, ++ rst: !frame.StreamEnded(), ++ }) ++ return nil ++ } ++ ++ // "If :authority is missing, Host must be renamed to :authority." - A41 ++ if len(mdata[":authority"]) == 0 { ++ // No-op if host isn't present, no eventual :authority header is a valid ++ // RPC. ++ if host, ok := mdata["host"]; ok { ++ mdata[":authority"] = host ++ delete(mdata, "host") ++ } ++ } else { ++ // "If :authority is present, Host must be discarded" - A41 ++ delete(mdata, "host") + } + + if frame.StreamEnded() { +@@ -406,14 +515,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } +- pr := &peer.Peer{ +- Addr: t.remoteAddr, +- } +- // Attach Auth info if there is any. +- if t.authInfo != nil { +- pr.AuthInfo = t.authInfo +- } +- s.ctx = peer.NewContext(s.ctx, pr) ++ + // Attach the received metadata to the context. + if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) +@@ -428,7 +530,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + if t.state != reachable { + t.mu.Unlock() + s.cancel() +- return false ++ return nil + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() +@@ -439,49 +541,43 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + onWrite: func() {}, + }) + s.cancel() +- return false +- } +- if streamID%2 != 1 || streamID <= t.maxStreamID { +- t.mu.Unlock() +- // illegal gRPC stream id. 
+- if logger.V(logLevel) { +- logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) +- } +- s.cancel() +- return true ++ return nil + } +- t.maxStreamID = streamID + if httpMethod != http.MethodPost { + t.mu.Unlock() +- if logger.V(logLevel) { +- logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) ++ errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) ++ if t.logger.V(logLevel) { ++ t.logger.Infof("Aborting the stream early: %v", errMsg) + } +- t.controlBuf.put(&cleanupStream{ +- streamID: streamID, +- rst: true, +- rstCode: http2.ErrCodeProtocol, +- onWrite: func() {}, ++ t.controlBuf.put(&earlyAbortStream{ ++ httpStatus: 405, ++ streamID: streamID, ++ contentSubtype: s.contentSubtype, ++ status: status.New(codes.Internal, errMsg), ++ rst: !frame.StreamEnded(), + }) + s.cancel() +- return false ++ return nil + } + if t.inTapHandle != nil { + var err error + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + t.mu.Unlock() +- if logger.V(logLevel) { +- logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) ++ if t.logger.V(logLevel) { ++ t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) + } + stat, ok := status.FromError(err) + if !ok { + stat = status.New(codes.PermissionDenied, err.Error()) + } + t.controlBuf.put(&earlyAbortStream{ ++ httpStatus: 200, + streamID: s.id, + contentSubtype: s.contentSubtype, + status: stat, ++ rst: !frame.StreamEnded(), + }) +- return false ++ return nil + } + } + t.activeStreams[streamID] = s +@@ -497,17 +593,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) +- if t.stats != nil { +- s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) ++ for _, sh := range t.stats { ++ s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), +- Header: metadata.MD(mdata).Copy(), ++ Header: mdata.Copy(), + } +- t.stats.HandleRPC(s.ctx, inHeader) ++ sh.HandleRPC(s.ctx, inHeader) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) +@@ -528,7 +624,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + wq: s.wq, + }) + handle(s) +- return false ++ return nil + } + + // HandleStreams receives incoming streams using the given handler. This is +@@ -542,8 +638,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + if err != nil { + if se, ok := err.(http2.StreamError); ok { +- if logger.V(logLevel) { +- logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) ++ if t.logger.V(logLevel) { ++ t.logger.Warningf("Encountered http2.StreamError: %v", se) + } + t.mu.Lock() + s := t.activeStreams[se.StreamID] +@@ -561,19 +657,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. 
+ continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { +- t.Close() ++ t.Close(err) + return + } +- if logger.V(logLevel) { +- logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) +- } +- t.Close() ++ t.Close(err) + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: +- if t.operateHeaders(frame, handle, traceCtx) { +- t.Close() ++ if err := t.operateHeaders(frame, handle, traceCtx); err != nil { ++ t.Close(err) + break + } + case *http2.DataFrame: +@@ -589,8 +682,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: +- if logger.V(logLevel) { +- logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) ++ if t.logger.V(logLevel) { ++ t.logger.Infof("Received unsupported frame type %T", frame) + } + } + } +@@ -717,7 +810,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { + s.write(recvMsg{buffer: buffer}) + } + } +- if f.Header().Flags.Has(http2.FlagDataEndStream) { ++ if f.StreamEnded() { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) +@@ -774,8 +867,8 @@ const ( + + func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { +- if f.Data == goAwayPing.data && t.drainChan != nil { +- close(t.drainChan) ++ if f.Data == goAwayPing.data && t.drainEvent != nil { ++ t.drainEvent.Fire() + return + } + // Maybe it's a BDP ping. +@@ -817,10 +910,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { + + if t.pingStrikes > maxPingStrikes { + // Send goaway and close the connection. +- if logger.V(logLevel) { +- logger.Errorf("transport: Got too many pings from the client, closing the connection.") +- } +- t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) ++ t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) + } + } + +@@ -852,8 +942,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { +- if logger.V(logLevel) { +- logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) ++ if t.logger.V(logLevel) { ++ t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + } + return false + } +@@ -861,12 +951,27 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { + return true + } + ++func (t *http2Server) streamContextErr(s *Stream) error { ++ select { ++ case <-t.done: ++ return ErrConnClosing ++ default: ++ } ++ return ContextErr(s.ctx.Err()) ++} ++ + // WriteHeader sends the header metadata md back to the client. 
+ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { +- if s.updateHeaderSent() || s.getState() == streamDone { ++ s.hdrMu.Lock() ++ defer s.hdrMu.Unlock() ++ if s.getState() == streamDone { ++ return t.streamContextErr(s) ++ } ++ ++ if s.updateHeaderSent() { + return ErrIllegalHeaderWrite + } +- s.hdrMu.Lock() ++ + if md.Len() > 0 { + if s.header.Len() > 0 { + s.header = metadata.Join(s.header, md) +@@ -875,10 +980,8 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { + } + } + if err := t.writeHeaderLocked(s); err != nil { +- s.hdrMu.Unlock() +- return err ++ return status.Convert(err).Err() + } +- s.hdrMu.Unlock() + return nil + } + +@@ -909,14 +1012,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { + t.closeStream(s, true, http2.ErrCodeInternal, false) + return ErrHeaderListSizeLimitViolation + } +- if t.stats != nil { ++ for _, sh := range t.stats { + // Note: Headers are compressed with hpack after this call returns. + // No WireLength field is set here. + outHeader := &stats.OutHeader{ + Header: s.header.Copy(), + Compression: s.sendCompress, + } +- t.stats.HandleRPC(s.Context(), outHeader) ++ sh.HandleRPC(s.Context(), outHeader) + } + return nil + } +@@ -926,17 +1029,19 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { + // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early + // OK is adopted. + func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { ++ s.hdrMu.Lock() ++ defer s.hdrMu.Unlock() ++ + if s.getState() == streamDone { + return nil + } +- s.hdrMu.Lock() ++ + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. + if !s.updateHeaderSent() { // No headers have been sent. + if len(s.header) > 0 { // Send a separate header frame. + if err := t.writeHeaderLocked(s); err != nil { +- s.hdrMu.Unlock() + return err + } + } else { // Send a trailer only response. +@@ -951,7 +1056,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. +- logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) ++ t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) + } else { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + } +@@ -965,7 +1070,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + endStream: true, + onWrite: t.setResetPingStrikes, + } +- s.hdrMu.Unlock() ++ + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + if !success { + if err != nil { +@@ -977,10 +1082,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + // Send a RST_STREAM after the trailers if the client has not already half-closed. + rst := s.getState() == streamActive + t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) +- if t.stats != nil { ++ for _, sh := range t.stats { + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
+- t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ ++ sh.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } +@@ -992,23 +1097,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if !s.isHeaderSent() { // Headers haven't been written yet. + if err := t.WriteHeader(s, nil); err != nil { +- if _, ok := err.(ConnectionError); ok { +- return err +- } +- // TODO(mmukhi, dfawley): Make sure this is the right code to return. +- return status.Errorf(codes.Internal, "transport: %v", err) ++ return err + } + } else { + // Writing headers checks for this condition. + if s.getState() == streamDone { +- // TODO(mmukhi, dfawley): Should the server write also return io.EOF? +- s.cancel() +- select { +- case <-t.done: +- return ErrConnClosing +- default: +- } +- return ContextErr(s.ctx.Err()) ++ return t.streamContextErr(s) + } + } + df := &dataFrame{ +@@ -1018,12 +1112,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e + onEachWrite: t.setResetPingStrikes, + } + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { +- select { +- case <-t.done: +- return ErrConnClosing +- default: +- } +- return ContextErr(s.ctx.Err()) ++ return t.streamContextErr(s) + } + return t.controlBuf.put(df) + } +@@ -1072,20 +1161,20 @@ func (t *http2Server) keepalive() { + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. +- t.Drain() ++ t.Drain("max_idle") + return + } + idleTimer.Reset(val) + case <-ageTimer.C: +- t.Drain() ++ t.Drain("max_age") + ageTimer.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-ageTimer.C: + // Close the connection after grace period. +- if logger.V(logLevel) { +- logger.Infof("transport: closing server transport due to maximum connection age.") ++ if t.logger.V(logLevel) { ++ t.logger.Infof("Closing server transport due to maximum connection age") + } +- t.Close() ++ t.controlBuf.put(closeConnection{}) + case <-t.done: + } + return +@@ -1101,10 +1190,7 @@ func (t *http2Server) keepalive() { + continue + } + if outstandingPing && kpTimeoutLeft <= 0 { +- if logger.V(logLevel) { +- logger.Infof("transport: closing server transport due to idleness.") +- } +- t.Close() ++ t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) + return + } + if !outstandingPing { +@@ -1131,40 +1217,37 @@ func (t *http2Server) keepalive() { + // Close starts shutting down the http2Server transport. + // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This + // could cause some resource issue. Revisit this later. +-func (t *http2Server) Close() { ++func (t *http2Server) Close(err error) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } ++ if t.logger.V(logLevel) { ++ t.logger.Infof("Closing: %v", err) ++ } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + close(t.done) +- if err := t.conn.Close(); err != nil && logger.V(logLevel) { +- logger.Infof("transport: error closing conn during Close: %v", err) +- } +- if channelz.IsOn() { +- channelz.RemoveEntry(t.channelzID) ++ if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { ++ t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) + } ++ channelz.RemoveEntry(t.channelzID) + // Cancel all active streams. 
+ for _, s := range streams { + s.cancel() + } +- if t.stats != nil { ++ for _, sh := range t.stats { + connEnd := &stats.ConnEnd{} +- t.stats.HandleConn(t.ctx, connEnd) ++ sh.HandleConn(t.ctx, connEnd) + } + } + + // deleteStream deletes the stream s from transport's active streams. + func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { +- // In case stream sending and receiving are invoked in separate +- // goroutines (e.g., bi-directional streaming), cancel needs to be +- // called to interrupt the potential blocking on other goroutines. +- s.cancel() + + t.mu.Lock() + if _, ok := t.activeStreams[s.id]; ok { +@@ -1186,6 +1269,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { + + // finishStream closes the stream and puts the trailing headerFrame into controlbuf. + func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { ++ // In case stream sending and receiving are invoked in separate ++ // goroutines (e.g., bi-directional streaming), cancel needs to be ++ // called to interrupt the potential blocking on other goroutines. ++ s.cancel() ++ + oldState := s.swapState(streamDone) + if oldState == streamDone { + // If the stream was already done, return. +@@ -1205,6 +1293,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h + + // closeStream clears the footprint of a stream when the stream is not needed any more. + func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { ++ // In case stream sending and receiving are invoked in separate ++ // goroutines (e.g., bi-directional streaming), cancel needs to be ++ // called to interrupt the potential blocking on other goroutines. ++ s.cancel() ++ + s.swapState(streamDone) + t.deleteStream(s, eosReceived) + +@@ -1220,14 +1313,14 @@ func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr + } + +-func (t *http2Server) Drain() { ++func (t *http2Server) Drain(debugData string) { + t.mu.Lock() + defer t.mu.Unlock() +- if t.drainChan != nil { ++ if t.drainEvent != nil { + return + } +- t.drainChan = make(chan struct{}) +- t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) ++ t.drainEvent = grpcsync.NewEvent() ++ t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) + } + + var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} +@@ -1235,39 +1328,41 @@ var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + // Handles outgoing GoAway and returns true if loopy needs to put itself + // in draining mode. + func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { ++ t.maxStreamMu.Lock() + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() ++ t.maxStreamMu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } +- sid := t.maxStreamID + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining ++ sid := t.maxStreamID ++ retErr := g.closeConn + if len(t.activeStreams) == 0 { +- g.closeConn = true ++ retErr = errors.New("second GOAWAY written and no active streams left to process") + } + t.mu.Unlock() ++ t.maxStreamMu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } +- if g.closeConn { +- // Abruptly close the connection following the GoAway (via +- // loopywriter). But flush out what's inside the buffer first. 
+- t.framer.writer.Flush() +- return false, fmt.Errorf("transport: Connection closing") ++ if retErr != nil { ++ return false, retErr + } + return true, nil + } + t.mu.Unlock() ++ t.maxStreamMu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. +- if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { ++ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { +@@ -1277,7 +1372,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { +- case <-t.drainChan: ++ case <-t.drainEvent.Done(): + case <-timer.C: + case <-t.done: + return +@@ -1336,6 +1431,13 @@ func (t *http2Server) getOutFlowWindow() int64 { + } + } + ++func (t *http2Server) getPeer() *peer.Peer { ++ return &peer.Peer{ ++ Addr: t.remoteAddr, ++ AuthInfo: t.authInfo, // Can be nil ++ } ++} ++ + func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 +@@ -1345,3 +1447,18 @@ func getJitter(v time.Duration) time.Duration { + j := grpcrand.Int63n(2*r) - r + return time.Duration(j) + } ++ ++type connectionKey struct{} ++ ++// GetConnection gets the connection from the context. ++func GetConnection(ctx context.Context) net.Conn { ++ conn, _ := ctx.Value(connectionKey{}).(net.Conn) ++ return conn ++} ++ ++// SetConnection adds the connection to the context to be able to get ++// information about the destination ip and port for an incoming RPC. This also ++// allows any unary or streaming interceptors to see the connection. ++func setConnection(ctx context.Context, conn net.Conn) context.Context { ++ return context.WithValue(ctx, connectionKey{}, conn) ++} +diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go +index d8247bc..19cbb18 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go ++++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go +@@ -20,8 +20,8 @@ package transport + + import ( + "bufio" +- "bytes" + "encoding/base64" ++ "errors" + "fmt" + "io" + "math" +@@ -38,21 +38,14 @@ import ( + "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +- "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + ) + + const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame +- // http://http2.github.io/http2-spec/#SettingValues ++ // https://httpwg.org/specs/rfc7540.html#SettingValues + http2InitHeaderTableSize = 4096 +- // baseContentType is the base content-type for gRPC. This is a valid +- // content-type on it's own, but can also include a content-subtype such as +- // "proto" as a suffix after "+" or ";". See +- // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +- // for more details. +- + ) + + var ( +@@ -92,7 +85,6 @@ var ( + // 504 Gateway timeout - UNAVAILABLE. 
+ http.StatusGatewayTimeout: codes.Unavailable, + } +- logger = grpclog.Component("transport") + ) + + // isReservedHeader checks whether hdr belongs to HTTP2 headers +@@ -257,13 +249,13 @@ func encodeGrpcMessage(msg string) string { + } + + func encodeGrpcMessageUnchecked(msg string) string { +- var buf bytes.Buffer ++ var sb strings.Builder + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. +- buf.WriteString(fmt.Sprintf("%%%02X", b)) ++ fmt.Fprintf(&sb, "%%%02X", b) + continue + } + +@@ -272,14 +264,14 @@ func encodeGrpcMessageUnchecked(msg string) string { + // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { +- buf.WriteByte(b) ++ sb.WriteByte(b) + } else { +- buf.WriteString(fmt.Sprintf("%%%02X", b)) ++ fmt.Fprintf(&sb, "%%%02X", b) + } + } + msg = msg[size:] + } +- return buf.String() ++ return sb.String() + } + + // decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +@@ -297,23 +289,23 @@ func decodeGrpcMessage(msg string) string { + } + + func decodeGrpcMessageUnchecked(msg string) string { +- var buf bytes.Buffer ++ var sb strings.Builder + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { +- buf.WriteByte(c) ++ sb.WriteByte(c) + } else { +- buf.WriteByte(byte(parsed)) ++ sb.WriteByte(byte(parsed)) + i += 2 + } + } else { +- buf.WriteByte(c) ++ sb.WriteByte(c) + } + } +- return buf.String() ++ return sb.String() + } + + type bufWriter struct { +@@ -322,8 +314,6 @@ type bufWriter struct { + batchSize int + conn net.Conn + err error +- +- onFlush func() + } + + func newBufWriter(conn net.Conn, batchSize int) *bufWriter { +@@ -339,7 +329,8 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. +- return w.conn.Write(b) ++ n, err = w.conn.Write(b) ++ return n, toIOError(err) + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) +@@ -360,14 +351,31 @@ func (w *bufWriter) Flush() error { + if w.offset == 0 { + return nil + } +- if w.onFlush != nil { +- w.onFlush() +- } + _, w.err = w.conn.Write(w.buf[:w.offset]) ++ w.err = toIOError(w.err) + w.offset = 0 + return w.err + } + ++type ioError struct { ++ error ++} ++ ++func (i ioError) Unwrap() error { ++ return i.error ++} ++ ++func isIOError(err error) bool { ++ return errors.As(err, &ioError{}) ++} ++ ++func toIOError(err error) error { ++ if err == nil { ++ return nil ++ } ++ return ioError{error: err} ++} ++ + type framer struct { + writer *bufWriter + fr *http2.Framer +diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go +new file mode 100644 +index 0000000..42ed2b0 +--- /dev/null ++++ b/vendor/google.golang.org/grpc/internal/transport/logging.go +@@ -0,0 +1,40 @@ ++/* ++ * ++ * Copyright 2023 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. 
++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package transport ++ ++import ( ++ "fmt" ++ ++ "google.golang.org/grpc/grpclog" ++ internalgrpclog "google.golang.org/grpc/internal/grpclog" ++) ++ ++var logger = grpclog.Component("transport") ++ ++func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { ++ return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) ++} ++ ++func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { ++ return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) ++} ++ ++func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { ++ return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) ++} +diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go +index 7bb53cf..c11b527 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go ++++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go +@@ -31,7 +31,7 @@ const key = keyType("grpc.internal.transport.networktype") + + // Set returns a copy of the provided address with attributes containing networkType. + func Set(address resolver.Address, networkType string) resolver.Address { +- address.Attributes = address.Attributes.WithValues(key, networkType) ++ address.Attributes = address.Attributes.WithValue(key, networkType) + return address + } + +diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go +index a662bf3..4159619 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/proxy.go ++++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go +@@ -37,7 +37,7 @@ var ( + httpProxyFromEnvironment = http.ProxyFromEnvironment + ) + +-func mapAddress(ctx context.Context, address string) (*url.URL, error) { ++func mapAddress(address string) (*url.URL, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", +@@ -114,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri + // connection. 
+ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { + newAddr := addr +- proxyURL, err := mapAddress(ctx, addr) ++ proxyURL, err := mapAddress(addr) + if err != nil { + return nil, err + } +diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go +index 1419812..aa1c896 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/transport.go ++++ b/vendor/google.golang.org/grpc/internal/transport/transport.go +@@ -30,9 +30,11 @@ import ( + "net" + "sync" + "sync/atomic" ++ "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +@@ -41,6 +43,10 @@ import ( + "google.golang.org/grpc/tap" + ) + ++// ErrNoHeaders is used as a signal that a trailers only response was received, ++// and is not a real error. ++var ErrNoHeaders = errors.New("stream has no headers") ++ + const logLevel = 2 + + type bufferPool struct { +@@ -251,6 +257,9 @@ type Stream struct { + fc *inFlow + wq *writeQuota + ++ // Holds compressor names passed in grpc-accept-encoding metadata from the ++ // client. This is empty for the client side stream. ++ clientAdvertisedCompressors string + // Callback to state application's intentions to read data. This + // is used to adjust flow control, if needed. + requestRead func(int) +@@ -339,8 +348,24 @@ func (s *Stream) RecvCompress() string { + } + + // SetSendCompress sets the compression algorithm to the stream. +-func (s *Stream) SetSendCompress(str string) { +- s.sendCompress = str ++func (s *Stream) SetSendCompress(name string) error { ++ if s.isHeaderSent() || s.getState() == streamDone { ++ return errors.New("transport: set send compressor called after headers sent or stream done") ++ } ++ ++ s.sendCompress = name ++ return nil ++} ++ ++// SendCompress returns the send compressor name. ++func (s *Stream) SendCompress() string { ++ return s.sendCompress ++} ++ ++// ClientAdvertisedCompressors returns the compressor names advertised by the ++// client via grpc-accept-encoding header. ++func (s *Stream) ClientAdvertisedCompressors() string { ++ return s.clientAdvertisedCompressors + } + + // Done returns a channel which is closed when it receives the final status +@@ -364,9 +389,15 @@ func (s *Stream) Header() (metadata.MD, error) { + return s.header.Copy(), nil + } + s.waitOnHeader() ++ + if !s.headerValid { + return nil, s.status.Err() + } ++ ++ if s.noHeaders { ++ return nil, ErrNoHeaders ++ } ++ + return s.header.Copy(), nil + } + +@@ -518,16 +549,17 @@ const ( + // ServerConfig consists of all the configurations to establish a server transport. + type ServerConfig struct { + MaxStreams uint32 +- AuthInfo credentials.AuthInfo ++ ConnectionTimeout time.Duration ++ Credentials credentials.TransportCredentials + InTapHandle tap.ServerInHandle +- StatsHandler stats.Handler ++ StatsHandlers []stats.Handler + KeepaliveParams keepalive.ServerParameters + KeepalivePolicy keepalive.EnforcementPolicy + InitialWindowSize int32 + InitialConnWindowSize int32 + WriteBufferSize int + ReadBufferSize int +- ChannelzParentID int64 ++ ChannelzParentID *channelz.Identifier + MaxHeaderListSize *uint32 + HeaderTableSize *uint32 + } +@@ -550,8 +582,8 @@ type ConnectOptions struct { + CredsBundle credentials.Bundle + // KeepaliveParams stores the keepalive parameters. 
+ KeepaliveParams keepalive.ClientParameters +- // StatsHandler stores the handler for stats. +- StatsHandler stats.Handler ++ // StatsHandlers stores the handler for stats. ++ StatsHandlers []stats.Handler + // InitialWindowSize sets the initial window size for a stream. + InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. +@@ -561,7 +593,7 @@ type ConnectOptions struct { + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int + // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. +- ChannelzParentID int64 ++ ChannelzParentID *channelz.Identifier + // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. + MaxHeaderListSize *uint32 + // UseProxy specifies if a proxy should be used. +@@ -570,8 +602,8 @@ type ConnectOptions struct { + + // NewClientTransport establishes the transport with the required ConnectOptions + // and returns it to the caller. +-func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { +- return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) ++func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { ++ return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) + } + + // Options provides additional hints and information for message +@@ -688,13 +720,13 @@ type ServerTransport interface { + // Close tears down the transport. Once it is called, the transport + // should not be accessed any more. All the pending streams and their + // handlers will be terminated asynchronously. +- Close() ++ Close(err error) + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // Drain notifies the client this ServerTransport stops accepting new RPCs. +- Drain() ++ Drain(debugData string) + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() +@@ -739,6 +771,12 @@ func (e ConnectionError) Origin() error { + return e.err + } + ++// Unwrap returns the original error of this connection error or nil when the ++// origin is nil. ++func (e ConnectionError) Unwrap() error { ++ return e.err ++} ++ + var ( + // ErrConnClosing indicates that the transport is closing. + ErrConnClosing = connectionErrorf(true, nil, "transport is closing") +diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go +index 3677c3f..e8b4927 100644 +--- a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go ++++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go +@@ -28,7 +28,7 @@ type handshakeClusterNameKey struct{} + // SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field + // is updated with the cluster name. 
+ func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { +- addr.Attributes = addr.Attributes.WithValues(handshakeClusterNameKey{}, clusterName) ++ addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) + return addr + } + +diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go +index 3604c78..a2cdcaf 100644 +--- a/vendor/google.golang.org/grpc/metadata/metadata.go ++++ b/vendor/google.golang.org/grpc/metadata/metadata.go +@@ -41,16 +41,17 @@ type MD map[string][]string + // New creates an MD from a given key-value map. + // + // Only the following ASCII characters are allowed in keys: +-// - digits: 0-9 +-// - uppercase letters: A-Z (normalized to lower) +-// - lowercase letters: a-z +-// - special characters: -_. ++// - digits: 0-9 ++// - uppercase letters: A-Z (normalized to lower) ++// - lowercase letters: a-z ++// - special characters: -_. ++// + // Uppercase letters are automatically converted to lowercase. + // + // Keys beginning with "grpc-" are reserved for grpc-internal use only and may + // result in errors if set in metadata. + func New(m map[string]string) MD { +- md := MD{} ++ md := make(MD, len(m)) + for k, val := range m { + key := strings.ToLower(k) + md[key] = append(md[key], val) +@@ -62,10 +63,11 @@ func New(m map[string]string) MD { + // Pairs panics if len(kv) is odd. + // + // Only the following ASCII characters are allowed in keys: +-// - digits: 0-9 +-// - uppercase letters: A-Z (normalized to lower) +-// - lowercase letters: a-z +-// - special characters: -_. ++// - digits: 0-9 ++// - uppercase letters: A-Z (normalized to lower) ++// - lowercase letters: a-z ++// - special characters: -_. ++// + // Uppercase letters are automatically converted to lowercase. + // + // Keys beginning with "grpc-" are reserved for grpc-internal use only and may +@@ -74,7 +76,7 @@ func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } +- md := MD{} ++ md := make(MD, len(kv)/2) + for i := 0; i < len(kv); i += 2 { + key := strings.ToLower(kv[i]) + md[key] = append(md[key], kv[i+1]) +@@ -89,7 +91,11 @@ func (md MD) Len() int { + + // Copy returns a copy of md. + func (md MD) Copy() MD { +- return Join(md) ++ out := make(MD, len(md)) ++ for k, v := range md { ++ out[k] = copyOf(v) ++ } ++ return out + } + + // Get obtains the values for a given key. +@@ -169,8 +175,11 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) +- added[len(added)-1] = make([]string, len(kv)) +- copy(added[len(added)-1], kv) ++ kvCopy := make([]string, 0, len(kv)) ++ for i := 0; i < len(kv); i += 2 { ++ kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) ++ } ++ added[len(added)-1] = kvCopy + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) + } + +@@ -182,17 +191,51 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { + if !ok { + return nil, false + } +- out := MD{} ++ out := make(MD, len(md)) + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. 
+ key := strings.ToLower(k) +- out[key] = v ++ out[key] = copyOf(v) + } + return out, true + } + ++// ValueFromIncomingContext returns the metadata value corresponding to the metadata ++// key from the incoming metadata if it exists. Key must be lower-case. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. ++func ValueFromIncomingContext(ctx context.Context, key string) []string { ++ md, ok := ctx.Value(mdIncomingKey{}).(MD) ++ if !ok { ++ return nil ++ } ++ ++ if v, ok := md[key]; ok { ++ return copyOf(v) ++ } ++ for k, v := range md { ++ // We need to manually convert all keys to lower case, because MD is a ++ // map, and there's no guarantee that the MD attached to the context is ++ // created using our helper functions. ++ if strings.ToLower(k) == key { ++ return copyOf(v) ++ } ++ } ++ return nil ++} ++ ++// the returned slice must not be modified in place ++func copyOf(v []string) []string { ++ vals := make([]string, len(v)) ++ copy(vals, v) ++ return vals ++} ++ + // FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. + // + // Remember to perform strings.ToLower on the keys, for both the returned MD (MD +@@ -220,13 +263,18 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { + return nil, false + } + +- out := MD{} ++ mdSize := len(raw.md) ++ for i := range raw.added { ++ mdSize += len(raw.added[i]) / 2 ++ } ++ ++ out := make(MD, mdSize) + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) +- out[key] = v ++ out[key] = copyOf(v) + } + for _, added := range raw.added { + if len(added)%2 == 1 { +diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go +index 0878ada..02f9759 100644 +--- a/vendor/google.golang.org/grpc/picker_wrapper.go ++++ b/vendor/google.golang.org/grpc/picker_wrapper.go +@@ -26,6 +26,7 @@ import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/channelz" ++ istatus "google.golang.org/grpc/internal/status" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" + ) +@@ -35,6 +36,7 @@ import ( + type pickerWrapper struct { + mu sync.Mutex + done bool ++ idle bool + blockingCh chan struct{} + picker balancer.Picker + } +@@ -46,7 +48,11 @@ func newPickerWrapper() *pickerWrapper { + // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. + func (pw *pickerWrapper) updatePicker(p balancer.Picker) { + pw.mu.Lock() +- if pw.done { ++ if pw.done || pw.idle { ++ // There is a small window where a picker update from the LB policy can ++ // race with the channel going to idle mode. If the picker is idle here, ++ // it is because the channel asked it to do so, and therefore it is sage ++ // to ignore the update from the LB policy. 
+ pw.mu.Unlock() + return + } +@@ -57,12 +63,16 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) { + pw.mu.Unlock() + } + +-func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { +- acw.mu.Lock() +- ac := acw.ac +- acw.mu.Unlock() ++// doneChannelzWrapper performs the following: ++// - increments the calls started channelz counter ++// - wraps the done function in the passed in result to increment the calls ++// failed or calls succeeded channelz counter before invoking the actual ++// done function. ++func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { ++ ac := acbw.ac + ac.incrCallsStarted() +- return func(b balancer.DoneInfo) { ++ done := result.Done ++ result.Done = func(b balancer.DoneInfo) { + if b.Err != nil && b.Err != io.EOF { + ac.incrCallsFailed() + } else { +@@ -81,7 +91,7 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f + // - the current picker returns other errors and failfast is false. + // - the subConn returned by the current picker is not READY + // When one of these situations happens, pick blocks until the picker gets updated. +-func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { ++func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { + var ch chan struct{} + + var lastPickErr error +@@ -89,7 +99,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() +- return nil, nil, ErrClientConnClosing ++ return nil, balancer.PickResult{}, ErrClientConnClosing + } + + if pw.picker == nil { +@@ -110,9 +120,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. + } + switch ctx.Err() { + case context.DeadlineExceeded: +- return nil, nil, status.Error(codes.DeadlineExceeded, errStr) ++ return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) + case context.Canceled: +- return nil, nil, status.Error(codes.Canceled, errStr) ++ return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) + } + case <-ch: + } +@@ -124,14 +134,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. + pw.mu.Unlock() + + pickResult, err := p.Pick(info) +- + if err != nil { + if err == balancer.ErrNoSubConnAvailable { + continue + } +- if _, ok := status.FromError(err); ok { ++ if st, ok := status.FromError(err); ok { + // Status error: end the RPC unconditionally with this status. +- return nil, nil, err ++ // First restrict the code to the list allowed by gRFC A54. ++ if istatus.IsRestrictedControlPlaneCode(st) { ++ err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) ++ } ++ return nil, balancer.PickResult{}, dropError{error: err} + } + // For all other errors, wait for ready RPCs should block and other + // RPCs should fail with unavailable. +@@ -139,19 +152,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
+ lastPickErr = err + continue + } +- return nil, nil, status.Error(codes.Unavailable, err.Error()) ++ return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) + } + +- acw, ok := pickResult.SubConn.(*acBalancerWrapper) ++ acbw, ok := pickResult.SubConn.(*acBalancerWrapper) + if !ok { +- logger.Error("subconn returned from pick is not *acBalancerWrapper") ++ logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) + continue + } +- if t := acw.getAddrConn().getReadyTransport(); t != nil { ++ if t := acbw.ac.getReadyTransport(); t != nil { + if channelz.IsOn() { +- return t, doneChannelzWrapper(acw, pickResult.Done), nil ++ doneChannelzWrapper(acbw, &pickResult) ++ return t, pickResult, nil + } +- return t, pickResult.Done, nil ++ return t, pickResult, nil + } + if pickResult.Done != nil { + // Calling done with nil error, no bytes sent and no bytes received. +@@ -175,3 +189,28 @@ func (pw *pickerWrapper) close() { + pw.done = true + close(pw.blockingCh) + } ++ ++func (pw *pickerWrapper) enterIdleMode() { ++ pw.mu.Lock() ++ defer pw.mu.Unlock() ++ if pw.done { ++ return ++ } ++ pw.idle = true ++} ++ ++func (pw *pickerWrapper) exitIdleMode() { ++ pw.mu.Lock() ++ defer pw.mu.Unlock() ++ if pw.done { ++ return ++ } ++ pw.blockingCh = make(chan struct{}) ++ pw.idle = false ++} ++ ++// dropError is a wrapper error that indicates the LB policy wishes to drop the ++// RPC and not retry it. ++type dropError struct { ++ error ++} +diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go +index b858c2a..abe266b 100644 +--- a/vendor/google.golang.org/grpc/pickfirst.go ++++ b/vendor/google.golang.org/grpc/pickfirst.go +@@ -19,11 +19,15 @@ + package grpc + + import ( ++ "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" ++ "google.golang.org/grpc/internal/envconfig" ++ "google.golang.org/grpc/internal/grpcrand" ++ "google.golang.org/grpc/serviceconfig" + ) + + // PickFirstBalancerName is the name of the pick_first balancer. +@@ -43,94 +47,181 @@ func (*pickfirstBuilder) Name() string { + return PickFirstBalancerName + } + ++type pfConfig struct { ++ serviceconfig.LoadBalancingConfig `json:"-"` ++ ++ // If set to true, instructs the LB policy to shuffle the order of the list ++ // of addresses received from the name resolver before attempting to ++ // connect to them. ++ ShuffleAddressList bool `json:"shuffleAddressList"` ++} ++ ++func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { ++ cfg := &pfConfig{} ++ if err := json.Unmarshal(js, cfg); err != nil { ++ return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) ++ } ++ return cfg, nil ++} ++ + type pickfirstBalancer struct { +- state connectivity.State +- cc balancer.ClientConn +- sc balancer.SubConn ++ state connectivity.State ++ cc balancer.ClientConn ++ subConn balancer.SubConn ++ cfg *pfConfig + } + + func (b *pickfirstBalancer) ResolverError(err error) { +- switch b.state { +- case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: +- // Set a failing picker if we don't have a good picker. 
+- b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, +- Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, +- }) +- } + if logger.V(2) { +- logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) ++ logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) ++ } ++ if b.subConn == nil { ++ b.state = connectivity.TransientFailure ++ } ++ ++ if b.state != connectivity.TransientFailure { ++ // The picker will not change since the balancer does not currently ++ // report an error. ++ return + } ++ b.cc.UpdateState(balancer.State{ ++ ConnectivityState: connectivity.TransientFailure, ++ Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, ++ }) + } + +-func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { +- if len(cs.ResolverState.Addresses) == 0 { ++func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { ++ addrs := state.ResolverState.Addresses ++ if len(addrs) == 0 { ++ // The resolver reported an empty address list. Treat it like an error by ++ // calling b.ResolverError. ++ if b.subConn != nil { ++ // Remove the old subConn. All addresses were removed, so it is no longer ++ // valid. ++ b.cc.RemoveSubConn(b.subConn) ++ b.subConn = nil ++ } + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } +- if b.sc == nil { +- var err error +- b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) +- if err != nil { +- if logger.V(2) { +- logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) +- } +- b.state = connectivity.TransientFailure +- b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, +- Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, +- }) +- return balancer.ErrBadResolverState ++ ++ if state.BalancerConfig != nil { ++ cfg, ok := state.BalancerConfig.(*pfConfig) ++ if !ok { ++ return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } +- b.state = connectivity.Idle +- b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) +- b.sc.Connect() +- } else { +- b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) +- b.sc.Connect() ++ b.cfg = cfg ++ } ++ ++ if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { ++ grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) ++ } ++ if b.subConn != nil { ++ b.cc.UpdateAddresses(b.subConn, addrs) ++ return nil + } ++ ++ subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) ++ if err != nil { ++ if logger.V(2) { ++ logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) ++ } ++ b.state = connectivity.TransientFailure ++ b.cc.UpdateState(balancer.State{ ++ ConnectivityState: connectivity.TransientFailure, ++ Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, ++ }) ++ return balancer.ErrBadResolverState ++ } ++ b.subConn = subConn ++ b.state = connectivity.Idle ++ b.cc.UpdateState(balancer.State{ ++ ConnectivityState: connectivity.Connecting, ++ Picker: &picker{err: balancer.ErrNoSubConnAvailable}, ++ }) ++ b.subConn.Connect() + return nil + } + +-func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { ++func (b *pickfirstBalancer) UpdateSubConnState(subConn 
balancer.SubConn, state balancer.SubConnState) { + if logger.V(2) { +- logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) ++ logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) + } +- if b.sc != sc { ++ if b.subConn != subConn { + if logger.V(2) { +- logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") ++ logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") + } + return + } +- b.state = s.ConnectivityState +- if s.ConnectivityState == connectivity.Shutdown { +- b.sc = nil ++ if state.ConnectivityState == connectivity.Shutdown { ++ b.subConn = nil + return + } + +- switch s.ConnectivityState { +- case connectivity.Ready, connectivity.Idle: +- b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) ++ switch state.ConnectivityState { ++ case connectivity.Ready: ++ b.cc.UpdateState(balancer.State{ ++ ConnectivityState: state.ConnectivityState, ++ Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, ++ }) + case connectivity.Connecting: +- b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) ++ if b.state == connectivity.TransientFailure { ++ // We stay in TransientFailure until we are Ready. See A62. ++ return ++ } ++ b.cc.UpdateState(balancer.State{ ++ ConnectivityState: state.ConnectivityState, ++ Picker: &picker{err: balancer.ErrNoSubConnAvailable}, ++ }) ++ case connectivity.Idle: ++ if b.state == connectivity.TransientFailure { ++ // We stay in TransientFailure until we are Ready. Also kick the ++ // subConn out of Idle into Connecting. See A62. ++ b.subConn.Connect() ++ return ++ } ++ b.cc.UpdateState(balancer.State{ ++ ConnectivityState: state.ConnectivityState, ++ Picker: &idlePicker{subConn: subConn}, ++ }) + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ +- ConnectivityState: s.ConnectivityState, +- Picker: &picker{err: s.ConnectionError}, ++ ConnectivityState: state.ConnectivityState, ++ Picker: &picker{err: state.ConnectionError}, + }) + } ++ b.state = state.ConnectivityState + } + + func (b *pickfirstBalancer) Close() { + } + ++func (b *pickfirstBalancer) ExitIdle() { ++ if b.subConn != nil && b.state == connectivity.Idle { ++ b.subConn.Connect() ++ } ++} ++ + type picker struct { + result balancer.PickResult + err error + } + +-func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { ++func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err + } + ++// idlePicker is used when the SubConn is IDLE and kicks the SubConn into ++// CONNECTING when Pick is called. ++type idlePicker struct { ++ subConn balancer.SubConn ++} ++ ++func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { ++ i.subConn.Connect() ++ return balancer.PickResult{}, balancer.ErrNoSubConnAvailable ++} ++ + func init() { + balancer.Register(newPickfirstBuilder()) + } +diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go +index 0a1e975..cd45547 100644 +--- a/vendor/google.golang.org/grpc/preloader.go ++++ b/vendor/google.golang.org/grpc/preloader.go +@@ -25,7 +25,7 @@ import ( + + // PreparedMsg is responsible for creating a Marshalled and Compressed object. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. 
+diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh +index dfd3226..a6f26c8 100644 +--- a/vendor/google.golang.org/grpc/regenerate.sh ++++ b/vendor/google.golang.org/grpc/regenerate.sh +@@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH} + mkdir -p ${GOBIN} + + echo "remove existing generated files" +-# grpc_testingv3/testv3.pb.go is not re-generated because it was +-# intentionally generated by an older version of protoc-gen-go. +-rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') ++# grpc_testing_not_regenerate/*.pb.go is not re-generated, ++# see grpc_testing_not_regenerate/README.md for details. ++rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') + + echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" + (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) +@@ -57,7 +57,8 @@ LEGACY_SOURCES=( + ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto + ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto + profiling/proto/service.proto +- reflection/grpc_reflection_v1alpha/reflection.proto ++ ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto ++ ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto + ) + + # Generates only the new gRPC Service symbols +@@ -68,7 +69,6 @@ SOURCES=( + ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto +- ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto + ${WORKDIR}/grpc-proto/grpc/testing/*.proto + ${WORKDIR}/grpc-proto/grpc/core/*.proto + ) +@@ -76,7 +76,20 @@ SOURCES=( + # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an + # import path of 'bar' in the generated code when 'foo.proto' is imported in + # one of the sources. +-OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core ++# ++# Note that the protos listed here are all for testing purposes. All protos to ++# be used externally should have a go_package option (and they don't need to be ++# listed here). ++OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ ++Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ ++Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ ++Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ ++Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ ++Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ ++Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ ++Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ ++Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ ++Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing + + for src in ${SOURCES[@]}; do + echo "protoc ${src}" +@@ -85,7 +98,6 @@ for src in ${SOURCES[@]}; do + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ +- -I${WORKDIR}/istio \ + ${src} + done + +@@ -96,24 +108,16 @@ for src in ${LEGACY_SOURCES[@]}; do + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ +- -I${WORKDIR}/istio \ + ${src} + done + + # The go_package option in grpc/lookup/v1/rls.proto doesn't match the + # current location. Move it into the right place. 
+-mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 +-mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 +- +-# grpc_testingv3/testv3.pb.go is not re-generated because it was +-# intentionally generated by an older version of protoc-gen-go. +-rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go +- +-# grpc/service_config/service_config.proto does not have a go_package option. +-mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config ++mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 ++mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 + +-# grpc/testing does not have a go_package option. +-mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ +-mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ ++# grpc_testing_not_regenerate/*.pb.go are not re-generated, ++# see grpc_testing_not_regenerate/README.md for details. ++rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go + + cp -R ${WORKDIR}/out/google.golang.org/grpc/* . +diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go +new file mode 100644 +index 0000000..efcb7f3 +--- /dev/null ++++ b/vendor/google.golang.org/grpc/resolver/map.go +@@ -0,0 +1,138 @@ ++/* ++ * ++ * Copyright 2021 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package resolver ++ ++type addressMapEntry struct { ++ addr Address ++ value interface{} ++} ++ ++// AddressMap is a map of addresses to arbitrary values taking into account ++// Attributes. BalancerAttributes are ignored, as are Metadata and Type. ++// Multiple accesses may not be performed concurrently. Must be created via ++// NewAddressMap; do not construct directly. ++type AddressMap struct { ++ // The underlying map is keyed by an Address with fields that we don't care ++ // about being set to their zero values. The only fields that we care about ++ // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to ++ // distinguish between addresses with same `Addr` and `ServerName`, but ++ // different `Attributes`, we cannot store the `Attributes` in the map key. ++ // ++ // The comparison operation for structs work as follows: ++ // Struct values are comparable if all their fields are comparable. Two ++ // struct values are equal if their corresponding non-blank fields are equal. ++ // ++ // The value type of the map contains a slice of addresses which match the key ++ // in their `Addr` and `ServerName` fields and contain the corresponding value ++ // associated with them. 
++ m map[Address]addressMapEntryList ++} ++ ++func toMapKey(addr *Address) Address { ++ return Address{Addr: addr.Addr, ServerName: addr.ServerName} ++} ++ ++type addressMapEntryList []*addressMapEntry ++ ++// NewAddressMap creates a new AddressMap. ++func NewAddressMap() *AddressMap { ++ return &AddressMap{m: make(map[Address]addressMapEntryList)} ++} ++ ++// find returns the index of addr in the addressMapEntry slice, or -1 if not ++// present. ++func (l addressMapEntryList) find(addr Address) int { ++ for i, entry := range l { ++ // Attributes are the only thing to match on here, since `Addr` and ++ // `ServerName` are already equal. ++ if entry.addr.Attributes.Equal(addr.Attributes) { ++ return i ++ } ++ } ++ return -1 ++} ++ ++// Get returns the value for the address in the map, if present. ++func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { ++ addrKey := toMapKey(&addr) ++ entryList := a.m[addrKey] ++ if entry := entryList.find(addr); entry != -1 { ++ return entryList[entry].value, true ++ } ++ return nil, false ++} ++ ++// Set updates or adds the value to the address in the map. ++func (a *AddressMap) Set(addr Address, value interface{}) { ++ addrKey := toMapKey(&addr) ++ entryList := a.m[addrKey] ++ if entry := entryList.find(addr); entry != -1 { ++ entryList[entry].value = value ++ return ++ } ++ a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) ++} ++ ++// Delete removes addr from the map. ++func (a *AddressMap) Delete(addr Address) { ++ addrKey := toMapKey(&addr) ++ entryList := a.m[addrKey] ++ entry := entryList.find(addr) ++ if entry == -1 { ++ return ++ } ++ if len(entryList) == 1 { ++ entryList = nil ++ } else { ++ copy(entryList[entry:], entryList[entry+1:]) ++ entryList = entryList[:len(entryList)-1] ++ } ++ a.m[addrKey] = entryList ++} ++ ++// Len returns the number of entries in the map. ++func (a *AddressMap) Len() int { ++ ret := 0 ++ for _, entryList := range a.m { ++ ret += len(entryList) ++ } ++ return ret ++} ++ ++// Keys returns a slice of all current map keys. ++func (a *AddressMap) Keys() []Address { ++ ret := make([]Address, 0, a.Len()) ++ for _, entryList := range a.m { ++ for _, entry := range entryList { ++ ret = append(ret, entry.addr) ++ } ++ } ++ return ret ++} ++ ++// Values returns a slice of all current map values. ++func (a *AddressMap) Values() []interface{} { ++ ret := make([]interface{}, 0, a.Len()) ++ for _, entryList := range a.m { ++ for _, entry := range entryList { ++ ret = append(ret, entry.value) ++ } ++ } ++ return ret ++} +diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go +index 6a9d234..353c10b 100644 +--- a/vendor/google.golang.org/grpc/resolver/resolver.go ++++ b/vendor/google.golang.org/grpc/resolver/resolver.go +@@ -22,7 +22,10 @@ package resolver + + import ( + "context" ++ "fmt" + "net" ++ "net/url" ++ "strings" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/credentials" +@@ -38,8 +41,9 @@ var ( + + // TODO(bar) install dns resolver in init(){}. + +-// Register registers the resolver builder to the resolver map. b.Scheme will be +-// used as the scheme registered with this builder. ++// Register registers the resolver builder to the resolver map. b.Scheme will ++// be used as the scheme registered with this builder. The registry is case ++// sensitive, and schemes should not contain any uppercase characters. + // + // NOTE: this function must only be called during initialization time (i.e. 
in + // an init() function), and is not thread-safe. If multiple Resolvers are +@@ -94,7 +98,7 @@ const ( + + // Address represents a server the client connects to. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -116,9 +120,14 @@ type Address struct { + ServerName string + + // Attributes contains arbitrary data about this address intended for +- // consumption by the load balancing policy. ++ // consumption by the SubConn. + Attributes *attributes.Attributes + ++ // BalancerAttributes contains arbitrary data about this address intended ++ // for consumption by the LB policy. These attributes do not affect SubConn ++ // creation, connection establishment, handshaking, etc. ++ BalancerAttributes *attributes.Attributes ++ + // Type is the type of this address. + // + // Deprecated: use Attributes instead. +@@ -131,6 +140,30 @@ type Address struct { + Metadata interface{} + } + ++// Equal returns whether a and o are identical. Metadata is compared directly, ++// not with any recursive introspection. ++func (a Address) Equal(o Address) bool { ++ return a.Addr == o.Addr && a.ServerName == o.ServerName && ++ a.Attributes.Equal(o.Attributes) && ++ a.BalancerAttributes.Equal(o.BalancerAttributes) && ++ a.Type == o.Type && a.Metadata == o.Metadata ++} ++ ++// String returns JSON formatted string representation of the address. ++func (a Address) String() string { ++ var sb strings.Builder ++ sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) ++ sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) ++ if a.Attributes != nil { ++ sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) ++ } ++ if a.BalancerAttributes != nil { ++ sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) ++ } ++ sb.WriteString("}") ++ return sb.String() ++} ++ + // BuildOptions includes additional information for the builder to create + // the resolver. + type BuildOptions struct { +@@ -181,6 +214,15 @@ type State struct { + // gRPC to add new methods to this interface. + type ClientConn interface { + // UpdateState updates the state of the ClientConn appropriately. ++ // ++ // If an error is returned, the resolver should try to resolve the ++ // target again. The resolver should use a backoff timer to prevent ++ // overloading the server with requests. If a resolver is certain that ++ // reresolving will not change the result, e.g. because it is ++ // a watch-based resolver, returned errors can be ignored. ++ // ++ // If the resolved State is the same as the last reported one, calling ++ // UpdateState can be omitted. + UpdateState(State) error + // ReportError notifies the ClientConn that the Resolver encountered an + // error. The ClientConn will notify the load balancer and begin calling +@@ -204,25 +246,51 @@ type ClientConn interface { + + // Target represents a target for gRPC, as specified in: + // https://github.com/grpc/grpc/blob/master/doc/naming.md. +-// It is parsed from the target string that gets passed into Dial or DialContext by the user. And +-// grpc passes it to the resolver and the balancer. ++// It is parsed from the target string that gets passed into Dial or DialContext ++// by the user. And gRPC passes it to the resolver and the balancer. + // +-// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will +-// parse the target string according to the spec. e.g. 
"dns://some_authority/foo.bar" will be parsed +-// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} ++// If the target follows the naming spec, and the parsed scheme is registered ++// with gRPC, we will parse the target string according to the spec. If the ++// target does not contain a scheme or if the parsed scheme is not registered ++// (i.e. no corresponding resolver available to resolve the endpoint), we will ++// apply the default scheme, and will attempt to reparse it. + // +-// If the target does not contain a scheme, we will apply the default scheme, and set the Target to +-// be the full target string. e.g. "foo.bar" will be parsed into +-// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. ++// Examples: + // +-// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the +-// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target +-// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into +-// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. ++// - "dns://some_authority/foo.bar" ++// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} ++// - "foo.bar" ++// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} ++// - "unknown_scheme://authority/endpoint" ++// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} + type Target struct { +- Scheme string ++ // Deprecated: use URL.Scheme instead. ++ Scheme string ++ // Deprecated: use URL.Host instead. + Authority string +- Endpoint string ++ // URL contains the parsed dial target with an optional default scheme added ++ // to it if the original dial target contained no scheme or contained an ++ // unregistered scheme. Any query params specified in the original dial ++ // target can be accessed from here. ++ URL url.URL ++} ++ ++// Endpoint retrieves endpoint without leading "/" from either `URL.Path` ++// or `URL.Opaque`. The latter is used when the former is empty. ++func (t Target) Endpoint() string { ++ endpoint := t.URL.Path ++ if endpoint == "" { ++ endpoint = t.URL.Opaque ++ } ++ // For targets of the form "[scheme]://[authority]/endpoint, the endpoint ++ // value returned from url.Parse() contains a leading "/". Although this is ++ // in accordance with RFC 3986, we do not want to break existing resolver ++ // implementations which expect the endpoint without the leading "/". So, we ++ // end up stripping the leading "/" here. But this will result in an ++ // incorrect parsing for something like "unix:///path/to/socket". Since we ++ // own the "unix" resolver, we can workaround in the unix resolver by using ++ // the `URL` field. ++ return strings.TrimPrefix(endpoint, "/") + } + + // Builder creates a resolver that will be used to watch name resolution updates. +@@ -232,8 +300,10 @@ type Builder interface { + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) +- // Scheme returns the scheme supported by this resolver. +- // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. ++ // Scheme returns the scheme supported by this resolver. Scheme is defined ++ // at https://github.com/grpc/grpc/blob/master/doc/naming.md. 
The returned ++ // string should not contain uppercase characters, as they will not match ++ // the parsed target's scheme as defined in RFC 3986. + Scheme() string + } + +diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +index 2c47cd5..b408b36 100644 +--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go ++++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +@@ -19,149 +19,204 @@ + package grpc + + import ( +- "fmt" ++ "context" + "strings" + "sync" + + "google.golang.org/grpc/balancer" +- "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" ++ "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + ) + ++// resolverStateUpdater wraps the single method used by ccResolverWrapper to ++// report a state update from the actual resolver implementation. ++type resolverStateUpdater interface { ++ updateResolverState(s resolver.State, err error) error ++} ++ + // ccResolverWrapper is a wrapper on top of cc for resolvers. + // It implements resolver.ClientConn interface. + type ccResolverWrapper struct { +- cc *ClientConn +- resolverMu sync.Mutex +- resolver resolver.Resolver +- done *grpcsync.Event +- curState resolver.State ++ // The following fields are initialized when the wrapper is created and are ++ // read-only afterwards, and therefore can be accessed without a mutex. ++ cc resolverStateUpdater ++ channelzID *channelz.Identifier ++ ignoreServiceConfig bool ++ opts ccResolverWrapperOpts ++ serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. ++ serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). ++ ++ // All incoming (resolver --> gRPC) calls are guaranteed to execute in a ++ // mutually exclusive manner as they are scheduled on the serializer. ++ // Fields accessed *only* in these serializer callbacks, can therefore be ++ // accessed without a mutex. ++ curState resolver.State ++ ++ // mu guards access to the below fields. ++ mu sync.Mutex ++ closed bool ++ resolver resolver.Resolver // Accessed only from outgoing calls. ++} + +- incomingMu sync.Mutex // Synchronizes all the incoming calls. ++// ccResolverWrapperOpts wraps the arguments to be passed when creating a new ++// ccResolverWrapper. ++type ccResolverWrapperOpts struct { ++ target resolver.Target // User specified dial target to resolve. ++ builder resolver.Builder // Resolver builder to use. ++ bOpts resolver.BuildOptions // Resolver build options to use. ++ channelzID *channelz.Identifier // Channelz identifier for the channel. + } + + // newCCResolverWrapper uses the resolver.Builder to build a Resolver and + // returns a ccResolverWrapper object which wraps the newly built resolver. 
+-func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { ++func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { ++ ctx, cancel := context.WithCancel(context.Background()) + ccr := &ccResolverWrapper{ +- cc: cc, +- done: grpcsync.NewEvent(), +- } +- +- var credsClone credentials.TransportCredentials +- if creds := cc.dopts.copts.TransportCredentials; creds != nil { +- credsClone = creds.Clone() +- } +- rbo := resolver.BuildOptions{ +- DisableServiceConfig: cc.dopts.disableServiceConfig, +- DialCreds: credsClone, +- CredsBundle: cc.dopts.copts.CredsBundle, +- Dialer: cc.dopts.copts.Dialer, +- } +- +- var err error +- // We need to hold the lock here while we assign to the ccr.resolver field +- // to guard against a data race caused by the following code path, +- // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up +- // accessing ccr.resolver which is being assigned here. +- ccr.resolverMu.Lock() +- defer ccr.resolverMu.Unlock() +- ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) ++ cc: cc, ++ channelzID: opts.channelzID, ++ ignoreServiceConfig: opts.bOpts.DisableServiceConfig, ++ opts: opts, ++ serializer: grpcsync.NewCallbackSerializer(ctx), ++ serializerCancel: cancel, ++ } ++ ++ // Cannot hold the lock at build time because the resolver can send an ++ // update or error inline and these incoming calls grab the lock to schedule ++ // a callback in the serializer. ++ r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) + if err != nil { ++ cancel() + return nil, err + } ++ ++ // Any error reported by the resolver at build time that leads to a ++ // re-resolution request from the balancer is dropped by grpc until we ++ // return from this function. So, we don't have to handle pending resolveNow ++ // requests here. ++ ccr.mu.Lock() ++ ccr.resolver = r ++ ccr.mu.Unlock() ++ + return ccr, nil + } + + func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { +- ccr.resolverMu.Lock() +- if !ccr.done.HasFired() { +- ccr.resolver.ResolveNow(o) ++ ccr.mu.Lock() ++ defer ccr.mu.Unlock() ++ ++ // ccr.resolver field is set only after the call to Build() returns. But in ++ // the process of building, the resolver may send an error update which when ++ // propagated to the balancer may result in a re-resolution request. ++ if ccr.closed || ccr.resolver == nil { ++ return + } +- ccr.resolverMu.Unlock() ++ ccr.resolver.ResolveNow(o) + } + + func (ccr *ccResolverWrapper) close() { +- ccr.resolverMu.Lock() +- ccr.resolver.Close() +- ccr.done.Fire() +- ccr.resolverMu.Unlock() ++ ccr.mu.Lock() ++ if ccr.closed { ++ ccr.mu.Unlock() ++ return ++ } ++ ++ channelz.Info(logger, ccr.channelzID, "Closing the name resolver") ++ ++ // Close the serializer to ensure that no more calls from the resolver are ++ // handled, before actually closing the resolver. ++ ccr.serializerCancel() ++ ccr.closed = true ++ r := ccr.resolver ++ ccr.mu.Unlock() ++ ++ // Give enqueued callbacks a chance to finish. ++ <-ccr.serializer.Done ++ ++ // Spawn a goroutine to close the resolver (since it may block trying to ++ // cleanup all allocated resources) and return early. ++ go r.Close() + } + ++// serializerScheduleLocked is a convenience method to schedule a function to be ++// run on the serializer while holding ccr.mu. 
++func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { ++ ccr.mu.Lock() ++ ccr.serializer.Schedule(f) ++ ccr.mu.Unlock() ++} ++ ++// UpdateState is called by resolver implementations to report new state to gRPC ++// which includes addresses and service config. + func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { +- ccr.incomingMu.Lock() +- defer ccr.incomingMu.Unlock() +- if ccr.done.HasFired() { +- return nil +- } +- channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) +- if channelz.IsOn() { ++ errCh := make(chan error, 1) ++ ok := ccr.serializer.Schedule(func(context.Context) { + ccr.addChannelzTraceEvent(s) ++ ccr.curState = s ++ if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { ++ errCh <- balancer.ErrBadResolverState ++ return ++ } ++ errCh <- nil ++ }) ++ if !ok { ++ // The only time when Schedule() fail to add the callback to the ++ // serializer is when the serializer is closed, and this happens only ++ // when the resolver wrapper is closed. ++ return nil + } +- ccr.curState = s +- if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { +- return balancer.ErrBadResolverState +- } +- return nil ++ return <-errCh + } + ++// ReportError is called by resolver implementations to report errors ++// encountered during name resolution to gRPC. + func (ccr *ccResolverWrapper) ReportError(err error) { +- ccr.incomingMu.Lock() +- defer ccr.incomingMu.Unlock() +- if ccr.done.HasFired() { +- return +- } +- channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) +- ccr.cc.updateResolverState(resolver.State{}, err) ++ ccr.serializerScheduleLocked(func(_ context.Context) { ++ channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) ++ ccr.cc.updateResolverState(resolver.State{}, err) ++ }) + } + +-// NewAddress is called by the resolver implementation to send addresses to gRPC. ++// NewAddress is called by the resolver implementation to send addresses to ++// gRPC. + func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { +- ccr.incomingMu.Lock() +- defer ccr.incomingMu.Unlock() +- if ccr.done.HasFired() { +- return +- } +- channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) +- if channelz.IsOn() { ++ ccr.serializerScheduleLocked(func(_ context.Context) { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) +- } +- ccr.curState.Addresses = addrs +- ccr.cc.updateResolverState(ccr.curState, nil) ++ ccr.curState.Addresses = addrs ++ ccr.cc.updateResolverState(ccr.curState, nil) ++ }) + } + + // NewServiceConfig is called by the resolver implementation to send service + // configs to gRPC. 
+ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { +- ccr.incomingMu.Lock() +- defer ccr.incomingMu.Unlock() +- if ccr.done.HasFired() { +- return +- } +- channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) +- if ccr.cc.dopts.disableServiceConfig { +- channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") +- return +- } +- scpr := parseServiceConfig(sc) +- if scpr.Err != nil { +- channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) +- return +- } +- if channelz.IsOn() { ++ ccr.serializerScheduleLocked(func(_ context.Context) { ++ channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) ++ if ccr.ignoreServiceConfig { ++ channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") ++ return ++ } ++ scpr := parseServiceConfig(sc) ++ if scpr.Err != nil { ++ channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) ++ return ++ } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) +- } +- ccr.curState.ServiceConfig = scpr +- ccr.cc.updateResolverState(ccr.curState, nil) ++ ccr.curState.ServiceConfig = scpr ++ ccr.cc.updateResolverState(ccr.curState, nil) ++ }) + } + ++// ParseServiceConfig is called by resolver implementations to parse a JSON ++// representation of the service config. + func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) + } + ++// addChannelzTraceEvent adds a channelz trace event containing the new ++// state received from resolver implementations. + func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig +@@ -180,8 +235,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } +- channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ +- Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), +- Severity: channelz.CtInfo, +- }) ++ channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) + } +diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go +index 87987a2..2030736 100644 +--- a/vendor/google.golang.org/grpc/rpc_util.go ++++ b/vendor/google.golang.org/grpc/rpc_util.go +@@ -25,7 +25,6 @@ import ( + "encoding/binary" + "fmt" + "io" +- "io/ioutil" + "math" + "strings" + "sync" +@@ -77,7 +76,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { + return &gzipCompressor{ + pool: sync.Pool{ + New: func() interface{} { +- w, err := gzip.NewWriterLevel(ioutil.Discard, level) ++ w, err := gzip.NewWriterLevel(io.Discard, level) + if err != nil { + panic(err) + } +@@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + z.Close() + d.pool.Put(z) + }() +- return ioutil.ReadAll(z) ++ return io.ReadAll(z) + } + + func (d *gzipDecompressor) Type() string { +@@ -160,6 +159,7 @@ type callInfo struct { + contentSubtype string + codec baseCodec + maxRetryRPCBufferSize int ++ onFinish []func(err error) + } + + func defaultCallInfo() *callInfo { +@@ -198,7 +198,7 @@ func Header(md 
*metadata.MD) CallOption { + // HeaderCallOption is a CallOption for collecting response header metadata. + // The metadata field will be populated *after* the RPC completes. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -220,7 +220,7 @@ func Trailer(md *metadata.MD) CallOption { + // TrailerCallOption is a CallOption for collecting response trailer metadata. + // The metadata field will be populated *after* the RPC completes. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -242,7 +242,7 @@ func Peer(p *peer.Peer) CallOption { + // PeerCallOption is a CallOption for collecting the identity of the remote + // peer. The peer field will be populated *after* the RPC completes. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -282,7 +282,7 @@ func FailFast(failFast bool) CallOption { + // FailFastCallOption is a CallOption for indicating whether an RPC should fail + // fast or not. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -296,8 +296,44 @@ func (o FailFastCallOption) before(c *callInfo) error { + } + func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} + ++// OnFinish returns a CallOption that configures a callback to be called when ++// the call completes. The error passed to the callback is the status of the ++// RPC, and may be nil. The onFinish callback provided will only be called once ++// by gRPC. This is mainly used to be used by streaming interceptors, to be ++// notified when the RPC completes along with information about the status of ++// the RPC. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. ++func OnFinish(onFinish func(err error)) CallOption { ++ return OnFinishCallOption{ ++ OnFinish: onFinish, ++ } ++} ++ ++// OnFinishCallOption is CallOption that indicates a callback to be called when ++// the call completes. ++// ++// # Experimental ++// ++// Notice: This type is EXPERIMENTAL and may be changed or removed in a ++// later release. ++type OnFinishCallOption struct { ++ OnFinish func(error) ++} ++ ++func (o OnFinishCallOption) before(c *callInfo) error { ++ c.onFinish = append(c.onFinish, o.OnFinish) ++ return nil ++} ++ ++func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} ++ + // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +-// in bytes the client can receive. ++// in bytes the client can receive. If this is not set, gRPC uses the default ++// 4MB. + func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} + } +@@ -305,7 +341,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption { + // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message + // size in bytes the client can receive. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. 
+@@ -320,7 +356,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + + // MaxCallSendMsgSize returns a CallOption which sets the maximum message size +-// in bytes the client can send. ++// in bytes the client can send. If this is not set, gRPC uses the default ++// `math.MaxInt32`. + func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} + } +@@ -328,7 +365,7 @@ func MaxCallSendMsgSize(bytes int) CallOption { + // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message + // size in bytes the client can send. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -351,7 +388,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + // PerRPCCredsCallOption is a CallOption that indicates the per-RPC + // credentials to use for the call. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -369,7 +406,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} + // sending the request. If WithCompressor is also set, UseCompressor has + // higher priority. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -379,7 +416,7 @@ func UseCompressor(name string) CallOption { + + // CompressorCallOption is a CallOption that indicates the compressor to use. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -416,7 +453,7 @@ func CallContentSubtype(contentSubtype string) CallOption { + // ContentSubtypeCallOption is a CallOption that indicates the content-subtype + // used for marshaling messages. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -444,7 +481,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} + // This function is provided for advanced users; prefer to use only + // CallContentSubtype to select a registered codec instead. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -455,7 +492,7 @@ func ForceCodec(codec encoding.Codec) CallOption { + // ForceCodecCallOption is a CallOption that indicates the codec used for + // marshaling messages. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -480,7 +517,7 @@ func CallCustomCodec(codec Codec) CallOption { + // CustomCodecCallOption is a CallOption that indicates the codec used for + // marshaling messages. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -497,7 +534,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} + // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory + // used for buffering this RPC's requests for retry purposes. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. 
+@@ -508,7 +545,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption { + // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of + // memory to be used for caching this RPC for retry purposes. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -548,10 +585,11 @@ type parser struct { + // format. The caller owns the returned msg memory. + // + // If there is an error, possible values are: +-// * io.EOF, when no messages remain +-// * io.ErrUnexpectedEOF +-// * of type transport.ConnectionError +-// * an error from the status package ++// - io.EOF, when no messages remain ++// - io.ErrUnexpectedEOF ++// - of type transport.ConnectionError ++// - an error from the status package ++// + // No other error values or types must be returned, which also means + // that the underlying io.Reader must not return an incompatible + // error. +@@ -656,12 +694,13 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + + func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ +- Client: client, +- Payload: msg, +- Data: data, +- Length: len(data), +- WireLength: len(payload) + headerLen, +- SentTime: t, ++ Client: client, ++ Payload: msg, ++ Data: data, ++ Length: len(data), ++ WireLength: len(payload) + headerLen, ++ CompressedLength: len(payload), ++ SentTime: t, + } + } + +@@ -682,7 +721,7 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool + } + + type payloadInfo struct { +- wireLength int // The compressed length got from wire. ++ compressedLength int // The compressed length got from wire. + uncompressedBytes []byte + } + +@@ -692,7 +731,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei + return nil, err + } + if payInfo != nil { +- payInfo.wireLength = len(d) ++ payInfo.compressedLength = len(d) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { +@@ -710,15 +749,13 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei + d, size, err = decompress(compressor, d, maxReceiveMessageSize) + } + if err != nil { +- return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) ++ return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) ++ } ++ if size > maxReceiveMessageSize { ++ // TODO: Revisit the error code. Currently keep it consistent with java ++ // implementation. ++ return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + } +- } else { +- size = len(d) +- } +- if size > maxReceiveMessageSize { +- // TODO: Revisit the error code. Currently keep it consistent with java +- // implementation. +- return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize) + } + return d, nil + } +@@ -747,7 +784,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize + } + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. 
+- d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) ++ d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return d, len(d), err + } + +@@ -760,7 +797,7 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf + return err + } + if err := c.Unmarshal(d, m); err != nil { +- return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) ++ return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) + } + if payInfo != nil { + payInfo.uncompressedBytes = d +diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go +index 0251f48..8869cc9 100644 +--- a/vendor/google.golang.org/grpc/server.go ++++ b/vendor/google.golang.org/grpc/server.go +@@ -43,8 +43,8 @@ import ( + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" +- "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" ++ "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" +@@ -73,6 +73,14 @@ func init() { + internal.DrainServerTransports = func(srv *Server, addr string) { + srv.drainServerTransports(addr) + } ++ internal.AddGlobalServerOptions = func(opt ...ServerOption) { ++ globalServerOptions = append(globalServerOptions, opt...) ++ } ++ internal.ClearGlobalServerOptions = func() { ++ globalServerOptions = nil ++ } ++ internal.BinaryLogger = binaryLogger ++ internal.JoinServerOptions = newJoinServerOption + } + + var statusOK = status.New(codes.OK, "") +@@ -107,12 +115,6 @@ type serviceInfo struct { + mdata interface{} + } + +-type serverWorkerData struct { +- st transport.ServerTransport +- wg *sync.WaitGroup +- stream *transport.Stream +-} +- + // Server is a gRPC server to serve RPC requests. + type Server struct { + opts serverOptions +@@ -134,10 +136,10 @@ type Server struct { + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + +- channelzID int64 // channelz unique identification number ++ channelzID *channelz.Identifier + czData *channelzData + +- serverWorkerChannels []chan *serverWorkerData ++ serverWorkerChannel chan func() + } + + type serverOptions struct { +@@ -149,8 +151,9 @@ type serverOptions struct { + streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor ++ binaryLogger binarylog.Logger + inTapHandle tap.ServerInHandle +- statsHandler stats.Handler ++ statsHandlers []stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int +@@ -168,12 +171,14 @@ type serverOptions struct { + } + + var defaultServerOptions = serverOptions{ ++ maxConcurrentStreams: math.MaxUint32, + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, + } ++var globalServerOptions []ServerOption + + // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. + type ServerOption interface { +@@ -183,7 +188,7 @@ type ServerOption interface { + // EmptyServerOption does not alter the server configuration. It can be embedded + // in another structure to build custom server options. 
+ // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -207,10 +212,27 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption { + } + } + +-// WriteBufferSize determines how much data can be batched before doing a write on the wire. +-// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. +-// The default value for this buffer is 32KB. +-// Zero will disable the write buffer such that each write will be on underlying connection. ++// joinServerOption provides a way to combine arbitrary number of server ++// options into one. ++type joinServerOption struct { ++ opts []ServerOption ++} ++ ++func (mdo *joinServerOption) apply(do *serverOptions) { ++ for _, opt := range mdo.opts { ++ opt.apply(do) ++ } ++} ++ ++func newJoinServerOption(opts ...ServerOption) ServerOption { ++ return &joinServerOption{opts: opts} ++} ++ ++// WriteBufferSize determines how much data can be batched before doing a write ++// on the wire. The corresponding memory allocation for this buffer will be ++// twice the size to keep syscalls low. The default value for this buffer is ++// 32KB. Zero or negative values will disable the write buffer such that each ++// write will be on underlying connection. + // Note: A Send call may not directly translate to a write. + func WriteBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { +@@ -218,11 +240,10 @@ func WriteBufferSize(s int) ServerOption { + }) + } + +-// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +-// for one read syscall. +-// The default value for this buffer is 32KB. +-// Zero will disable read buffer for a connection so data framer can access the underlying +-// conn directly. ++// ReadBufferSize lets you set the size of read buffer, this determines how much ++// data can be read at most for one read syscall. The default value for this ++// buffer is 32KB. Zero or negative values will disable read buffer for a ++// connection so data framer can access the underlying conn directly. + func ReadBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.readBufferSize = s +@@ -298,7 +319,7 @@ func CustomCodec(codec Codec) ServerOption { + // https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. + // Will be supported throughout 1.x. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -361,6 +382,9 @@ func MaxSendMsgSize(m int) ServerOption { + // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number + // of concurrent streams to each ServerTransport. + func MaxConcurrentStreams(n uint32) ServerOption { ++ if n == 0 { ++ n = math.MaxUint32 ++ } + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +@@ -419,7 +443,7 @@ func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOptio + // InTapHandle returns a ServerOption that sets the tap handle for all the server + // transport to be created. Only one can be installed. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. 
+@@ -435,7 +459,21 @@ func InTapHandle(h tap.ServerInHandle) ServerOption { + // StatsHandler returns a ServerOption that sets the stats handler for the server. + func StatsHandler(h stats.Handler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { +- o.statsHandler = h ++ if h == nil { ++ logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") ++ // Do not allow a nil stats handler, which would otherwise cause ++ // panics. ++ return ++ } ++ o.statsHandlers = append(o.statsHandlers, h) ++ }) ++} ++ ++// binaryLogger returns a ServerOption that can set the binary logger for the ++// server. ++func binaryLogger(bl binarylog.Logger) ServerOption { ++ return newFuncServerOption(func(o *serverOptions) { ++ o.binaryLogger = bl + }) + } + +@@ -462,7 +500,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + // new connections. If this is not set, the default is 120 seconds. A zero or + // negative value will result in an immediate timeout. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -483,7 +521,7 @@ func MaxHeaderListSize(s uint32) ServerOption { + // HeaderTableSize returns a ServerOption that sets the size of dynamic + // header table for stream. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -498,7 +536,7 @@ func HeaderTableSize(s uint32) ServerOption { + // zero (default) will disable workers and spawn a new goroutine for each + // stream. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -520,46 +558,42 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { + const serverWorkerResetThreshold = 1 << 16 + + // serverWorkers blocks on a *transport.Stream channel forever and waits for +-// data to be fed by serveStreams. This allows different requests to be ++// data to be fed by serveStreams. This allows multiple requests to be + // processed by the same goroutine, removing the need for expensive stack + // re-allocations (see the runtime.morestack problem [1]). + // + // [1] https://github.com/golang/go/issues/18138 +-func (s *Server) serverWorker(ch chan *serverWorkerData) { +- // To make sure all server workers don't reset at the same time, choose a +- // random number of iterations before resetting. +- threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) +- for completed := 0; completed < threshold; completed++ { +- data, ok := <-ch ++func (s *Server) serverWorker() { ++ for completed := 0; completed < serverWorkerResetThreshold; completed++ { ++ f, ok := <-s.serverWorkerChannel + if !ok { + return + } +- s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) +- data.wg.Done() ++ f() + } +- go s.serverWorker(ch) ++ go s.serverWorker() + } + +-// initServerWorkers creates worker goroutines and channels to process incoming ++// initServerWorkers creates worker goroutines and a channel to process incoming + // connections to reduce the time spent overall on runtime.morestack. 
+ func (s *Server) initServerWorkers() { +- s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) ++ s.serverWorkerChannel = make(chan func()) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { +- s.serverWorkerChannels[i] = make(chan *serverWorkerData) +- go s.serverWorker(s.serverWorkerChannels[i]) ++ go s.serverWorker() + } + } + + func (s *Server) stopServerWorkers() { +- for i := uint32(0); i < s.opts.numServerWorkers; i++ { +- close(s.serverWorkerChannels[i]) +- } ++ close(s.serverWorkerChannel) + } + + // NewServer creates a gRPC server which has no service registered and has not + // started to accept requests yet. + func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions ++ for _, o := range globalServerOptions { ++ o.apply(&opts) ++ } + for _, o := range opt { + o.apply(&opts) + } +@@ -584,9 +618,8 @@ func NewServer(opt ...ServerOption) *Server { + s.initServerWorkers() + } + +- if channelz.IsOn() { +- s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") +- } ++ s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") ++ channelz.Info(logger, s.channelzID, "Server created") + return s + } + +@@ -710,16 +743,9 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo { + // the server being stopped. + var ErrServerStopped = errors.New("grpc: the server has been stopped") + +-func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { +- if s.opts.creds == nil { +- return rawConn, nil, nil +- } +- return s.opts.creds.ServerHandshake(rawConn) +-} +- + type listenSocket struct { + net.Listener +- channelzID int64 ++ channelzID *channelz.Identifier + } + + func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { +@@ -731,9 +757,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { + + func (l *listenSocket) Close() error { + err := l.Listener.Close() +- if channelz.IsOn() { +- channelz.RemoveEntry(l.channelzID) +- } ++ channelz.RemoveEntry(l.channelzID) ++ channelz.Info(logger, l.channelzID, "ListenSocket deleted") + return err + } + +@@ -766,11 +791,6 @@ func (s *Server) Serve(lis net.Listener) error { + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + +- if channelz.IsOn() { +- ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) +- } +- s.mu.Unlock() +- + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { +@@ -780,8 +800,16 @@ func (s *Server) Serve(lis net.Listener) error { + s.mu.Unlock() + }() + +- var tempDelay time.Duration // how long to sleep on accept failure ++ var err error ++ ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) ++ if err != nil { ++ s.mu.Unlock() ++ return err ++ } ++ s.mu.Unlock() ++ channelz.Info(logger, ls.channelzID, "ListenSocket created") + ++ var tempDelay time.Duration // how long to sleep on accept failure + for { + rawConn, err := lis.Accept() + if err != nil { +@@ -839,35 +867,14 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { + return + } + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) +- conn, authInfo, err := s.useTransportAuthenticator(rawConn) +- if err != nil { +- // ErrConnDispatched means that the connection was dispatched away from +- // gRPC; those connections should be left open. 
+- if err != credentials.ErrConnDispatched { +- // In deployments where a gRPC server runs behind a cloud load +- // balancer which performs regular TCP level health checks, the +- // connection is closed immediately by the latter. Skipping the +- // error here will help reduce log clutter. +- if err != io.EOF { +- s.mu.Lock() +- s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) +- s.mu.Unlock() +- channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) +- } +- rawConn.Close() +- } +- rawConn.SetDeadline(time.Time{}) +- return +- } + + // Finish handshaking (HTTP2) +- st := s.newHTTP2Transport(conn, authInfo) ++ st := s.newHTTP2Transport(rawConn) ++ rawConn.SetDeadline(time.Time{}) + if st == nil { +- conn.Close() + return + } + +- rawConn.SetDeadline(time.Time{}) + if !s.addConn(lisAddr, st) { + return + } +@@ -881,19 +888,20 @@ func (s *Server) drainServerTransports(addr string) { + s.mu.Lock() + conns := s.conns[addr] + for st := range conns { +- st.Drain() ++ st.Drain("") + } + s.mu.Unlock() + } + + // newHTTP2Transport sets up a http/2 transport (using the + // gRPC http2 server transport in transport/http2_server.go). +-func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { ++func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, +- AuthInfo: authInfo, ++ ConnectionTimeout: s.opts.connectionTimeout, ++ Credentials: s.opts.creds, + InTapHandle: s.opts.inTapHandle, +- StatsHandler: s.opts.statsHandler, ++ StatsHandlers: s.opts.statsHandlers, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, +@@ -909,8 +917,15 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() +- c.Close() +- channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) ++ // ErrConnDispatched means that the connection was dispatched away from ++ // gRPC; those connections should be left open. ++ if err != credentials.ErrConnDispatched { ++ // Don't log on ErrConnDispatched and io.EOF to prevent log spam. ++ if err != io.EOF { ++ channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) ++ } ++ c.Close() ++ } + return nil + } + +@@ -918,29 +933,29 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr + } + + func (s *Server) serveStreams(st transport.ServerTransport) { +- defer st.Close() ++ defer st.Close(errors.New("finished serving streams for the server transport")) + var wg sync.WaitGroup + +- var roundRobinCounter uint32 ++ streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) ++ ++ streamQuota.acquire() ++ f := func() { ++ defer streamQuota.release() ++ defer wg.Done() ++ s.handleStream(st, stream, s.traceInfo(st, stream)) ++ } ++ + if s.opts.numServerWorkers > 0 { +- data := &serverWorkerData{st: st, wg: &wg, stream: stream} + select { +- case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: ++ case s.serverWorkerChannel <- f: ++ return + default: + // If all stream workers are busy, fallback to the default code path. 
+- go func() { +- s.handleStream(st, stream, s.traceInfo(st, stream)) +- wg.Done() +- }() + } +- } else { +- go func() { +- defer wg.Done() +- s.handleStream(st, stream, s.traceInfo(st, stream)) +- }() + } ++ go f() + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx +@@ -965,26 +980,27 @@ var _ http.Handler = (*Server)(nil) + // To share one port (such as 443 for https) between gRPC and an + // existing http.Handler, use a root http.Handler such as: + // +-// if r.ProtoMajor == 2 && strings.HasPrefix( +-// r.Header.Get("Content-Type"), "application/grpc") { +-// grpcServer.ServeHTTP(w, r) +-// } else { +-// yourMux.ServeHTTP(w, r) +-// } ++// if r.ProtoMajor == 2 && strings.HasPrefix( ++// r.Header.Get("Content-Type"), "application/grpc") { ++// grpcServer.ServeHTTP(w, r) ++// } else { ++// yourMux.ServeHTTP(w, r) ++// } + // + // Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally + // separate from grpc-go's HTTP/2 server. Performance and features may vary + // between the two paths. ServeHTTP does not support some gRPC features + // available through grpc-go's HTTP/2 server. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. + func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { +- st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) ++ st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) + if err != nil { +- http.Error(w, err.Error(), http.StatusInternalServerError) ++ // Errors returned from transport.NewServerHandlerTransport have ++ // already been written to w. + return + } + if !s.addConn(listenerAddressForServeHTTP, st) { +@@ -1022,13 +1038,13 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { +- st.Close() ++ st.Close(errors.New("Server.addConn called when server has already been stopped")) + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. +- st.Drain() ++ st.Drain("") + } + + if s.conns[addr] == nil { +@@ -1095,8 +1111,10 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) +- if err == nil && s.opts.statsHandler != nil { +- s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) ++ if err == nil { ++ for _, sh := range s.opts.statsHandlers { ++ sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) ++ } + } + return err + } +@@ -1124,27 +1142,27 @@ func chainUnaryServerInterceptors(s *Server) { + + func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { +- var i int +- var next UnaryHandler +- next = func(ctx context.Context, req interface{}) (interface{}, error) { +- if i == len(interceptors)-1 { +- return interceptors[i](ctx, req, info, handler) +- } +- i++ +- return interceptors[i-1](ctx, req, info, next) +- } +- return next(ctx, req) ++ return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) ++ } ++} ++ ++func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { ++ if curr == len(interceptors)-1 { ++ return finalHandler ++ } ++ return func(ctx context.Context, req interface{}) (interface{}, error) { ++ return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } + } + + func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +- sh := s.opts.statsHandler +- if sh != nil || trInfo != nil || channelz.IsOn() { ++ shs := s.opts.statsHandlers ++ if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() + } + var statsBegin *stats.Begin +- if sh != nil { ++ for _, sh := range shs { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, +@@ -1175,7 +1193,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + trInfo.tr.Finish() + } + +- if sh != nil { ++ for _, sh := range shs { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), +@@ -1195,9 +1213,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + } + }() + } +- +- binlog := binarylog.GetMethodLogger(stream.Method()) +- if binlog != nil { ++ var binlogs []binarylog.MethodLogger ++ if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { ++ binlogs = append(binlogs, ml) ++ } ++ if s.opts.binaryLogger != nil { ++ if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { ++ binlogs = append(binlogs, ml) ++ } ++ } ++ if len(binlogs) != 0 { + ctx := stream.Context() + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ +@@ -1217,7 +1242,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + if peer, ok := peer.FromContext(ctx); ok { + logEntry.PeerAddr = peer.Addr + } +- binlog.Log(logEntry) ++ for _, binlog := range binlogs { ++ binlog.Log(ctx, logEntry) ++ } + } + + // comp and cp are used for compression. decomp and dc are used for +@@ -1227,6 +1254,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
+ var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor ++ var sendCompressorName string + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. +@@ -1247,23 +1275,29 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp +- stream.SetSendCompress(cp.Type()) ++ sendCompressorName = cp.Type() + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + comp = encoding.GetCompressor(rc) + if comp != nil { +- stream.SetSendCompress(rc) ++ sendCompressorName = comp.Name() ++ } ++ } ++ ++ if sendCompressorName != "" { ++ if err := stream.SetSendCompress(sendCompressorName); err != nil { ++ return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + } + } + + var payInfo *payloadInfo +- if sh != nil || binlog != nil { ++ if len(shs) != 0 || len(binlogs) != 0 { + payInfo = &payloadInfo{} + } + d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + if err != nil { + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { +- channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) ++ channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + return err + } +@@ -1274,19 +1308,23 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } +- if sh != nil { ++ for _, sh := range shs { + sh.HandleRPC(stream.Context(), &stats.InPayload{ +- RecvTime: time.Now(), +- Payload: v, +- WireLength: payInfo.wireLength + headerLen, +- Data: d, +- Length: len(d), ++ RecvTime: time.Now(), ++ Payload: v, ++ Length: len(d), ++ WireLength: payInfo.compressedLength + headerLen, ++ CompressedLength: payInfo.compressedLength, ++ Data: d, + }) + } +- if binlog != nil { +- binlog.Log(&binarylog.ClientMessage{ ++ if len(binlogs) != 0 { ++ cm := &binarylog.ClientMessage{ + Message: d, +- }) ++ } ++ for _, binlog := range binlogs { ++ binlog.Log(stream.Context(), cm) ++ } + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) +@@ -1298,9 +1336,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { +- // Convert appErr if it is not a grpc status error. +- appErr = status.Error(codes.Unknown, appErr.Error()) +- appStatus, _ = status.FromError(appErr) ++ // Convert non-status application error to a status error with code ++ // Unknown, but handle context errors specifically. ++ appStatus = status.FromContextError(appErr) ++ appErr = appStatus.Err() + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) +@@ -1309,18 +1348,24 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
+ if e := t.WriteStatus(stream, appStatus); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } +- if binlog != nil { ++ if len(binlogs) != 0 { + if h, _ := stream.Header(); h.Len() > 0 { + // Only log serverHeader if there was header. Otherwise it can + // be trailer only. +- binlog.Log(&binarylog.ServerHeader{ ++ sh := &binarylog.ServerHeader{ + Header: h, +- }) ++ } ++ for _, binlog := range binlogs { ++ binlog.Log(stream.Context(), sh) ++ } + } +- binlog.Log(&binarylog.ServerTrailer{ ++ st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, +- }) ++ } ++ for _, binlog := range binlogs { ++ binlog.Log(stream.Context(), st) ++ } + } + return appErr + } +@@ -1329,6 +1374,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + } + opts := &transport.Options{Last: true} + ++ // Server handler could have set new compressor by calling SetSendCompressor. ++ // In case it is set, we need to use it for compressing outbound message. ++ if stream.SendCompress() != sendCompressorName { ++ comp = encoding.GetCompressor(stream.SendCompress()) ++ } + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). +@@ -1346,26 +1396,34 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } +- if binlog != nil { ++ if len(binlogs) != 0 { + h, _ := stream.Header() +- binlog.Log(&binarylog.ServerHeader{ ++ sh := &binarylog.ServerHeader{ + Header: h, +- }) +- binlog.Log(&binarylog.ServerTrailer{ ++ } ++ st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, +- }) ++ } ++ for _, binlog := range binlogs { ++ binlog.Log(stream.Context(), sh) ++ binlog.Log(stream.Context(), st) ++ } + } + return err + } +- if binlog != nil { ++ if len(binlogs) != 0 { + h, _ := stream.Header() +- binlog.Log(&binarylog.ServerHeader{ ++ sh := &binarylog.ServerHeader{ + Header: h, +- }) +- binlog.Log(&binarylog.ServerMessage{ ++ } ++ sm := &binarylog.ServerMessage{ + Message: reply, +- }) ++ } ++ for _, binlog := range binlogs { ++ binlog.Log(stream.Context(), sh) ++ binlog.Log(stream.Context(), sm) ++ } + } + if channelz.IsOn() { + t.IncrMsgSent() +@@ -1376,14 +1434,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? +- err = t.WriteStatus(stream, statusOK) +- if binlog != nil { +- binlog.Log(&binarylog.ServerTrailer{ ++ if len(binlogs) != 0 { ++ st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, +- }) ++ } ++ for _, binlog := range binlogs { ++ binlog.Log(stream.Context(), st) ++ } + } +- return err ++ return t.WriteStatus(stream, statusOK) + } + + // chainStreamServerInterceptors chains all stream server interceptors into one. 
+@@ -1409,16 +1469,16 @@ func chainStreamServerInterceptors(s *Server) { + + func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { +- var i int +- var next StreamHandler +- next = func(srv interface{}, ss ServerStream) error { +- if i == len(interceptors)-1 { +- return interceptors[i](srv, ss, info, handler) +- } +- i++ +- return interceptors[i-1](srv, ss, info, next) +- } +- return next(srv, ss) ++ return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) ++ } ++} ++ ++func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { ++ if curr == len(interceptors)-1 { ++ return finalHandler ++ } ++ return func(srv interface{}, stream ServerStream) error { ++ return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } + } + +@@ -1426,16 +1486,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + if channelz.IsOn() { + s.incrCallsStarted() + } +- sh := s.opts.statsHandler ++ shs := s.opts.statsHandlers + var statsBegin *stats.Begin +- if sh != nil { ++ if len(shs) != 0 { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } +- sh.HandleRPC(stream.Context(), statsBegin) ++ for _, sh := range shs { ++ sh.HandleRPC(stream.Context(), statsBegin) ++ } + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ss := &serverStream{ +@@ -1447,10 +1509,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, +- statsHandler: sh, ++ statsHandler: shs, + } + +- if sh != nil || trInfo != nil || channelz.IsOn() { ++ if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + // See comment in processUnaryRPC on defers. 
+ defer func() { + if trInfo != nil { +@@ -1464,7 +1526,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + ss.mu.Unlock() + } + +- if sh != nil { ++ if len(shs) != 0 { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), +@@ -1472,7 +1534,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } +- sh.HandleRPC(stream.Context(), end) ++ for _, sh := range shs { ++ sh.HandleRPC(stream.Context(), end) ++ } + } + + if channelz.IsOn() { +@@ -1485,8 +1549,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + }() + } + +- ss.binlog = binarylog.GetMethodLogger(stream.Method()) +- if ss.binlog != nil { ++ if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { ++ ss.binlogs = append(ss.binlogs, ml) ++ } ++ if s.opts.binaryLogger != nil { ++ if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { ++ ss.binlogs = append(ss.binlogs, ml) ++ } ++ } ++ if len(ss.binlogs) != 0 { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, +@@ -1505,7 +1576,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + if peer, ok := peer.FromContext(ss.Context()); ok { + logEntry.PeerAddr = peer.Addr + } +- ss.binlog.Log(logEntry) ++ for _, binlog := range ss.binlogs { ++ binlog.Log(stream.Context(), logEntry) ++ } + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try +@@ -1527,12 +1600,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp +- stream.SetSendCompress(s.opts.cp.Type()) ++ ss.sendCompressorName = s.opts.cp.Type() + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { +- stream.SetSendCompress(rc) ++ ss.sendCompressorName = rc ++ } ++ } ++ ++ if ss.sendCompressorName != "" { ++ if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { ++ return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + } + } + +@@ -1559,7 +1638,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { +- appStatus = status.New(codes.Unknown, appErr.Error()) ++ // Convert non-status application error to a status error with code ++ // Unknown, but handle context errors specifically. ++ appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() + } + if trInfo != nil { +@@ -1568,13 +1649,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } +- t.WriteStatus(ss.s, appStatus) +- if ss.binlog != nil { +- ss.binlog.Log(&binarylog.ServerTrailer{ ++ if len(ss.binlogs) != 0 { ++ st := &binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, +- }) ++ } ++ for _, binlog := range ss.binlogs { ++ binlog.Log(stream.Context(), st) ++ } + } ++ t.WriteStatus(ss.s, appStatus) + // TODO: Should we log an error from WriteStatus here and below? 
+ return appErr + } +@@ -1583,14 +1667,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + ss.trInfo.tr.LazyLog(stringer("OK"), false) + ss.mu.Unlock() + } +- err = t.WriteStatus(ss.s, statusOK) +- if ss.binlog != nil { +- ss.binlog.Log(&binarylog.ServerTrailer{ ++ if len(ss.binlogs) != 0 { ++ st := &binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, +- }) ++ } ++ for _, binlog := range ss.binlogs { ++ binlog.Log(stream.Context(), st) ++ } + } +- return err ++ return t.WriteStatus(ss.s, statusOK) + } + + func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +@@ -1664,7 +1750,7 @@ type streamKey struct{} + // NewContextWithServerTransportStream creates a new context from ctx and + // attaches stream to it. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -1679,7 +1765,7 @@ func NewContextWithServerTransportStream(ctx context.Context, stream ServerTrans + // + // See also NewContextWithServerTransportStream. + // +-// Experimental ++// # Experimental + // + // Notice: This type is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -1694,7 +1780,7 @@ type ServerTransportStream interface { + // ctx. Returns nil if the given context has no stream associated with it + // (which implies it is not an RPC invocation context). + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +@@ -1716,11 +1802,7 @@ func (s *Server) Stop() { + s.done.Fire() + }() + +- s.channelzRemoveOnce.Do(func() { +- if channelz.IsOn() { +- channelz.RemoveEntry(s.channelzID) +- } +- }) ++ s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) + + s.mu.Lock() + listeners := s.lis +@@ -1736,7 +1818,7 @@ func (s *Server) Stop() { + } + for _, cs := range conns { + for st := range cs { +- st.Close() ++ st.Close(errors.New("Server.Stop called")) + } + } + if s.opts.numServerWorkers > 0 { +@@ -1758,11 +1840,7 @@ func (s *Server) GracefulStop() { + s.quit.Fire() + defer s.done.Fire() + +- s.channelzRemoveOnce.Do(func() { +- if channelz.IsOn() { +- channelz.RemoveEntry(s.channelzID) +- } +- }) ++ s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() +@@ -1776,7 +1854,7 @@ func (s *Server) GracefulStop() { + if !s.drain { + for _, conns := range s.conns { + for st := range conns { +- st.Drain() ++ st.Drain("graceful_stop") + } + } + s.drain = true +@@ -1815,12 +1893,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { + return codec + } + +-// SetHeader sets the header metadata. +-// When called multiple times, all the provided metadata will be merged. +-// All the metadata will be sent out when one of the following happens: +-// - grpc.SendHeader() is called; +-// - The first response is sent out; +-// - An RPC status is sent out (error or success). ++// SetHeader sets the header metadata to be sent from the server to the client. ++// The context provided must be the context passed to the server's handler. ++// ++// Streaming RPCs should prefer the SetHeader method of the ServerStream. ++// ++// When called multiple times, all the provided metadata will be merged. All ++// the metadata will be sent out when one of the following happens: ++// ++// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. 
++// - The first response message is sent. For unary handlers, this occurs when ++// the handler returns; for streaming handlers, this can happen when stream's ++// SendMsg method is called. ++// - An RPC status is sent out (error or success). This occurs when the handler ++// returns. ++// ++// SetHeader will fail if called after any of the events above. ++// ++// The error returned is compatible with the status package. However, the ++// status code will often not match the RPC status as seen by the client ++// application, and therefore, should not be relied upon for this purpose. + func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil +@@ -1832,8 +1924,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { + return stream.SetHeader(md) + } + +-// SendHeader sends header metadata. It may be called at most once. +-// The provided md and headers set by SetHeader() will be sent. ++// SendHeader sends header metadata. It may be called at most once, and may not ++// be called after any event that causes headers to be sent (see SetHeader for ++// a complete list). The provided md and headers set by SetHeader() will be ++// sent. ++// ++// The error returned is compatible with the status package. However, the ++// status code will often not match the RPC status as seen by the client ++// application, and therefore, should not be relied upon for this purpose. + func SendHeader(ctx context.Context, md metadata.MD) error { + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { +@@ -1845,8 +1943,66 @@ func SendHeader(ctx context.Context, md metadata.MD) error { + return nil + } + ++// SetSendCompressor sets a compressor for outbound messages from the server. ++// It must not be called after any event that causes headers to be sent ++// (see ServerStream.SetHeader for the complete list). Provided compressor is ++// used when below conditions are met: ++// ++// - compressor is registered via encoding.RegisterCompressor ++// - compressor name must exist in the client advertised compressor names ++// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to ++// get client supported compressor names. ++// ++// The context provided must be the context passed to the server's handler. ++// It must be noted that compressor name encoding.Identity disables the ++// outbound compression. ++// By default, server messages will be sent using the same compressor with ++// which request messages were sent. ++// ++// It is not safe to call SetSendCompressor concurrently with SendHeader and ++// SendMsg. ++// ++// # Experimental ++// ++// Notice: This function is EXPERIMENTAL and may be changed or removed in a ++// later release. ++func SetSendCompressor(ctx context.Context, name string) error { ++ stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) ++ if !ok || stream == nil { ++ return fmt.Errorf("failed to fetch the stream from the given context") ++ } ++ ++ if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil { ++ return fmt.Errorf("unable to set send compressor: %w", err) ++ } ++ ++ return stream.SetSendCompress(name) ++} ++ ++// ClientSupportedCompressors returns compressor names advertised by the client ++// via grpc-accept-encoding header. ++// ++// The context provided must be the context passed to the server's handler. ++// ++// # Experimental ++// ++// Notice: This function is EXPERIMENTAL and may be changed or removed in a ++// later release. 
++func ClientSupportedCompressors(ctx context.Context) ([]string, error) { ++ stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) ++ if !ok || stream == nil { ++ return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) ++ } ++ ++ return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil ++} ++ + // SetTrailer sets the trailer metadata that will be sent when an RPC returns. + // When called more than once, all the provided metadata will be merged. ++// ++// The error returned is compatible with the status package. However, the ++// status code will often not match the RPC status as seen by the client ++// application, and therefore, should not be relied upon for this purpose. + func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil +@@ -1875,3 +2031,51 @@ type channelzServer struct { + func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { + return c.s.channelzMetric() + } ++ ++// validateSendCompressor returns an error when given compressor name cannot be ++// handled by the server or the client based on the advertised compressors. ++func validateSendCompressor(name, clientCompressors string) error { ++ if name == encoding.Identity { ++ return nil ++ } ++ ++ if !grpcutil.IsCompressorNameRegistered(name) { ++ return fmt.Errorf("compressor not registered %q", name) ++ } ++ ++ for _, c := range strings.Split(clientCompressors, ",") { ++ if c == name { ++ return nil // found match ++ } ++ } ++ return fmt.Errorf("client does not support compressor %q", name) ++} ++ ++// atomicSemaphore implements a blocking, counting semaphore. acquire should be ++// called synchronously; release may be called asynchronously. ++type atomicSemaphore struct { ++ n int64 ++ wait chan struct{} ++} ++ ++func (q *atomicSemaphore) acquire() { ++ if atomic.AddInt64(&q.n, -1) < 0 { ++ // We ran out of quota. Block until a release happens. ++ <-q.wait ++ } ++} ++ ++func (q *atomicSemaphore) release() { ++ // N.B. the "<= 0" check below should allow for this to work with multiple ++ // concurrent calls to acquire, but also note that with synchronous calls to ++ // acquire, as our system does, n will never be less than -1. There are ++ // fairness issues (queuing) to consider if this was to be generalized. ++ if atomic.AddInt64(&q.n, 1) <= 0 { ++ // An acquire was waiting on us. Unblock it. ++ q.wait <- struct{}{} ++ } ++} ++ ++func newHandlerQuota(n uint32) *atomicSemaphore { ++ return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)} ++} +diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go +index 22c4240..0df11fc 100644 +--- a/vendor/google.golang.org/grpc/service_config.go ++++ b/vendor/google.golang.org/grpc/service_config.go +@@ -23,8 +23,6 @@ import ( + "errors" + "fmt" + "reflect" +- "strconv" +- "strings" + "time" + + "google.golang.org/grpc/codes" +@@ -57,10 +55,9 @@ type lbConfig struct { + type ServiceConfig struct { + serviceconfig.Config + +- // LB is the load balancer the service providers recommends. The balancer +- // specified via grpc.WithBalancerName will override this. This is deprecated; +- // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig +- // will be used. ++ // LB is the load balancer the service providers recommends. This is ++ // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, ++ // lbConfig will be used. 
+ LB *string + + // lbConfig is the service config's load balancing configuration. If +@@ -107,8 +104,8 @@ type healthCheckConfig struct { + + type jsonRetryPolicy struct { + MaxAttempts int +- InitialBackoff string +- MaxBackoff string ++ InitialBackoff internalserviceconfig.Duration ++ MaxBackoff internalserviceconfig.Duration + BackoffMultiplier float64 + RetryableStatusCodes []codes.Code + } +@@ -130,50 +127,6 @@ type retryThrottlingPolicy struct { + TokenRatio float64 + } + +-func parseDuration(s *string) (*time.Duration, error) { +- if s == nil { +- return nil, nil +- } +- if !strings.HasSuffix(*s, "s") { +- return nil, fmt.Errorf("malformed duration %q", *s) +- } +- ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) +- if len(ss) > 2 { +- return nil, fmt.Errorf("malformed duration %q", *s) +- } +- // hasDigits is set if either the whole or fractional part of the number is +- // present, since both are optional but one is required. +- hasDigits := false +- var d time.Duration +- if len(ss[0]) > 0 { +- i, err := strconv.ParseInt(ss[0], 10, 32) +- if err != nil { +- return nil, fmt.Errorf("malformed duration %q: %v", *s, err) +- } +- d = time.Duration(i) * time.Second +- hasDigits = true +- } +- if len(ss) == 2 && len(ss[1]) > 0 { +- if len(ss[1]) > 9 { +- return nil, fmt.Errorf("malformed duration %q", *s) +- } +- f, err := strconv.ParseInt(ss[1], 10, 64) +- if err != nil { +- return nil, fmt.Errorf("malformed duration %q: %v", *s, err) +- } +- for i := 9; i > len(ss[1]); i-- { +- f *= 10 +- } +- d += time.Duration(f) +- hasDigits = true +- } +- if !hasDigits { +- return nil, fmt.Errorf("malformed duration %q", *s) +- } +- +- return &d, nil +-} +- + type jsonName struct { + Service string + Method string +@@ -202,7 +155,7 @@ func (j jsonName) generatePath() (string, error) { + type jsonMC struct { + Name *[]jsonName + WaitForReady *bool +- Timeout *string ++ Timeout *internalserviceconfig.Duration + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +@@ -218,7 +171,7 @@ type jsonSC struct { + } + + func init() { +- internal.ParseServiceConfigForTesting = parseServiceConfig ++ internal.ParseServiceConfig = parseServiceConfig + } + func parseServiceConfig(js string) *serviceconfig.ParseResult { + if len(js) == 0 { +@@ -227,7 +180,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { +- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) ++ logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + sc := ServiceConfig{ +@@ -253,18 +206,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { + if m.Name == nil { + continue + } +- d, err := parseDuration(m.Timeout) +- if err != nil { +- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) +- return &serviceconfig.ParseResult{Err: err} +- } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, +- Timeout: d, ++ Timeout: (*time.Duration)(m.Timeout), + } + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { +- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) ++ logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + if m.MaxRequestMessageBytes != nil { +@@ -284,13 +232,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { + for i, n := 
range *m.Name { + path, err := n.generatePath() + if err != nil { +- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) ++ logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + + if _, ok := paths[path]; ok { + err = errDuplicatedName +- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) ++ logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + paths[path] = struct{}{} +@@ -313,18 +261,10 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol + if jrp == nil { + return nil, nil + } +- ib, err := parseDuration(&jrp.InitialBackoff) +- if err != nil { +- return nil, err +- } +- mb, err := parseDuration(&jrp.MaxBackoff) +- if err != nil { +- return nil, err +- } + + if jrp.MaxAttempts <= 1 || +- *ib <= 0 || +- *mb <= 0 || ++ jrp.InitialBackoff <= 0 || ++ jrp.MaxBackoff <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) +@@ -333,8 +273,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol + + rp := &internalserviceconfig.RetryPolicy{ + MaxAttempts: jrp.MaxAttempts, +- InitialBackoff: *ib, +- MaxBackoff: *mb, ++ InitialBackoff: time.Duration(jrp.InitialBackoff), ++ MaxBackoff: time.Duration(jrp.MaxBackoff), + BackoffMultiplier: jrp.BackoffMultiplier, + RetryableStatusCodes: make(map[codes.Code]bool), + } +@@ -381,6 +321,9 @@ func init() { + // + // If any of them is NOT *ServiceConfig, return false. + func equalServiceConfig(a, b serviceconfig.Config) bool { ++ if a == nil && b == nil { ++ return true ++ } + aa, ok := a.(*ServiceConfig) + if !ok { + return false +diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +index 73a2f92..35e7a20 100644 +--- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go ++++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +@@ -19,7 +19,7 @@ + // Package serviceconfig defines types and methods for operating on gRPC + // service configs. + // +-// Experimental ++// # Experimental + // + // Notice: This package is EXPERIMENTAL and may be changed or removed in a + // later release. +diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go +index a5ebeeb..7a552a9 100644 +--- a/vendor/google.golang.org/grpc/stats/stats.go ++++ b/vendor/google.golang.org/grpc/stats/stats.go +@@ -36,12 +36,12 @@ type RPCStats interface { + IsClient() bool + } + +-// Begin contains stats when an RPC begins. ++// Begin contains stats when an RPC attempt begins. + // FailFast is only valid if this Begin is from client side. + type Begin struct { + // Client is true if this Begin is from client side. + Client bool +- // BeginTime is the time when the RPC begins. ++ // BeginTime is the time when the RPC attempt begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool +@@ -49,6 +49,9 @@ type Begin struct { + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. 
+ IsServerStream bool ++ // IsTransparentRetryAttempt indicates whether this attempt was initiated ++ // due to transparently retrying a previous attempt. ++ IsTransparentRetryAttempt bool + } + + // IsClient indicates if the stats information is from client side. +@@ -64,10 +67,18 @@ type InPayload struct { + Payload interface{} + // Data is the serialized message payload. + Data []byte +- // Length is the length of uncompressed data. ++ ++ // Length is the size of the uncompressed payload data. Does not include any ++ // framing (gRPC or HTTP/2). + Length int +- // WireLength is the length of data on wire (compressed, signed, encrypted). ++ // CompressedLength is the size of the compressed payload data. Does not ++ // include any framing (gRPC or HTTP/2). Same as Length if compression not ++ // enabled. ++ CompressedLength int ++ // WireLength is the size of the compressed payload data plus gRPC framing. ++ // Does not include HTTP/2 framing. + WireLength int ++ + // RecvTime is the time when the payload is received. + RecvTime time.Time + } +@@ -126,9 +137,15 @@ type OutPayload struct { + Payload interface{} + // Data is the serialized message payload. + Data []byte +- // Length is the length of uncompressed data. ++ // Length is the size of the uncompressed payload data. Does not include any ++ // framing (gRPC or HTTP/2). + Length int +- // WireLength is the length of data on wire (compressed, signed, encrypted). ++ // CompressedLength is the size of the compressed payload data. Does not ++ // include any framing (gRPC or HTTP/2). Same as Length if compression not ++ // enabled. ++ CompressedLength int ++ // WireLength is the size of the compressed payload data plus gRPC framing. ++ // Does not include HTTP/2 framing. + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go +index 54d1871..bcf2e4d 100644 +--- a/vendor/google.golang.org/grpc/status/status.go ++++ b/vendor/google.golang.org/grpc/status/status.go +@@ -29,6 +29,7 @@ package status + + import ( + "context" ++ "errors" + "fmt" + + spb "google.golang.org/genproto/googleapis/rpc/status" +@@ -73,19 +74,52 @@ func FromProto(s *spb.Status) *Status { + return status.FromProto(s) + } + +-// FromError returns a Status representing err if it was produced by this +-// package or has a method `GRPCStatus() *Status`. +-// If err is nil, a Status is returned with codes.OK and no message. +-// Otherwise, ok is false and a Status is returned with codes.Unknown and +-// the original error message. ++// FromError returns a Status representation of err. ++// ++// - If err was produced by this package or implements the method `GRPCStatus() ++// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type ++// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped ++// errors, the message returned contains the entire err.Error() text and not ++// just the wrapped status. In that case, ok is true. ++// ++// - If err is nil, a Status is returned with codes.OK and no message, and ok ++// is true. ++// ++// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` ++// returns nil (which maps to Codes.OK), or if err wraps a type ++// satisfying this, a Status is returned with codes.Unknown and err's ++// Error() message, and ok is false. ++// ++// - Otherwise, err is an error not compatible with this package. 
In this ++// case, a Status is returned with codes.Unknown and err's Error() message, ++// and ok is false. + func FromError(err error) (s *Status, ok bool) { + if err == nil { + return nil, true + } +- if se, ok := err.(interface { +- GRPCStatus() *Status +- }); ok { +- return se.GRPCStatus(), true ++ type grpcstatus interface{ GRPCStatus() *Status } ++ if gs, ok := err.(grpcstatus); ok { ++ if gs.GRPCStatus() == nil { ++ // Error has status nil, which maps to codes.OK. There ++ // is no sensible behavior for this, so we turn it into ++ // an error with codes.Unknown and discard the existing ++ // status. ++ return New(codes.Unknown, err.Error()), false ++ } ++ return gs.GRPCStatus(), true ++ } ++ var gs grpcstatus ++ if errors.As(err, &gs) { ++ if gs.GRPCStatus() == nil { ++ // Error wraps an error that has status nil, which maps ++ // to codes.OK. There is no sensible behavior for this, ++ // so we turn it into an error with codes.Unknown and ++ // discard the existing status. ++ return New(codes.Unknown, err.Error()), false ++ } ++ p := gs.GRPCStatus().Proto() ++ p.Message = err.Error() ++ return status.FromProto(p), true + } + return New(codes.Unknown, err.Error()), false + } +@@ -97,33 +131,30 @@ func Convert(err error) *Status { + return s + } + +-// Code returns the Code of the error if it is a Status error, codes.OK if err +-// is nil, or codes.Unknown otherwise. ++// Code returns the Code of the error if it is a Status error or if it wraps a ++// Status error. If that is not the case, it returns codes.OK if err is nil, or ++// codes.Unknown otherwise. + func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. + if err == nil { + return codes.OK + } +- if se, ok := err.(interface { +- GRPCStatus() *Status +- }); ok { +- return se.GRPCStatus().Code() +- } +- return codes.Unknown ++ ++ return Convert(err).Code() + } + +-// FromContextError converts a context error into a Status. It returns a +-// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +-// non-nil and not a context error. ++// FromContextError converts a context error or wrapped context error into a ++// Status. It returns a Status with codes.OK if err is nil, or a Status with ++// codes.Unknown if err is non-nil and not a context error. 
+ func FromContextError(err error) *Status { +- switch err { +- case nil: ++ if err == nil { + return nil +- case context.DeadlineExceeded: ++ } ++ if errors.Is(err, context.DeadlineExceeded) { + return New(codes.DeadlineExceeded, err.Error()) +- case context.Canceled: ++ } ++ if errors.Is(err, context.Canceled) { + return New(codes.Canceled, err.Error()) +- default: +- return New(codes.Unknown, err.Error()) + } ++ return New(codes.Unknown, err.Error()) + } +diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go +index e224af1..1009268 100644 +--- a/vendor/google.golang.org/grpc/stream.go ++++ b/vendor/google.golang.org/grpc/stream.go +@@ -36,8 +36,10 @@ import ( + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" ++ imetadata "google.golang.org/grpc/internal/metadata" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" ++ istatus "google.golang.org/grpc/internal/status" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +@@ -46,10 +48,12 @@ import ( + ) + + // StreamHandler defines the handler called by gRPC server to complete the +-// execution of a streaming RPC. If a StreamHandler returns an error, it +-// should be produced by the status package, or else gRPC will use +-// codes.Unknown as the status code and err.Error() as the status message +-// of the RPC. ++// execution of a streaming RPC. ++// ++// If a StreamHandler returns an error, it should either be produced by the ++// status package, or be one of the context errors. Otherwise, gRPC will use ++// codes.Unknown as the status code and err.Error() as the status message of the ++// RPC. + type StreamHandler func(srv interface{}, stream ServerStream) error + + // StreamDesc represents a streaming RPC service's method specification. Used +@@ -119,6 +123,9 @@ type ClientStream interface { + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. ++ // ++ // It is not safe to modify the message after calling SendMsg. Tracing ++ // libraries and stats handlers may use the message lazily. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On +@@ -137,17 +144,22 @@ type ClientStream interface { + // To ensure resources are not leaked due to the stream returned, one of the following + // actions must be performed: + // +-// 1. Call Close on the ClientConn. +-// 2. Cancel the context provided. +-// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +-// client-streaming RPC, for instance, might use the helper function +-// CloseAndRecv (note that CloseSend does not Recv, therefore is not +-// guaranteed to release all resources). +-// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. ++// 1. Call Close on the ClientConn. ++// 2. Cancel the context provided. ++// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated ++// client-streaming RPC, for instance, might use the helper function ++// CloseAndRecv (note that CloseSend does not Recv, therefore is not ++// guaranteed to release all resources). ++// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. 
+ // + // If none of the above happen, a goroutine and a context will be leaked, and grpc + // will not call the optionally-configured stats handler with a stats.End message. + func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { ++ if err := cc.idlenessMgr.onCallBegin(); err != nil { ++ return nil, err ++ } ++ defer cc.idlenessMgr.onCallEnd() ++ + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) +@@ -164,6 +176,20 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth + } + + func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { ++ if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { ++ // validate md ++ if err := imetadata.Validate(md); err != nil { ++ return nil, status.Error(codes.Internal, err.Error()) ++ } ++ // validate added ++ for _, kvs := range added { ++ for i := 0; i < len(kvs); i += 2 { ++ if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { ++ return nil, status.Error(codes.Internal, err.Error()) ++ } ++ } ++ } ++ } + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { +@@ -187,6 +213,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth + rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} + rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) + if err != nil { ++ if st, ok := status.FromError(err); ok { ++ // Restrict the code to the list allowed by gRFC A54. ++ if istatus.IsRestrictedControlPlaneCode(st) { ++ err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) ++ } ++ return nil, err ++ } + return nil, toRPCErr(err) + } + +@@ -274,35 +307,6 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client + if c.creds != nil { + callHdr.Creds = c.creds + } +- var trInfo *traceInfo +- if EnableTracing { +- trInfo = &traceInfo{ +- tr: trace.New("grpc.Sent."+methodFamily(method), method), +- firstLine: firstLine{ +- client: true, +- }, +- } +- if deadline, ok := ctx.Deadline(); ok { +- trInfo.firstLine.deadline = time.Until(deadline) +- } +- trInfo.tr.LazyLog(&trInfo.firstLine, false) +- ctx = trace.NewContext(ctx, trInfo.tr) +- } +- ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) +- sh := cc.dopts.copts.StatsHandler +- var beginTime time.Time +- if sh != nil { +- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) +- beginTime = time.Now() +- begin := &stats.Begin{ +- Client: true, +- BeginTime: beginTime, +- FailFast: c.failFast, +- IsClientStream: desc.ClientStreams, +- IsServerStream: desc.ServerStreams, +- } +- sh.HandleRPC(ctx, begin) +- } + + cs := &clientStream{ + callHdr: callHdr, +@@ -316,29 +320,41 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client + cp: cp, + comp: comp, + cancel: cancel, +- beginTime: beginTime, + firstAttempt: true, + onCommit: onCommit, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } +- cs.binlog = binarylog.GetMethodLogger(method) +- +- // Only this initial attempt has stats/tracing. +- // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. 
+- if err := cs.newAttemptLocked(sh, trInfo); err != nil { +- cs.finish(err) +- return nil, err ++ if ml := binarylog.GetMethodLogger(method); ml != nil { ++ cs.binlogs = append(cs.binlogs, ml) ++ } ++ if cc.dopts.binaryLogger != nil { ++ if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { ++ cs.binlogs = append(cs.binlogs, ml) ++ } + } + +- op := func(a *csAttempt) error { return a.newStream() } ++ // Pick the transport to use and create a new stream on the transport. ++ // Assign cs.attempt upon success. ++ op := func(a *csAttempt) error { ++ if err := a.getTransport(); err != nil { ++ return err ++ } ++ if err := a.newStream(); err != nil { ++ return err ++ } ++ // Because this operation is always called either here (while creating ++ // the clientStream) or by the retry code while locked when replaying ++ // the operation, it is safe to access cs.attempt directly. ++ cs.attempt = a ++ return nil ++ } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { +- cs.finish(err) + return nil, err + } + +- if cs.binlog != nil { ++ if len(cs.binlogs) != 0 { + md, _ := metadata.FromOutgoingContext(ctx) + logEntry := &binarylog.ClientHeader{ + OnClientSide: true, +@@ -352,7 +368,9 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client + logEntry.Timeout = 0 + } + } +- cs.binlog.Log(logEntry) ++ for _, binlog := range cs.binlogs { ++ binlog.Log(cs.ctx, logEntry) ++ } + } + + if desc != unaryStreamDesc { +@@ -373,60 +391,123 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client + return cs, nil + } + +-// newAttemptLocked creates a new attempt with a transport. +-// If it succeeds, then it replaces clientStream's attempt with this new attempt. +-func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { +- newAttempt := &csAttempt{ +- cs: cs, +- dc: cs.cc.dopts.dc, +- statsHandler: sh, +- trInfo: trInfo, ++// newAttemptLocked creates a new csAttempt without a transport or stream. ++func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { ++ if err := cs.ctx.Err(); err != nil { ++ return nil, toRPCErr(err) + } +- defer func() { +- if retErr != nil { +- // This attempt is not set in the clientStream, so it's finish won't +- // be called. Call it here for stats and trace in case they are not +- // nil. 
+- newAttempt.finish(retErr) ++ if err := cs.cc.ctx.Err(); err != nil { ++ return nil, ErrClientConnClosing ++ } ++ ++ ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) ++ method := cs.callHdr.Method ++ var beginTime time.Time ++ shs := cs.cc.dopts.copts.StatsHandlers ++ for _, sh := range shs { ++ ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) ++ beginTime = time.Now() ++ begin := &stats.Begin{ ++ Client: true, ++ BeginTime: beginTime, ++ FailFast: cs.callInfo.failFast, ++ IsClientStream: cs.desc.ClientStreams, ++ IsServerStream: cs.desc.ServerStreams, ++ IsTransparentRetryAttempt: isTransparent, + } +- }() ++ sh.HandleRPC(ctx, begin) ++ } + +- if err := cs.ctx.Err(); err != nil { +- return toRPCErr(err) ++ var trInfo *traceInfo ++ if EnableTracing { ++ trInfo = &traceInfo{ ++ tr: trace.New("grpc.Sent."+methodFamily(method), method), ++ firstLine: firstLine{ ++ client: true, ++ }, ++ } ++ if deadline, ok := ctx.Deadline(); ok { ++ trInfo.firstLine.deadline = time.Until(deadline) ++ } ++ trInfo.tr.LazyLog(&trInfo.firstLine, false) ++ ctx = trace.NewContext(ctx, trInfo.tr) + } + +- ctx := cs.ctx +- if cs.cc.parsedTarget.Scheme == "xds" { ++ if cs.cc.parsedTarget.URL.Scheme == "xds" { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. +- ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs( ++ ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( + "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), + )) + } +- t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) ++ ++ return &csAttempt{ ++ ctx: ctx, ++ beginTime: beginTime, ++ cs: cs, ++ dc: cs.cc.dopts.dc, ++ statsHandlers: shs, ++ trInfo: trInfo, ++ }, nil ++} ++ ++func (a *csAttempt) getTransport() error { ++ cs := a.cs ++ ++ var err error ++ a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + if err != nil { ++ if de, ok := err.(dropError); ok { ++ err = de.error ++ a.drop = true ++ } + return err + } +- if trInfo != nil { +- trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) ++ if a.trInfo != nil { ++ a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) + } +- newAttempt.t = t +- newAttempt.done = done +- cs.attempt = newAttempt + return nil + } + + func (a *csAttempt) newStream() error { + cs := a.cs + cs.callHdr.PreviousAttempts = cs.numRetries +- s, err := a.t.NewStream(cs.ctx, cs.callHdr) ++ ++ // Merge metadata stored in PickResult, if any, with existing call metadata. ++ // It is safe to overwrite the csAttempt's context here, since all state ++ // maintained in it are local to the attempt. When the attempt has to be ++ // retried, a new instance of csAttempt will be created. ++ if a.pickResult.Metadata != nil { ++ // We currently do not have a function it the metadata package which ++ // merges given metadata with existing metadata in a context. Existing ++ // function `AppendToOutgoingContext()` takes a variadic argument of key ++ // value pairs. ++ // ++ // TODO: Make it possible to retrieve key value pairs from metadata.MD ++ // in a form passable to AppendToOutgoingContext(), or create a version ++ // of AppendToOutgoingContext() that accepts a metadata.MD. 
++ md, _ := metadata.FromOutgoingContext(a.ctx) ++ md = metadata.Join(md, a.pickResult.Metadata) ++ a.ctx = metadata.NewOutgoingContext(a.ctx, md) ++ } ++ ++ s, err := a.t.NewStream(a.ctx, cs.callHdr) + if err != nil { +- // Return without converting to an RPC error so retry code can +- // inspect. +- return err ++ nse, ok := err.(*transport.NewStreamError) ++ if !ok { ++ // Unexpected. ++ return err ++ } ++ ++ if nse.AllowTransparentRetry { ++ a.allowTransparentRetry = true ++ } ++ ++ // Unwrap and convert error. ++ return toRPCErr(nse.Err) + } +- cs.attempt.s = s +- cs.attempt.p = &parser{r: s} ++ a.s = s ++ a.p = &parser{r: s} + return nil + } + +@@ -444,8 +525,7 @@ type clientStream struct { + + cancel context.CancelFunc // cancels all attempts + +- sentLast bool // sent an end stream +- beginTime time.Time ++ sentLast bool // sent an end stream + + methodConfig *MethodConfig + +@@ -453,7 +533,7 @@ type clientStream struct { + + retryThrottler *retryThrottler // The throttler active when the RPC began. + +- binlog *binarylog.MethodLogger // Binary logger, can be nil. ++ binlogs []binarylog.MethodLogger + // serverHeaderBinlogged is a boolean for whether server header has been + // logged. Server header will be logged when the first time one of those + // happens: stream.Header(), stream.Recv(). +@@ -485,11 +565,12 @@ type clientStream struct { + // csAttempt implements a single transport stream attempt within a + // clientStream. + type csAttempt struct { +- cs *clientStream +- t transport.ClientTransport +- s *transport.Stream +- p *parser +- done func(balancer.DoneInfo) ++ ctx context.Context ++ cs *clientStream ++ t transport.ClientTransport ++ s *transport.Stream ++ p *parser ++ pickResult balancer.PickResult + + finished bool + dc Decompressor +@@ -502,7 +583,13 @@ type csAttempt struct { + // and cleared when the finish method is called. + trInfo *traceInfo + +- statsHandler stats.Handler ++ statsHandlers []stats.Handler ++ beginTime time.Time ++ ++ // set for newStream errors that may be transparently retried ++ allowTransparentRetry bool ++ // set for pick errors that are returned as a status ++ drop bool + } + + func (cs *clientStream) commitAttemptLocked() { +@@ -520,95 +607,76 @@ func (cs *clientStream) commitAttempt() { + } + + // shouldRetry returns nil if the RPC should be retried; otherwise it returns +-// the error that should be returned by the operation. +-func (cs *clientStream) shouldRetry(err error) error { +- if cs.attempt.s == nil { +- // Error from NewClientStream. +- nse, ok := err.(*transport.NewStreamError) +- if !ok { +- // Unexpected, but assume no I/O was performed and the RPC is not +- // fatal, so retry indefinitely. +- return nil +- } +- +- // Unwrap and convert error. +- err = toRPCErr(nse.Err) +- +- // Never retry DoNotRetry errors, which indicate the RPC should not be +- // retried due to max header list size violation, etc. +- if nse.DoNotRetry { +- return err +- } ++// the error that should be returned by the operation. If the RPC should be ++// retried, the bool indicates whether it is being retried transparently. ++func (a *csAttempt) shouldRetry(err error) (bool, error) { ++ cs := a.cs + +- // In the event of a non-IO operation error from NewStream, we never +- // attempted to write anything to the wire, so we can retry +- // indefinitely. +- if !nse.PerformedIO { +- return nil +- } ++ if cs.finished || cs.committed || a.drop { ++ // RPC is finished or committed or was dropped by the picker; cannot retry. 
++ return false, err + } +- if cs.finished || cs.committed { +- // RPC is finished or committed; cannot retry. +- return err ++ if a.s == nil && a.allowTransparentRetry { ++ return true, nil + } + // Wait for the trailers. + unprocessed := false +- if cs.attempt.s != nil { +- <-cs.attempt.s.Done() +- unprocessed = cs.attempt.s.Unprocessed() ++ if a.s != nil { ++ <-a.s.Done() ++ unprocessed = a.s.Unprocessed() + } + if cs.firstAttempt && unprocessed { + // First attempt, stream unprocessed: transparently retry. +- return nil ++ return true, nil + } + if cs.cc.dopts.disableRetry { +- return err ++ return false, err + } + + pushback := 0 + hasPushback := false +- if cs.attempt.s != nil { +- if !cs.attempt.s.TrailersOnly() { +- return err ++ if a.s != nil { ++ if !a.s.TrailersOnly() { ++ return false, err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. +- sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] ++ sps := a.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. +- return err ++ return false, err + } + hasPushback = true + } else if len(sps) > 1 { + channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) + cs.retryThrottler.throttle() // This counts as a failure for throttling. +- return err ++ return false, err + } + } + + var code codes.Code +- if cs.attempt.s != nil { +- code = cs.attempt.s.Status().Code() ++ if a.s != nil { ++ code = a.s.Status().Code() + } else { +- code = status.Convert(err).Code() ++ code = status.Code(err) + } + + rp := cs.methodConfig.RetryPolicy + if rp == nil || !rp.RetryableStatusCodes[code] { +- return err ++ return false, err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. + if cs.retryThrottler.throttle() { +- return err ++ return false, err + } + if cs.numRetries+1 >= rp.MaxAttempts { +- return err ++ return false, err + } + + var dur time.Duration +@@ -631,26 +699,32 @@ func (cs *clientStream) shouldRetry(err error) error { + select { + case <-t.C: + cs.numRetries++ +- return nil ++ return false, nil + case <-cs.ctx.Done(): + t.Stop() +- return status.FromContextError(cs.ctx.Err()).Err() ++ return false, status.FromContextError(cs.ctx.Err()).Err() + } + } + + // Returns nil if a retry was performed and succeeded; error otherwise. +-func (cs *clientStream) retryLocked(lastErr error) error { ++func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { + for { +- cs.attempt.finish(toRPCErr(lastErr)) +- if err := cs.shouldRetry(lastErr); err != nil { ++ attempt.finish(toRPCErr(lastErr)) ++ isTransparent, err := attempt.shouldRetry(lastErr) ++ if err != nil { + cs.commitAttemptLocked() + return err + } + cs.firstAttempt = false +- if err := cs.newAttemptLocked(nil, nil); err != nil { ++ attempt, err = cs.newAttemptLocked(isTransparent) ++ if err != nil { ++ // Only returns error if the clientconn is closed or the context of ++ // the stream is canceled. 
+ return err + } +- if lastErr = cs.replayBufferLocked(); lastErr == nil { ++ // Note that the first op in the replay buffer always sets cs.attempt ++ // if it is able to pick a transport and create a stream. ++ if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { + return nil + } + } +@@ -660,7 +734,10 @@ func (cs *clientStream) Context() context.Context { + cs.commitAttempt() + // No need to lock before using attempt, since we know it is committed and + // cannot change. +- return cs.attempt.s.Context() ++ if cs.attempt.s != nil { ++ return cs.attempt.s.Context() ++ } ++ return cs.ctx + } + + func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { +@@ -674,6 +751,18 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) + // already be status errors. + return toRPCErr(op(cs.attempt)) + } ++ if len(cs.buffer) == 0 { ++ // For the first op, which controls creation of the stream and ++ // assigns cs.attempt, we need to create a new attempt inline ++ // before executing the first op. On subsequent ops, the attempt ++ // is created immediately before replaying the ops. ++ var err error ++ if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { ++ cs.mu.Unlock() ++ cs.finish(err) ++ return err ++ } ++ } + a := cs.attempt + cs.mu.Unlock() + err := op(a) +@@ -690,7 +779,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) + cs.mu.Unlock() + return err + } +- if err := cs.retryLocked(err); err != nil { ++ if err := cs.retryLocked(a, err); err != nil { + cs.mu.Unlock() + return err + } +@@ -699,17 +788,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) + + func (cs *clientStream) Header() (metadata.MD, error) { + var m metadata.MD ++ noHeader := false + err := cs.withRetry(func(a *csAttempt) error { + var err error + m, err = a.s.Header() ++ if err == transport.ErrNoHeaders { ++ noHeader = true ++ return nil ++ } + return toRPCErr(err) + }, cs.commitAttemptLocked) ++ + if err != nil { + cs.finish(err) + return nil, err + } +- if cs.binlog != nil && !cs.serverHeaderBinlogged { +- // Only log if binary log is on and header has not been logged. ++ ++ if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { ++ // Only log if binary log is on and header has not been logged, and ++ // there is actually headers to log. + logEntry := &binarylog.ServerHeader{ + OnClientSide: true, + Header: m, +@@ -718,10 +815,12 @@ func (cs *clientStream) Header() (metadata.MD, error) { + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } +- cs.binlog.Log(logEntry) + cs.serverHeaderBinlogged = true ++ for _, binlog := range cs.binlogs { ++ binlog.Log(cs.ctx, logEntry) ++ } + } +- return m, err ++ return m, nil + } + + func (cs *clientStream) Trailer() metadata.MD { +@@ -739,10 +838,9 @@ func (cs *clientStream) Trailer() metadata.MD { + return cs.attempt.s.Trailer() + } + +-func (cs *clientStream) replayBufferLocked() error { +- a := cs.attempt ++func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { + for _, f := range cs.buffer { +- if err := f(a); err != nil { ++ if err := f(attempt); err != nil { + return err + } + } +@@ -790,47 +888,48 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { + if len(payload) > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payload), *cs.callInfo.maxSendMessageSize) + } +- msgBytes := data // Store the pointer before setting to nil. For binary logging. + op := func(a *csAttempt) error { +- err := a.sendMsg(m, hdr, payload, data) +- // nil out the message and uncomp when replaying; they are only needed for +- // stats which is disabled for subsequent attempts. +- m, data = nil, nil +- return err ++ return a.sendMsg(m, hdr, payload, data) + } + err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) +- if cs.binlog != nil && err == nil { +- cs.binlog.Log(&binarylog.ClientMessage{ ++ if len(cs.binlogs) != 0 && err == nil { ++ cm := &binarylog.ClientMessage{ + OnClientSide: true, +- Message: msgBytes, +- }) ++ Message: data, ++ } ++ for _, binlog := range cs.binlogs { ++ binlog.Log(cs.ctx, cm) ++ } + } +- return ++ return err + } + + func (cs *clientStream) RecvMsg(m interface{}) error { +- if cs.binlog != nil && !cs.serverHeaderBinlogged { ++ if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { + // Call Header() to binary log header if it's not already logged. + cs.Header() + } + var recvInfo *payloadInfo +- if cs.binlog != nil { ++ if len(cs.binlogs) != 0 { + recvInfo = &payloadInfo{} + } + err := cs.withRetry(func(a *csAttempt) error { + return a.recvMsg(m, recvInfo) + }, cs.commitAttemptLocked) +- if cs.binlog != nil && err == nil { +- cs.binlog.Log(&binarylog.ServerMessage{ ++ if len(cs.binlogs) != 0 && err == nil { ++ sm := &binarylog.ServerMessage{ + OnClientSide: true, + Message: recvInfo.uncompressedBytes, +- }) ++ } ++ for _, binlog := range cs.binlogs { ++ binlog.Log(cs.ctx, sm) ++ } + } + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + +- if cs.binlog != nil { ++ if len(cs.binlogs) != 0 { + // finish will not log Trailer. Log Trailer here. + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, +@@ -843,7 +942,9 @@ func (cs *clientStream) RecvMsg(m interface{}) error { + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } +- cs.binlog.Log(logEntry) ++ for _, binlog := range cs.binlogs { ++ binlog.Log(cs.ctx, logEntry) ++ } + } + } + return err +@@ -864,10 +965,13 @@ func (cs *clientStream) CloseSend() error { + return nil + } + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) +- if cs.binlog != nil { +- cs.binlog.Log(&binarylog.ClientHalfClose{ ++ if len(cs.binlogs) != 0 { ++ chc := &binarylog.ClientHalfClose{ + OnClientSide: true, +- }) ++ } ++ for _, binlog := range cs.binlogs { ++ binlog.Log(cs.ctx, chc) ++ } + } + // We never returned an error here for reasons. + return nil +@@ -884,6 +988,9 @@ func (cs *clientStream) finish(err error) { + return + } + cs.finished = true ++ for _, onFinish := range cs.callInfo.onFinish { ++ onFinish(err) ++ } + cs.commitAttemptLocked() + if cs.attempt != nil { + cs.attempt.finish(err) +@@ -900,10 +1007,13 @@ func (cs *clientStream) finish(err error) { + // + // Only one of cancel or trailer needs to be logged. In the cases where + // users don't call RecvMsg, users must have already canceled the RPC. 
+- if cs.binlog != nil && status.Code(err) == codes.Canceled { +- cs.binlog.Log(&binarylog.Cancel{ ++ if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { ++ c := &binarylog.Cancel{ + OnClientSide: true, +- }) ++ } ++ for _, binlog := range cs.binlogs { ++ binlog.Log(cs.ctx, c) ++ } + } + if err == nil { + cs.retryThrottler.successfulRPC() +@@ -936,8 +1046,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { + } + return io.EOF + } +- if a.statsHandler != nil { +- a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) ++ for _, sh := range a.statsHandlers { ++ sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + } + if channelz.IsOn() { + a.t.IncrMsgSent() +@@ -947,7 +1057,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { + + func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { + cs := a.cs +- if a.statsHandler != nil && payInfo == nil { ++ if len(a.statsHandlers) != 0 && payInfo == nil { + payInfo = &payloadInfo{} + } + +@@ -975,6 +1085,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { + } + return io.EOF // indicates successful end of stream. + } ++ + return toRPCErr(err) + } + if a.trInfo != nil { +@@ -984,15 +1095,16 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { + } + a.mu.Unlock() + } +- if a.statsHandler != nil { +- a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ ++ for _, sh := range a.statsHandlers { ++ sh.HandleRPC(a.ctx, &stats.InPayload{ + Client: true, + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. +- Data: payInfo.uncompressedBytes, +- WireLength: payInfo.wireLength + headerLen, +- Length: len(payInfo.uncompressedBytes), ++ Data: payInfo.uncompressedBytes, ++ WireLength: payInfo.compressedLength + headerLen, ++ CompressedLength: payInfo.compressedLength, ++ Length: len(payInfo.uncompressedBytes), + }) + } + if channelz.IsOn() { +@@ -1031,12 +1143,12 @@ func (a *csAttempt) finish(err error) { + tr = a.s.Trailer() + } + +- if a.done != nil { ++ if a.pickResult.Done != nil { + br := false + if a.s != nil { + br = a.s.BytesReceived() + } +- a.done(balancer.DoneInfo{ ++ a.pickResult.Done(balancer.DoneInfo{ + Err: err, + Trailer: tr, + BytesSent: a.s != nil, +@@ -1044,15 +1156,15 @@ func (a *csAttempt) finish(err error) { + ServerLoad: balancerload.Parse(tr), + }) + } +- if a.statsHandler != nil { ++ for _, sh := range a.statsHandlers { + end := &stats.End{ + Client: true, +- BeginTime: a.cs.beginTime, ++ BeginTime: a.beginTime, + EndTime: time.Now(), + Trailer: tr, + Error: err, + } +- a.statsHandler.HandleRPC(a.cs.ctx, end) ++ sh.HandleRPC(a.ctx, end) + } + if a.trInfo != nil && a.trInfo.tr != nil { + if err == nil { +@@ -1161,14 +1273,19 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin + as.p = &parser{r: s} + ac.incrCallsStarted() + if desc != unaryStreamDesc { +- // Listen on cc and stream contexts to cleanup when the user closes the +- // ClientConn or cancels the stream context. In all other cases, an error +- // should already be injected into the recv buffer by the transport, which +- // the client will eventually receive, and then we will cancel the stream's +- // context in clientStream.finish. ++ // Listen on stream context to cleanup when the stream context is ++ // canceled. Also listen for the addrConn's context in case the ++ // addrConn is closed or reconnects to a different address. 
In all ++ // other cases, an error should already be injected into the recv ++ // buffer by the transport, which the client will eventually receive, ++ // and then we will cancel the stream's context in ++ // addrConnStream.finish. + go func() { ++ ac.mu.Lock() ++ acCtx := ac.ctx ++ ac.mu.Unlock() + select { +- case <-ac.ctx.Done(): ++ case <-acCtx.Done(): + as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) + case <-ctx.Done(): + as.finish(toRPCErr(ctx.Err())) +@@ -1357,8 +1474,10 @@ func (as *addrConnStream) finish(err error) { + + // ServerStream defines the server-side behavior of a streaming RPC. + // +-// All errors returned from ServerStream methods are compatible with the +-// status package. ++// Errors returned from ServerStream methods are compatible with the status ++// package. However, the status code will often not match the RPC status as ++// seen by the client application, and therefore, should not be relied upon for ++// this purpose. + type ServerStream interface { + // SetHeader sets the header metadata. It may be called multiple times. + // When call multiple times, all the provided metadata will be merged. +@@ -1390,6 +1509,9 @@ type ServerStream interface { + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. ++ // ++ // It is not safe to modify the message after calling SendMsg. Tracing ++ // libraries and stats handlers may use the message lazily. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the client has performed a CloseSend. On +@@ -1415,13 +1537,15 @@ type serverStream struct { + comp encoding.Compressor + decomp encoding.Compressor + ++ sendCompressorName string ++ + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo + +- statsHandler stats.Handler ++ statsHandler []stats.Handler + +- binlog *binarylog.MethodLogger ++ binlogs []binarylog.MethodLogger + // serverHeaderBinlogged indicates whether server header has been logged. It + // will happen when one of the following two happens: stream.SendHeader(), + // stream.Send(). 
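// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the vendored diff: one way a server handler
// could use the SetSendCompressor and ClientSupportedCompressors helpers that
// the server.go hunks earlier in this patch add to package grpc. The package
// and function names below (compressdemo, maybeEnableGzip) are hypothetical;
// only the grpc and gzip-encoding identifiers come from the patched API. The
// ctx must be the context passed to a server handler, and the call must happen
// before anything causes headers to be sent.
// ---------------------------------------------------------------------------
package compressdemo

import (
	"context"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

// maybeEnableGzip switches outbound compression to gzip, but only when the
// client advertised gzip in grpc-accept-encoding, as the SetSendCompressor
// documentation above requires.
func maybeEnableGzip(ctx context.Context) error {
	names, err := grpc.ClientSupportedCompressors(ctx)
	if err != nil {
		return err // ctx is not a server handler context
	}
	for _, name := range names {
		if name == "gzip" {
			return grpc.SetSendCompressor(ctx, "gzip")
		}
	}
	return nil // client did not offer gzip; keep the default compressor
}
// ---------------------------------------------------------------------------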
+@@ -1441,17 +1565,29 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } ++ err := imetadata.Validate(md) ++ if err != nil { ++ return status.Error(codes.Internal, err.Error()) ++ } + return ss.s.SetHeader(md) + } + + func (ss *serverStream) SendHeader(md metadata.MD) error { +- err := ss.t.WriteHeader(ss.s, md) +- if ss.binlog != nil && !ss.serverHeaderBinlogged { ++ err := imetadata.Validate(md) ++ if err != nil { ++ return status.Error(codes.Internal, err.Error()) ++ } ++ ++ err = ss.t.WriteHeader(ss.s, md) ++ if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() +- ss.binlog.Log(&binarylog.ServerHeader{ ++ sh := &binarylog.ServerHeader{ + Header: h, +- }) ++ } + ss.serverHeaderBinlogged = true ++ for _, binlog := range ss.binlogs { ++ binlog.Log(ss.ctx, sh) ++ } + } + return err + } +@@ -1460,6 +1596,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } ++ if err := imetadata.Validate(md); err != nil { ++ logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) ++ } + ss.s.SetTrailer(md) + } + +@@ -1492,6 +1631,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { + } + }() + ++ // Server handler could have set new compressor by calling SetSendCompressor. ++ // In case it is set, we need to use it for compressing outbound message. ++ if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { ++ ss.comp = encoding.GetCompressor(sendCompressorsName) ++ ss.sendCompressorName = sendCompressorsName ++ } ++ + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + if err != nil { +@@ -1505,20 +1651,28 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } +- if ss.binlog != nil { ++ if len(ss.binlogs) != 0 { + if !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() +- ss.binlog.Log(&binarylog.ServerHeader{ ++ sh := &binarylog.ServerHeader{ + Header: h, +- }) ++ } + ss.serverHeaderBinlogged = true ++ for _, binlog := range ss.binlogs { ++ binlog.Log(ss.ctx, sh) ++ } + } +- ss.binlog.Log(&binarylog.ServerMessage{ ++ sm := &binarylog.ServerMessage{ + Message: data, +- }) ++ } ++ for _, binlog := range ss.binlogs { ++ binlog.Log(ss.ctx, sm) ++ } + } +- if ss.statsHandler != nil { +- ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) ++ if len(ss.statsHandler) != 0 { ++ for _, sh := range ss.statsHandler { ++ sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) ++ } + } + return nil + } +@@ -1552,13 +1706,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { + } + }() + var payInfo *payloadInfo +- if ss.statsHandler != nil || ss.binlog != nil { ++ if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { + payInfo = &payloadInfo{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err == io.EOF { +- if ss.binlog != nil { +- ss.binlog.Log(&binarylog.ClientHalfClose{}) ++ if len(ss.binlogs) != 0 { ++ chc := &binarylog.ClientHalfClose{} ++ for _, binlog := range ss.binlogs { ++ binlog.Log(ss.ctx, chc) ++ } + } + return err + } +@@ -1567,20 +1724,26 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { + } + return toRPCErr(err) + } +- if ss.statsHandler != nil { +- ss.statsHandler.HandleRPC(ss.s.Context(), 
&stats.InPayload{ +- RecvTime: time.Now(), +- Payload: m, +- // TODO truncate large payload. +- Data: payInfo.uncompressedBytes, +- WireLength: payInfo.wireLength + headerLen, +- Length: len(payInfo.uncompressedBytes), +- }) ++ if len(ss.statsHandler) != 0 { ++ for _, sh := range ss.statsHandler { ++ sh.HandleRPC(ss.s.Context(), &stats.InPayload{ ++ RecvTime: time.Now(), ++ Payload: m, ++ // TODO truncate large payload. ++ Data: payInfo.uncompressedBytes, ++ Length: len(payInfo.uncompressedBytes), ++ WireLength: payInfo.compressedLength + headerLen, ++ CompressedLength: payInfo.compressedLength, ++ }) ++ } + } +- if ss.binlog != nil { +- ss.binlog.Log(&binarylog.ClientMessage{ ++ if len(ss.binlogs) != 0 { ++ cm := &binarylog.ClientMessage{ + Message: payInfo.uncompressedBytes, +- }) ++ } ++ for _, binlog := range ss.binlogs { ++ binlog.Log(ss.ctx, cm) ++ } + } + return nil + } +diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go +index dbf34e6..bfa5dfa 100644 +--- a/vendor/google.golang.org/grpc/tap/tap.go ++++ b/vendor/google.golang.org/grpc/tap/tap.go +@@ -19,7 +19,7 @@ + // Package tap defines the function handles which are executed on the transport + // layer of gRPC-Go and related information. + // +-// Experimental ++// # Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go +index e3510e1..3cc7540 100644 +--- a/vendor/google.golang.org/grpc/version.go ++++ b/vendor/google.golang.org/grpc/version.go +@@ -19,4 +19,4 @@ + package grpc + + // Version is the current grpc version. +-const Version = "1.40.0" ++const Version = "1.56.3" +diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh +index 5eaa8b0..a8e4732 100644 +--- a/vendor/google.golang.org/grpc/vet.sh ++++ b/vendor/google.golang.org/grpc/vet.sh +@@ -41,16 +41,8 @@ if [[ "$1" = "-install" ]]; then + github.com/client9/misspell/cmd/misspell + popd + if [[ -z "${VET_SKIP_PROTO}" ]]; then +- if [[ "${TRAVIS}" = "true" ]]; then +- PROTOBUF_VERSION=3.14.0 +- PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip +- pushd /home/travis +- wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} +- unzip ${PROTOC_FILENAME} +- bin/protoc --version +- popd +- elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then +- PROTOBUF_VERSION=3.14.0 ++ if [[ "${GITHUB_ACTIONS}" = "true" ]]; then ++ PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/runner/go + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} +@@ -66,8 +58,20 @@ elif [[ "$#" -ne 0 ]]; then + die "Unknown argument(s): $*" + fi + ++# - Check that generated proto files are up to date. ++if [[ -z "${VET_SKIP_PROTO}" ]]; then ++ make proto && git status --porcelain 2>&1 | fail_on_output || \ ++ (git status; git --no-pager diff; exit 1) ++fi ++ ++if [[ -n "${VET_ONLY_PROTO}" ]]; then ++ exit 0 ++fi ++ + # - Ensure all source files contain a copyright message. +-not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' ++# (Done in two parts because Darwin "git grep" has broken support for compound ++# exclusion matches.) 
++(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output + + # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. + not grep 'func Test[^(]' *_test.go +@@ -81,7 +85,7 @@ not git grep -l 'x/net/context' -- "*.go" + git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' + + # - Do not call grpclog directly. Use grpclog.Component instead. +-git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' ++git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' + + # - Ensure all ptypes proto packages are renamed when importing. + not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" +@@ -89,19 +93,8 @@ not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" + # - Ensure all xds proto imports are renamed to *pb or *grpc. + git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' + +-# - Check imports that are illegal in appengine (until Go 1.11). +-# TODO: Remove when we drop Go 1.10 support +-go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go +- + misspell -error . + +-# - Check that generated proto files are up to date. +-if [[ -z "${VET_SKIP_PROTO}" ]]; then +- PATH="/home/travis/bin:${PATH}" make proto && \ +- git status --porcelain 2>&1 | fail_on_output || \ +- (git status; git --no-pager diff; exit 1) +-fi +- + # - gofmt, goimports, golint (with exceptions for generated code), go vet, + # go mod tidy. + # Perform these checks on each module inside gRPC. +@@ -111,9 +104,9 @@ for MOD_FILE in $(find . -name 'go.mod'); do + go vet -all ./... | fail_on_output + gofmt -s -d -l . 2>&1 | fail_on_output + goimports -l . 2>&1 | not grep -vE "\.pb\.go" +- golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" ++ golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" + +- go mod tidy ++ go mod tidy -compat=1.17 + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) + popd +@@ -123,8 +116,9 @@ done + # + # TODO(dfawley): don't use deprecated functions in examples or first-party + # plugins. ++# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. + SC_OUT="$(mktemp)" +-staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true ++staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true + # Error if anything other than deprecation warnings are printed. + not grep -v "is deprecated:.*SA1019" "${SC_OUT}" + # Only ignore the following deprecated types/fields/functions. +@@ -151,7 +145,6 @@ grpc.NewGZIPDecompressor + grpc.RPCCompressor + grpc.RPCDecompressor + grpc.ServiceConfig +-grpc.WithBalancerName + grpc.WithCompressor + grpc.WithDecompressor + grpc.WithDialer +diff --git a/vendor/google.golang.org/protobuf/AUTHORS b/vendor/google.golang.org/protobuf/AUTHORS +deleted file mode 100644 +index 2b00ddb..0000000 +--- a/vendor/google.golang.org/protobuf/AUTHORS ++++ /dev/null +@@ -1,3 +0,0 @@ +-# This source code refers to The Go Authors for copyright purposes. +-# The master list of authors is in the main Go distribution, +-# visible at https://tip.golang.org/AUTHORS. 
+diff --git a/vendor/google.golang.org/protobuf/CONTRIBUTORS b/vendor/google.golang.org/protobuf/CONTRIBUTORS +deleted file mode 100644 +index 1fbd3e9..0000000 +--- a/vendor/google.golang.org/protobuf/CONTRIBUTORS ++++ /dev/null +@@ -1,3 +0,0 @@ +-# This source code was written by the Go contributors. +-# The master list of contributors is in the main Go distribution, +-# visible at https://tip.golang.org/CONTRIBUTORS. +diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +new file mode 100644 +index 0000000..5f28148 +--- /dev/null ++++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +@@ -0,0 +1,665 @@ ++// Copyright 2019 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package protojson ++ ++import ( ++ "encoding/base64" ++ "fmt" ++ "math" ++ "strconv" ++ "strings" ++ ++ "google.golang.org/protobuf/internal/encoding/json" ++ "google.golang.org/protobuf/internal/encoding/messageset" ++ "google.golang.org/protobuf/internal/errors" ++ "google.golang.org/protobuf/internal/flags" ++ "google.golang.org/protobuf/internal/genid" ++ "google.golang.org/protobuf/internal/pragma" ++ "google.golang.org/protobuf/internal/set" ++ "google.golang.org/protobuf/proto" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoregistry" ++) ++ ++// Unmarshal reads the given []byte into the given proto.Message. ++// The provided message must be mutable (e.g., a non-nil pointer to a message). ++func Unmarshal(b []byte, m proto.Message) error { ++ return UnmarshalOptions{}.Unmarshal(b, m) ++} ++ ++// UnmarshalOptions is a configurable JSON format parser. ++type UnmarshalOptions struct { ++ pragma.NoUnkeyedLiterals ++ ++ // If AllowPartial is set, input for messages that will result in missing ++ // required fields will not return an error. ++ AllowPartial bool ++ ++ // If DiscardUnknown is set, unknown fields are ignored. ++ DiscardUnknown bool ++ ++ // Resolver is used for looking up types when unmarshaling ++ // google.protobuf.Any messages or extension fields. ++ // If nil, this defaults to using protoregistry.GlobalTypes. ++ Resolver interface { ++ protoregistry.MessageTypeResolver ++ protoregistry.ExtensionTypeResolver ++ } ++} ++ ++// Unmarshal reads the given []byte and populates the given proto.Message ++// using options in the UnmarshalOptions object. ++// It will clear the message first before setting the fields. ++// If it returns an error, the given message may be partially set. ++// The provided message must be mutable (e.g., a non-nil pointer to a message). ++func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { ++ return o.unmarshal(b, m) ++} ++ ++// unmarshal is a centralized function that all unmarshal operations go through. ++// For profiling purposes, avoid changing the name of this function or ++// introducing other code paths for unmarshal that do not go through this. ++func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { ++ proto.Reset(m) ++ ++ if o.Resolver == nil { ++ o.Resolver = protoregistry.GlobalTypes ++ } ++ ++ dec := decoder{json.NewDecoder(b), o} ++ if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { ++ return err ++ } ++ ++ // Check for EOF. 
++ tok, err := dec.Read() ++ if err != nil { ++ return err ++ } ++ if tok.Kind() != json.EOF { ++ return dec.unexpectedTokenError(tok) ++ } ++ ++ if o.AllowPartial { ++ return nil ++ } ++ return proto.CheckInitialized(m) ++} ++ ++type decoder struct { ++ *json.Decoder ++ opts UnmarshalOptions ++} ++ ++// newError returns an error object with position info. ++func (d decoder) newError(pos int, f string, x ...interface{}) error { ++ line, column := d.Position(pos) ++ head := fmt.Sprintf("(line %d:%d): ", line, column) ++ return errors.New(head+f, x...) ++} ++ ++// unexpectedTokenError returns a syntax error for the given unexpected token. ++func (d decoder) unexpectedTokenError(tok json.Token) error { ++ return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString()) ++} ++ ++// syntaxError returns a syntax error for given position. ++func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { ++ line, column := d.Position(pos) ++ head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) ++ return errors.New(head+f, x...) ++} ++ ++// unmarshalMessage unmarshals a message into the given protoreflect.Message. ++func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { ++ if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { ++ return unmarshal(d, m) ++ } ++ ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ if tok.Kind() != json.ObjectOpen { ++ return d.unexpectedTokenError(tok) ++ } ++ ++ messageDesc := m.Descriptor() ++ if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { ++ return errors.New("no support for proto1 MessageSets") ++ } ++ ++ var seenNums set.Ints ++ var seenOneofs set.Ints ++ fieldDescs := messageDesc.Fields() ++ for { ++ // Read field name. ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ switch tok.Kind() { ++ default: ++ return d.unexpectedTokenError(tok) ++ case json.ObjectClose: ++ return nil ++ case json.Name: ++ // Continue below. ++ } ++ ++ name := tok.Name() ++ // Unmarshaling a non-custom embedded message in Any will contain the ++ // JSON field "@type" which should be skipped because it is not a field ++ // of the embedded message, but simply an artifact of the Any format. ++ if skipTypeURL && name == "@type" { ++ d.Read() ++ continue ++ } ++ ++ // Get the FieldDescriptor. ++ var fd protoreflect.FieldDescriptor ++ if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { ++ // Only extension names are in [name] format. ++ extName := protoreflect.FullName(name[1 : len(name)-1]) ++ extType, err := d.opts.Resolver.FindExtensionByName(extName) ++ if err != nil && err != protoregistry.NotFound { ++ return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) ++ } ++ if extType != nil { ++ fd = extType.TypeDescriptor() ++ if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { ++ return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) ++ } ++ } ++ } else { ++ // The name can either be the JSON name or the proto field name. ++ fd = fieldDescs.ByJSONName(name) ++ if fd == nil { ++ fd = fieldDescs.ByTextName(name) ++ } ++ } ++ if flags.ProtoLegacy { ++ if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { ++ fd = nil // reset since the weak reference is not linked in ++ } ++ } ++ ++ if fd == nil { ++ // Field is unknown. 
++ if d.opts.DiscardUnknown { ++ if err := d.skipJSONValue(); err != nil { ++ return err ++ } ++ continue ++ } ++ return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) ++ } ++ ++ // Do not allow duplicate fields. ++ num := uint64(fd.Number()) ++ if seenNums.Has(num) { ++ return d.newError(tok.Pos(), "duplicate field %v", tok.RawString()) ++ } ++ seenNums.Set(num) ++ ++ // No need to set values for JSON null unless the field type is ++ // google.protobuf.Value or google.protobuf.NullValue. ++ if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) { ++ d.Read() ++ continue ++ } ++ ++ switch { ++ case fd.IsList(): ++ list := m.Mutable(fd).List() ++ if err := d.unmarshalList(list, fd); err != nil { ++ return err ++ } ++ case fd.IsMap(): ++ mmap := m.Mutable(fd).Map() ++ if err := d.unmarshalMap(mmap, fd); err != nil { ++ return err ++ } ++ default: ++ // If field is a oneof, check if it has already been set. ++ if od := fd.ContainingOneof(); od != nil { ++ idx := uint64(od.Index()) ++ if seenOneofs.Has(idx) { ++ return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName()) ++ } ++ seenOneofs.Set(idx) ++ } ++ ++ // Required or optional fields. ++ if err := d.unmarshalSingular(m, fd); err != nil { ++ return err ++ } ++ } ++ } ++} ++ ++func isKnownValue(fd protoreflect.FieldDescriptor) bool { ++ md := fd.Message() ++ return md != nil && md.FullName() == genid.Value_message_fullname ++} ++ ++func isNullValue(fd protoreflect.FieldDescriptor) bool { ++ ed := fd.Enum() ++ return ed != nil && ed.FullName() == genid.NullValue_enum_fullname ++} ++ ++// unmarshalSingular unmarshals to the non-repeated field specified ++// by the given FieldDescriptor. ++func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error { ++ var val protoreflect.Value ++ var err error ++ switch fd.Kind() { ++ case protoreflect.MessageKind, protoreflect.GroupKind: ++ val = m.NewField(fd) ++ err = d.unmarshalMessage(val.Message(), false) ++ default: ++ val, err = d.unmarshalScalar(fd) ++ } ++ ++ if err != nil { ++ return err ++ } ++ m.Set(fd, val) ++ return nil ++} ++ ++// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by ++// the given FieldDescriptor. 
++func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { ++ const b32 int = 32 ++ const b64 int = 64 ++ ++ tok, err := d.Read() ++ if err != nil { ++ return protoreflect.Value{}, err ++ } ++ ++ kind := fd.Kind() ++ switch kind { ++ case protoreflect.BoolKind: ++ if tok.Kind() == json.Bool { ++ return protoreflect.ValueOfBool(tok.Bool()), nil ++ } ++ ++ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: ++ if v, ok := unmarshalInt(tok, b32); ok { ++ return v, nil ++ } ++ ++ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: ++ if v, ok := unmarshalInt(tok, b64); ok { ++ return v, nil ++ } ++ ++ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: ++ if v, ok := unmarshalUint(tok, b32); ok { ++ return v, nil ++ } ++ ++ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: ++ if v, ok := unmarshalUint(tok, b64); ok { ++ return v, nil ++ } ++ ++ case protoreflect.FloatKind: ++ if v, ok := unmarshalFloat(tok, b32); ok { ++ return v, nil ++ } ++ ++ case protoreflect.DoubleKind: ++ if v, ok := unmarshalFloat(tok, b64); ok { ++ return v, nil ++ } ++ ++ case protoreflect.StringKind: ++ if tok.Kind() == json.String { ++ return protoreflect.ValueOfString(tok.ParsedString()), nil ++ } ++ ++ case protoreflect.BytesKind: ++ if v, ok := unmarshalBytes(tok); ok { ++ return v, nil ++ } ++ ++ case protoreflect.EnumKind: ++ if v, ok := unmarshalEnum(tok, fd); ok { ++ return v, nil ++ } ++ ++ default: ++ panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) ++ } ++ ++ return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) ++} ++ ++func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { ++ switch tok.Kind() { ++ case json.Number: ++ return getInt(tok, bitSize) ++ ++ case json.String: ++ // Decode number from string. ++ s := strings.TrimSpace(tok.ParsedString()) ++ if len(s) != len(tok.ParsedString()) { ++ return protoreflect.Value{}, false ++ } ++ dec := json.NewDecoder([]byte(s)) ++ tok, err := dec.Read() ++ if err != nil { ++ return protoreflect.Value{}, false ++ } ++ return getInt(tok, bitSize) ++ } ++ return protoreflect.Value{}, false ++} ++ ++func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { ++ n, ok := tok.Int(bitSize) ++ if !ok { ++ return protoreflect.Value{}, false ++ } ++ if bitSize == 32 { ++ return protoreflect.ValueOfInt32(int32(n)), true ++ } ++ return protoreflect.ValueOfInt64(n), true ++} ++ ++func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { ++ switch tok.Kind() { ++ case json.Number: ++ return getUint(tok, bitSize) ++ ++ case json.String: ++ // Decode number from string. 
++ s := strings.TrimSpace(tok.ParsedString()) ++ if len(s) != len(tok.ParsedString()) { ++ return protoreflect.Value{}, false ++ } ++ dec := json.NewDecoder([]byte(s)) ++ tok, err := dec.Read() ++ if err != nil { ++ return protoreflect.Value{}, false ++ } ++ return getUint(tok, bitSize) ++ } ++ return protoreflect.Value{}, false ++} ++ ++func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { ++ n, ok := tok.Uint(bitSize) ++ if !ok { ++ return protoreflect.Value{}, false ++ } ++ if bitSize == 32 { ++ return protoreflect.ValueOfUint32(uint32(n)), true ++ } ++ return protoreflect.ValueOfUint64(n), true ++} ++ ++func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { ++ switch tok.Kind() { ++ case json.Number: ++ return getFloat(tok, bitSize) ++ ++ case json.String: ++ s := tok.ParsedString() ++ switch s { ++ case "NaN": ++ if bitSize == 32 { ++ return protoreflect.ValueOfFloat32(float32(math.NaN())), true ++ } ++ return protoreflect.ValueOfFloat64(math.NaN()), true ++ case "Infinity": ++ if bitSize == 32 { ++ return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true ++ } ++ return protoreflect.ValueOfFloat64(math.Inf(+1)), true ++ case "-Infinity": ++ if bitSize == 32 { ++ return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true ++ } ++ return protoreflect.ValueOfFloat64(math.Inf(-1)), true ++ } ++ ++ // Decode number from string. ++ if len(s) != len(strings.TrimSpace(s)) { ++ return protoreflect.Value{}, false ++ } ++ dec := json.NewDecoder([]byte(s)) ++ tok, err := dec.Read() ++ if err != nil { ++ return protoreflect.Value{}, false ++ } ++ return getFloat(tok, bitSize) ++ } ++ return protoreflect.Value{}, false ++} ++ ++func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { ++ n, ok := tok.Float(bitSize) ++ if !ok { ++ return protoreflect.Value{}, false ++ } ++ if bitSize == 32 { ++ return protoreflect.ValueOfFloat32(float32(n)), true ++ } ++ return protoreflect.ValueOfFloat64(n), true ++} ++ ++func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { ++ if tok.Kind() != json.String { ++ return protoreflect.Value{}, false ++ } ++ ++ s := tok.ParsedString() ++ enc := base64.StdEncoding ++ if strings.ContainsAny(s, "-_") { ++ enc = base64.URLEncoding ++ } ++ if len(s)%4 != 0 { ++ enc = enc.WithPadding(base64.NoPadding) ++ } ++ b, err := enc.DecodeString(s) ++ if err != nil { ++ return protoreflect.Value{}, false ++ } ++ return protoreflect.ValueOfBytes(b), true ++} ++ ++func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) { ++ switch tok.Kind() { ++ case json.String: ++ // Lookup EnumNumber based on name. ++ s := tok.ParsedString() ++ if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { ++ return protoreflect.ValueOfEnum(enumVal.Number()), true ++ } ++ ++ case json.Number: ++ if n, ok := tok.Int(32); ok { ++ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true ++ } ++ ++ case json.Null: ++ // This is only valid for google.protobuf.NullValue. 
++ if isNullValue(fd) { ++ return protoreflect.ValueOfEnum(0), true ++ } ++ } ++ ++ return protoreflect.Value{}, false ++} ++ ++func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ if tok.Kind() != json.ArrayOpen { ++ return d.unexpectedTokenError(tok) ++ } ++ ++ switch fd.Kind() { ++ case protoreflect.MessageKind, protoreflect.GroupKind: ++ for { ++ tok, err := d.Peek() ++ if err != nil { ++ return err ++ } ++ ++ if tok.Kind() == json.ArrayClose { ++ d.Read() ++ return nil ++ } ++ ++ val := list.NewElement() ++ if err := d.unmarshalMessage(val.Message(), false); err != nil { ++ return err ++ } ++ list.Append(val) ++ } ++ default: ++ for { ++ tok, err := d.Peek() ++ if err != nil { ++ return err ++ } ++ ++ if tok.Kind() == json.ArrayClose { ++ d.Read() ++ return nil ++ } ++ ++ val, err := d.unmarshalScalar(fd) ++ if err != nil { ++ return err ++ } ++ list.Append(val) ++ } ++ } ++ ++ return nil ++} ++ ++func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ if tok.Kind() != json.ObjectOpen { ++ return d.unexpectedTokenError(tok) ++ } ++ ++ // Determine ahead whether map entry is a scalar type or a message type in ++ // order to call the appropriate unmarshalMapValue func inside the for loop ++ // below. ++ var unmarshalMapValue func() (protoreflect.Value, error) ++ switch fd.MapValue().Kind() { ++ case protoreflect.MessageKind, protoreflect.GroupKind: ++ unmarshalMapValue = func() (protoreflect.Value, error) { ++ val := mmap.NewValue() ++ if err := d.unmarshalMessage(val.Message(), false); err != nil { ++ return protoreflect.Value{}, err ++ } ++ return val, nil ++ } ++ default: ++ unmarshalMapValue = func() (protoreflect.Value, error) { ++ return d.unmarshalScalar(fd.MapValue()) ++ } ++ } ++ ++Loop: ++ for { ++ // Read field name. ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ switch tok.Kind() { ++ default: ++ return d.unexpectedTokenError(tok) ++ case json.ObjectClose: ++ break Loop ++ case json.Name: ++ // Continue. ++ } ++ ++ // Unmarshal field name. ++ pkey, err := d.unmarshalMapKey(tok, fd.MapKey()) ++ if err != nil { ++ return err ++ } ++ ++ // Check for duplicate field name. ++ if mmap.Has(pkey) { ++ return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString()) ++ } ++ ++ // Read and unmarshal field value. ++ pval, err := unmarshalMapValue() ++ if err != nil { ++ return err ++ } ++ ++ mmap.Set(pkey, pval) ++ } ++ ++ return nil ++} ++ ++// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. ++// A map key type is any integral or string type. 
++func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) { ++ const b32 = 32 ++ const b64 = 64 ++ const base10 = 10 ++ ++ name := tok.Name() ++ kind := fd.Kind() ++ switch kind { ++ case protoreflect.StringKind: ++ return protoreflect.ValueOfString(name).MapKey(), nil ++ ++ case protoreflect.BoolKind: ++ switch name { ++ case "true": ++ return protoreflect.ValueOfBool(true).MapKey(), nil ++ case "false": ++ return protoreflect.ValueOfBool(false).MapKey(), nil ++ } ++ ++ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: ++ if n, err := strconv.ParseInt(name, base10, b32); err == nil { ++ return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil ++ } ++ ++ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: ++ if n, err := strconv.ParseInt(name, base10, b64); err == nil { ++ return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil ++ } ++ ++ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: ++ if n, err := strconv.ParseUint(name, base10, b32); err == nil { ++ return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil ++ } ++ ++ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: ++ if n, err := strconv.ParseUint(name, base10, b64); err == nil { ++ return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil ++ } ++ ++ default: ++ panic(fmt.Sprintf("invalid kind for map key: %v", kind)) ++ } ++ ++ return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) ++} +diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go +new file mode 100644 +index 0000000..21d5d2c +--- /dev/null ++++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go +@@ -0,0 +1,11 @@ ++// Copyright 2019 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// Package protojson marshals and unmarshals protocol buffer messages as JSON ++// format. It follows the guide at ++// https://protobuf.dev/programming-guides/proto3#json. ++// ++// This package produces a different output than the standard "encoding/json" ++// package, which does not operate correctly on protocol buffer messages. ++package protojson +diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +new file mode 100644 +index 0000000..d09d22e +--- /dev/null ++++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +@@ -0,0 +1,343 @@ ++// Copyright 2019 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package protojson ++ ++import ( ++ "encoding/base64" ++ "fmt" ++ ++ "google.golang.org/protobuf/internal/encoding/json" ++ "google.golang.org/protobuf/internal/encoding/messageset" ++ "google.golang.org/protobuf/internal/errors" ++ "google.golang.org/protobuf/internal/filedesc" ++ "google.golang.org/protobuf/internal/flags" ++ "google.golang.org/protobuf/internal/genid" ++ "google.golang.org/protobuf/internal/order" ++ "google.golang.org/protobuf/internal/pragma" ++ "google.golang.org/protobuf/proto" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoregistry" ++) ++ ++const defaultIndent = " " ++ ++// Format formats the message as a multiline string. 
++// This function is only intended for human consumption and ignores errors. ++// Do not depend on the output being stable. It may change over time across ++// different versions of the program. ++func Format(m proto.Message) string { ++ return MarshalOptions{Multiline: true}.Format(m) ++} ++ ++// Marshal writes the given proto.Message in JSON format using default options. ++// Do not depend on the output being stable. It may change over time across ++// different versions of the program. ++func Marshal(m proto.Message) ([]byte, error) { ++ return MarshalOptions{}.Marshal(m) ++} ++ ++// MarshalOptions is a configurable JSON format marshaler. ++type MarshalOptions struct { ++ pragma.NoUnkeyedLiterals ++ ++ // Multiline specifies whether the marshaler should format the output in ++ // indented-form with every textual element on a new line. ++ // If Indent is an empty string, then an arbitrary indent is chosen. ++ Multiline bool ++ ++ // Indent specifies the set of indentation characters to use in a multiline ++ // formatted output such that every entry is preceded by Indent and ++ // terminated by a newline. If non-empty, then Multiline is treated as true. ++ // Indent can only be composed of space or tab characters. ++ Indent string ++ ++ // AllowPartial allows messages that have missing required fields to marshal ++ // without returning an error. If AllowPartial is false (the default), ++ // Marshal will return error if there are any missing required fields. ++ AllowPartial bool ++ ++ // UseProtoNames uses proto field name instead of lowerCamelCase name in JSON ++ // field names. ++ UseProtoNames bool ++ ++ // UseEnumNumbers emits enum values as numbers. ++ UseEnumNumbers bool ++ ++ // EmitUnpopulated specifies whether to emit unpopulated fields. It does not ++ // emit unpopulated oneof fields or unpopulated extension fields. ++ // The JSON value emitted for unpopulated fields are as follows: ++ // ╔═══════╤════════════════════════════╗ ++ // ║ JSON │ Protobuf field ║ ++ // ╠═══════╪════════════════════════════╣ ++ // ║ false │ proto3 boolean fields ║ ++ // ║ 0 │ proto3 numeric fields ║ ++ // ║ "" │ proto3 string/bytes fields ║ ++ // ║ null │ proto2 scalar fields ║ ++ // ║ null │ message fields ║ ++ // ║ [] │ list fields ║ ++ // ║ {} │ map fields ║ ++ // ╚═══════╧════════════════════════════╝ ++ EmitUnpopulated bool ++ ++ // Resolver is used for looking up types when expanding google.protobuf.Any ++ // messages. If nil, this defaults to using protoregistry.GlobalTypes. ++ Resolver interface { ++ protoregistry.ExtensionTypeResolver ++ protoregistry.MessageTypeResolver ++ } ++} ++ ++// Format formats the message as a string. ++// This method is only intended for human consumption and ignores errors. ++// Do not depend on the output being stable. It may change over time across ++// different versions of the program. ++func (o MarshalOptions) Format(m proto.Message) string { ++ if m == nil || !m.ProtoReflect().IsValid() { ++ return "" // invalid syntax, but okay since this is for debugging ++ } ++ o.AllowPartial = true ++ b, _ := o.Marshal(m) ++ return string(b) ++} ++ ++// Marshal marshals the given proto.Message in the JSON format using options in ++// MarshalOptions. Do not depend on the output being stable. It may change over ++// time across different versions of the program. ++func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { ++ return o.marshal(m) ++} ++ ++// marshal is a centralized function that all marshal operations go through. 
++// For profiling purposes, avoid changing the name of this function or ++// introducing other code paths for marshal that do not go through this. ++func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { ++ if o.Multiline && o.Indent == "" { ++ o.Indent = defaultIndent ++ } ++ if o.Resolver == nil { ++ o.Resolver = protoregistry.GlobalTypes ++ } ++ ++ internalEnc, err := json.NewEncoder(o.Indent) ++ if err != nil { ++ return nil, err ++ } ++ ++ // Treat nil message interface as an empty message, ++ // in which case the output in an empty JSON object. ++ if m == nil { ++ return []byte("{}"), nil ++ } ++ ++ enc := encoder{internalEnc, o} ++ if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil { ++ return nil, err ++ } ++ if o.AllowPartial { ++ return enc.Bytes(), nil ++ } ++ return enc.Bytes(), proto.CheckInitialized(m) ++} ++ ++type encoder struct { ++ *json.Encoder ++ opts MarshalOptions ++} ++ ++// typeFieldDesc is a synthetic field descriptor used for the "@type" field. ++var typeFieldDesc = func() protoreflect.FieldDescriptor { ++ var fd filedesc.Field ++ fd.L0.FullName = "@type" ++ fd.L0.Index = -1 ++ fd.L1.Cardinality = protoreflect.Optional ++ fd.L1.Kind = protoreflect.StringKind ++ return &fd ++}() ++ ++// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method ++// to additionally iterate over a synthetic field for the type URL. ++type typeURLFieldRanger struct { ++ order.FieldRanger ++ typeURL string ++} ++ ++func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { ++ if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) { ++ return ++ } ++ m.FieldRanger.Range(f) ++} ++ ++// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range ++// method to additionally iterate over unpopulated fields. ++type unpopulatedFieldRanger struct{ protoreflect.Message } ++ ++func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { ++ fds := m.Descriptor().Fields() ++ for i := 0; i < fds.Len(); i++ { ++ fd := fds.Get(i) ++ if m.Has(fd) || fd.ContainingOneof() != nil { ++ continue // ignore populated fields and fields within a oneofs ++ } ++ ++ v := m.Get(fd) ++ isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() ++ isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil ++ if isProto2Scalar || isSingularMessage { ++ v = protoreflect.Value{} // use invalid value to emit null ++ } ++ if !f(fd, v) { ++ return ++ } ++ } ++ m.Message.Range(f) ++} ++ ++// marshalMessage marshals the fields in the given protoreflect.Message. ++// If the typeURL is non-empty, then a synthetic "@type" field is injected ++// containing the URL as the value. 
++func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { ++ if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { ++ return errors.New("no support for proto1 MessageSets") ++ } ++ ++ if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { ++ return marshal(e, m) ++ } ++ ++ e.StartObject() ++ defer e.EndObject() ++ ++ var fields order.FieldRanger = m ++ if e.opts.EmitUnpopulated { ++ fields = unpopulatedFieldRanger{m} ++ } ++ if typeURL != "" { ++ fields = typeURLFieldRanger{fields, typeURL} ++ } ++ ++ var err error ++ order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { ++ name := fd.JSONName() ++ if e.opts.UseProtoNames { ++ name = fd.TextName() ++ } ++ ++ if err = e.WriteName(name); err != nil { ++ return false ++ } ++ if err = e.marshalValue(v, fd); err != nil { ++ return false ++ } ++ return true ++ }) ++ return err ++} ++ ++// marshalValue marshals the given protoreflect.Value. ++func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { ++ switch { ++ case fd.IsList(): ++ return e.marshalList(val.List(), fd) ++ case fd.IsMap(): ++ return e.marshalMap(val.Map(), fd) ++ default: ++ return e.marshalSingular(val, fd) ++ } ++} ++ ++// marshalSingular marshals the given non-repeated field value. This includes ++// all scalar types, enums, messages, and groups. ++func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { ++ if !val.IsValid() { ++ e.WriteNull() ++ return nil ++ } ++ ++ switch kind := fd.Kind(); kind { ++ case protoreflect.BoolKind: ++ e.WriteBool(val.Bool()) ++ ++ case protoreflect.StringKind: ++ if e.WriteString(val.String()) != nil { ++ return errors.InvalidUTF8(string(fd.FullName())) ++ } ++ ++ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: ++ e.WriteInt(val.Int()) ++ ++ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: ++ e.WriteUint(val.Uint()) ++ ++ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind, ++ protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind: ++ // 64-bit integers are written out as JSON string. ++ e.WriteString(val.String()) ++ ++ case protoreflect.FloatKind: ++ // Encoder.WriteFloat handles the special numbers NaN and infinites. ++ e.WriteFloat(val.Float(), 32) ++ ++ case protoreflect.DoubleKind: ++ // Encoder.WriteFloat handles the special numbers NaN and infinites. ++ e.WriteFloat(val.Float(), 64) ++ ++ case protoreflect.BytesKind: ++ e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) ++ ++ case protoreflect.EnumKind: ++ if fd.Enum().FullName() == genid.NullValue_enum_fullname { ++ e.WriteNull() ++ } else { ++ desc := fd.Enum().Values().ByNumber(val.Enum()) ++ if e.opts.UseEnumNumbers || desc == nil { ++ e.WriteInt(int64(val.Enum())) ++ } else { ++ e.WriteString(string(desc.Name())) ++ } ++ } ++ ++ case protoreflect.MessageKind, protoreflect.GroupKind: ++ if err := e.marshalMessage(val.Message(), ""); err != nil { ++ return err ++ } ++ ++ default: ++ panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) ++ } ++ return nil ++} ++ ++// marshalList marshals the given protoreflect.List. 
++func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { ++ e.StartArray() ++ defer e.EndArray() ++ ++ for i := 0; i < list.Len(); i++ { ++ item := list.Get(i) ++ if err := e.marshalSingular(item, fd); err != nil { ++ return err ++ } ++ } ++ return nil ++} ++ ++// marshalMap marshals given protoreflect.Map. ++func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { ++ e.StartObject() ++ defer e.EndObject() ++ ++ var err error ++ order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { ++ if err = e.WriteName(k.String()); err != nil { ++ return false ++ } ++ if err = e.marshalSingular(v, fd.MapValue()); err != nil { ++ return false ++ } ++ return true ++ }) ++ return err ++} +diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +new file mode 100644 +index 0000000..6c37d41 +--- /dev/null ++++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +@@ -0,0 +1,895 @@ ++// Copyright 2019 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package protojson ++ ++import ( ++ "bytes" ++ "fmt" ++ "math" ++ "strconv" ++ "strings" ++ "time" ++ ++ "google.golang.org/protobuf/internal/encoding/json" ++ "google.golang.org/protobuf/internal/errors" ++ "google.golang.org/protobuf/internal/genid" ++ "google.golang.org/protobuf/internal/strs" ++ "google.golang.org/protobuf/proto" ++ "google.golang.org/protobuf/reflect/protoreflect" ++) ++ ++type marshalFunc func(encoder, protoreflect.Message) error ++ ++// wellKnownTypeMarshaler returns a marshal function if the message type ++// has specialized serialization behavior. It returns nil otherwise. ++func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc { ++ if name.Parent() == genid.GoogleProtobuf_package { ++ switch name.Name() { ++ case genid.Any_message_name: ++ return encoder.marshalAny ++ case genid.Timestamp_message_name: ++ return encoder.marshalTimestamp ++ case genid.Duration_message_name: ++ return encoder.marshalDuration ++ case genid.BoolValue_message_name, ++ genid.Int32Value_message_name, ++ genid.Int64Value_message_name, ++ genid.UInt32Value_message_name, ++ genid.UInt64Value_message_name, ++ genid.FloatValue_message_name, ++ genid.DoubleValue_message_name, ++ genid.StringValue_message_name, ++ genid.BytesValue_message_name: ++ return encoder.marshalWrapperType ++ case genid.Struct_message_name: ++ return encoder.marshalStruct ++ case genid.ListValue_message_name: ++ return encoder.marshalListValue ++ case genid.Value_message_name: ++ return encoder.marshalKnownValue ++ case genid.FieldMask_message_name: ++ return encoder.marshalFieldMask ++ case genid.Empty_message_name: ++ return encoder.marshalEmpty ++ } ++ } ++ return nil ++} ++ ++type unmarshalFunc func(decoder, protoreflect.Message) error ++ ++// wellKnownTypeUnmarshaler returns a unmarshal function if the message type ++// has specialized serialization behavior. It returns nil otherwise. 
++func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc { ++ if name.Parent() == genid.GoogleProtobuf_package { ++ switch name.Name() { ++ case genid.Any_message_name: ++ return decoder.unmarshalAny ++ case genid.Timestamp_message_name: ++ return decoder.unmarshalTimestamp ++ case genid.Duration_message_name: ++ return decoder.unmarshalDuration ++ case genid.BoolValue_message_name, ++ genid.Int32Value_message_name, ++ genid.Int64Value_message_name, ++ genid.UInt32Value_message_name, ++ genid.UInt64Value_message_name, ++ genid.FloatValue_message_name, ++ genid.DoubleValue_message_name, ++ genid.StringValue_message_name, ++ genid.BytesValue_message_name: ++ return decoder.unmarshalWrapperType ++ case genid.Struct_message_name: ++ return decoder.unmarshalStruct ++ case genid.ListValue_message_name: ++ return decoder.unmarshalListValue ++ case genid.Value_message_name: ++ return decoder.unmarshalKnownValue ++ case genid.FieldMask_message_name: ++ return decoder.unmarshalFieldMask ++ case genid.Empty_message_name: ++ return decoder.unmarshalEmpty ++ } ++ } ++ return nil ++} ++ ++// The JSON representation of an Any message uses the regular representation of ++// the deserialized, embedded message, with an additional field `@type` which ++// contains the type URL. If the embedded message type is well-known and has a ++// custom JSON representation, that representation will be embedded adding a ++// field `value` which holds the custom JSON in addition to the `@type` field. ++ ++func (e encoder) marshalAny(m protoreflect.Message) error { ++ fds := m.Descriptor().Fields() ++ fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) ++ fdValue := fds.ByNumber(genid.Any_Value_field_number) ++ ++ if !m.Has(fdType) { ++ if !m.Has(fdValue) { ++ // If message is empty, marshal out empty JSON object. ++ e.StartObject() ++ e.EndObject() ++ return nil ++ } else { ++ // Return error if type_url field is not set, but value is set. ++ return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name) ++ } ++ } ++ ++ typeVal := m.Get(fdType) ++ valueVal := m.Get(fdValue) ++ ++ // Resolve the type in order to unmarshal value field. ++ typeURL := typeVal.String() ++ emt, err := e.opts.Resolver.FindMessageByURL(typeURL) ++ if err != nil { ++ return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err) ++ } ++ ++ em := emt.New() ++ err = proto.UnmarshalOptions{ ++ AllowPartial: true, // never check required fields inside an Any ++ Resolver: e.opts.Resolver, ++ }.Unmarshal(valueVal.Bytes(), em.Interface()) ++ if err != nil { ++ return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err) ++ } ++ ++ // If type of value has custom JSON encoding, marshal out a field "value" ++ // with corresponding custom JSON encoding of the embedded message as a ++ // field. ++ if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil { ++ e.StartObject() ++ defer e.EndObject() ++ ++ // Marshal out @type field. ++ e.WriteName("@type") ++ if err := e.WriteString(typeURL); err != nil { ++ return err ++ } ++ ++ e.WriteName("value") ++ return marshal(e, em) ++ } ++ ++ // Else, marshal out the embedded message's fields in this Any object. ++ if err := e.marshalMessage(em, typeURL); err != nil { ++ return err ++ } ++ ++ return nil ++} ++ ++func (d decoder) unmarshalAny(m protoreflect.Message) error { ++ // Peek to check for json.ObjectOpen to avoid advancing a read. 
++ start, err := d.Peek() ++ if err != nil { ++ return err ++ } ++ if start.Kind() != json.ObjectOpen { ++ return d.unexpectedTokenError(start) ++ } ++ ++ // Use another decoder to parse the unread bytes for @type field. This ++ // avoids advancing a read from current decoder because the current JSON ++ // object may contain the fields of the embedded type. ++ dec := decoder{d.Clone(), UnmarshalOptions{}} ++ tok, err := findTypeURL(dec) ++ switch err { ++ case errEmptyObject: ++ // An empty JSON object translates to an empty Any message. ++ d.Read() // Read json.ObjectOpen. ++ d.Read() // Read json.ObjectClose. ++ return nil ++ ++ case errMissingType: ++ if d.opts.DiscardUnknown { ++ // Treat all fields as unknowns, similar to an empty object. ++ return d.skipJSONValue() ++ } ++ // Use start.Pos() for line position. ++ return d.newError(start.Pos(), err.Error()) ++ ++ default: ++ if err != nil { ++ return err ++ } ++ } ++ ++ typeURL := tok.ParsedString() ++ emt, err := d.opts.Resolver.FindMessageByURL(typeURL) ++ if err != nil { ++ return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err) ++ } ++ ++ // Create new message for the embedded message type and unmarshal into it. ++ em := emt.New() ++ if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil { ++ // If embedded message is a custom type, ++ // unmarshal the JSON "value" field into it. ++ if err := d.unmarshalAnyValue(unmarshal, em); err != nil { ++ return err ++ } ++ } else { ++ // Else unmarshal the current JSON object into it. ++ if err := d.unmarshalMessage(em, true); err != nil { ++ return err ++ } ++ } ++ // Serialize the embedded message and assign the resulting bytes to the ++ // proto value field. ++ b, err := proto.MarshalOptions{ ++ AllowPartial: true, // No need to check required fields inside an Any. ++ Deterministic: true, ++ }.Marshal(em.Interface()) ++ if err != nil { ++ return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err) ++ } ++ ++ fds := m.Descriptor().Fields() ++ fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) ++ fdValue := fds.ByNumber(genid.Any_Value_field_number) ++ ++ m.Set(fdType, protoreflect.ValueOfString(typeURL)) ++ m.Set(fdValue, protoreflect.ValueOfBytes(b)) ++ return nil ++} ++ ++var errEmptyObject = fmt.Errorf(`empty object`) ++var errMissingType = fmt.Errorf(`missing "@type" field`) ++ ++// findTypeURL returns the token for the "@type" field value from the given ++// JSON bytes. It is expected that the given bytes start with json.ObjectOpen. ++// It returns errEmptyObject if the JSON object is empty or errMissingType if ++// @type field does not exist. It returns other error if the @type field is not ++// valid or other decoding issues. ++func findTypeURL(d decoder) (json.Token, error) { ++ var typeURL string ++ var typeTok json.Token ++ numFields := 0 ++ // Skip start object. ++ d.Read() ++ ++Loop: ++ for { ++ tok, err := d.Read() ++ if err != nil { ++ return json.Token{}, err ++ } ++ ++ switch tok.Kind() { ++ case json.ObjectClose: ++ if typeURL == "" { ++ // Did not find @type field. ++ if numFields > 0 { ++ return json.Token{}, errMissingType ++ } ++ return json.Token{}, errEmptyObject ++ } ++ break Loop ++ ++ case json.Name: ++ numFields++ ++ if tok.Name() != "@type" { ++ // Skip value. ++ if err := d.skipJSONValue(); err != nil { ++ return json.Token{}, err ++ } ++ continue ++ } ++ ++ // Return error if this was previously set already. 
++ if typeURL != "" { ++ return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`) ++ } ++ // Read field value. ++ tok, err := d.Read() ++ if err != nil { ++ return json.Token{}, err ++ } ++ if tok.Kind() != json.String { ++ return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString()) ++ } ++ typeURL = tok.ParsedString() ++ if typeURL == "" { ++ return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`) ++ } ++ typeTok = tok ++ } ++ } ++ ++ return typeTok, nil ++} ++ ++// skipJSONValue parses a JSON value (null, boolean, string, number, object and ++// array) in order to advance the read to the next JSON value. It relies on ++// the decoder returning an error if the types are not in valid sequence. ++func (d decoder) skipJSONValue() error { ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ // Only need to continue reading for objects and arrays. ++ switch tok.Kind() { ++ case json.ObjectOpen: ++ for { ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ switch tok.Kind() { ++ case json.ObjectClose: ++ return nil ++ case json.Name: ++ // Skip object field value. ++ if err := d.skipJSONValue(); err != nil { ++ return err ++ } ++ } ++ } ++ ++ case json.ArrayOpen: ++ for { ++ tok, err := d.Peek() ++ if err != nil { ++ return err ++ } ++ switch tok.Kind() { ++ case json.ArrayClose: ++ d.Read() ++ return nil ++ default: ++ // Skip array item. ++ if err := d.skipJSONValue(); err != nil { ++ return err ++ } ++ } ++ } ++ } ++ return nil ++} ++ ++// unmarshalAnyValue unmarshals the given custom-type message from the JSON ++// object's "value" field. ++func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error { ++ // Skip ObjectOpen, and start reading the fields. ++ d.Read() ++ ++ var found bool // Used for detecting duplicate "value". ++ for { ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ switch tok.Kind() { ++ case json.ObjectClose: ++ if !found { ++ return d.newError(tok.Pos(), `missing "value" field`) ++ } ++ return nil ++ ++ case json.Name: ++ switch tok.Name() { ++ case "@type": ++ // Skip the value as this was previously parsed already. ++ d.Read() ++ ++ case "value": ++ if found { ++ return d.newError(tok.Pos(), `duplicate "value" field`) ++ } ++ // Unmarshal the field value into the given message. ++ if err := unmarshal(d, m); err != nil { ++ return err ++ } ++ found = true ++ ++ default: ++ if d.opts.DiscardUnknown { ++ if err := d.skipJSONValue(); err != nil { ++ return err ++ } ++ continue ++ } ++ return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) ++ } ++ } ++ } ++} ++ ++// Wrapper types are encoded as JSON primitives like string, number or boolean. ++ ++func (e encoder) marshalWrapperType(m protoreflect.Message) error { ++ fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) ++ val := m.Get(fd) ++ return e.marshalSingular(val, fd) ++} ++ ++func (d decoder) unmarshalWrapperType(m protoreflect.Message) error { ++ fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) ++ val, err := d.unmarshalScalar(fd) ++ if err != nil { ++ return err ++ } ++ m.Set(fd, val) ++ return nil ++} ++ ++// The JSON representation for Empty is an empty JSON object. 
++ ++func (e encoder) marshalEmpty(protoreflect.Message) error { ++ e.StartObject() ++ e.EndObject() ++ return nil ++} ++ ++func (d decoder) unmarshalEmpty(protoreflect.Message) error { ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ if tok.Kind() != json.ObjectOpen { ++ return d.unexpectedTokenError(tok) ++ } ++ ++ for { ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ switch tok.Kind() { ++ case json.ObjectClose: ++ return nil ++ ++ case json.Name: ++ if d.opts.DiscardUnknown { ++ if err := d.skipJSONValue(); err != nil { ++ return err ++ } ++ continue ++ } ++ return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) ++ ++ default: ++ return d.unexpectedTokenError(tok) ++ } ++ } ++} ++ ++// The JSON representation for Struct is a JSON object that contains the encoded ++// Struct.fields map and follows the serialization rules for a map. ++ ++func (e encoder) marshalStruct(m protoreflect.Message) error { ++ fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) ++ return e.marshalMap(m.Get(fd).Map(), fd) ++} ++ ++func (d decoder) unmarshalStruct(m protoreflect.Message) error { ++ fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) ++ return d.unmarshalMap(m.Mutable(fd).Map(), fd) ++} ++ ++// The JSON representation for ListValue is JSON array that contains the encoded ++// ListValue.values repeated field and follows the serialization rules for a ++// repeated field. ++ ++func (e encoder) marshalListValue(m protoreflect.Message) error { ++ fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) ++ return e.marshalList(m.Get(fd).List(), fd) ++} ++ ++func (d decoder) unmarshalListValue(m protoreflect.Message) error { ++ fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) ++ return d.unmarshalList(m.Mutable(fd).List(), fd) ++} ++ ++// The JSON representation for a Value is dependent on the oneof field that is ++// set. Each of the field in the oneof has its own custom serialization rule. A ++// Value message needs to be a oneof field set, else it is an error. 
++ ++func (e encoder) marshalKnownValue(m protoreflect.Message) error { ++ od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) ++ fd := m.WhichOneof(od) ++ if fd == nil { ++ return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname) ++ } ++ if fd.Number() == genid.Value_NumberValue_field_number { ++ if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) { ++ return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v) ++ } ++ } ++ return e.marshalSingular(m.Get(fd), fd) ++} ++ ++func (d decoder) unmarshalKnownValue(m protoreflect.Message) error { ++ tok, err := d.Peek() ++ if err != nil { ++ return err ++ } ++ ++ var fd protoreflect.FieldDescriptor ++ var val protoreflect.Value ++ switch tok.Kind() { ++ case json.Null: ++ d.Read() ++ fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) ++ val = protoreflect.ValueOfEnum(0) ++ ++ case json.Bool: ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) ++ val = protoreflect.ValueOfBool(tok.Bool()) ++ ++ case json.Number: ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number) ++ var ok bool ++ val, ok = unmarshalFloat(tok, 64) ++ if !ok { ++ return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) ++ } ++ ++ case json.String: ++ // A JSON string may have been encoded from the number_value field, ++ // e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows ++ // for it to be in JSON string form. Given this custom encoding spec, ++ // however, there is no way to identify that and hence a JSON string is ++ // always assigned to the string_value field, which means that certain ++ // encoding cannot be parsed back to the same field. ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) ++ val = protoreflect.ValueOfString(tok.ParsedString()) ++ ++ case json.ObjectOpen: ++ fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) ++ val = m.NewField(fd) ++ if err := d.unmarshalStruct(val.Message()); err != nil { ++ return err ++ } ++ ++ case json.ArrayOpen: ++ fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number) ++ val = m.NewField(fd) ++ if err := d.unmarshalListValue(val.Message()); err != nil { ++ return err ++ } ++ ++ default: ++ return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) ++ } ++ ++ m.Set(fd, val) ++ return nil ++} ++ ++// The JSON representation for a Duration is a JSON string that ends in the ++// suffix "s" (indicating seconds) and is preceded by the number of seconds, ++// with nanoseconds expressed as fractional seconds. ++// ++// Durations less than one second are represented with a 0 seconds field and a ++// positive or negative nanos field. For durations of one second or more, a ++// non-zero value for the nanos field must be of the same sign as the seconds ++// field. ++// ++// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive. ++// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive. 
++ ++const ( ++ secondsInNanos = 999999999 ++ maxSecondsInDuration = 315576000000 ++) ++ ++func (e encoder) marshalDuration(m protoreflect.Message) error { ++ fds := m.Descriptor().Fields() ++ fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) ++ fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) ++ ++ secsVal := m.Get(fdSeconds) ++ nanosVal := m.Get(fdNanos) ++ secs := secsVal.Int() ++ nanos := nanosVal.Int() ++ if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { ++ return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs) ++ } ++ if nanos < -secondsInNanos || nanos > secondsInNanos { ++ return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos) ++ } ++ if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) { ++ return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname) ++ } ++ // Generated output always contains 0, 3, 6, or 9 fractional digits, ++ // depending on required precision, followed by the suffix "s". ++ var sign string ++ if secs < 0 || nanos < 0 { ++ sign, secs, nanos = "-", -1*secs, -1*nanos ++ } ++ x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) ++ x = strings.TrimSuffix(x, "000") ++ x = strings.TrimSuffix(x, "000") ++ x = strings.TrimSuffix(x, ".000") ++ e.WriteString(x + "s") ++ return nil ++} ++ ++func (d decoder) unmarshalDuration(m protoreflect.Message) error { ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ if tok.Kind() != json.String { ++ return d.unexpectedTokenError(tok) ++ } ++ ++ secs, nanos, ok := parseDuration(tok.ParsedString()) ++ if !ok { ++ return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString()) ++ } ++ // Validate seconds. No need to validate nanos because parseDuration would ++ // have covered that already. ++ if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { ++ return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString()) ++ } ++ ++ fds := m.Descriptor().Fields() ++ fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) ++ fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) ++ ++ m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) ++ m.Set(fdNanos, protoreflect.ValueOfInt32(nanos)) ++ return nil ++} ++ ++// parseDuration parses the given input string for seconds and nanoseconds value ++// for the Duration JSON format. The format is a decimal number with a suffix ++// 's'. It can have optional plus/minus sign. There needs to be at least an ++// integer or fractional part. Fractional part is limited to 9 digits only for ++// nanoseconds precision, regardless of whether there are trailing zero digits. ++// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s. ++func parseDuration(input string) (int64, int32, bool) { ++ b := []byte(input) ++ size := len(b) ++ if size < 2 { ++ return 0, 0, false ++ } ++ if b[size-1] != 's' { ++ return 0, 0, false ++ } ++ b = b[:size-1] ++ ++ // Read optional plus/minus symbol. ++ var neg bool ++ switch b[0] { ++ case '-': ++ neg = true ++ b = b[1:] ++ case '+': ++ b = b[1:] ++ } ++ if len(b) == 0 { ++ return 0, 0, false ++ } ++ ++ // Read the integer part. ++ var intp []byte ++ switch { ++ case b[0] == '0': ++ b = b[1:] ++ ++ case '1' <= b[0] && b[0] <= '9': ++ intp = b[0:] ++ b = b[1:] ++ n := 1 ++ for len(b) > 0 && '0' <= b[0] && b[0] <= '9' { ++ n++ ++ b = b[1:] ++ } ++ intp = intp[:n] ++ ++ case b[0] == '.': ++ // Continue below. 
++ ++ default: ++ return 0, 0, false ++ } ++ ++ hasFrac := false ++ var frac [9]byte ++ if len(b) > 0 { ++ if b[0] != '.' { ++ return 0, 0, false ++ } ++ // Read the fractional part. ++ b = b[1:] ++ n := 0 ++ for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' { ++ frac[n] = b[0] ++ n++ ++ b = b[1:] ++ } ++ // It is not valid if there are more bytes left. ++ if len(b) > 0 { ++ return 0, 0, false ++ } ++ // Pad fractional part with 0s. ++ for i := n; i < 9; i++ { ++ frac[i] = '0' ++ } ++ hasFrac = true ++ } ++ ++ var secs int64 ++ if len(intp) > 0 { ++ var err error ++ secs, err = strconv.ParseInt(string(intp), 10, 64) ++ if err != nil { ++ return 0, 0, false ++ } ++ } ++ ++ var nanos int64 ++ if hasFrac { ++ nanob := bytes.TrimLeft(frac[:], "0") ++ if len(nanob) > 0 { ++ var err error ++ nanos, err = strconv.ParseInt(string(nanob), 10, 32) ++ if err != nil { ++ return 0, 0, false ++ } ++ } ++ } ++ ++ if neg { ++ if secs > 0 { ++ secs = -secs ++ } ++ if nanos > 0 { ++ nanos = -nanos ++ } ++ } ++ return secs, int32(nanos), true ++} ++ ++// The JSON representation for a Timestamp is a JSON string in the RFC 3339 ++// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where ++// {year} is always expressed using four digits while {month}, {day}, {hour}, ++// {min}, and {sec} are zero-padded to two digits each. The fractional seconds, ++// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The ++// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding ++// should always use UTC (as indicated by "Z") and a decoder should be able to ++// accept both UTC and other timezones (as indicated by an offset). ++// ++// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z ++// inclusive. ++// Timestamp.nanos must be from 0 to 999,999,999 inclusive. ++ ++const ( ++ maxTimestampSeconds = 253402300799 ++ minTimestampSeconds = -62135596800 ++) ++ ++func (e encoder) marshalTimestamp(m protoreflect.Message) error { ++ fds := m.Descriptor().Fields() ++ fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) ++ fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) ++ ++ secsVal := m.Get(fdSeconds) ++ nanosVal := m.Get(fdNanos) ++ secs := secsVal.Int() ++ nanos := nanosVal.Int() ++ if secs < minTimestampSeconds || secs > maxTimestampSeconds { ++ return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs) ++ } ++ if nanos < 0 || nanos > secondsInNanos { ++ return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos) ++ } ++ // Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3, ++ // 6 or 9 fractional digits. ++ t := time.Unix(secs, nanos).UTC() ++ x := t.Format("2006-01-02T15:04:05.000000000") ++ x = strings.TrimSuffix(x, "000") ++ x = strings.TrimSuffix(x, "000") ++ x = strings.TrimSuffix(x, ".000") ++ e.WriteString(x + "Z") ++ return nil ++} ++ ++func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ if tok.Kind() != json.String { ++ return d.unexpectedTokenError(tok) ++ } ++ ++ s := tok.ParsedString() ++ t, err := time.Parse(time.RFC3339Nano, s) ++ if err != nil { ++ return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) ++ } ++ // Validate seconds. 
++ secs := t.Unix() ++ if secs < minTimestampSeconds || secs > maxTimestampSeconds { ++ return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) ++ } ++ // Validate subseconds. ++ i := strings.LastIndexByte(s, '.') // start of subsecond field ++ j := strings.LastIndexAny(s, "Z-+") // start of timezone field ++ if i >= 0 && j >= i && j-i > len(".999999999") { ++ return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) ++ } ++ ++ fds := m.Descriptor().Fields() ++ fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) ++ fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) ++ ++ m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) ++ m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond()))) ++ return nil ++} ++ ++// The JSON representation for a FieldMask is a JSON string where paths are ++// separated by a comma. Fields name in each path are converted to/from ++// lower-camel naming conventions. Encoding should fail if the path name would ++// end up differently after a round-trip. ++ ++func (e encoder) marshalFieldMask(m protoreflect.Message) error { ++ fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) ++ list := m.Get(fd).List() ++ paths := make([]string, 0, list.Len()) ++ ++ for i := 0; i < list.Len(); i++ { ++ s := list.Get(i).String() ++ if !protoreflect.FullName(s).IsValid() { ++ return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) ++ } ++ // Return error if conversion to camelCase is not reversible. ++ cc := strs.JSONCamelCase(s) ++ if s != strs.JSONSnakeCase(cc) { ++ return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s) ++ } ++ paths = append(paths, cc) ++ } ++ ++ e.WriteString(strings.Join(paths, ",")) ++ return nil ++} ++ ++func (d decoder) unmarshalFieldMask(m protoreflect.Message) error { ++ tok, err := d.Read() ++ if err != nil { ++ return err ++ } ++ if tok.Kind() != json.String { ++ return d.unexpectedTokenError(tok) ++ } ++ str := strings.TrimSpace(tok.ParsedString()) ++ if str == "" { ++ return nil ++ } ++ paths := strings.Split(str, ",") ++ ++ fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) ++ list := m.Mutable(fd).List() ++ ++ for _, s0 := range paths { ++ s := strs.JSONSnakeCase(s0) ++ if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() { ++ return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) ++ } ++ list.Append(protoreflect.ValueOfString(s)) ++ } ++ return nil ++} +diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +index 179d6e8..4921b2d 100644 +--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go ++++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +@@ -17,7 +17,7 @@ import ( + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + ) + +@@ -103,7 +103,7 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { + } + + // unmarshalMessage unmarshals into the given protoreflect.Message. 
+-func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { ++func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") +@@ -150,24 +150,24 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { + } + + // Resolve the field descriptor. +- var name pref.Name +- var fd pref.FieldDescriptor +- var xt pref.ExtensionType ++ var name protoreflect.Name ++ var fd protoreflect.FieldDescriptor ++ var xt protoreflect.ExtensionType + var xtErr error + var isFieldNumberName bool + + switch tok.NameKind() { + case text.IdentName: +- name = pref.Name(tok.IdentName()) ++ name = protoreflect.Name(tok.IdentName()) + fd = fieldDescs.ByTextName(string(name)) + + case text.TypeName: + // Handle extensions only. This code path is not for Any. +- xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName())) ++ xt, xtErr = d.opts.Resolver.FindExtensionByName(protoreflect.FullName(tok.TypeName())) + + case text.FieldNumber: + isFieldNumberName = true +- num := pref.FieldNumber(tok.FieldNumber()) ++ num := protoreflect.FieldNumber(tok.FieldNumber()) + if !num.IsValid() { + return d.newError(tok.Pos(), "invalid field number: %d", num) + } +@@ -215,7 +215,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { + switch { + case fd.IsList(): + kind := fd.Kind() +- if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { ++ if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + +@@ -232,7 +232,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { + + default: + kind := fd.Kind() +- if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { ++ if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + +@@ -262,11 +262,11 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { + + // unmarshalSingular unmarshals a non-repeated field value specified by the + // given FieldDescriptor. +-func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { +- var val pref.Value ++func (d decoder) unmarshalSingular(fd protoreflect.FieldDescriptor, m protoreflect.Message) error { ++ var val protoreflect.Value + var err error + switch fd.Kind() { +- case pref.MessageKind, pref.GroupKind: ++ case protoreflect.MessageKind, protoreflect.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), true) + default: +@@ -280,94 +280,94 @@ func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) erro + + // unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the + // given FieldDescriptor. 
+-func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { ++func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok, err := d.Read() + if err != nil { +- return pref.Value{}, err ++ return protoreflect.Value{}, err + } + + if tok.Kind() != text.Scalar { +- return pref.Value{}, d.unexpectedTokenError(tok) ++ return protoreflect.Value{}, d.unexpectedTokenError(tok) + } + + kind := fd.Kind() + switch kind { +- case pref.BoolKind: ++ case protoreflect.BoolKind: + if b, ok := tok.Bool(); ok { +- return pref.ValueOfBool(b), nil ++ return protoreflect.ValueOfBool(b), nil + } + +- case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: ++ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if n, ok := tok.Int32(); ok { +- return pref.ValueOfInt32(n), nil ++ return protoreflect.ValueOfInt32(n), nil + } + +- case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: ++ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if n, ok := tok.Int64(); ok { +- return pref.ValueOfInt64(n), nil ++ return protoreflect.ValueOfInt64(n), nil + } + +- case pref.Uint32Kind, pref.Fixed32Kind: ++ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if n, ok := tok.Uint32(); ok { +- return pref.ValueOfUint32(n), nil ++ return protoreflect.ValueOfUint32(n), nil + } + +- case pref.Uint64Kind, pref.Fixed64Kind: ++ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if n, ok := tok.Uint64(); ok { +- return pref.ValueOfUint64(n), nil ++ return protoreflect.ValueOfUint64(n), nil + } + +- case pref.FloatKind: ++ case protoreflect.FloatKind: + if n, ok := tok.Float32(); ok { +- return pref.ValueOfFloat32(n), nil ++ return protoreflect.ValueOfFloat32(n), nil + } + +- case pref.DoubleKind: ++ case protoreflect.DoubleKind: + if n, ok := tok.Float64(); ok { +- return pref.ValueOfFloat64(n), nil ++ return protoreflect.ValueOfFloat64(n), nil + } + +- case pref.StringKind: ++ case protoreflect.StringKind: + if s, ok := tok.String(); ok { + if strs.EnforceUTF8(fd) && !utf8.ValidString(s) { +- return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") ++ return protoreflect.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") + } +- return pref.ValueOfString(s), nil ++ return protoreflect.ValueOfString(s), nil + } + +- case pref.BytesKind: ++ case protoreflect.BytesKind: + if b, ok := tok.String(); ok { +- return pref.ValueOfBytes([]byte(b)), nil ++ return protoreflect.ValueOfBytes([]byte(b)), nil + } + +- case pref.EnumKind: ++ case protoreflect.EnumKind: + if lit, ok := tok.Enum(); ok { + // Lookup EnumNumber based on name. +- if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil { +- return pref.ValueOfEnum(enumVal.Number()), nil ++ if enumVal := fd.Enum().Values().ByName(protoreflect.Name(lit)); enumVal != nil { ++ return protoreflect.ValueOfEnum(enumVal.Number()), nil + } + } + if num, ok := tok.Int32(); ok { +- return pref.ValueOfEnum(pref.EnumNumber(num)), nil ++ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(num)), nil + } + + default: + panic(fmt.Sprintf("invalid scalar kind %v", kind)) + } + +- return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) ++ return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + } + + // unmarshalList unmarshals into given protoreflect.List. 
A list value can + // either be in [] syntax or simply just a single scalar/message value. +-func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { ++func (d decoder) unmarshalList(fd protoreflect.FieldDescriptor, list protoreflect.List) error { + tok, err := d.Peek() + if err != nil { + return err + } + + switch fd.Kind() { +- case pref.MessageKind, pref.GroupKind: ++ case protoreflect.MessageKind, protoreflect.GroupKind: + switch tok.Kind() { + case text.ListOpen: + d.Read() +@@ -441,22 +441,22 @@ func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { + + // unmarshalMap unmarshals into given protoreflect.Map. A map value is a + // textproto message containing {key: , value: }. +-func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { ++func (d decoder) unmarshalMap(fd protoreflect.FieldDescriptor, mmap protoreflect.Map) error { + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside + // unmarshalMapEntry. +- var unmarshalMapValue func() (pref.Value, error) ++ var unmarshalMapValue func() (protoreflect.Value, error) + switch fd.MapValue().Kind() { +- case pref.MessageKind, pref.GroupKind: +- unmarshalMapValue = func() (pref.Value, error) { ++ case protoreflect.MessageKind, protoreflect.GroupKind: ++ unmarshalMapValue = func() (protoreflect.Value, error) { + pval := mmap.NewValue() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { +- return pref.Value{}, err ++ return protoreflect.Value{}, err + } + return pval, nil + } + default: +- unmarshalMapValue = func() (pref.Value, error) { ++ unmarshalMapValue = func() (protoreflect.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } +@@ -494,9 +494,9 @@ func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { + + // unmarshalMap unmarshals into given protoreflect.Map. A map value is a + // textproto message containing {key: , value: }. +-func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error { +- var key pref.MapKey +- var pval pref.Value ++func (d decoder) unmarshalMapEntry(fd protoreflect.FieldDescriptor, mmap protoreflect.Map, unmarshalMapValue func() (protoreflect.Value, error)) error { ++ var key protoreflect.MapKey ++ var pval protoreflect.Value + Loop: + for { + // Read field name. +@@ -520,7 +520,7 @@ Loop: + return d.unexpectedTokenError(tok) + } + +- switch name := pref.Name(tok.IdentName()); name { ++ switch name := protoreflect.Name(tok.IdentName()); name { + case genid.MapEntry_Key_field_name: + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") +@@ -535,7 +535,7 @@ Loop: + key = val.MapKey() + + case genid.MapEntry_Value_field_name: +- if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { ++ if kind := fd.MapValue().Kind(); (kind != protoreflect.MessageKind) && (kind != protoreflect.GroupKind) { + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } +@@ -561,7 +561,7 @@ Loop: + } + if !pval.IsValid() { + switch fd.MapValue().Kind() { +- case pref.MessageKind, pref.GroupKind: ++ case protoreflect.MessageKind, protoreflect.GroupKind: + // If value field is not set for message/group types, construct an + // empty one as default. + pval = mmap.NewValue() +@@ -575,7 +575,7 @@ Loop: + + // unmarshalAny unmarshals an Any textproto. 
It can either be in expanded form + // or non-expanded form. +-func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { ++func (d decoder) unmarshalAny(m protoreflect.Message, checkDelims bool) error { + var typeURL string + var bValue []byte + var seenTypeUrl bool +@@ -619,7 +619,7 @@ Loop: + return d.syntaxError(tok.Pos(), "missing field separator :") + } + +- switch name := pref.Name(tok.IdentName()); name { ++ switch name := protoreflect.Name(tok.IdentName()); name { + case genid.Any_TypeUrl_field_name: + if seenTypeUrl { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) +@@ -686,10 +686,10 @@ Loop: + + fds := m.Descriptor().Fields() + if len(typeURL) > 0 { +- m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) ++ m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), protoreflect.ValueOfString(typeURL)) + } + if len(bValue) > 0 { +- m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) ++ m.Set(fds.ByNumber(genid.Any_Value_field_number), protoreflect.ValueOfBytes(bValue)) + } + return nil + } +diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +index 8d5304d..ebf6c65 100644 +--- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go ++++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +@@ -20,7 +20,6 @@ import ( + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +- pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + ) + +@@ -150,7 +149,7 @@ type encoder struct { + } + + // marshalMessage marshals the given protoreflect.Message. +-func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { ++func (e encoder) marshalMessage(m protoreflect.Message, inclDelims bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") +@@ -190,7 +189,7 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { + } + + // marshalField marshals the given field with protoreflect.Value. +-func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error { ++func (e encoder) marshalField(name string, val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(name, val.List(), fd) +@@ -204,40 +203,40 @@ func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescript + + // marshalSingular marshals the given non-repeated field value. This includes + // all scalar types, enums, messages, and groups. 
+-func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { ++func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + kind := fd.Kind() + switch kind { +- case pref.BoolKind: ++ case protoreflect.BoolKind: + e.WriteBool(val.Bool()) + +- case pref.StringKind: ++ case protoreflect.StringKind: + s := val.String() + if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) { + return errors.InvalidUTF8(string(fd.FullName())) + } + e.WriteString(s) + +- case pref.Int32Kind, pref.Int64Kind, +- pref.Sint32Kind, pref.Sint64Kind, +- pref.Sfixed32Kind, pref.Sfixed64Kind: ++ case protoreflect.Int32Kind, protoreflect.Int64Kind, ++ protoreflect.Sint32Kind, protoreflect.Sint64Kind, ++ protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: + e.WriteInt(val.Int()) + +- case pref.Uint32Kind, pref.Uint64Kind, +- pref.Fixed32Kind, pref.Fixed64Kind: ++ case protoreflect.Uint32Kind, protoreflect.Uint64Kind, ++ protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: + e.WriteUint(val.Uint()) + +- case pref.FloatKind: ++ case protoreflect.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + +- case pref.DoubleKind: ++ case protoreflect.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 64) + +- case pref.BytesKind: ++ case protoreflect.BytesKind: + e.WriteString(string(val.Bytes())) + +- case pref.EnumKind: ++ case protoreflect.EnumKind: + num := val.Enum() + if desc := fd.Enum().Values().ByNumber(num); desc != nil { + e.WriteLiteral(string(desc.Name())) +@@ -246,7 +245,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error + e.WriteInt(int64(num)) + } + +- case pref.MessageKind, pref.GroupKind: ++ case protoreflect.MessageKind, protoreflect.GroupKind: + return e.marshalMessage(val.Message(), true) + + default: +@@ -256,7 +255,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error + } + + // marshalList marshals the given protoreflect.List as multiple name-value fields. +-func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error { ++func (e encoder) marshalList(name string, list protoreflect.List, fd protoreflect.FieldDescriptor) error { + size := list.Len() + for i := 0; i < size; i++ { + e.WriteName(name) +@@ -268,9 +267,9 @@ func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescripto + } + + // marshalMap marshals the given protoreflect.Map as multiple name-value fields. +-func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { ++func (e encoder) marshalMap(name string, mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + var err error +- order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool { ++ order.RangeEntries(mmap, order.GenericKeyOrder, func(key protoreflect.MapKey, val protoreflect.Value) bool { + e.WriteName(name) + e.StartMessage() + defer e.EndMessage() +@@ -334,7 +333,7 @@ func (e encoder) marshalUnknown(b []byte) { + + // marshalAny marshals the given google.protobuf.Any message in expanded form. + // It returns true if it was able to marshal, else false. +-func (e encoder) marshalAny(any pref.Message) bool { ++func (e encoder) marshalAny(any protoreflect.Message) bool { + // Construct the embedded message. 
+ fds := any.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) +diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +index 9c61112..f4b4686 100644 +--- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go ++++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +@@ -3,7 +3,7 @@ + // license that can be found in the LICENSE file. + + // Package protowire parses and formats the raw wire encoding. +-// See https://developers.google.com/protocol-buffers/docs/encoding. ++// See https://protobuf.dev/programming-guides/encoding. + // + // For marshaling and unmarshaling entire protobuf messages, + // use the "google.golang.org/protobuf/proto" package instead. +@@ -29,12 +29,8 @@ const ( + ) + + // IsValid reports whether the field number is semantically valid. +-// +-// Note that while numbers within the reserved range are semantically invalid, +-// they are syntactically valid in the wire format. +-// Implementations may treat records with reserved field numbers as unknown. + func (n Number) IsValid() bool { +- return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber ++ return MinValidNumber <= n && n <= MaxValidNumber + } + + // Type represents the wire type. +@@ -516,6 +512,7 @@ func EncodeTag(num Number, typ Type) uint64 { + } + + // DecodeZigZag decodes a zig-zag-encoded uint64 as an int64. ++// + // Input: {…, 5, 3, 1, 0, 2, 4, 6, …} + // Output: {…, -3, -2, -1, 0, +1, +2, +3, …} + func DecodeZigZag(x uint64) int64 { +@@ -523,6 +520,7 @@ func DecodeZigZag(x uint64) int64 { + } + + // EncodeZigZag encodes an int64 as a zig-zag-encoded uint64. ++// + // Input: {…, -3, -2, -1, 0, +1, +2, +3, …} + // Output: {…, 5, 3, 1, 0, 2, 4, 6, …} + func EncodeZigZag(x int64) uint64 { +@@ -530,6 +528,7 @@ func EncodeZigZag(x int64) uint64 { + } + + // DecodeBool decodes a uint64 as a bool. ++// + // Input: { 0, 1, 2, …} + // Output: {false, true, true, …} + func DecodeBool(x uint64) bool { +@@ -537,6 +536,7 @@ func DecodeBool(x uint64) bool { + } + + // EncodeBool encodes a bool as a uint64. 
++// + // Input: {false, true} + // Output: { 0, 1} + func EncodeBool(x bool) uint64 { +diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +index 360c633..db5248e 100644 +--- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go ++++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +@@ -14,7 +14,7 @@ import ( + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/pragma" +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + type list interface { +@@ -30,17 +30,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { + if isRoot { + var name string + switch vs.(type) { +- case pref.Names: ++ case protoreflect.Names: + name = "Names" +- case pref.FieldNumbers: ++ case protoreflect.FieldNumbers: + name = "FieldNumbers" +- case pref.FieldRanges: ++ case protoreflect.FieldRanges: + name = "FieldRanges" +- case pref.EnumRanges: ++ case protoreflect.EnumRanges: + name = "EnumRanges" +- case pref.FileImports: ++ case protoreflect.FileImports: + name = "FileImports" +- case pref.Descriptor: ++ case protoreflect.Descriptor: + name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" + default: + name = reflect.ValueOf(vs).Elem().Type().Name() +@@ -50,17 +50,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { + + var ss []string + switch vs := vs.(type) { +- case pref.Names: ++ case protoreflect.Names: + for i := 0; i < vs.Len(); i++ { + ss = append(ss, fmt.Sprint(vs.Get(i))) + } + return start + joinStrings(ss, false) + end +- case pref.FieldNumbers: ++ case protoreflect.FieldNumbers: + for i := 0; i < vs.Len(); i++ { + ss = append(ss, fmt.Sprint(vs.Get(i))) + } + return start + joinStrings(ss, false) + end +- case pref.FieldRanges: ++ case protoreflect.FieldRanges: + for i := 0; i < vs.Len(); i++ { + r := vs.Get(i) + if r[0]+1 == r[1] { +@@ -70,7 +70,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { + } + } + return start + joinStrings(ss, false) + end +- case pref.EnumRanges: ++ case protoreflect.EnumRanges: + for i := 0; i < vs.Len(); i++ { + r := vs.Get(i) + if r[0] == r[1] { +@@ -80,7 +80,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { + } + } + return start + joinStrings(ss, false) + end +- case pref.FileImports: ++ case protoreflect.FileImports: + for i := 0; i < vs.Len(); i++ { + var rs records + rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") +@@ -88,11 +88,11 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { + } + return start + joinStrings(ss, allowMulti) + end + default: +- _, isEnumValue := vs.(pref.EnumValueDescriptors) ++ _, isEnumValue := vs.(protoreflect.EnumValueDescriptors) + for i := 0; i < vs.Len(); i++ { + m := reflect.ValueOf(vs).MethodByName("Get") + v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() +- ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue)) ++ ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue)) + } + return start + joinStrings(ss, allowMulti && isEnumValue) + end + } +@@ -106,20 +106,20 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { + // + // Using a list allows us to print the accessors in a sensible order. 
+ var descriptorAccessors = map[reflect.Type][]string{ +- reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, +- reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, +- reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, +- reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt +- reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, +- reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"}, +- reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"}, +- reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, ++ reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, ++ reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, ++ reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, ++ reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt ++ reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, ++ reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"}, ++ reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"}, ++ reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, + } + +-func FormatDesc(s fmt.State, r rune, t pref.Descriptor) { ++func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) + } +-func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { ++func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { + rv := reflect.ValueOf(t) + rt := rv.MethodByName("ProtoType").Type().In(0) + +@@ -128,7 +128,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { + start = rt.Name() + "{" + } + +- _, isFile := t.(pref.FileDescriptor) ++ _, isFile := t.(protoreflect.FileDescriptor) + rs := records{allowMulti: allowMulti} + if t.IsPlaceholder() { + if isFile { +@@ -146,7 +146,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { + rs.Append(rv, "Name") + } + switch t := t.(type) { +- case pref.FieldDescriptor: ++ case protoreflect.FieldDescriptor: + for _, s := range descriptorAccessors[rt] { + switch s { + case "MapKey": +@@ -156,9 +156,9 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { + case "MapValue": + if v := t.MapValue(); v != nil { + 
switch v.Kind() { +- case pref.EnumKind: ++ case protoreflect.EnumKind: + rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) +- case pref.MessageKind, pref.GroupKind: ++ case protoreflect.MessageKind, protoreflect.GroupKind: + rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + default: + rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) +@@ -180,7 +180,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { + rs.Append(rv, s) + } + } +- case pref.OneofDescriptor: ++ case protoreflect.OneofDescriptor: + var ss []string + fs := t.Fields() + for i := 0; i < fs.Len(); i++ { +@@ -216,7 +216,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { + if !rv.IsValid() { + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + } +- if _, ok := rv.Interface().(pref.Value); ok { ++ if _, ok := rv.Interface().(protoreflect.Value); ok { + rv = rv.MethodByName("Interface").Call(nil)[0] + if !rv.IsNil() { + rv = rv.Elem() +@@ -250,9 +250,9 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { + switch v := v.(type) { + case list: + s = formatListOpt(v, false, rs.allowMulti) +- case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor: +- s = string(v.(pref.Descriptor).Name()) +- case pref.Descriptor: ++ case protoreflect.FieldDescriptor, protoreflect.OneofDescriptor, protoreflect.EnumValueDescriptor, protoreflect.MethodDescriptor: ++ s = string(v.(protoreflect.Descriptor).Name()) ++ case protoreflect.Descriptor: + s = string(v.FullName()) + case string: + s = strconv.Quote(v) +diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go +index fdd9b13..328dc73 100644 +--- a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go ++++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go +@@ -15,8 +15,8 @@ import ( + "strconv" + + ptext "google.golang.org/protobuf/internal/encoding/text" +- errors "google.golang.org/protobuf/internal/errors" +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/internal/errors" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + // Format is the serialization format used to represent the default value. +@@ -35,56 +35,56 @@ const ( + + // Unmarshal deserializes the default string s according to the given kind k. + // When k is an enum, a list of enum value descriptors must be provided. 
+-func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) { ++func Unmarshal(s string, k protoreflect.Kind, evs protoreflect.EnumValueDescriptors, f Format) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { + switch k { +- case pref.BoolKind: ++ case protoreflect.BoolKind: + if f == GoTag { + switch s { + case "1": +- return pref.ValueOfBool(true), nil, nil ++ return protoreflect.ValueOfBool(true), nil, nil + case "0": +- return pref.ValueOfBool(false), nil, nil ++ return protoreflect.ValueOfBool(false), nil, nil + } + } else { + switch s { + case "true": +- return pref.ValueOfBool(true), nil, nil ++ return protoreflect.ValueOfBool(true), nil, nil + case "false": +- return pref.ValueOfBool(false), nil, nil ++ return protoreflect.ValueOfBool(false), nil, nil + } + } +- case pref.EnumKind: ++ case protoreflect.EnumKind: + if f == GoTag { + // Go tags use the numeric form of the enum value. + if n, err := strconv.ParseInt(s, 10, 32); err == nil { +- if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil { +- return pref.ValueOfEnum(ev.Number()), ev, nil ++ if ev := evs.ByNumber(protoreflect.EnumNumber(n)); ev != nil { ++ return protoreflect.ValueOfEnum(ev.Number()), ev, nil + } + } + } else { + // Descriptor default_value use the enum identifier. +- ev := evs.ByName(pref.Name(s)) ++ ev := evs.ByName(protoreflect.Name(s)) + if ev != nil { +- return pref.ValueOfEnum(ev.Number()), ev, nil ++ return protoreflect.ValueOfEnum(ev.Number()), ev, nil + } + } +- case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: ++ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if v, err := strconv.ParseInt(s, 10, 32); err == nil { +- return pref.ValueOfInt32(int32(v)), nil, nil ++ return protoreflect.ValueOfInt32(int32(v)), nil, nil + } +- case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: ++ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if v, err := strconv.ParseInt(s, 10, 64); err == nil { +- return pref.ValueOfInt64(int64(v)), nil, nil ++ return protoreflect.ValueOfInt64(int64(v)), nil, nil + } +- case pref.Uint32Kind, pref.Fixed32Kind: ++ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if v, err := strconv.ParseUint(s, 10, 32); err == nil { +- return pref.ValueOfUint32(uint32(v)), nil, nil ++ return protoreflect.ValueOfUint32(uint32(v)), nil, nil + } +- case pref.Uint64Kind, pref.Fixed64Kind: ++ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if v, err := strconv.ParseUint(s, 10, 64); err == nil { +- return pref.ValueOfUint64(uint64(v)), nil, nil ++ return protoreflect.ValueOfUint64(uint64(v)), nil, nil + } +- case pref.FloatKind, pref.DoubleKind: ++ case protoreflect.FloatKind, protoreflect.DoubleKind: + var v float64 + var err error + switch s { +@@ -98,29 +98,29 @@ func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) ( + v, err = strconv.ParseFloat(s, 64) + } + if err == nil { +- if k == pref.FloatKind { +- return pref.ValueOfFloat32(float32(v)), nil, nil ++ if k == protoreflect.FloatKind { ++ return protoreflect.ValueOfFloat32(float32(v)), nil, nil + } else { +- return pref.ValueOfFloat64(float64(v)), nil, nil ++ return protoreflect.ValueOfFloat64(float64(v)), nil, nil + } + } +- case pref.StringKind: ++ case protoreflect.StringKind: + // String values are already unescaped and can be used as is. 
+- return pref.ValueOfString(s), nil, nil +- case pref.BytesKind: ++ return protoreflect.ValueOfString(s), nil, nil ++ case protoreflect.BytesKind: + if b, ok := unmarshalBytes(s); ok { +- return pref.ValueOfBytes(b), nil, nil ++ return protoreflect.ValueOfBytes(b), nil, nil + } + } +- return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) ++ return protoreflect.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) + } + + // Marshal serializes v as the default string according to the given kind k. + // When specifying the Descriptor format for an enum kind, the associated + // enum value descriptor must be provided. +-func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) { ++func Marshal(v protoreflect.Value, ev protoreflect.EnumValueDescriptor, k protoreflect.Kind, f Format) (string, error) { + switch k { +- case pref.BoolKind: ++ case protoreflect.BoolKind: + if f == GoTag { + if v.Bool() { + return "1", nil +@@ -134,17 +134,17 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) ( + return "false", nil + } + } +- case pref.EnumKind: ++ case protoreflect.EnumKind: + if f == GoTag { + return strconv.FormatInt(int64(v.Enum()), 10), nil + } else { + return string(ev.Name()), nil + } +- case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: ++ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return strconv.FormatInt(v.Int(), 10), nil +- case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind: ++ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return strconv.FormatUint(v.Uint(), 10), nil +- case pref.FloatKind, pref.DoubleKind: ++ case protoreflect.FloatKind, protoreflect.DoubleKind: + f := v.Float() + switch { + case math.IsInf(f, -1): +@@ -154,16 +154,16 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) ( + case math.IsNaN(f): + return "nan", nil + default: +- if k == pref.FloatKind { ++ if k == protoreflect.FloatKind { + return strconv.FormatFloat(f, 'g', -1, 32), nil + } else { + return strconv.FormatFloat(f, 'g', -1, 64), nil + } + } +- case pref.StringKind: ++ case protoreflect.StringKind: + // String values are serialized as is without any escaping. + return v.String(), nil +- case pref.BytesKind: ++ case protoreflect.BytesKind: + if s, ok := marshalBytes(v.Bytes()); ok { + return s, nil + } +diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +new file mode 100644 +index 0000000..d043a6e +--- /dev/null ++++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +@@ -0,0 +1,340 @@ ++// Copyright 2018 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package json ++ ++import ( ++ "bytes" ++ "fmt" ++ "io" ++ "regexp" ++ "unicode/utf8" ++ ++ "google.golang.org/protobuf/internal/errors" ++) ++ ++// call specifies which Decoder method was invoked. ++type call uint8 ++ ++const ( ++ readCall call = iota ++ peekCall ++) ++ ++const unexpectedFmt = "unexpected token %s" ++ ++// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. 
++var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) ++ ++// Decoder is a token-based JSON decoder. ++type Decoder struct { ++ // lastCall is last method called, either readCall or peekCall. ++ // Initial value is readCall. ++ lastCall call ++ ++ // lastToken contains the last read token. ++ lastToken Token ++ ++ // lastErr contains the last read error. ++ lastErr error ++ ++ // openStack is a stack containing ObjectOpen and ArrayOpen values. The ++ // top of stack represents the object or the array the current value is ++ // directly located in. ++ openStack []Kind ++ ++ // orig is used in reporting line and column. ++ orig []byte ++ // in contains the unconsumed input. ++ in []byte ++} ++ ++// NewDecoder returns a Decoder to read the given []byte. ++func NewDecoder(b []byte) *Decoder { ++ return &Decoder{orig: b, in: b} ++} ++ ++// Peek looks ahead and returns the next token kind without advancing a read. ++func (d *Decoder) Peek() (Token, error) { ++ defer func() { d.lastCall = peekCall }() ++ if d.lastCall == readCall { ++ d.lastToken, d.lastErr = d.Read() ++ } ++ return d.lastToken, d.lastErr ++} ++ ++// Read returns the next JSON token. ++// It will return an error if there is no valid token. ++func (d *Decoder) Read() (Token, error) { ++ const scalar = Null | Bool | Number | String ++ ++ defer func() { d.lastCall = readCall }() ++ if d.lastCall == peekCall { ++ return d.lastToken, d.lastErr ++ } ++ ++ tok, err := d.parseNext() ++ if err != nil { ++ return Token{}, err ++ } ++ ++ switch tok.kind { ++ case EOF: ++ if len(d.openStack) != 0 || ++ d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 { ++ return Token{}, ErrUnexpectedEOF ++ } ++ ++ case Null: ++ if !d.isValueNext() { ++ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) ++ } ++ ++ case Bool, Number: ++ if !d.isValueNext() { ++ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) ++ } ++ ++ case String: ++ if d.isValueNext() { ++ break ++ } ++ // This string token should only be for a field name. ++ if d.lastToken.kind&(ObjectOpen|comma) == 0 { ++ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) ++ } ++ if len(d.in) == 0 { ++ return Token{}, ErrUnexpectedEOF ++ } ++ if c := d.in[0]; c != ':' { ++ return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c)) ++ } ++ tok.kind = Name ++ d.consume(1) ++ ++ case ObjectOpen, ArrayOpen: ++ if !d.isValueNext() { ++ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) ++ } ++ d.openStack = append(d.openStack, tok.kind) ++ ++ case ObjectClose: ++ if len(d.openStack) == 0 || ++ d.lastToken.kind == comma || ++ d.openStack[len(d.openStack)-1] != ObjectOpen { ++ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) ++ } ++ d.openStack = d.openStack[:len(d.openStack)-1] ++ ++ case ArrayClose: ++ if len(d.openStack) == 0 || ++ d.lastToken.kind == comma || ++ d.openStack[len(d.openStack)-1] != ArrayOpen { ++ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) ++ } ++ d.openStack = d.openStack[:len(d.openStack)-1] ++ ++ case comma: ++ if len(d.openStack) == 0 || ++ d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 { ++ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) ++ } ++ } ++ ++ // Update d.lastToken only after validating token to be in the right sequence. 
++ d.lastToken = tok ++ ++ if d.lastToken.kind == comma { ++ return d.Read() ++ } ++ return tok, nil ++} ++ ++// Any sequence that looks like a non-delimiter (for error reporting). ++var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`) ++ ++// parseNext parses for the next JSON token. It returns a Token object for ++// different types, except for Name. It does not handle whether the next token ++// is in a valid sequence or not. ++func (d *Decoder) parseNext() (Token, error) { ++ // Trim leading spaces. ++ d.consume(0) ++ ++ in := d.in ++ if len(in) == 0 { ++ return d.consumeToken(EOF, 0), nil ++ } ++ ++ switch in[0] { ++ case 'n': ++ if n := matchWithDelim("null", in); n != 0 { ++ return d.consumeToken(Null, n), nil ++ } ++ ++ case 't': ++ if n := matchWithDelim("true", in); n != 0 { ++ return d.consumeBoolToken(true, n), nil ++ } ++ ++ case 'f': ++ if n := matchWithDelim("false", in); n != 0 { ++ return d.consumeBoolToken(false, n), nil ++ } ++ ++ case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': ++ if n, ok := parseNumber(in); ok { ++ return d.consumeToken(Number, n), nil ++ } ++ ++ case '"': ++ s, n, err := d.parseString(in) ++ if err != nil { ++ return Token{}, err ++ } ++ return d.consumeStringToken(s, n), nil ++ ++ case '{': ++ return d.consumeToken(ObjectOpen, 1), nil ++ ++ case '}': ++ return d.consumeToken(ObjectClose, 1), nil ++ ++ case '[': ++ return d.consumeToken(ArrayOpen, 1), nil ++ ++ case ']': ++ return d.consumeToken(ArrayClose, 1), nil ++ ++ case ',': ++ return d.consumeToken(comma, 1), nil ++ } ++ return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in)) ++} ++ ++// newSyntaxError returns an error with line and column information useful for ++// syntax errors. ++func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { ++ e := errors.New(f, x...) ++ line, column := d.Position(pos) ++ return errors.New("syntax error (line %d:%d): %v", line, column, e) ++} ++ ++// Position returns line and column number of given index of the original input. ++// It will panic if index is out of range. ++func (d *Decoder) Position(idx int) (line int, column int) { ++ b := d.orig[:idx] ++ line = bytes.Count(b, []byte("\n")) + 1 ++ if i := bytes.LastIndexByte(b, '\n'); i >= 0 { ++ b = b[i+1:] ++ } ++ column = utf8.RuneCount(b) + 1 // ignore multi-rune characters ++ return line, column ++} ++ ++// currPos returns the current index position of d.in from d.orig. ++func (d *Decoder) currPos() int { ++ return len(d.orig) - len(d.in) ++} ++ ++// matchWithDelim matches s with the input b and verifies that the match ++// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]"). ++// As a special case, EOF is considered a delimiter. It returns the length of s ++// if there is a match, else 0. ++func matchWithDelim(s string, b []byte) int { ++ if !bytes.HasPrefix(b, []byte(s)) { ++ return 0 ++ } ++ ++ n := len(s) ++ if n < len(b) && isNotDelim(b[n]) { ++ return 0 ++ } ++ return n ++} ++ ++// isNotDelim returns true if given byte is a not delimiter character. ++func isNotDelim(c byte) bool { ++ return (c == '-' || c == '+' || c == '.' || c == '_' || ++ ('a' <= c && c <= 'z') || ++ ('A' <= c && c <= 'Z') || ++ ('0' <= c && c <= '9')) ++} ++ ++// consume consumes n bytes of input and any subsequent whitespace. 
++func (d *Decoder) consume(n int) { ++ d.in = d.in[n:] ++ for len(d.in) > 0 { ++ switch d.in[0] { ++ case ' ', '\n', '\r', '\t': ++ d.in = d.in[1:] ++ default: ++ return ++ } ++ } ++} ++ ++// isValueNext returns true if next type should be a JSON value: Null, ++// Number, String or Bool. ++func (d *Decoder) isValueNext() bool { ++ if len(d.openStack) == 0 { ++ return d.lastToken.kind == 0 ++ } ++ ++ start := d.openStack[len(d.openStack)-1] ++ switch start { ++ case ObjectOpen: ++ return d.lastToken.kind&Name != 0 ++ case ArrayOpen: ++ return d.lastToken.kind&(ArrayOpen|comma) != 0 ++ } ++ panic(fmt.Sprintf( ++ "unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v", ++ d.lastToken.kind, start)) ++} ++ ++// consumeToken constructs a Token for given Kind with raw value derived from ++// current d.in and given size, and consumes the given size-length of it. ++func (d *Decoder) consumeToken(kind Kind, size int) Token { ++ tok := Token{ ++ kind: kind, ++ raw: d.in[:size], ++ pos: len(d.orig) - len(d.in), ++ } ++ d.consume(size) ++ return tok ++} ++ ++// consumeBoolToken constructs a Token for a Bool kind with raw value derived from ++// current d.in and given size. ++func (d *Decoder) consumeBoolToken(b bool, size int) Token { ++ tok := Token{ ++ kind: Bool, ++ raw: d.in[:size], ++ pos: len(d.orig) - len(d.in), ++ boo: b, ++ } ++ d.consume(size) ++ return tok ++} ++ ++// consumeStringToken constructs a Token for a String kind with raw value derived ++// from current d.in and given size. ++func (d *Decoder) consumeStringToken(s string, size int) Token { ++ tok := Token{ ++ kind: String, ++ raw: d.in[:size], ++ pos: len(d.orig) - len(d.in), ++ str: s, ++ } ++ d.consume(size) ++ return tok ++} ++ ++// Clone returns a copy of the Decoder for use in reading ahead the next JSON ++// object, array or other values without affecting current Decoder. ++func (d *Decoder) Clone() *Decoder { ++ ret := *d ++ ret.openStack = append([]Kind(nil), ret.openStack...) ++ return &ret ++} +diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go +new file mode 100644 +index 0000000..2999d71 +--- /dev/null ++++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go +@@ -0,0 +1,254 @@ ++// Copyright 2018 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package json ++ ++import ( ++ "bytes" ++ "strconv" ++) ++ ++// parseNumber reads the given []byte for a valid JSON number. If it is valid, ++// it returns the number of bytes. Parsing logic follows the definition in ++// https://tools.ietf.org/html/rfc7159#section-6, and is based off ++// encoding/json.isValidNumber function. ++func parseNumber(input []byte) (int, bool) { ++ var n int ++ ++ s := input ++ if len(s) == 0 { ++ return 0, false ++ } ++ ++ // Optional - ++ if s[0] == '-' { ++ s = s[1:] ++ n++ ++ if len(s) == 0 { ++ return 0, false ++ } ++ } ++ ++ // Digits ++ switch { ++ case s[0] == '0': ++ s = s[1:] ++ n++ ++ ++ case '1' <= s[0] && s[0] <= '9': ++ s = s[1:] ++ n++ ++ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { ++ s = s[1:] ++ n++ ++ } ++ ++ default: ++ return 0, false ++ } ++ ++ // . followed by 1 or more digits. ++ if len(s) >= 2 && s[0] == '.' 
&& '0' <= s[1] && s[1] <= '9' { ++ s = s[2:] ++ n += 2 ++ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { ++ s = s[1:] ++ n++ ++ } ++ } ++ ++ // e or E followed by an optional - or + and ++ // 1 or more digits. ++ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { ++ s = s[1:] ++ n++ ++ if s[0] == '+' || s[0] == '-' { ++ s = s[1:] ++ n++ ++ if len(s) == 0 { ++ return 0, false ++ } ++ } ++ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { ++ s = s[1:] ++ n++ ++ } ++ } ++ ++ // Check that next byte is a delimiter or it is at the end. ++ if n < len(input) && isNotDelim(input[n]) { ++ return 0, false ++ } ++ ++ return n, true ++} ++ ++// numberParts is the result of parsing out a valid JSON number. It contains ++// the parts of a number. The parts are used for integer conversion. ++type numberParts struct { ++ neg bool ++ intp []byte ++ frac []byte ++ exp []byte ++} ++ ++// parseNumber constructs numberParts from given []byte. The logic here is ++// similar to consumeNumber above with the difference of having to construct ++// numberParts. The slice fields in numberParts are subslices of the input. ++func parseNumberParts(input []byte) (numberParts, bool) { ++ var neg bool ++ var intp []byte ++ var frac []byte ++ var exp []byte ++ ++ s := input ++ if len(s) == 0 { ++ return numberParts{}, false ++ } ++ ++ // Optional - ++ if s[0] == '-' { ++ neg = true ++ s = s[1:] ++ if len(s) == 0 { ++ return numberParts{}, false ++ } ++ } ++ ++ // Digits ++ switch { ++ case s[0] == '0': ++ // Skip first 0 and no need to store. ++ s = s[1:] ++ ++ case '1' <= s[0] && s[0] <= '9': ++ intp = s ++ n := 1 ++ s = s[1:] ++ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { ++ s = s[1:] ++ n++ ++ } ++ intp = intp[:n] ++ ++ default: ++ return numberParts{}, false ++ } ++ ++ // . followed by 1 or more digits. ++ if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { ++ frac = s[1:] ++ n := 1 ++ s = s[2:] ++ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { ++ s = s[1:] ++ n++ ++ } ++ frac = frac[:n] ++ } ++ ++ // e or E followed by an optional - or + and ++ // 1 or more digits. ++ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { ++ s = s[1:] ++ exp = s ++ n := 0 ++ if s[0] == '+' || s[0] == '-' { ++ s = s[1:] ++ n++ ++ if len(s) == 0 { ++ return numberParts{}, false ++ } ++ } ++ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { ++ s = s[1:] ++ n++ ++ } ++ exp = exp[:n] ++ } ++ ++ return numberParts{ ++ neg: neg, ++ intp: intp, ++ frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right. ++ exp: exp, ++ }, true ++} ++ ++// normalizeToIntString returns an integer string in normal form without the ++// E-notation for given numberParts. It will return false if it is not an ++// integer or if the exponent exceeds than max/min int value. ++func normalizeToIntString(n numberParts) (string, bool) { ++ intpSize := len(n.intp) ++ fracSize := len(n.frac) ++ ++ if intpSize == 0 && fracSize == 0 { ++ return "0", true ++ } ++ ++ var exp int ++ if len(n.exp) > 0 { ++ i, err := strconv.ParseInt(string(n.exp), 10, 32) ++ if err != nil { ++ return "", false ++ } ++ exp = int(i) ++ } ++ ++ var num []byte ++ if exp >= 0 { ++ // For positive E, shift fraction digits into integer part and also pad ++ // with zeroes as needed. ++ ++ // If there are more digits in fraction than the E value, then the ++ // number is not an integer. ++ if fracSize > exp { ++ return "", false ++ } ++ ++ // Make sure resulting digits are within max value limit to avoid ++ // unnecessarily constructing a large byte slice that may simply fail ++ // later on. 
++ const maxDigits = 20 // Max uint64 value has 20 decimal digits. ++ if intpSize+exp > maxDigits { ++ return "", false ++ } ++ ++ // Set cap to make a copy of integer part when appended. ++ num = n.intp[:len(n.intp):len(n.intp)] ++ num = append(num, n.frac...) ++ for i := 0; i < exp-fracSize; i++ { ++ num = append(num, '0') ++ } ++ } else { ++ // For negative E, shift digits in integer part out. ++ ++ // If there are fractions, then the number is not an integer. ++ if fracSize > 0 { ++ return "", false ++ } ++ ++ // index is where the decimal point will be after adjusting for negative ++ // exponent. ++ index := intpSize + exp ++ if index < 0 { ++ return "", false ++ } ++ ++ num = n.intp ++ // If any of the digits being shifted to the right of the decimal point ++ // is non-zero, then the number is not an integer. ++ for i := index; i < intpSize; i++ { ++ if num[i] != '0' { ++ return "", false ++ } ++ } ++ num = num[:index] ++ } ++ ++ if n.neg { ++ return "-" + string(num), true ++ } ++ return string(num), true ++} +diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go +new file mode 100644 +index 0000000..f7fea7d +--- /dev/null ++++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go +@@ -0,0 +1,91 @@ ++// Copyright 2018 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package json ++ ++import ( ++ "strconv" ++ "unicode" ++ "unicode/utf16" ++ "unicode/utf8" ++ ++ "google.golang.org/protobuf/internal/strs" ++) ++ ++func (d *Decoder) parseString(in []byte) (string, int, error) { ++ in0 := in ++ if len(in) == 0 { ++ return "", 0, ErrUnexpectedEOF ++ } ++ if in[0] != '"' { ++ return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0]) ++ } ++ in = in[1:] ++ i := indexNeedEscapeInBytes(in) ++ in, out := in[i:], in[:i:i] // set cap to prevent mutations ++ for len(in) > 0 { ++ switch r, n := utf8.DecodeRune(in); { ++ case r == utf8.RuneError && n == 1: ++ return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string") ++ case r < ' ': ++ return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r) ++ case r == '"': ++ in = in[1:] ++ n := len(in0) - len(in) ++ return string(out), n, nil ++ case r == '\\': ++ if len(in) < 2 { ++ return "", 0, ErrUnexpectedEOF ++ } ++ switch r := in[1]; r { ++ case '"', '\\', '/': ++ in, out = in[2:], append(out, r) ++ case 'b': ++ in, out = in[2:], append(out, '\b') ++ case 'f': ++ in, out = in[2:], append(out, '\f') ++ case 'n': ++ in, out = in[2:], append(out, '\n') ++ case 'r': ++ in, out = in[2:], append(out, '\r') ++ case 't': ++ in, out = in[2:], append(out, '\t') ++ case 'u': ++ if len(in) < 6 { ++ return "", 0, ErrUnexpectedEOF ++ } ++ v, err := strconv.ParseUint(string(in[2:6]), 16, 16) ++ if err != nil { ++ return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) ++ } ++ in = in[6:] ++ ++ r := rune(v) ++ if utf16.IsSurrogate(r) { ++ if len(in) < 6 { ++ return "", 0, ErrUnexpectedEOF ++ } ++ v, err := strconv.ParseUint(string(in[2:6]), 16, 16) ++ r = utf16.DecodeRune(r, rune(v)) ++ if in[0] != '\\' || in[1] != 'u' || ++ r == unicode.ReplacementChar || err != nil { ++ return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) ++ } ++ in = in[6:] ++ } ++ out = append(out, string(r)...) 
++ default: ++ return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2]) ++ } ++ default: ++ i := indexNeedEscapeInBytes(in[n:]) ++ in, out = in[n+i:], append(out, in[:n+i]...) ++ } ++ } ++ return "", 0, ErrUnexpectedEOF ++} ++ ++// indexNeedEscapeInBytes returns the index of the character that needs ++// escaping. If no characters need escaping, this returns the input length. ++func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } +diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go +new file mode 100644 +index 0000000..50578d6 +--- /dev/null ++++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go +@@ -0,0 +1,192 @@ ++// Copyright 2019 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package json ++ ++import ( ++ "bytes" ++ "fmt" ++ "strconv" ++) ++ ++// Kind represents a token kind expressible in the JSON format. ++type Kind uint16 ++ ++const ( ++ Invalid Kind = (1 << iota) / 2 ++ EOF ++ Null ++ Bool ++ Number ++ String ++ Name ++ ObjectOpen ++ ObjectClose ++ ArrayOpen ++ ArrayClose ++ ++ // comma is only for parsing in between tokens and ++ // does not need to be exported. ++ comma ++) ++ ++func (k Kind) String() string { ++ switch k { ++ case EOF: ++ return "eof" ++ case Null: ++ return "null" ++ case Bool: ++ return "bool" ++ case Number: ++ return "number" ++ case String: ++ return "string" ++ case ObjectOpen: ++ return "{" ++ case ObjectClose: ++ return "}" ++ case Name: ++ return "name" ++ case ArrayOpen: ++ return "[" ++ case ArrayClose: ++ return "]" ++ case comma: ++ return "," ++ } ++ return "" ++} ++ ++// Token provides a parsed token kind and value. ++// ++// Values are provided by the difference accessor methods. The accessor methods ++// Name, Bool, and ParsedString will panic if called on the wrong kind. There ++// are different accessor methods for the Number kind for converting to the ++// appropriate Go numeric type and those methods have the ok return value. ++type Token struct { ++ // Token kind. ++ kind Kind ++ // pos provides the position of the token in the original input. ++ pos int ++ // raw bytes of the serialized token. ++ // This is a subslice into the original input. ++ raw []byte ++ // boo is parsed boolean value. ++ boo bool ++ // str is parsed string value. ++ str string ++} ++ ++// Kind returns the token kind. ++func (t Token) Kind() Kind { ++ return t.kind ++} ++ ++// RawString returns the read value in string. ++func (t Token) RawString() string { ++ return string(t.raw) ++} ++ ++// Pos returns the token position from the input. ++func (t Token) Pos() int { ++ return t.pos ++} ++ ++// Name returns the object name if token is Name, else it panics. ++func (t Token) Name() string { ++ if t.kind == Name { ++ return t.str ++ } ++ panic(fmt.Sprintf("Token is not a Name: %v", t.RawString())) ++} ++ ++// Bool returns the bool value if token kind is Bool, else it panics. ++func (t Token) Bool() bool { ++ if t.kind == Bool { ++ return t.boo ++ } ++ panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString())) ++} ++ ++// ParsedString returns the string value for a JSON string token or the read ++// value in string if token is not a string. 
++func (t Token) ParsedString() string { ++ if t.kind == String { ++ return t.str ++ } ++ panic(fmt.Sprintf("Token is not a String: %v", t.RawString())) ++} ++ ++// Float returns the floating-point number if token kind is Number. ++// ++// The floating-point precision is specified by the bitSize parameter: 32 for ++// float32 or 64 for float64. If bitSize=32, the result still has type float64, ++// but it will be convertible to float32 without changing its value. It will ++// return false if the number exceeds the floating point limits for given ++// bitSize. ++func (t Token) Float(bitSize int) (float64, bool) { ++ if t.kind != Number { ++ return 0, false ++ } ++ f, err := strconv.ParseFloat(t.RawString(), bitSize) ++ if err != nil { ++ return 0, false ++ } ++ return f, true ++} ++ ++// Int returns the signed integer number if token is Number. ++// ++// The given bitSize specifies the integer type that the result must fit into. ++// It returns false if the number is not an integer value or if the result ++// exceeds the limits for given bitSize. ++func (t Token) Int(bitSize int) (int64, bool) { ++ s, ok := t.getIntStr() ++ if !ok { ++ return 0, false ++ } ++ n, err := strconv.ParseInt(s, 10, bitSize) ++ if err != nil { ++ return 0, false ++ } ++ return n, true ++} ++ ++// Uint returns the signed integer number if token is Number. ++// ++// The given bitSize specifies the unsigned integer type that the result must ++// fit into. It returns false if the number is not an unsigned integer value ++// or if the result exceeds the limits for given bitSize. ++func (t Token) Uint(bitSize int) (uint64, bool) { ++ s, ok := t.getIntStr() ++ if !ok { ++ return 0, false ++ } ++ n, err := strconv.ParseUint(s, 10, bitSize) ++ if err != nil { ++ return 0, false ++ } ++ return n, true ++} ++ ++func (t Token) getIntStr() (string, bool) { ++ if t.kind != Number { ++ return "", false ++ } ++ parts, ok := parseNumberParts(t.raw) ++ if !ok { ++ return "", false ++ } ++ return normalizeToIntString(parts) ++} ++ ++// TokenEquals returns true if given Tokens are equal, else false. ++func TokenEquals(x, y Token) bool { ++ return x.kind == y.kind && ++ x.pos == y.pos && ++ bytes.Equal(x.raw, y.raw) && ++ x.boo == y.boo && ++ x.str == y.str ++} +diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go +new file mode 100644 +index 0000000..fbdf348 +--- /dev/null ++++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go +@@ -0,0 +1,276 @@ ++// Copyright 2018 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package json ++ ++import ( ++ "math" ++ "math/bits" ++ "strconv" ++ "strings" ++ "unicode/utf8" ++ ++ "google.golang.org/protobuf/internal/detrand" ++ "google.golang.org/protobuf/internal/errors" ++) ++ ++// kind represents an encoding type. ++type kind uint8 ++ ++const ( ++ _ kind = (1 << iota) / 2 ++ name ++ scalar ++ objectOpen ++ objectClose ++ arrayOpen ++ arrayClose ++) ++ ++// Encoder provides methods to write out JSON constructs and values. The user is ++// responsible for producing valid sequences of JSON constructs and values. ++type Encoder struct { ++ indent string ++ lastKind kind ++ indents []byte ++ out []byte ++} ++ ++// NewEncoder returns an Encoder. ++// ++// If indent is a non-empty string, it causes every entry for an Array or Object ++// to be preceded by the indent and trailed by a newline. 
++func NewEncoder(indent string) (*Encoder, error) { ++ e := &Encoder{} ++ if len(indent) > 0 { ++ if strings.Trim(indent, " \t") != "" { ++ return nil, errors.New("indent may only be composed of space or tab characters") ++ } ++ e.indent = indent ++ } ++ return e, nil ++} ++ ++// Bytes returns the content of the written bytes. ++func (e *Encoder) Bytes() []byte { ++ return e.out ++} ++ ++// WriteNull writes out the null value. ++func (e *Encoder) WriteNull() { ++ e.prepareNext(scalar) ++ e.out = append(e.out, "null"...) ++} ++ ++// WriteBool writes out the given boolean value. ++func (e *Encoder) WriteBool(b bool) { ++ e.prepareNext(scalar) ++ if b { ++ e.out = append(e.out, "true"...) ++ } else { ++ e.out = append(e.out, "false"...) ++ } ++} ++ ++// WriteString writes out the given string in JSON string value. Returns error ++// if input string contains invalid UTF-8. ++func (e *Encoder) WriteString(s string) error { ++ e.prepareNext(scalar) ++ var err error ++ if e.out, err = appendString(e.out, s); err != nil { ++ return err ++ } ++ return nil ++} ++ ++// Sentinel error used for indicating invalid UTF-8. ++var errInvalidUTF8 = errors.New("invalid UTF-8") ++ ++func appendString(out []byte, in string) ([]byte, error) { ++ out = append(out, '"') ++ i := indexNeedEscapeInString(in) ++ in, out = in[i:], append(out, in[:i]...) ++ for len(in) > 0 { ++ switch r, n := utf8.DecodeRuneInString(in); { ++ case r == utf8.RuneError && n == 1: ++ return out, errInvalidUTF8 ++ case r < ' ' || r == '"' || r == '\\': ++ out = append(out, '\\') ++ switch r { ++ case '"', '\\': ++ out = append(out, byte(r)) ++ case '\b': ++ out = append(out, 'b') ++ case '\f': ++ out = append(out, 'f') ++ case '\n': ++ out = append(out, 'n') ++ case '\r': ++ out = append(out, 'r') ++ case '\t': ++ out = append(out, 't') ++ default: ++ out = append(out, 'u') ++ out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) ++ out = strconv.AppendUint(out, uint64(r), 16) ++ } ++ in = in[n:] ++ default: ++ i := indexNeedEscapeInString(in[n:]) ++ in, out = in[n+i:], append(out, in[:n+i]...) ++ } ++ } ++ out = append(out, '"') ++ return out, nil ++} ++ ++// indexNeedEscapeInString returns the index of the character that needs ++// escaping. If no characters need escaping, this returns the input length. ++func indexNeedEscapeInString(s string) int { ++ for i, r := range s { ++ if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError { ++ return i ++ } ++ } ++ return len(s) ++} ++ ++// WriteFloat writes out the given float and bitSize in JSON number value. ++func (e *Encoder) WriteFloat(n float64, bitSize int) { ++ e.prepareNext(scalar) ++ e.out = appendFloat(e.out, n, bitSize) ++} ++ ++// appendFloat formats given float in bitSize, and appends to the given []byte. ++func appendFloat(out []byte, n float64, bitSize int) []byte { ++ switch { ++ case math.IsNaN(n): ++ return append(out, `"NaN"`...) ++ case math.IsInf(n, +1): ++ return append(out, `"Infinity"`...) ++ case math.IsInf(n, -1): ++ return append(out, `"-Infinity"`...) ++ } ++ ++ // JSON number formatting logic based on encoding/json. ++ // See floatEncoder.encode for reference. 
++ fmt := byte('f') ++ if abs := math.Abs(n); abs != 0 { ++ if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || ++ bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { ++ fmt = 'e' ++ } ++ } ++ out = strconv.AppendFloat(out, n, fmt, -1, bitSize) ++ if fmt == 'e' { ++ n := len(out) ++ if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' { ++ out[n-2] = out[n-1] ++ out = out[:n-1] ++ } ++ } ++ return out ++} ++ ++// WriteInt writes out the given signed integer in JSON number value. ++func (e *Encoder) WriteInt(n int64) { ++ e.prepareNext(scalar) ++ e.out = append(e.out, strconv.FormatInt(n, 10)...) ++} ++ ++// WriteUint writes out the given unsigned integer in JSON number value. ++func (e *Encoder) WriteUint(n uint64) { ++ e.prepareNext(scalar) ++ e.out = append(e.out, strconv.FormatUint(n, 10)...) ++} ++ ++// StartObject writes out the '{' symbol. ++func (e *Encoder) StartObject() { ++ e.prepareNext(objectOpen) ++ e.out = append(e.out, '{') ++} ++ ++// EndObject writes out the '}' symbol. ++func (e *Encoder) EndObject() { ++ e.prepareNext(objectClose) ++ e.out = append(e.out, '}') ++} ++ ++// WriteName writes out the given string in JSON string value and the name ++// separator ':'. Returns error if input string contains invalid UTF-8, which ++// should not be likely as protobuf field names should be valid. ++func (e *Encoder) WriteName(s string) error { ++ e.prepareNext(name) ++ var err error ++ // Append to output regardless of error. ++ e.out, err = appendString(e.out, s) ++ e.out = append(e.out, ':') ++ return err ++} ++ ++// StartArray writes out the '[' symbol. ++func (e *Encoder) StartArray() { ++ e.prepareNext(arrayOpen) ++ e.out = append(e.out, '[') ++} ++ ++// EndArray writes out the ']' symbol. ++func (e *Encoder) EndArray() { ++ e.prepareNext(arrayClose) ++ e.out = append(e.out, ']') ++} ++ ++// prepareNext adds possible comma and indentation for the next value based ++// on last type and indent option. It also updates lastKind to next. ++func (e *Encoder) prepareNext(next kind) { ++ defer func() { ++ // Set lastKind to next. ++ e.lastKind = next ++ }() ++ ++ if len(e.indent) == 0 { ++ // Need to add comma on the following condition. ++ if e.lastKind&(scalar|objectClose|arrayClose) != 0 && ++ next&(name|scalar|objectOpen|arrayOpen) != 0 { ++ e.out = append(e.out, ',') ++ // For single-line output, add a random extra space after each ++ // comma to make output unstable. ++ if detrand.Bool() { ++ e.out = append(e.out, ' ') ++ } ++ } ++ return ++ } ++ ++ switch { ++ case e.lastKind&(objectOpen|arrayOpen) != 0: ++ // If next type is NOT closing, add indent and newline. ++ if next&(objectClose|arrayClose) == 0 { ++ e.indents = append(e.indents, e.indent...) ++ e.out = append(e.out, '\n') ++ e.out = append(e.out, e.indents...) ++ } ++ ++ case e.lastKind&(scalar|objectClose|arrayClose) != 0: ++ switch { ++ // If next type is either a value or name, add comma and newline. ++ case next&(name|scalar|objectOpen|arrayOpen) != 0: ++ e.out = append(e.out, ',', '\n') ++ ++ // If next type is a closing object or array, adjust indentation. ++ case next&(objectClose|arrayClose) != 0: ++ e.indents = e.indents[:len(e.indents)-len(e.indent)] ++ e.out = append(e.out, '\n') ++ } ++ e.out = append(e.out, e.indents...) ++ ++ case e.lastKind&name != 0: ++ e.out = append(e.out, ' ') ++ // For multi-line output, add a random extra space after key: to make ++ // output unstable. 
++ if detrand.Bool() { ++ e.out = append(e.out, ' ') ++ } ++ } ++} +diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go +index c1866f3..a6693f0 100644 +--- a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go ++++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go +@@ -10,7 +10,7 @@ import ( + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + // The MessageSet wire format is equivalent to a message defined as follows, +@@ -33,6 +33,7 @@ const ( + // ExtensionName is the field name for extensions of MessageSet. + // + // A valid MessageSet extension must be of the form: ++// + // message MyMessage { + // extend proto2.bridge.MessageSet { + // optional MyMessage message_set_extension = 1234; +@@ -42,13 +43,13 @@ const ( + const ExtensionName = "message_set_extension" + + // IsMessageSet returns whether the message uses the MessageSet wire format. +-func IsMessageSet(md pref.MessageDescriptor) bool { ++func IsMessageSet(md protoreflect.MessageDescriptor) bool { + xmd, ok := md.(interface{ IsMessageSet() bool }) + return ok && xmd.IsMessageSet() + } + + // IsMessageSetExtension reports this field properly extends a MessageSet. +-func IsMessageSetExtension(fd pref.FieldDescriptor) bool { ++func IsMessageSetExtension(fd protoreflect.FieldDescriptor) bool { + switch { + case fd.Name() != ExtensionName: + return false +diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +index 38f1931..373d208 100644 +--- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go ++++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +@@ -11,10 +11,10 @@ import ( + "strconv" + "strings" + +- defval "google.golang.org/protobuf/internal/encoding/defval" +- fdesc "google.golang.org/protobuf/internal/filedesc" ++ "google.golang.org/protobuf/internal/encoding/defval" ++ "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + var byteType = reflect.TypeOf(byte(0)) +@@ -29,9 +29,9 @@ var byteType = reflect.TypeOf(byte(0)) + // This does not populate the Enum or Message (except for weak message). + // + // This function is a best effort attempt; parsing errors are ignored. 
+-func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor { +- f := new(fdesc.Field) +- f.L0.ParentFile = fdesc.SurrogateProto2 ++func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { ++ f := new(filedesc.Field) ++ f.L0.ParentFile = filedesc.SurrogateProto2 + for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { +@@ -39,68 +39,68 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): +- f.L0.FullName = pref.FullName(s[len("name="):]) ++ f.L0.FullName = protoreflect.FullName(s[len("name="):]) + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) +- f.L1.Number = pref.FieldNumber(n) ++ f.L1.Number = protoreflect.FieldNumber(n) + case s == "opt": +- f.L1.Cardinality = pref.Optional ++ f.L1.Cardinality = protoreflect.Optional + case s == "req": +- f.L1.Cardinality = pref.Required ++ f.L1.Cardinality = protoreflect.Required + case s == "rep": +- f.L1.Cardinality = pref.Repeated ++ f.L1.Cardinality = protoreflect.Repeated + case s == "varint": + switch goType.Kind() { + case reflect.Bool: +- f.L1.Kind = pref.BoolKind ++ f.L1.Kind = protoreflect.BoolKind + case reflect.Int32: +- f.L1.Kind = pref.Int32Kind ++ f.L1.Kind = protoreflect.Int32Kind + case reflect.Int64: +- f.L1.Kind = pref.Int64Kind ++ f.L1.Kind = protoreflect.Int64Kind + case reflect.Uint32: +- f.L1.Kind = pref.Uint32Kind ++ f.L1.Kind = protoreflect.Uint32Kind + case reflect.Uint64: +- f.L1.Kind = pref.Uint64Kind ++ f.L1.Kind = protoreflect.Uint64Kind + } + case s == "zigzag32": + if goType.Kind() == reflect.Int32 { +- f.L1.Kind = pref.Sint32Kind ++ f.L1.Kind = protoreflect.Sint32Kind + } + case s == "zigzag64": + if goType.Kind() == reflect.Int64 { +- f.L1.Kind = pref.Sint64Kind ++ f.L1.Kind = protoreflect.Sint64Kind + } + case s == "fixed32": + switch goType.Kind() { + case reflect.Int32: +- f.L1.Kind = pref.Sfixed32Kind ++ f.L1.Kind = protoreflect.Sfixed32Kind + case reflect.Uint32: +- f.L1.Kind = pref.Fixed32Kind ++ f.L1.Kind = protoreflect.Fixed32Kind + case reflect.Float32: +- f.L1.Kind = pref.FloatKind ++ f.L1.Kind = protoreflect.FloatKind + } + case s == "fixed64": + switch goType.Kind() { + case reflect.Int64: +- f.L1.Kind = pref.Sfixed64Kind ++ f.L1.Kind = protoreflect.Sfixed64Kind + case reflect.Uint64: +- f.L1.Kind = pref.Fixed64Kind ++ f.L1.Kind = protoreflect.Fixed64Kind + case reflect.Float64: +- f.L1.Kind = pref.DoubleKind ++ f.L1.Kind = protoreflect.DoubleKind + } + case s == "bytes": + switch { + case goType.Kind() == reflect.String: +- f.L1.Kind = pref.StringKind ++ f.L1.Kind = protoreflect.StringKind + case goType.Kind() == reflect.Slice && goType.Elem() == byteType: +- f.L1.Kind = pref.BytesKind ++ f.L1.Kind = protoreflect.BytesKind + default: +- f.L1.Kind = pref.MessageKind ++ f.L1.Kind = protoreflect.MessageKind + } + case s == "group": +- f.L1.Kind = pref.GroupKind ++ f.L1.Kind = protoreflect.GroupKind + case strings.HasPrefix(s, "enum="): +- f.L1.Kind = pref.EnumKind ++ f.L1.Kind = protoreflect.EnumKind + case strings.HasPrefix(s, "json="): + jsonName := s[len("json="):] + if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { +@@ -111,23 +111,23 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p + f.L1.IsPacked = true + case strings.HasPrefix(s, "weak="): + f.L1.IsWeak = true +- f.L1.Message = 
fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):])) ++ f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. + s, i = tag[len("def="):], len(tag) + v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag) +- f.L1.Default = fdesc.DefaultValue(v, ev) ++ f.L1.Default = filedesc.DefaultValue(v, ev) + case s == "proto3": +- f.L0.ParentFile = fdesc.SurrogateProto3 ++ f.L0.ParentFile = filedesc.SurrogateProto3 + } + tag = strings.TrimPrefix(tag[i:], ",") + } + + // The generator uses the group message name instead of the field name. + // We obtain the real field name by lowercasing the group name. +- if f.L1.Kind == pref.GroupKind { +- f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName))) ++ if f.L1.Kind == protoreflect.GroupKind { ++ f.L0.FullName = protoreflect.FullName(strings.ToLower(string(f.L0.FullName))) + } + return f + } +@@ -140,38 +140,38 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p + // Depending on the context on how Marshal is called, there are different ways + // through which that information is determined. As such it is the caller's + // responsibility to provide a function to obtain that information. +-func Marshal(fd pref.FieldDescriptor, enumName string) string { ++func Marshal(fd protoreflect.FieldDescriptor, enumName string) string { + var tag []string + switch fd.Kind() { +- case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind: ++ case protoreflect.BoolKind, protoreflect.EnumKind, protoreflect.Int32Kind, protoreflect.Uint32Kind, protoreflect.Int64Kind, protoreflect.Uint64Kind: + tag = append(tag, "varint") +- case pref.Sint32Kind: ++ case protoreflect.Sint32Kind: + tag = append(tag, "zigzag32") +- case pref.Sint64Kind: ++ case protoreflect.Sint64Kind: + tag = append(tag, "zigzag64") +- case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind: ++ case protoreflect.Sfixed32Kind, protoreflect.Fixed32Kind, protoreflect.FloatKind: + tag = append(tag, "fixed32") +- case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind: ++ case protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind, protoreflect.DoubleKind: + tag = append(tag, "fixed64") +- case pref.StringKind, pref.BytesKind, pref.MessageKind: ++ case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind: + tag = append(tag, "bytes") +- case pref.GroupKind: ++ case protoreflect.GroupKind: + tag = append(tag, "group") + } + tag = append(tag, strconv.Itoa(int(fd.Number()))) + switch fd.Cardinality() { +- case pref.Optional: ++ case protoreflect.Optional: + tag = append(tag, "opt") +- case pref.Required: ++ case protoreflect.Required: + tag = append(tag, "req") +- case pref.Repeated: ++ case protoreflect.Repeated: + tag = append(tag, "rep") + } + if fd.IsPacked() { + tag = append(tag, "packed") + } + name := string(fd.Name()) +- if fd.Kind() == pref.GroupKind { ++ if fd.Kind() == protoreflect.GroupKind { + // The name of the FieldDescriptor for a group field is + // lowercased. To find the original capitalization, we + // look in the field's MessageType. +@@ -189,10 +189,10 @@ func Marshal(fd pref.FieldDescriptor, enumName string) string { + // The previous implementation does not tag extension fields as proto3, + // even when the field is defined in a proto3 file. Match that behavior + // for consistency. 
+- if fd.Syntax() == pref.Proto3 && !fd.IsExtension() { ++ if fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension() { + tag = append(tag, "proto3") + } +- if fd.Kind() == pref.EnumKind && enumName != "" { ++ if fd.Kind() == protoreflect.EnumKind && enumName != "" { + tag = append(tag, "enum="+enumName) + } + if fd.ContainingOneof() != nil { +diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +index 3780377..87853e7 100644 +--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go ++++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +@@ -8,7 +8,6 @@ import ( + "bytes" + "fmt" + "io" +- "regexp" + "strconv" + "unicode/utf8" + +@@ -413,15 +412,16 @@ func (d *Decoder) parseFieldName() (tok Token, err error) { + // Field number. Identify if input is a valid number that is not negative + // and is decimal integer within 32-bit range. + if num := parseNumber(d.in); num.size > 0 { ++ str := num.string(d.in) + if !num.neg && num.kind == numDec { +- if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil { ++ if _, err := strconv.ParseInt(str, 10, 32); err == nil { + return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil + } + } +- return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) ++ return Token{}, d.newSyntaxError("invalid field number: %s", str) + } + +- return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in)) ++ return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in)) + } + + // parseTypeName parses Any type URL or extension field name. The name is +@@ -571,7 +571,7 @@ func (d *Decoder) parseScalar() (Token, error) { + return tok, nil + } + +- return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in)) ++ return Token{}, d.newSyntaxError("invalid scalar value: %s", errId(d.in)) + } + + // parseLiteralValue parses a literal value. A literal value is used for +@@ -653,8 +653,29 @@ func consume(b []byte, n int) []byte { + return b + } + +-// Any sequence that looks like a non-delimiter (for error reporting). +-var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`) ++// errId extracts a byte sequence that looks like an invalid ID ++// (for the purposes of error reporting). ++func errId(seq []byte) []byte { ++ const maxLen = 32 ++ for i := 0; i < len(seq); { ++ if i > maxLen { ++ return append(seq[:i:i], "…"...) ++ } ++ r, size := utf8.DecodeRune(seq[i:]) ++ if r > utf8.RuneSelf || (r != '/' && isDelim(byte(r))) { ++ if i == 0 { ++ // Either the first byte is invalid UTF-8 or a ++ // delimiter, or the first rune is non-ASCII. ++ // Return it as-is. ++ i = size ++ } ++ return seq[:i:i] ++ } ++ i += size ++ } ++ // No delimiter found. ++ return seq ++} + + // isDelim returns true if given byte is a delimiter character. 
+ func isDelim(c byte) bool { +diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go +index f2d90b7..45c81f0 100644 +--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go ++++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go +@@ -15,17 +15,12 @@ func (d *Decoder) parseNumberValue() (Token, bool) { + if num.neg { + numAttrs |= isNegative + } +- strSize := num.size +- last := num.size - 1 +- if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') { +- strSize = last +- } + tok := Token{ + kind: Scalar, + attrs: numberValue, + pos: len(d.orig) - len(d.in), + raw: d.in[:num.size], +- str: string(d.in[:strSize]), ++ str: num.string(d.in), + numAttrs: numAttrs, + } + d.consume(num.size) +@@ -46,12 +41,35 @@ type number struct { + kind uint8 + neg bool + size int ++ // if neg, this is the length of whitespace and comments between ++ // the minus sign and the rest fo the number literal ++ sep int ++} ++ ++func (num number) string(data []byte) string { ++ strSize := num.size ++ last := num.size - 1 ++ if num.kind == numFloat && (data[last] == 'f' || data[last] == 'F') { ++ strSize = last ++ } ++ if num.neg && num.sep > 0 { ++ // strip whitespace/comments between negative sign and the rest ++ strLen := strSize - num.sep ++ str := make([]byte, strLen) ++ str[0] = data[0] ++ copy(str[1:], data[num.sep+1:strSize]) ++ return string(str) ++ } ++ return string(data[:strSize]) ++ + } + + // parseNumber constructs a number object from given input. It allows for the + // following patterns: +-// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) +-// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) ++// ++// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) ++// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) ++// + // It also returns the number of parsed bytes for the given number, 0 if it is + // not a number. + func parseNumber(input []byte) number { +@@ -65,19 +83,22 @@ func parseNumber(input []byte) number { + } + + // Optional - ++ var sep int + if s[0] == '-' { + neg = true + s = s[1:] + size++ ++ // Consume any whitespace or comments between the ++ // negative sign and the rest of the number ++ lenBefore := len(s) ++ s = consume(s, 0) ++ sep = lenBefore - len(s) ++ size += sep + if len(s) == 0 { + return number{} + } + } + +- // C++ allows for whitespace and comments in between the negative sign and +- // the rest of the number. This logic currently does not but is consistent +- // with v1. +- + switch { + case s[0] == '0': + if len(s) > 1 { +@@ -114,7 +135,7 @@ func parseNumber(input []byte) number { + if len(s) > 0 && !isDelim(s[0]) { + return number{} + } +- return number{kind: kind, neg: neg, size: size} ++ return number{kind: kind, neg: neg, size: size, sep: sep} + } + } + s = s[1:] +@@ -186,5 +207,5 @@ func parseNumber(input []byte) number { + return number{} + } + +- return number{kind: kind, neg: neg, size: size} ++ return number{kind: kind, neg: neg, size: size, sep: sep} + } +diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go +index 0ce8d6f..7ae6c2a 100644 +--- a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go ++++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go +@@ -24,6 +24,6 @@ + // the Go implementation should as well. 
+ // + // The text format is almost a superset of JSON except: +-// * message keys are not quoted strings, but identifiers +-// * the top-level value must be a message without the delimiters ++// - message keys are not quoted strings, but identifiers ++// - the top-level value must be a message without the delimiters + package text +diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go +index b293b69..7cac1c1 100644 +--- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go ++++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go +@@ -12,8 +12,7 @@ import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" +- pref "google.golang.org/protobuf/reflect/protoreflect" +- preg "google.golang.org/protobuf/reflect/protoregistry" ++ "google.golang.org/protobuf/reflect/protoregistry" + ) + + // Builder construct a protoreflect.FileDescriptor from the raw descriptor. +@@ -38,7 +37,7 @@ type Builder struct { + // TypeResolver resolves extension field types for descriptor options. + // If nil, it uses protoregistry.GlobalTypes. + TypeResolver interface { +- preg.ExtensionTypeResolver ++ protoregistry.ExtensionTypeResolver + } + + // FileRegistry is use to lookup file, enum, and message dependencies. +@@ -46,8 +45,8 @@ type Builder struct { + // If nil, it uses protoregistry.GlobalFiles. + FileRegistry interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) +- FindDescriptorByName(pref.FullName) (pref.Descriptor, error) +- RegisterFile(pref.FileDescriptor) error ++ FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) ++ RegisterFile(protoreflect.FileDescriptor) error + } + } + +@@ -55,8 +54,8 @@ type Builder struct { + // If so, it permits looking up an enum or message dependency based on the + // sub-list and element index into filetype.Builder.DependencyIndexes. + type resolverByIndex interface { +- FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor +- FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor ++ FindEnumByIndex(int32, int32, []Enum, []Message) protoreflect.EnumDescriptor ++ FindMessageByIndex(int32, int32, []Enum, []Message) protoreflect.MessageDescriptor + } + + // Indexes of each sub-list in filetype.Builder.DependencyIndexes. +@@ -70,7 +69,7 @@ const ( + + // Out is the output of the Builder. + type Out struct { +- File pref.FileDescriptor ++ File protoreflect.FileDescriptor + + // Enums is all enum descriptors in "flattened ordering". + Enums []Enum +@@ -97,10 +96,10 @@ func (db Builder) Build() (out Out) { + + // Initialize resolvers and registries if unpopulated. 
+ if db.TypeResolver == nil { +- db.TypeResolver = preg.GlobalTypes ++ db.TypeResolver = protoregistry.GlobalTypes + } + if db.FileRegistry == nil { +- db.FileRegistry = preg.GlobalFiles ++ db.FileRegistry = protoregistry.GlobalFiles + } + + fd := newRawFile(db) +diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +index 98ab142..7c3689b 100644 +--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go ++++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +@@ -17,7 +17,7 @@ import ( + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + ) + +@@ -43,9 +43,9 @@ type ( + L2 *FileL2 + } + FileL1 struct { +- Syntax pref.Syntax ++ Syntax protoreflect.Syntax + Path string +- Package pref.FullName ++ Package protoreflect.FullName + + Enums Enums + Messages Messages +@@ -53,36 +53,36 @@ type ( + Services Services + } + FileL2 struct { +- Options func() pref.ProtoMessage ++ Options func() protoreflect.ProtoMessage + Imports FileImports + Locations SourceLocations + } + ) + +-func (fd *File) ParentFile() pref.FileDescriptor { return fd } +-func (fd *File) Parent() pref.Descriptor { return nil } +-func (fd *File) Index() int { return 0 } +-func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax } +-func (fd *File) Name() pref.Name { return fd.L1.Package.Name() } +-func (fd *File) FullName() pref.FullName { return fd.L1.Package } +-func (fd *File) IsPlaceholder() bool { return false } +-func (fd *File) Options() pref.ProtoMessage { ++func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } ++func (fd *File) Parent() protoreflect.Descriptor { return nil } ++func (fd *File) Index() int { return 0 } ++func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } ++func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } ++func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } ++func (fd *File) IsPlaceholder() bool { return false } ++func (fd *File) Options() protoreflect.ProtoMessage { + if f := fd.lazyInit().Options; f != nil { + return f() + } + return descopts.File + } +-func (fd *File) Path() string { return fd.L1.Path } +-func (fd *File) Package() pref.FullName { return fd.L1.Package } +-func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports } +-func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums } +-func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages } +-func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions } +-func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services } +-func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations } +-func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +-func (fd *File) ProtoType(pref.FileDescriptor) {} +-func (fd *File) ProtoInternal(pragma.DoNotImplement) {} ++func (fd *File) Path() string { return fd.L1.Path } ++func (fd *File) Package() protoreflect.FullName { return fd.L1.Package } ++func (fd *File) Imports() protoreflect.FileImports { return &fd.lazyInit().Imports } ++func (fd *File) Enums() protoreflect.EnumDescriptors { return &fd.L1.Enums } ++func (fd *File) Messages() 
protoreflect.MessageDescriptors { return &fd.L1.Messages } ++func (fd *File) Extensions() protoreflect.ExtensionDescriptors { return &fd.L1.Extensions } ++func (fd *File) Services() protoreflect.ServiceDescriptors { return &fd.L1.Services } ++func (fd *File) SourceLocations() protoreflect.SourceLocations { return &fd.lazyInit().Locations } ++func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } ++func (fd *File) ProtoType(protoreflect.FileDescriptor) {} ++func (fd *File) ProtoInternal(pragma.DoNotImplement) {} + + func (fd *File) lazyInit() *FileL2 { + if atomic.LoadUint32(&fd.once) == 0 { +@@ -119,7 +119,7 @@ type ( + eagerValues bool // controls whether EnumL2.Values is already populated + } + EnumL2 struct { +- Options func() pref.ProtoMessage ++ Options func() protoreflect.ProtoMessage + Values EnumValues + ReservedNames Names + ReservedRanges EnumRanges +@@ -130,41 +130,41 @@ type ( + L1 EnumValueL1 + } + EnumValueL1 struct { +- Options func() pref.ProtoMessage +- Number pref.EnumNumber ++ Options func() protoreflect.ProtoMessage ++ Number protoreflect.EnumNumber + } + ) + +-func (ed *Enum) Options() pref.ProtoMessage { ++func (ed *Enum) Options() protoreflect.ProtoMessage { + if f := ed.lazyInit().Options; f != nil { + return f() + } + return descopts.Enum + } +-func (ed *Enum) Values() pref.EnumValueDescriptors { ++func (ed *Enum) Values() protoreflect.EnumValueDescriptors { + if ed.L1.eagerValues { + return &ed.L2.Values + } + return &ed.lazyInit().Values + } +-func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames } +-func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges } +-func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +-func (ed *Enum) ProtoType(pref.EnumDescriptor) {} ++func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit().ReservedNames } ++func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges } ++func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } ++func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {} + func (ed *Enum) lazyInit() *EnumL2 { + ed.L0.ParentFile.lazyInit() // implicitly initializes L2 + return ed.L2 + } + +-func (ed *EnumValue) Options() pref.ProtoMessage { ++func (ed *EnumValue) Options() protoreflect.ProtoMessage { + if f := ed.L1.Options; f != nil { + return f() + } + return descopts.EnumValue + } +-func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number } +-func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +-func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {} ++func (ed *EnumValue) Number() protoreflect.EnumNumber { return ed.L1.Number } ++func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } ++func (ed *EnumValue) ProtoType(protoreflect.EnumValueDescriptor) {} + + type ( + Message struct { +@@ -180,14 +180,14 @@ type ( + IsMessageSet bool // promoted from google.protobuf.MessageOptions + } + MessageL2 struct { +- Options func() pref.ProtoMessage ++ Options func() protoreflect.ProtoMessage + Fields Fields + Oneofs Oneofs + ReservedNames Names + ReservedRanges FieldRanges + RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality + ExtensionRanges FieldRanges +- ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges ++ ExtensionRangeOptions []func() protoreflect.ProtoMessage // must be same length as ExtensionRanges + } + + 
Field struct { +@@ -195,10 +195,10 @@ type ( + L1 FieldL1 + } + FieldL1 struct { +- Options func() pref.ProtoMessage +- Number pref.FieldNumber +- Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers +- Kind pref.Kind ++ Options func() protoreflect.ProtoMessage ++ Number protoreflect.FieldNumber ++ Cardinality protoreflect.Cardinality // must be consistent with Message.RequiredNumbers ++ Kind protoreflect.Kind + StringName stringName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsWeak bool // promoted from google.protobuf.FieldOptions +@@ -207,9 +207,9 @@ type ( + HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions + EnforceUTF8 bool // promoted from google.protobuf.FieldOptions + Default defaultValue +- ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields +- Enum pref.EnumDescriptor +- Message pref.MessageDescriptor ++ ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields ++ Enum protoreflect.EnumDescriptor ++ Message protoreflect.MessageDescriptor + } + + Oneof struct { +@@ -217,35 +217,35 @@ type ( + L1 OneofL1 + } + OneofL1 struct { +- Options func() pref.ProtoMessage ++ Options func() protoreflect.ProtoMessage + Fields OneofFields // must be consistent with Message.Fields.ContainingOneof + } + ) + +-func (md *Message) Options() pref.ProtoMessage { ++func (md *Message) Options() protoreflect.ProtoMessage { + if f := md.lazyInit().Options; f != nil { + return f() + } + return descopts.Message + } +-func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } +-func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields } +-func (md *Message) Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs } +-func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames } +-func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges } +-func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers } +-func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges } +-func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage { ++func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } ++func (md *Message) Fields() protoreflect.FieldDescriptors { return &md.lazyInit().Fields } ++func (md *Message) Oneofs() protoreflect.OneofDescriptors { return &md.lazyInit().Oneofs } ++func (md *Message) ReservedNames() protoreflect.Names { return &md.lazyInit().ReservedNames } ++func (md *Message) ReservedRanges() protoreflect.FieldRanges { return &md.lazyInit().ReservedRanges } ++func (md *Message) RequiredNumbers() protoreflect.FieldNumbers { return &md.lazyInit().RequiredNumbers } ++func (md *Message) ExtensionRanges() protoreflect.FieldRanges { return &md.lazyInit().ExtensionRanges } ++func (md *Message) ExtensionRangeOptions(i int) protoreflect.ProtoMessage { + if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil { + return f() + } + return descopts.ExtensionRange + } +-func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums } +-func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages } +-func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions } +-func (md *Message) ProtoType(pref.MessageDescriptor) {} +-func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } ++func (md *Message) Enums() 
protoreflect.EnumDescriptors { return &md.L1.Enums } ++func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L1.Messages } ++func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions } ++func (md *Message) ProtoType(protoreflect.MessageDescriptor) {} ++func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } + func (md *Message) lazyInit() *MessageL2 { + md.L0.ParentFile.lazyInit() // implicitly initializes L2 + return md.L2 +@@ -260,28 +260,28 @@ func (md *Message) IsMessageSet() bool { + return md.L1.IsMessageSet + } + +-func (fd *Field) Options() pref.ProtoMessage { ++func (fd *Field) Options() protoreflect.ProtoMessage { + if f := fd.L1.Options; f != nil { + return f() + } + return descopts.Field + } +-func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } +-func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } +-func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } +-func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +-func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +-func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } ++func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } ++func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } ++func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind } ++func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } ++func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } ++func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } + func (fd *Field) HasPresence() bool { +- return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) ++ return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + } + func (fd *Field) HasOptionalKeyword() bool { +- return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional ++ return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional + } + func (fd *Field) IsPacked() bool { +- if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated { ++ if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { + switch fd.L1.Kind { +- case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind: ++ case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + default: + return true + } +@@ -290,40 +290,40 @@ func (fd *Field) IsPacked() bool { + } + func (fd *Field) IsExtension() bool { return false } + func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +-func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() } ++func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } + func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } +-func (fd *Field) MapKey() pref.FieldDescriptor { ++func (fd *Field) MapKey() protoreflect.FieldDescriptor { + if !fd.IsMap() { + return nil + } + return 
fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) + } +-func (fd *Field) MapValue() pref.FieldDescriptor { ++func (fd *Field) MapValue() protoreflect.FieldDescriptor { + if !fd.IsMap() { + return nil + } + return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) + } +-func (fd *Field) HasDefault() bool { return fd.L1.Default.has } +-func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } +-func (fd *Field) DefaultEnumValue() pref.EnumValueDescriptor { return fd.L1.Default.enum } +-func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof } +-func (fd *Field) ContainingMessage() pref.MessageDescriptor { +- return fd.L0.Parent.(pref.MessageDescriptor) ++func (fd *Field) HasDefault() bool { return fd.L1.Default.has } ++func (fd *Field) Default() protoreflect.Value { return fd.L1.Default.get(fd) } ++func (fd *Field) DefaultEnumValue() protoreflect.EnumValueDescriptor { return fd.L1.Default.enum } ++func (fd *Field) ContainingOneof() protoreflect.OneofDescriptor { return fd.L1.ContainingOneof } ++func (fd *Field) ContainingMessage() protoreflect.MessageDescriptor { ++ return fd.L0.Parent.(protoreflect.MessageDescriptor) + } +-func (fd *Field) Enum() pref.EnumDescriptor { ++func (fd *Field) Enum() protoreflect.EnumDescriptor { + return fd.L1.Enum + } +-func (fd *Field) Message() pref.MessageDescriptor { ++func (fd *Field) Message() protoreflect.MessageDescriptor { + if fd.L1.IsWeak { + if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { +- return d.(pref.MessageDescriptor) ++ return d.(protoreflect.MessageDescriptor) + } + } + return fd.L1.Message + } +-func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +-func (fd *Field) ProtoType(pref.FieldDescriptor) {} ++func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } ++func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} + + // EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8 + // validation for the string field. 
This exists for Google-internal use only +@@ -336,21 +336,21 @@ func (fd *Field) EnforceUTF8() bool { + if fd.L1.HasEnforceUTF8 { + return fd.L1.EnforceUTF8 + } +- return fd.L0.ParentFile.L1.Syntax == pref.Proto3 ++ return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 + } + + func (od *Oneof) IsSynthetic() bool { +- return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() ++ return od.L0.ParentFile.L1.Syntax == protoreflect.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() + } +-func (od *Oneof) Options() pref.ProtoMessage { ++func (od *Oneof) Options() protoreflect.ProtoMessage { + if f := od.L1.Options; f != nil { + return f() + } + return descopts.Oneof + } +-func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields } +-func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } +-func (od *Oneof) ProtoType(pref.OneofDescriptor) {} ++func (od *Oneof) Fields() protoreflect.FieldDescriptors { return &od.L1.Fields } ++func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } ++func (od *Oneof) ProtoType(protoreflect.OneofDescriptor) {} + + type ( + Extension struct { +@@ -359,55 +359,57 @@ type ( + L2 *ExtensionL2 // protected by fileDesc.once + } + ExtensionL1 struct { +- Number pref.FieldNumber +- Extendee pref.MessageDescriptor +- Cardinality pref.Cardinality +- Kind pref.Kind ++ Number protoreflect.FieldNumber ++ Extendee protoreflect.MessageDescriptor ++ Cardinality protoreflect.Cardinality ++ Kind protoreflect.Kind + } + ExtensionL2 struct { +- Options func() pref.ProtoMessage ++ Options func() protoreflect.ProtoMessage + StringName stringName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsPacked bool // promoted from google.protobuf.FieldOptions + Default defaultValue +- Enum pref.EnumDescriptor +- Message pref.MessageDescriptor ++ Enum protoreflect.EnumDescriptor ++ Message protoreflect.MessageDescriptor + } + ) + +-func (xd *Extension) Options() pref.ProtoMessage { ++func (xd *Extension) Options() protoreflect.ProtoMessage { + if f := xd.lazyInit().Options; f != nil { + return f() + } + return descopts.Field + } +-func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } +-func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } +-func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } +-func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } +-func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } +-func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } +-func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } ++func (xd *Extension) Number() protoreflect.FieldNumber { return xd.L1.Number } ++func (xd *Extension) Cardinality() protoreflect.Cardinality { return xd.L1.Cardinality } ++func (xd *Extension) Kind() protoreflect.Kind { return xd.L1.Kind } ++func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } ++func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } ++func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } ++func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != protoreflect.Repeated } + func (xd *Extension) HasOptionalKeyword() bool { +- return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == 
pref.Optional) || xd.lazyInit().IsProto3Optional +-} +-func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +-func (xd *Extension) IsExtension() bool { return true } +-func (xd *Extension) IsWeak() bool { return false } +-func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated } +-func (xd *Extension) IsMap() bool { return false } +-func (xd *Extension) MapKey() pref.FieldDescriptor { return nil } +-func (xd *Extension) MapValue() pref.FieldDescriptor { return nil } +-func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } +-func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) } +-func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum } +-func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil } +-func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee } +-func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum } +-func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message } +-func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } +-func (xd *Extension) ProtoType(pref.FieldDescriptor) {} +-func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} ++ return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional ++} ++func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } ++func (xd *Extension) IsExtension() bool { return true } ++func (xd *Extension) IsWeak() bool { return false } ++func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } ++func (xd *Extension) IsMap() bool { return false } ++func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } ++func (xd *Extension) MapValue() protoreflect.FieldDescriptor { return nil } ++func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } ++func (xd *Extension) Default() protoreflect.Value { return xd.lazyInit().Default.get(xd) } ++func (xd *Extension) DefaultEnumValue() protoreflect.EnumValueDescriptor { ++ return xd.lazyInit().Default.enum ++} ++func (xd *Extension) ContainingOneof() protoreflect.OneofDescriptor { return nil } ++func (xd *Extension) ContainingMessage() protoreflect.MessageDescriptor { return xd.L1.Extendee } ++func (xd *Extension) Enum() protoreflect.EnumDescriptor { return xd.lazyInit().Enum } ++func (xd *Extension) Message() protoreflect.MessageDescriptor { return xd.lazyInit().Message } ++func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } ++func (xd *Extension) ProtoType(protoreflect.FieldDescriptor) {} ++func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} + func (xd *Extension) lazyInit() *ExtensionL2 { + xd.L0.ParentFile.lazyInit() // implicitly initializes L2 + return xd.L2 +@@ -421,7 +423,7 @@ type ( + } + ServiceL1 struct{} + ServiceL2 struct { +- Options func() pref.ProtoMessage ++ Options func() protoreflect.ProtoMessage + Methods Methods + } + +@@ -430,48 +432,48 @@ type ( + L1 MethodL1 + } + MethodL1 struct { +- Options func() pref.ProtoMessage +- Input pref.MessageDescriptor +- Output pref.MessageDescriptor ++ Options func() protoreflect.ProtoMessage ++ Input protoreflect.MessageDescriptor ++ Output protoreflect.MessageDescriptor + IsStreamingClient bool + IsStreamingServer bool + } + ) + +-func (sd *Service) Options() pref.ProtoMessage { ++func (sd *Service) 
Options() protoreflect.ProtoMessage { + if f := sd.lazyInit().Options; f != nil { + return f() + } + return descopts.Service + } +-func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods } +-func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } +-func (sd *Service) ProtoType(pref.ServiceDescriptor) {} +-func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} ++func (sd *Service) Methods() protoreflect.MethodDescriptors { return &sd.lazyInit().Methods } ++func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } ++func (sd *Service) ProtoType(protoreflect.ServiceDescriptor) {} ++func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} + func (sd *Service) lazyInit() *ServiceL2 { + sd.L0.ParentFile.lazyInit() // implicitly initializes L2 + return sd.L2 + } + +-func (md *Method) Options() pref.ProtoMessage { ++func (md *Method) Options() protoreflect.ProtoMessage { + if f := md.L1.Options; f != nil { + return f() + } + return descopts.Method + } +-func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input } +-func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output } +-func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } +-func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } +-func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +-func (md *Method) ProtoType(pref.MethodDescriptor) {} +-func (md *Method) ProtoInternal(pragma.DoNotImplement) {} ++func (md *Method) Input() protoreflect.MessageDescriptor { return md.L1.Input } ++func (md *Method) Output() protoreflect.MessageDescriptor { return md.L1.Output } ++func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } ++func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } ++func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } ++func (md *Method) ProtoType(protoreflect.MethodDescriptor) {} ++func (md *Method) ProtoInternal(pragma.DoNotImplement) {} + + // Surrogate files are can be used to create standalone descriptors + // where the syntax is only information derived from the parent file. 
+ var ( +- SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}} +- SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}} ++ SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} ++ SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + ) + + type ( +@@ -479,24 +481,24 @@ type ( + L0 BaseL0 + } + BaseL0 struct { +- FullName pref.FullName // must be populated +- ParentFile *File // must be populated +- Parent pref.Descriptor ++ FullName protoreflect.FullName // must be populated ++ ParentFile *File // must be populated ++ Parent protoreflect.Descriptor + Index int + } + ) + +-func (d *Base) Name() pref.Name { return d.L0.FullName.Name() } +-func (d *Base) FullName() pref.FullName { return d.L0.FullName } +-func (d *Base) ParentFile() pref.FileDescriptor { ++func (d *Base) Name() protoreflect.Name { return d.L0.FullName.Name() } ++func (d *Base) FullName() protoreflect.FullName { return d.L0.FullName } ++func (d *Base) ParentFile() protoreflect.FileDescriptor { + if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 { + return nil // surrogate files are not real parents + } + return d.L0.ParentFile + } +-func (d *Base) Parent() pref.Descriptor { return d.L0.Parent } ++func (d *Base) Parent() protoreflect.Descriptor { return d.L0.Parent } + func (d *Base) Index() int { return d.L0.Index } +-func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() } ++func (d *Base) Syntax() protoreflect.Syntax { return d.L0.ParentFile.Syntax() } + func (d *Base) IsPlaceholder() bool { return false } + func (d *Base) ProtoInternal(pragma.DoNotImplement) {} + +@@ -513,7 +515,7 @@ func (s *stringName) InitJSON(name string) { + s.nameJSON = name + } + +-func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { ++func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { + s.once.Do(func() { + if fd.IsExtension() { + // For extensions, JSON and text are formatted the same way. +@@ -533,7 +535,7 @@ func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { + + // Format the text name. 
+ s.nameText = string(fd.Name()) +- if fd.Kind() == pref.GroupKind { ++ if fd.Kind() == protoreflect.GroupKind { + s.nameText = string(fd.Message().Name()) + } + } +@@ -541,10 +543,10 @@ func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { + return s + } + +-func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } +-func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText } ++func (s *stringName) getJSON(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } ++func (s *stringName) getText(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameText } + +-func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { ++func DefaultValue(v protoreflect.Value, ev protoreflect.EnumValueDescriptor) defaultValue { + dv := defaultValue{has: v.IsValid(), val: v, enum: ev} + if b, ok := v.Interface().([]byte); ok { + // Store a copy of the default bytes, so that we can detect +@@ -554,9 +556,9 @@ func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { + return dv + } + +-func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue { +- var evs pref.EnumValueDescriptors +- if k == pref.EnumKind { ++func unmarshalDefault(b []byte, k protoreflect.Kind, pf *File, ed protoreflect.EnumDescriptor) defaultValue { ++ var evs protoreflect.EnumValueDescriptors ++ if k == protoreflect.EnumKind { + // If the enum is declared within the same file, be careful not to + // blindly call the Values method, lest we bind ourselves in a deadlock. + if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf { +@@ -567,9 +569,9 @@ func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) d + + // If we are unable to resolve the enum dependency, use a placeholder + // enum value since we will not be able to parse the default value. +- if ed.IsPlaceholder() && pref.Name(b).IsValid() { +- v := pref.ValueOfEnum(0) +- ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b))) ++ if ed.IsPlaceholder() && protoreflect.Name(b).IsValid() { ++ v := protoreflect.ValueOfEnum(0) ++ ev := PlaceholderEnumValue(ed.FullName().Parent().Append(protoreflect.Name(b))) + return DefaultValue(v, ev) + } + } +@@ -583,41 +585,41 @@ func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) d + + type defaultValue struct { + has bool +- val pref.Value +- enum pref.EnumValueDescriptor ++ val protoreflect.Value ++ enum protoreflect.EnumValueDescriptor + bytes []byte + } + +-func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value { ++func (dv *defaultValue) get(fd protoreflect.FieldDescriptor) protoreflect.Value { + // Return the zero value as the default if unpopulated. 
+ if !dv.has { +- if fd.Cardinality() == pref.Repeated { +- return pref.Value{} ++ if fd.Cardinality() == protoreflect.Repeated { ++ return protoreflect.Value{} + } + switch fd.Kind() { +- case pref.BoolKind: +- return pref.ValueOfBool(false) +- case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: +- return pref.ValueOfInt32(0) +- case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: +- return pref.ValueOfInt64(0) +- case pref.Uint32Kind, pref.Fixed32Kind: +- return pref.ValueOfUint32(0) +- case pref.Uint64Kind, pref.Fixed64Kind: +- return pref.ValueOfUint64(0) +- case pref.FloatKind: +- return pref.ValueOfFloat32(0) +- case pref.DoubleKind: +- return pref.ValueOfFloat64(0) +- case pref.StringKind: +- return pref.ValueOfString("") +- case pref.BytesKind: +- return pref.ValueOfBytes(nil) +- case pref.EnumKind: ++ case protoreflect.BoolKind: ++ return protoreflect.ValueOfBool(false) ++ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: ++ return protoreflect.ValueOfInt32(0) ++ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: ++ return protoreflect.ValueOfInt64(0) ++ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: ++ return protoreflect.ValueOfUint32(0) ++ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: ++ return protoreflect.ValueOfUint64(0) ++ case protoreflect.FloatKind: ++ return protoreflect.ValueOfFloat32(0) ++ case protoreflect.DoubleKind: ++ return protoreflect.ValueOfFloat64(0) ++ case protoreflect.StringKind: ++ return protoreflect.ValueOfString("") ++ case protoreflect.BytesKind: ++ return protoreflect.ValueOfBytes(nil) ++ case protoreflect.EnumKind: + if evs := fd.Enum().Values(); evs.Len() > 0 { +- return pref.ValueOfEnum(evs.Get(0).Number()) ++ return protoreflect.ValueOfEnum(evs.Get(0).Number()) + } +- return pref.ValueOfEnum(0) ++ return protoreflect.ValueOfEnum(0) + } + } + +diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +index 66e1fee..4a1584c 100644 +--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go ++++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +@@ -10,7 +10,7 @@ import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + // fileRaw is a data struct used when initializing a file descriptor from +@@ -95,7 +95,7 @@ func (fd *File) unmarshalSeed(b []byte) { + sb := getBuilder() + defer putBuilder(sb) + +- var prevField pref.FieldNumber ++ var prevField protoreflect.FieldNumber + var numEnums, numMessages, numExtensions, numServices int + var posEnums, posMessages, posExtensions, posServices int + b0 := b +@@ -110,16 +110,16 @@ func (fd *File) unmarshalSeed(b []byte) { + case genid.FileDescriptorProto_Syntax_field_number: + switch string(v) { + case "proto2": +- fd.L1.Syntax = pref.Proto2 ++ fd.L1.Syntax = protoreflect.Proto2 + case "proto3": +- fd.L1.Syntax = pref.Proto3 ++ fd.L1.Syntax = protoreflect.Proto3 + default: + panic("invalid syntax") + } + case genid.FileDescriptorProto_Name_field_number: + fd.L1.Path = sb.MakeString(v) + case genid.FileDescriptorProto_Package_field_number: +- fd.L1.Package = pref.FullName(sb.MakeString(v)) ++ fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) + case 
genid.FileDescriptorProto_EnumType_field_number: + if prevField != genid.FileDescriptorProto_EnumType_field_number { + if numEnums > 0 { +@@ -163,7 +163,7 @@ func (fd *File) unmarshalSeed(b []byte) { + + // If syntax is missing, it is assumed to be proto2. + if fd.L1.Syntax == 0 { +- fd.L1.Syntax = pref.Proto2 ++ fd.L1.Syntax = protoreflect.Proto2 + } + + // Must allocate all declarations before parsing each descriptor type +@@ -219,7 +219,7 @@ func (fd *File) unmarshalSeed(b []byte) { + } + } + +-func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { ++func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + ed.L0.ParentFile = pf + ed.L0.Parent = pd + ed.L0.Index = i +@@ -271,12 +271,12 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc + } + } + +-func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { ++func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + md.L0.ParentFile = pf + md.L0.Parent = pd + md.L0.Index = i + +- var prevField pref.FieldNumber ++ var prevField protoreflect.FieldNumber + var numEnums, numMessages, numExtensions int + var posEnums, posMessages, posExtensions int + b0 := b +@@ -387,7 +387,7 @@ func (md *Message) unmarshalSeedOptions(b []byte) { + } + } + +-func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { ++func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + xd.L0.ParentFile = pf + xd.L0.Parent = pd + xd.L0.Index = i +@@ -401,11 +401,11 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Number_field_number: +- xd.L1.Number = pref.FieldNumber(v) ++ xd.L1.Number = protoreflect.FieldNumber(v) + case genid.FieldDescriptorProto_Label_field_number: +- xd.L1.Cardinality = pref.Cardinality(v) ++ xd.L1.Cardinality = protoreflect.Cardinality(v) + case genid.FieldDescriptorProto_Type_field_number: +- xd.L1.Kind = pref.Kind(v) ++ xd.L1.Kind = protoreflect.Kind(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) +@@ -423,7 +423,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref + } + } + +-func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { ++func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + sd.L0.ParentFile = pf + sd.L0.Parent = pd + sd.L0.Index = i +@@ -459,13 +459,13 @@ func putBuilder(b *strs.Builder) { + + // makeFullName converts b to a protoreflect.FullName, + // where b must start with a leading dot. +-func makeFullName(sb *strs.Builder, b []byte) pref.FullName { ++func makeFullName(sb *strs.Builder, b []byte) protoreflect.FullName { + if len(b) == 0 || b[0] != '.' 
{ + panic("name reference must be fully qualified") + } +- return pref.FullName(sb.MakeString(b[1:])) ++ return protoreflect.FullName(sb.MakeString(b[1:])) + } + +-func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName { +- return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix))) ++func appendFullName(sb *strs.Builder, prefix protoreflect.FullName, suffix []byte) protoreflect.FullName { ++ return sb.AppendFullName(prefix, protoreflect.Name(strs.UnsafeString(suffix))) + } +diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +index 198451e..736a19a 100644 +--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go ++++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +@@ -13,7 +13,7 @@ import ( + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + func (fd *File) lazyRawInit() { +@@ -39,10 +39,10 @@ func (file *File) resolveMessages() { + + // Resolve message field dependency. + switch fd.L1.Kind { +- case pref.EnumKind: ++ case protoreflect.EnumKind: + fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx) + depIdx++ +- case pref.MessageKind, pref.GroupKind: ++ case protoreflect.MessageKind, protoreflect.GroupKind: + fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) + depIdx++ + } +@@ -62,10 +62,10 @@ func (file *File) resolveExtensions() { + + // Resolve extension field dependency. + switch xd.L1.Kind { +- case pref.EnumKind: ++ case protoreflect.EnumKind: + xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx) + depIdx++ +- case pref.MessageKind, pref.GroupKind: ++ case protoreflect.MessageKind, protoreflect.GroupKind: + xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx) + depIdx++ + } +@@ -92,7 +92,7 @@ func (file *File) resolveServices() { + } + } + +-func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor { ++func (file *File) resolveEnumDependency(ed protoreflect.EnumDescriptor, i, j int32) protoreflect.EnumDescriptor { + r := file.builder.FileRegistry + if r, ok := r.(resolverByIndex); ok { + if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil { +@@ -105,12 +105,12 @@ func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref + } + } + if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil { +- return d.(pref.EnumDescriptor) ++ return d.(protoreflect.EnumDescriptor) + } + return ed + } + +-func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor { ++func (file *File) resolveMessageDependency(md protoreflect.MessageDescriptor, i, j int32) protoreflect.MessageDescriptor { + r := file.builder.FileRegistry + if r, ok := r.(resolverByIndex); ok { + if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil { +@@ -123,7 +123,7 @@ func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32 + } + } + if d, _ := r.FindDescriptorByName(md.FullName()); d != nil { +- return d.(pref.MessageDescriptor) ++ return d.(protoreflect.MessageDescriptor) + } + return md + } +@@ -158,7 +158,7 @@ func (fd *File) unmarshalFull(b []byte) { + if imp == nil { + imp = 
PlaceholderFile(path) + } +- fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) ++ fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp}) + case genid.FileDescriptorProto_EnumType_field_number: + fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) + enumIdx++ +@@ -199,7 +199,7 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { + case genid.EnumDescriptorProto_Value_field_number: + rawValues = append(rawValues, v) + case genid.EnumDescriptorProto_ReservedName_field_number: +- ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) ++ ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v))) + case genid.EnumDescriptorProto_ReservedRange_field_number: + ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) + case genid.EnumDescriptorProto_Options_field_number: +@@ -219,7 +219,7 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { + ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions) + } + +-func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { ++func unmarshalEnumReservedRange(b []byte) (r [2]protoreflect.EnumNumber) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] +@@ -229,9 +229,9 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { + b = b[m:] + switch num { + case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: +- r[0] = pref.EnumNumber(v) ++ r[0] = protoreflect.EnumNumber(v) + case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: +- r[1] = pref.EnumNumber(v) ++ r[1] = protoreflect.EnumNumber(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) +@@ -241,7 +241,7 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { + return r + } + +-func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { ++func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + vd.L0.ParentFile = pf + vd.L0.Parent = pd + vd.L0.Index = i +@@ -256,7 +256,7 @@ func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref + b = b[m:] + switch num { + case genid.EnumValueDescriptorProto_Number_field_number: +- vd.L1.Number = pref.EnumNumber(v) ++ vd.L1.Number = protoreflect.EnumNumber(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) +@@ -294,7 +294,7 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { + case genid.DescriptorProto_OneofDecl_field_number: + rawOneofs = append(rawOneofs, v) + case genid.DescriptorProto_ReservedName_field_number: +- md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) ++ md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v))) + case genid.DescriptorProto_ReservedRange_field_number: + md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) + case genid.DescriptorProto_ExtensionRange_field_number: +@@ -326,7 +326,7 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { + for i, b := range rawFields { + fd := &md.L2.Fields.List[i] + fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i) +- if fd.L1.Cardinality == pref.Required { ++ if fd.L1.Cardinality == protoreflect.Required { + md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number) + } + } +@@ -359,7 +359,7 @@ func (md 
*Message) unmarshalOptions(b []byte) { + } + } + +-func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { ++func unmarshalMessageReservedRange(b []byte) (r [2]protoreflect.FieldNumber) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] +@@ -369,9 +369,9 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { + b = b[m:] + switch num { + case genid.DescriptorProto_ReservedRange_Start_field_number: +- r[0] = pref.FieldNumber(v) ++ r[0] = protoreflect.FieldNumber(v) + case genid.DescriptorProto_ReservedRange_End_field_number: +- r[1] = pref.FieldNumber(v) ++ r[1] = protoreflect.FieldNumber(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) +@@ -381,7 +381,7 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { + return r + } + +-func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) { ++func unmarshalMessageExtensionRange(b []byte) (r [2]protoreflect.FieldNumber, rawOptions []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] +@@ -391,9 +391,9 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions + b = b[m:] + switch num { + case genid.DescriptorProto_ExtensionRange_Start_field_number: +- r[0] = pref.FieldNumber(v) ++ r[0] = protoreflect.FieldNumber(v) + case genid.DescriptorProto_ExtensionRange_End_field_number: +- r[1] = pref.FieldNumber(v) ++ r[1] = protoreflect.FieldNumber(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) +@@ -410,7 +410,7 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions + return r, rawOptions + } + +-func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { ++func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + fd.L0.ParentFile = pf + fd.L0.Parent = pd + fd.L0.Index = i +@@ -426,11 +426,11 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Number_field_number: +- fd.L1.Number = pref.FieldNumber(v) ++ fd.L1.Number = protoreflect.FieldNumber(v) + case genid.FieldDescriptorProto_Label_field_number: +- fd.L1.Cardinality = pref.Cardinality(v) ++ fd.L1.Cardinality = protoreflect.Cardinality(v) + case genid.FieldDescriptorProto_Type_field_number: +- fd.L1.Kind = pref.Kind(v) ++ fd.L1.Kind = protoreflect.Kind(v) + case genid.FieldDescriptorProto_OneofIndex_field_number: + // In Message.unmarshalFull, we allocate slices for both + // the field and oneof descriptors before unmarshaling either +@@ -453,7 +453,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des + case genid.FieldDescriptorProto_JsonName_field_number: + fd.L1.StringName.InitJSON(sb.MakeString(v)) + case genid.FieldDescriptorProto_DefaultValue_field_number: +- fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages ++ fd.L1.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages + case genid.FieldDescriptorProto_TypeName_field_number: + rawTypeName = v + case genid.FieldDescriptorProto_Options_field_number: +@@ -468,9 +468,9 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des + if rawTypeName != nil { + name := makeFullName(sb, rawTypeName) + switch fd.L1.Kind { +- case pref.EnumKind: ++ case protoreflect.EnumKind: + fd.L1.Enum = 
PlaceholderEnum(name) +- case pref.MessageKind, pref.GroupKind: ++ case protoreflect.MessageKind, protoreflect.GroupKind: + fd.L1.Message = PlaceholderMessage(name) + } + } +@@ -504,7 +504,7 @@ func (fd *Field) unmarshalOptions(b []byte) { + } + } + +-func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { ++func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + od.L0.ParentFile = pf + od.L0.Parent = pd + od.L0.Index = i +@@ -553,7 +553,7 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { + case genid.FieldDescriptorProto_JsonName_field_number: + xd.L2.StringName.InitJSON(sb.MakeString(v)) + case genid.FieldDescriptorProto_DefaultValue_field_number: +- xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions ++ xd.L2.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions + case genid.FieldDescriptorProto_TypeName_field_number: + rawTypeName = v + case genid.FieldDescriptorProto_Options_field_number: +@@ -568,9 +568,9 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { + if rawTypeName != nil { + name := makeFullName(sb, rawTypeName) + switch xd.L1.Kind { +- case pref.EnumKind: ++ case protoreflect.EnumKind: + xd.L2.Enum = PlaceholderEnum(name) +- case pref.MessageKind, pref.GroupKind: ++ case protoreflect.MessageKind, protoreflect.GroupKind: + xd.L2.Message = PlaceholderMessage(name) + } + } +@@ -627,7 +627,7 @@ func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { + sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions) + } + +-func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { ++func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + md.L0.ParentFile = pf + md.L0.Parent = pd + md.L0.Index = i +@@ -680,18 +680,18 @@ func appendOptions(dst, src []byte) []byte { + // + // The type of message to unmarshal to is passed as a pointer since the + // vars in descopts may not yet be populated at the time this function is called. 
+-func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage { ++func (db *Builder) optionsUnmarshaler(p *protoreflect.ProtoMessage, b []byte) func() protoreflect.ProtoMessage { + if b == nil { + return nil + } +- var opts pref.ProtoMessage ++ var opts protoreflect.ProtoMessage + var once sync.Once +- return func() pref.ProtoMessage { ++ return func() protoreflect.ProtoMessage { + once.Do(func() { + if *p == nil { + panic("Descriptor.Options called without importing the descriptor package") + } +- opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage) ++ opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(protoreflect.ProtoMessage) + if err := (proto.UnmarshalOptions{ + AllowPartial: true, + Resolver: db.TypeResolver, +diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +index aa294ff..e3b6587 100644 +--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go ++++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +@@ -17,31 +17,30 @@ import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +- pref "google.golang.org/protobuf/reflect/protoreflect" + ) + +-type FileImports []pref.FileImport ++type FileImports []protoreflect.FileImport + + func (p *FileImports) Len() int { return len(*p) } +-func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] } ++func (p *FileImports) Get(i int) protoreflect.FileImport { return (*p)[i] } + func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } + func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {} + + type Names struct { +- List []pref.Name ++ List []protoreflect.Name + once sync.Once +- has map[pref.Name]int // protected by once ++ has map[protoreflect.Name]int // protected by once + } + + func (p *Names) Len() int { return len(p.List) } +-func (p *Names) Get(i int) pref.Name { return p.List[i] } +-func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 } ++func (p *Names) Get(i int) protoreflect.Name { return p.List[i] } ++func (p *Names) Has(s protoreflect.Name) bool { return p.lazyInit().has[s] > 0 } + func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } + func (p *Names) ProtoInternal(pragma.DoNotImplement) {} + func (p *Names) lazyInit() *Names { + p.once.Do(func() { + if len(p.List) > 0 { +- p.has = make(map[pref.Name]int, len(p.List)) ++ p.has = make(map[protoreflect.Name]int, len(p.List)) + for _, s := range p.List { + p.has[s] = p.has[s] + 1 + } +@@ -67,14 +66,14 @@ func (p *Names) CheckValid() error { + } + + type EnumRanges struct { +- List [][2]pref.EnumNumber // start inclusive; end inclusive ++ List [][2]protoreflect.EnumNumber // start inclusive; end inclusive + once sync.Once +- sorted [][2]pref.EnumNumber // protected by once ++ sorted [][2]protoreflect.EnumNumber // protected by once + } + +-func (p *EnumRanges) Len() int { return len(p.List) } +-func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] } +-func (p *EnumRanges) Has(n pref.EnumNumber) bool { ++func (p *EnumRanges) Len() int { return len(p.List) } ++func (p *EnumRanges) Get(i int) [2]protoreflect.EnumNumber { return p.List[i] } ++func (p *EnumRanges) Has(n protoreflect.EnumNumber) bool { + for ls := p.lazyInit().sorted; len(ls) > 0; { + i := len(ls) / 2 + switch r := enumRange(ls[i]); { +@@ 
-129,14 +128,14 @@ func (r enumRange) String() string { + } + + type FieldRanges struct { +- List [][2]pref.FieldNumber // start inclusive; end exclusive ++ List [][2]protoreflect.FieldNumber // start inclusive; end exclusive + once sync.Once +- sorted [][2]pref.FieldNumber // protected by once ++ sorted [][2]protoreflect.FieldNumber // protected by once + } + +-func (p *FieldRanges) Len() int { return len(p.List) } +-func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] } +-func (p *FieldRanges) Has(n pref.FieldNumber) bool { ++func (p *FieldRanges) Len() int { return len(p.List) } ++func (p *FieldRanges) Get(i int) [2]protoreflect.FieldNumber { return p.List[i] } ++func (p *FieldRanges) Has(n protoreflect.FieldNumber) bool { + for ls := p.lazyInit().sorted; len(ls) > 0; { + i := len(ls) / 2 + switch r := fieldRange(ls[i]); { +@@ -221,17 +220,17 @@ func (r fieldRange) String() string { + } + + type FieldNumbers struct { +- List []pref.FieldNumber ++ List []protoreflect.FieldNumber + once sync.Once +- has map[pref.FieldNumber]struct{} // protected by once ++ has map[protoreflect.FieldNumber]struct{} // protected by once + } + +-func (p *FieldNumbers) Len() int { return len(p.List) } +-func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] } +-func (p *FieldNumbers) Has(n pref.FieldNumber) bool { ++func (p *FieldNumbers) Len() int { return len(p.List) } ++func (p *FieldNumbers) Get(i int) protoreflect.FieldNumber { return p.List[i] } ++func (p *FieldNumbers) Has(n protoreflect.FieldNumber) bool { + p.once.Do(func() { + if len(p.List) > 0 { +- p.has = make(map[pref.FieldNumber]struct{}, len(p.List)) ++ p.has = make(map[protoreflect.FieldNumber]struct{}, len(p.List)) + for _, n := range p.List { + p.has[n] = struct{}{} + } +@@ -244,30 +243,38 @@ func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList + func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {} + + type OneofFields struct { +- List []pref.FieldDescriptor ++ List []protoreflect.FieldDescriptor + once sync.Once +- byName map[pref.Name]pref.FieldDescriptor // protected by once +- byJSON map[string]pref.FieldDescriptor // protected by once +- byText map[string]pref.FieldDescriptor // protected by once +- byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once ++ byName map[protoreflect.Name]protoreflect.FieldDescriptor // protected by once ++ byJSON map[string]protoreflect.FieldDescriptor // protected by once ++ byText map[string]protoreflect.FieldDescriptor // protected by once ++ byNum map[protoreflect.FieldNumber]protoreflect.FieldDescriptor // protected by once + } + +-func (p *OneofFields) Len() int { return len(p.List) } +-func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } +-func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } +-func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } +-func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] } +-func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } +-func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +-func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} ++func (p *OneofFields) Len() int { return len(p.List) } ++func (p *OneofFields) Get(i int) protoreflect.FieldDescriptor { return p.List[i] } ++func (p *OneofFields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { 
++ return p.lazyInit().byName[s] ++} ++func (p *OneofFields) ByJSONName(s string) protoreflect.FieldDescriptor { ++ return p.lazyInit().byJSON[s] ++} ++func (p *OneofFields) ByTextName(s string) protoreflect.FieldDescriptor { ++ return p.lazyInit().byText[s] ++} ++func (p *OneofFields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { ++ return p.lazyInit().byNum[n] ++} ++func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } ++func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} + + func (p *OneofFields) lazyInit() *OneofFields { + p.once.Do(func() { + if len(p.List) > 0 { +- p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) +- p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) +- p.byText = make(map[string]pref.FieldDescriptor, len(p.List)) +- p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) ++ p.byName = make(map[protoreflect.Name]protoreflect.FieldDescriptor, len(p.List)) ++ p.byJSON = make(map[string]protoreflect.FieldDescriptor, len(p.List)) ++ p.byText = make(map[string]protoreflect.FieldDescriptor, len(p.List)) ++ p.byNum = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor, len(p.List)) + for _, f := range p.List { + // Field names and numbers are guaranteed to be unique. + p.byName[f.Name()] = f +@@ -284,123 +291,123 @@ type SourceLocations struct { + // List is a list of SourceLocations. + // The SourceLocation.Next field does not need to be populated + // as it will be lazily populated upon first need. +- List []pref.SourceLocation ++ List []protoreflect.SourceLocation + + // File is the parent file descriptor that these locations are relative to. + // If non-nil, ByDescriptor verifies that the provided descriptor + // is a child of this file descriptor. +- File pref.FileDescriptor ++ File protoreflect.FileDescriptor + + once sync.Once + byPath map[pathKey]int + } + +-func (p *SourceLocations) Len() int { return len(p.List) } +-func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] } +-func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation { ++func (p *SourceLocations) Len() int { return len(p.List) } ++func (p *SourceLocations) Get(i int) protoreflect.SourceLocation { return p.lazyInit().List[i] } ++func (p *SourceLocations) byKey(k pathKey) protoreflect.SourceLocation { + if i, ok := p.lazyInit().byPath[k]; ok { + return p.List[i] + } +- return pref.SourceLocation{} ++ return protoreflect.SourceLocation{} + } +-func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation { ++func (p *SourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation { + return p.byKey(newPathKey(path)) + } +-func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation { ++func (p *SourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation { + if p.File != nil && desc != nil && p.File != desc.ParentFile() { +- return pref.SourceLocation{} // mismatching parent files ++ return protoreflect.SourceLocation{} // mismatching parent files + } + var pathArr [16]int32 + path := pathArr[:0] + for { + switch desc.(type) { +- case pref.FileDescriptor: ++ case protoreflect.FileDescriptor: + // Reverse the path since it was constructed in reverse. 
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { + path[i], path[j] = path[j], path[i] + } + return p.byKey(newPathKey(path)) +- case pref.MessageDescriptor: ++ case protoreflect.MessageDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { +- case pref.FileDescriptor: ++ case protoreflect.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) +- case pref.MessageDescriptor: ++ case protoreflect.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) + default: +- return pref.SourceLocation{} ++ return protoreflect.SourceLocation{} + } +- case pref.FieldDescriptor: +- isExtension := desc.(pref.FieldDescriptor).IsExtension() ++ case protoreflect.FieldDescriptor: ++ isExtension := desc.(protoreflect.FieldDescriptor).IsExtension() + path = append(path, int32(desc.Index())) + desc = desc.Parent() + if isExtension { + switch desc.(type) { +- case pref.FileDescriptor: ++ case protoreflect.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) +- case pref.MessageDescriptor: ++ case protoreflect.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Extension_field_number)) + default: +- return pref.SourceLocation{} ++ return protoreflect.SourceLocation{} + } + } else { + switch desc.(type) { +- case pref.MessageDescriptor: ++ case protoreflect.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Field_field_number)) + default: +- return pref.SourceLocation{} ++ return protoreflect.SourceLocation{} + } + } +- case pref.OneofDescriptor: ++ case protoreflect.OneofDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { +- case pref.MessageDescriptor: ++ case protoreflect.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) + default: +- return pref.SourceLocation{} ++ return protoreflect.SourceLocation{} + } +- case pref.EnumDescriptor: ++ case protoreflect.EnumDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { +- case pref.FileDescriptor: ++ case protoreflect.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) +- case pref.MessageDescriptor: ++ case protoreflect.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) + default: +- return pref.SourceLocation{} ++ return protoreflect.SourceLocation{} + } +- case pref.EnumValueDescriptor: ++ case protoreflect.EnumValueDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { +- case pref.EnumDescriptor: ++ case protoreflect.EnumDescriptor: + path = append(path, int32(genid.EnumDescriptorProto_Value_field_number)) + default: +- return pref.SourceLocation{} ++ return protoreflect.SourceLocation{} + } +- case pref.ServiceDescriptor: ++ case protoreflect.ServiceDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { +- case pref.FileDescriptor: ++ case protoreflect.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) + default: +- return pref.SourceLocation{} ++ return protoreflect.SourceLocation{} + } +- case pref.MethodDescriptor: ++ case protoreflect.MethodDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { +- case pref.ServiceDescriptor: ++ case 
protoreflect.ServiceDescriptor: + path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) + default: +- return pref.SourceLocation{} ++ return protoreflect.SourceLocation{} + } + default: +- return pref.SourceLocation{} ++ return protoreflect.SourceLocation{} + } + } + } +@@ -435,7 +442,7 @@ type pathKey struct { + str string // used if the path does not fit in arr + } + +-func newPathKey(p pref.SourcePath) (k pathKey) { ++func newPathKey(p protoreflect.SourcePath) (k pathKey) { + if len(p) < len(k.arr) { + for i, ps := range p { + if ps < 0 || math.MaxUint8 <= ps { +diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +index dbf2c60..28240eb 100644 +--- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go ++++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +@@ -7,7 +7,7 @@ package filedesc + import ( + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/pragma" +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + var ( +@@ -30,78 +30,80 @@ var ( + // PlaceholderFile is a placeholder, representing only the file path. + type PlaceholderFile string + +-func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f } +-func (f PlaceholderFile) Parent() pref.Descriptor { return nil } +-func (f PlaceholderFile) Index() int { return 0 } +-func (f PlaceholderFile) Syntax() pref.Syntax { return 0 } +-func (f PlaceholderFile) Name() pref.Name { return "" } +-func (f PlaceholderFile) FullName() pref.FullName { return "" } +-func (f PlaceholderFile) IsPlaceholder() bool { return true } +-func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File } +-func (f PlaceholderFile) Path() string { return string(f) } +-func (f PlaceholderFile) Package() pref.FullName { return "" } +-func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles } +-func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages } +-func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums } +-func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions } +-func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices } +-func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations } +-func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return } +-func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } ++func (f PlaceholderFile) ParentFile() protoreflect.FileDescriptor { return f } ++func (f PlaceholderFile) Parent() protoreflect.Descriptor { return nil } ++func (f PlaceholderFile) Index() int { return 0 } ++func (f PlaceholderFile) Syntax() protoreflect.Syntax { return 0 } ++func (f PlaceholderFile) Name() protoreflect.Name { return "" } ++func (f PlaceholderFile) FullName() protoreflect.FullName { return "" } ++func (f PlaceholderFile) IsPlaceholder() bool { return true } ++func (f PlaceholderFile) Options() protoreflect.ProtoMessage { return descopts.File } ++func (f PlaceholderFile) Path() string { return string(f) } ++func (f PlaceholderFile) Package() protoreflect.FullName { return "" } ++func (f PlaceholderFile) Imports() protoreflect.FileImports { return emptyFiles } ++func (f PlaceholderFile) Messages() protoreflect.MessageDescriptors { return emptyMessages } ++func (f PlaceholderFile) Enums() 
protoreflect.EnumDescriptors { return emptyEnums } ++func (f PlaceholderFile) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions } ++func (f PlaceholderFile) Services() protoreflect.ServiceDescriptors { return emptyServices } ++func (f PlaceholderFile) SourceLocations() protoreflect.SourceLocations { return emptySourceLocations } ++func (f PlaceholderFile) ProtoType(protoreflect.FileDescriptor) { return } ++func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } + + // PlaceholderEnum is a placeholder, representing only the full name. +-type PlaceholderEnum pref.FullName ++type PlaceholderEnum protoreflect.FullName + +-func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil } +-func (e PlaceholderEnum) Parent() pref.Descriptor { return nil } +-func (e PlaceholderEnum) Index() int { return 0 } +-func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 } +-func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() } +-func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) } +-func (e PlaceholderEnum) IsPlaceholder() bool { return true } +-func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum } +-func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues } +-func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames } +-func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges } +-func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return } +-func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } ++func (e PlaceholderEnum) ParentFile() protoreflect.FileDescriptor { return nil } ++func (e PlaceholderEnum) Parent() protoreflect.Descriptor { return nil } ++func (e PlaceholderEnum) Index() int { return 0 } ++func (e PlaceholderEnum) Syntax() protoreflect.Syntax { return 0 } ++func (e PlaceholderEnum) Name() protoreflect.Name { return protoreflect.FullName(e).Name() } ++func (e PlaceholderEnum) FullName() protoreflect.FullName { return protoreflect.FullName(e) } ++func (e PlaceholderEnum) IsPlaceholder() bool { return true } ++func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return descopts.Enum } ++func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } ++func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } ++func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } ++func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } ++func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } + + // PlaceholderEnumValue is a placeholder, representing only the full name. 
+-type PlaceholderEnumValue pref.FullName ++type PlaceholderEnumValue protoreflect.FullName + +-func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil } +-func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil } +-func (e PlaceholderEnumValue) Index() int { return 0 } +-func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 } +-func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() } +-func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) } +-func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } +-func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue } +-func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 } +-func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return } +-func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } ++func (e PlaceholderEnumValue) ParentFile() protoreflect.FileDescriptor { return nil } ++func (e PlaceholderEnumValue) Parent() protoreflect.Descriptor { return nil } ++func (e PlaceholderEnumValue) Index() int { return 0 } ++func (e PlaceholderEnumValue) Syntax() protoreflect.Syntax { return 0 } ++func (e PlaceholderEnumValue) Name() protoreflect.Name { return protoreflect.FullName(e).Name() } ++func (e PlaceholderEnumValue) FullName() protoreflect.FullName { return protoreflect.FullName(e) } ++func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } ++func (e PlaceholderEnumValue) Options() protoreflect.ProtoMessage { return descopts.EnumValue } ++func (e PlaceholderEnumValue) Number() protoreflect.EnumNumber { return 0 } ++func (e PlaceholderEnumValue) ProtoType(protoreflect.EnumValueDescriptor) { return } ++func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } + + // PlaceholderMessage is a placeholder, representing only the full name. 
+-type PlaceholderMessage pref.FullName ++type PlaceholderMessage protoreflect.FullName + +-func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil } +-func (m PlaceholderMessage) Parent() pref.Descriptor { return nil } +-func (m PlaceholderMessage) Index() int { return 0 } +-func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 } +-func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() } +-func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) } +-func (m PlaceholderMessage) IsPlaceholder() bool { return true } +-func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message } +-func (m PlaceholderMessage) IsMapEntry() bool { return false } +-func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields } +-func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs } +-func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames } +-func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges } +-func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers } +-func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges } +-func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") } +-func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages } +-func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums } +-func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions } +-func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return } +-func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } ++func (m PlaceholderMessage) ParentFile() protoreflect.FileDescriptor { return nil } ++func (m PlaceholderMessage) Parent() protoreflect.Descriptor { return nil } ++func (m PlaceholderMessage) Index() int { return 0 } ++func (m PlaceholderMessage) Syntax() protoreflect.Syntax { return 0 } ++func (m PlaceholderMessage) Name() protoreflect.Name { return protoreflect.FullName(m).Name() } ++func (m PlaceholderMessage) FullName() protoreflect.FullName { return protoreflect.FullName(m) } ++func (m PlaceholderMessage) IsPlaceholder() bool { return true } ++func (m PlaceholderMessage) Options() protoreflect.ProtoMessage { return descopts.Message } ++func (m PlaceholderMessage) IsMapEntry() bool { return false } ++func (m PlaceholderMessage) Fields() protoreflect.FieldDescriptors { return emptyFields } ++func (m PlaceholderMessage) Oneofs() protoreflect.OneofDescriptors { return emptyOneofs } ++func (m PlaceholderMessage) ReservedNames() protoreflect.Names { return emptyNames } ++func (m PlaceholderMessage) ReservedRanges() protoreflect.FieldRanges { return emptyFieldRanges } ++func (m PlaceholderMessage) RequiredNumbers() protoreflect.FieldNumbers { return emptyFieldNumbers } ++func (m PlaceholderMessage) ExtensionRanges() protoreflect.FieldRanges { return emptyFieldRanges } ++func (m PlaceholderMessage) ExtensionRangeOptions(int) protoreflect.ProtoMessage { ++ panic("index out of range") ++} ++func (m PlaceholderMessage) Messages() protoreflect.MessageDescriptors { return emptyMessages } ++func (m PlaceholderMessage) Enums() protoreflect.EnumDescriptors { return emptyEnums } ++func (m PlaceholderMessage) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions } ++func (m PlaceholderMessage) 
ProtoType(protoreflect.MessageDescriptor) { return } ++func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } +diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go +index 0a0dd35..f0e38c4 100644 +--- a/vendor/google.golang.org/protobuf/internal/filetype/build.go ++++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go +@@ -10,17 +10,16 @@ import ( + "reflect" + + "google.golang.org/protobuf/internal/descopts" +- fdesc "google.golang.org/protobuf/internal/filedesc" ++ "google.golang.org/protobuf/internal/filedesc" + pimpl "google.golang.org/protobuf/internal/impl" +- pref "google.golang.org/protobuf/reflect/protoreflect" +- preg "google.golang.org/protobuf/reflect/protoregistry" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoregistry" + ) + + // Builder constructs type descriptors from a raw file descriptor + // and associated Go types for each enum and message declaration. + // +-// +-// Flattened Ordering ++// # Flattened Ordering + // + // The protobuf type system represents declarations as a tree. Certain nodes in + // the tree require us to either associate it with a concrete Go type or to +@@ -52,7 +51,7 @@ import ( + // that children themselves may have. + type Builder struct { + // File is the underlying file descriptor builder. +- File fdesc.Builder ++ File filedesc.Builder + + // GoTypes is a unique set of the Go types for all declarations and + // dependencies. Each type is represented as a zero value of the Go type. +@@ -108,22 +107,22 @@ type Builder struct { + // TypeRegistry is the registry to register each type descriptor. + // If nil, it uses protoregistry.GlobalTypes. + TypeRegistry interface { +- RegisterMessage(pref.MessageType) error +- RegisterEnum(pref.EnumType) error +- RegisterExtension(pref.ExtensionType) error ++ RegisterMessage(protoreflect.MessageType) error ++ RegisterEnum(protoreflect.EnumType) error ++ RegisterExtension(protoreflect.ExtensionType) error + } + } + + // Out is the output of the builder. + type Out struct { +- File pref.FileDescriptor ++ File protoreflect.FileDescriptor + } + + func (tb Builder) Build() (out Out) { + // Replace the resolver with one that resolves dependencies by index, + // which is faster and more reliable than relying on the global registry. + if tb.File.FileRegistry == nil { +- tb.File.FileRegistry = preg.GlobalFiles ++ tb.File.FileRegistry = protoregistry.GlobalFiles + } + tb.File.FileRegistry = &resolverByIndex{ + goTypes: tb.GoTypes, +@@ -133,7 +132,7 @@ func (tb Builder) Build() (out Out) { + + // Initialize registry if unpopulated. 
+ if tb.TypeRegistry == nil { +- tb.TypeRegistry = preg.GlobalTypes ++ tb.TypeRegistry = protoregistry.GlobalTypes + } + + fbOut := tb.File.Build() +@@ -183,23 +182,23 @@ func (tb Builder) Build() (out Out) { + for i := range fbOut.Messages { + switch fbOut.Messages[i].Name() { + case "FileOptions": +- descopts.File = messageGoTypes[i].(pref.ProtoMessage) ++ descopts.File = messageGoTypes[i].(protoreflect.ProtoMessage) + case "EnumOptions": +- descopts.Enum = messageGoTypes[i].(pref.ProtoMessage) ++ descopts.Enum = messageGoTypes[i].(protoreflect.ProtoMessage) + case "EnumValueOptions": +- descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage) ++ descopts.EnumValue = messageGoTypes[i].(protoreflect.ProtoMessage) + case "MessageOptions": +- descopts.Message = messageGoTypes[i].(pref.ProtoMessage) ++ descopts.Message = messageGoTypes[i].(protoreflect.ProtoMessage) + case "FieldOptions": +- descopts.Field = messageGoTypes[i].(pref.ProtoMessage) ++ descopts.Field = messageGoTypes[i].(protoreflect.ProtoMessage) + case "OneofOptions": +- descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage) ++ descopts.Oneof = messageGoTypes[i].(protoreflect.ProtoMessage) + case "ExtensionRangeOptions": +- descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage) ++ descopts.ExtensionRange = messageGoTypes[i].(protoreflect.ProtoMessage) + case "ServiceOptions": +- descopts.Service = messageGoTypes[i].(pref.ProtoMessage) ++ descopts.Service = messageGoTypes[i].(protoreflect.ProtoMessage) + case "MethodOptions": +- descopts.Method = messageGoTypes[i].(pref.ProtoMessage) ++ descopts.Method = messageGoTypes[i].(protoreflect.ProtoMessage) + } + } + } +@@ -216,11 +215,11 @@ func (tb Builder) Build() (out Out) { + const listExtDeps = 2 + var goType reflect.Type + switch fbOut.Extensions[i].L1.Kind { +- case pref.EnumKind: ++ case protoreflect.EnumKind: + j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) + goType = reflect.TypeOf(tb.GoTypes[j]) + depIdx++ +- case pref.MessageKind, pref.GroupKind: ++ case protoreflect.MessageKind, protoreflect.GroupKind: + j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) + goType = reflect.TypeOf(tb.GoTypes[j]) + depIdx++ +@@ -242,22 +241,22 @@ func (tb Builder) Build() (out Out) { + return out + } + +-var goTypeForPBKind = map[pref.Kind]reflect.Type{ +- pref.BoolKind: reflect.TypeOf(bool(false)), +- pref.Int32Kind: reflect.TypeOf(int32(0)), +- pref.Sint32Kind: reflect.TypeOf(int32(0)), +- pref.Sfixed32Kind: reflect.TypeOf(int32(0)), +- pref.Int64Kind: reflect.TypeOf(int64(0)), +- pref.Sint64Kind: reflect.TypeOf(int64(0)), +- pref.Sfixed64Kind: reflect.TypeOf(int64(0)), +- pref.Uint32Kind: reflect.TypeOf(uint32(0)), +- pref.Fixed32Kind: reflect.TypeOf(uint32(0)), +- pref.Uint64Kind: reflect.TypeOf(uint64(0)), +- pref.Fixed64Kind: reflect.TypeOf(uint64(0)), +- pref.FloatKind: reflect.TypeOf(float32(0)), +- pref.DoubleKind: reflect.TypeOf(float64(0)), +- pref.StringKind: reflect.TypeOf(string("")), +- pref.BytesKind: reflect.TypeOf([]byte(nil)), ++var goTypeForPBKind = map[protoreflect.Kind]reflect.Type{ ++ protoreflect.BoolKind: reflect.TypeOf(bool(false)), ++ protoreflect.Int32Kind: reflect.TypeOf(int32(0)), ++ protoreflect.Sint32Kind: reflect.TypeOf(int32(0)), ++ protoreflect.Sfixed32Kind: reflect.TypeOf(int32(0)), ++ protoreflect.Int64Kind: reflect.TypeOf(int64(0)), ++ protoreflect.Sint64Kind: reflect.TypeOf(int64(0)), ++ protoreflect.Sfixed64Kind: reflect.TypeOf(int64(0)), ++ protoreflect.Uint32Kind: reflect.TypeOf(uint32(0)), ++ 
protoreflect.Fixed32Kind: reflect.TypeOf(uint32(0)), ++ protoreflect.Uint64Kind: reflect.TypeOf(uint64(0)), ++ protoreflect.Fixed64Kind: reflect.TypeOf(uint64(0)), ++ protoreflect.FloatKind: reflect.TypeOf(float32(0)), ++ protoreflect.DoubleKind: reflect.TypeOf(float64(0)), ++ protoreflect.StringKind: reflect.TypeOf(string("")), ++ protoreflect.BytesKind: reflect.TypeOf([]byte(nil)), + } + + type depIdxs []int32 +@@ -274,13 +273,13 @@ type ( + fileRegistry + } + fileRegistry interface { +- FindFileByPath(string) (pref.FileDescriptor, error) +- FindDescriptorByName(pref.FullName) (pref.Descriptor, error) +- RegisterFile(pref.FileDescriptor) error ++ FindFileByPath(string) (protoreflect.FileDescriptor, error) ++ FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) ++ RegisterFile(protoreflect.FileDescriptor) error + } + ) + +-func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor { ++func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.EnumDescriptor { + if depIdx := int(r.depIdxs.Get(i, j)); int(depIdx) < len(es)+len(ms) { + return &es[depIdx] + } else { +@@ -288,7 +287,7 @@ func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdes + } + } + +-func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor { ++func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.MessageDescriptor { + if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) { + return &ms[depIdx-len(es)] + } else { +diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +index e3cdf1c..5c0e8f7 100644 +--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go ++++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +@@ -50,6 +50,7 @@ const ( + FileDescriptorProto_Options_field_name protoreflect.Name = "options" + FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" + FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" ++ FileDescriptorProto_Edition_field_name protoreflect.Name = "edition" + + FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" + FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" +@@ -63,6 +64,7 @@ const ( + FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" + FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" + FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" ++ FileDescriptorProto_Edition_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.edition" + ) + + // Field numbers for google.protobuf.FileDescriptorProto. +@@ -79,6 +81,7 @@ const ( + FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 + FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 ++ FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13 + ) + + // Names for google.protobuf.DescriptorProto. 
+@@ -494,26 +497,29 @@ const ( + + // Field names for google.protobuf.MessageOptions. + const ( +- MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" +- MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" +- MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" +- MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" +- MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" ++ MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" ++ MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" ++ MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" ++ MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" ++ MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" ++ MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + +- MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" +- MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" +- MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" +- MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" +- MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ++ MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" ++ MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" ++ MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" ++ MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" ++ MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" ++ MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" + ) + + // Field numbers for google.protobuf.MessageOptions. + const ( +- MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 +- MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 +- MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 +- MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 +- MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ++ MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 ++ MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 ++ MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 ++ MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 ++ MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 ++ MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + ) + + // Names for google.protobuf.FieldOptions. 
+@@ -528,16 +534,24 @@ const ( + FieldOptions_Packed_field_name protoreflect.Name = "packed" + FieldOptions_Jstype_field_name protoreflect.Name = "jstype" + FieldOptions_Lazy_field_name protoreflect.Name = "lazy" ++ FieldOptions_UnverifiedLazy_field_name protoreflect.Name = "unverified_lazy" + FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FieldOptions_Weak_field_name protoreflect.Name = "weak" ++ FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" ++ FieldOptions_Retention_field_name protoreflect.Name = "retention" ++ FieldOptions_Target_field_name protoreflect.Name = "target" + FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" + FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" + FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" + FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" ++ FieldOptions_UnverifiedLazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.unverified_lazy" + FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" + FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" ++ FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" ++ FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" ++ FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" + FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" + ) + +@@ -547,8 +561,12 @@ const ( + FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 + FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 + FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 ++ FieldOptions_UnverifiedLazy_field_number protoreflect.FieldNumber = 15 + FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 ++ FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 ++ FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 ++ FieldOptions_Target_field_number protoreflect.FieldNumber = 18 + FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + ) + +@@ -564,6 +582,18 @@ const ( + FieldOptions_JSType_enum_name = "JSType" + ) + ++// Full and short names for google.protobuf.FieldOptions.OptionRetention. ++const ( ++ FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" ++ FieldOptions_OptionRetention_enum_name = "OptionRetention" ++) ++ ++// Full and short names for google.protobuf.FieldOptions.OptionTargetType. ++const ( ++ FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" ++ FieldOptions_OptionTargetType_enum_name = "OptionTargetType" ++) ++ + // Names for google.protobuf.OneofOptions. + const ( + OneofOptions_message_name protoreflect.Name = "OneofOptions" +@@ -590,20 +620,23 @@ const ( + + // Field names for google.protobuf.EnumOptions. 
+ const ( +- EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" +- EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" +- EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" ++ EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" ++ EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" ++ EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" ++ EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + +- EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" +- EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" +- EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ++ EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" ++ EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" ++ EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" ++ EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" + ) + + // Field numbers for google.protobuf.EnumOptions. + const ( +- EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 +- EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 +- EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ++ EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 ++ EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 ++ EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 ++ EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + ) + + // Names for google.protobuf.EnumValueOptions. +@@ -813,11 +846,13 @@ const ( + GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" + GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" + GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" ++ GeneratedCodeInfo_Annotation_Semantic_field_name protoreflect.Name = "semantic" + + GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" + GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" + GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" + GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" ++ GeneratedCodeInfo_Annotation_Semantic_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.semantic" + ) + + // Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. 
+@@ -826,4 +861,11 @@ const ( + GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 + GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 + GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 ++ GeneratedCodeInfo_Annotation_Semantic_field_number protoreflect.FieldNumber = 5 ++) ++ ++// Full and short names for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. ++const ( ++ GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" ++ GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" + ) +diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go +index abee5f3..a371f98 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go +@@ -12,8 +12,8 @@ import ( + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" +- pref "google.golang.org/protobuf/reflect/protoreflect" +- piface "google.golang.org/protobuf/runtime/protoiface" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/runtime/protoiface" + ) + + // Export is a zero-length named type that exists only to export a set of +@@ -32,11 +32,11 @@ type enum = interface{} + + // EnumOf returns the protoreflect.Enum interface over e. + // It returns nil if e is nil. +-func (Export) EnumOf(e enum) pref.Enum { ++func (Export) EnumOf(e enum) protoreflect.Enum { + switch e := e.(type) { + case nil: + return nil +- case pref.Enum: ++ case protoreflect.Enum: + return e + default: + return legacyWrapEnum(reflect.ValueOf(e)) +@@ -45,11 +45,11 @@ func (Export) EnumOf(e enum) pref.Enum { + + // EnumDescriptorOf returns the protoreflect.EnumDescriptor for e. + // It returns nil if e is nil. +-func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { ++func (Export) EnumDescriptorOf(e enum) protoreflect.EnumDescriptor { + switch e := e.(type) { + case nil: + return nil +- case pref.Enum: ++ case protoreflect.Enum: + return e.Descriptor() + default: + return LegacyLoadEnumDesc(reflect.TypeOf(e)) +@@ -58,11 +58,11 @@ func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { + + // EnumTypeOf returns the protoreflect.EnumType for e. + // It returns nil if e is nil. +-func (Export) EnumTypeOf(e enum) pref.EnumType { ++func (Export) EnumTypeOf(e enum) protoreflect.EnumType { + switch e := e.(type) { + case nil: + return nil +- case pref.Enum: ++ case protoreflect.Enum: + return e.Type() + default: + return legacyLoadEnumType(reflect.TypeOf(e)) +@@ -71,7 +71,7 @@ func (Export) EnumTypeOf(e enum) pref.EnumType { + + // EnumStringOf returns the enum value as a string, either as the name if + // the number is resolvable, or the number formatted as a string. +-func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { ++func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNumber) string { + ev := ed.Values().ByNumber(n) + if ev != nil { + return string(ev.Name()) +@@ -84,7 +84,7 @@ func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { + type message = interface{} + + // legacyMessageWrapper wraps a v2 message as a v1 message. 
+-type legacyMessageWrapper struct{ m pref.ProtoMessage } ++type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } + + func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) } + func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) } +@@ -92,30 +92,30 @@ func (m legacyMessageWrapper) ProtoMessage() {} + + // ProtoMessageV1Of converts either a v1 or v2 message to a v1 message. + // It returns nil if m is nil. +-func (Export) ProtoMessageV1Of(m message) piface.MessageV1 { ++func (Export) ProtoMessageV1Of(m message) protoiface.MessageV1 { + switch mv := m.(type) { + case nil: + return nil +- case piface.MessageV1: ++ case protoiface.MessageV1: + return mv + case unwrapper: + return Export{}.ProtoMessageV1Of(mv.protoUnwrap()) +- case pref.ProtoMessage: ++ case protoreflect.ProtoMessage: + return legacyMessageWrapper{mv} + default: + panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) + } + } + +-func (Export) protoMessageV2Of(m message) pref.ProtoMessage { ++func (Export) protoMessageV2Of(m message) protoreflect.ProtoMessage { + switch mv := m.(type) { + case nil: + return nil +- case pref.ProtoMessage: ++ case protoreflect.ProtoMessage: + return mv + case legacyMessageWrapper: + return mv.m +- case piface.MessageV1: ++ case protoiface.MessageV1: + return nil + default: + panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) +@@ -124,7 +124,7 @@ func (Export) protoMessageV2Of(m message) pref.ProtoMessage { + + // ProtoMessageV2Of converts either a v1 or v2 message to a v2 message. + // It returns nil if m is nil. +-func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { ++func (Export) ProtoMessageV2Of(m message) protoreflect.ProtoMessage { + if m == nil { + return nil + } +@@ -136,7 +136,7 @@ func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { + + // MessageOf returns the protoreflect.Message interface over m. + // It returns nil if m is nil. +-func (Export) MessageOf(m message) pref.Message { ++func (Export) MessageOf(m message) protoreflect.Message { + if m == nil { + return nil + } +@@ -148,7 +148,7 @@ func (Export) MessageOf(m message) pref.Message { + + // MessageDescriptorOf returns the protoreflect.MessageDescriptor for m. + // It returns nil if m is nil. +-func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { ++func (Export) MessageDescriptorOf(m message) protoreflect.MessageDescriptor { + if m == nil { + return nil + } +@@ -160,7 +160,7 @@ func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { + + // MessageTypeOf returns the protoreflect.MessageType for m. + // It returns nil if m is nil. +-func (Export) MessageTypeOf(m message) pref.MessageType { ++func (Export) MessageTypeOf(m message) protoreflect.MessageType { + if m == nil { + return nil + } +@@ -172,6 +172,6 @@ func (Export) MessageTypeOf(m message) pref.MessageType { + + // MessageStringOf returns the message value as a string, + // which is the message serialized in the protobuf text format. 
+-func (Export) MessageStringOf(m pref.ProtoMessage) string { ++func (Export) MessageStringOf(m protoreflect.ProtoMessage) string { + return prototext.MarshalOptions{Multiline: false}.Format(m) + } +diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +index b82341e..bff041e 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +@@ -8,18 +8,18 @@ import ( + "sync" + + "google.golang.org/protobuf/internal/errors" +- pref "google.golang.org/protobuf/reflect/protoreflect" +- piface "google.golang.org/protobuf/runtime/protoiface" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/runtime/protoiface" + ) + +-func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) { ++func (mi *MessageInfo) checkInitialized(in protoiface.CheckInitializedInput) (protoiface.CheckInitializedOutput, error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } +- return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) ++ return protoiface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) + } + + func (mi *MessageInfo) checkInitializedPointer(p pointer) error { +@@ -90,7 +90,7 @@ var ( + // needsInitCheck reports whether a message needs to be checked for partial initialization. + // + // It returns true if the message transitively includes any required or extension fields. +-func needsInitCheck(md pref.MessageDescriptor) bool { ++func needsInitCheck(md protoreflect.MessageDescriptor) bool { + if v, ok := needsInitCheckMap.Load(md); ok { + if has, ok := v.(bool); ok { + return has +@@ -101,7 +101,7 @@ func needsInitCheck(md pref.MessageDescriptor) bool { + return needsInitCheckLocked(md) + } + +-func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) { ++func needsInitCheckLocked(md protoreflect.MessageDescriptor) (has bool) { + if v, ok := needsInitCheckMap.Load(md); ok { + // If has is true, we've previously determined that this message + // needs init checks. +diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +index 08d3517..e74cefd 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +@@ -10,7 +10,7 @@ import ( + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + type extensionFieldInfo struct { +@@ -23,7 +23,7 @@ type extensionFieldInfo struct { + + var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo + +-func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { ++func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { + if xi, ok := xt.(*ExtensionInfo); ok { + xi.lazyInit() + return xi.info +@@ -32,7 +32,7 @@ func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { + } + + // legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. 
+-func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { ++func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { + if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { + return xi.(*extensionFieldInfo) + } +@@ -43,7 +43,7 @@ func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { + return e + } + +-func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { ++func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { + var wiretag uint64 + if !xd.IsPacked() { + wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()]) +@@ -59,10 +59,10 @@ func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { + // This is true for composite types, where we pass in a message, list, or map to fill in, + // and for enums, where we pass in a prototype value to specify the concrete enum type. + switch xd.Kind() { +- case pref.MessageKind, pref.GroupKind, pref.EnumKind: ++ case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.EnumKind: + e.unmarshalNeedsValue = true + default: +- if xd.Cardinality() == pref.Repeated { ++ if xd.Cardinality() == protoreflect.Repeated { + e.unmarshalNeedsValue = true + } + } +@@ -73,21 +73,21 @@ type lazyExtensionValue struct { + atomicOnce uint32 // atomically set if value is valid + mu sync.Mutex + xi *extensionFieldInfo +- value pref.Value ++ value protoreflect.Value + b []byte +- fn func() pref.Value ++ fn func() protoreflect.Value + } + + type ExtensionField struct { +- typ pref.ExtensionType ++ typ protoreflect.ExtensionType + + // value is either the value of GetValue, + // or a *lazyExtensionValue that then returns the value of GetValue. +- value pref.Value ++ value protoreflect.Value + lazy *lazyExtensionValue + } + +-func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { ++func (f *ExtensionField) appendLazyBytes(xt protoreflect.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { + if f.lazy == nil { + f.lazy = &lazyExtensionValue{xi: xi} + } +@@ -97,7 +97,7 @@ func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFie + f.lazy.b = append(f.lazy.b, b...) + } + +-func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool { ++func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { + if f.typ == nil { + return true + } +@@ -154,7 +154,7 @@ func (f *ExtensionField) lazyInit() { + + // Set sets the type and value of the extension field. + // This must not be called concurrently. +-func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { ++func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) { + f.typ = t + f.value = v + f.lazy = nil +@@ -162,14 +162,14 @@ func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { + + // SetLazy sets the type and a value that is to be lazily evaluated upon first use. + // This must not be called concurrently. +-func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) { ++func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { + f.typ = t + f.lazy = &lazyExtensionValue{fn: fn} + } + + // Value returns the value of the extension field. + // This may be called concurrently. 
+-func (f *ExtensionField) Value() pref.Value { ++func (f *ExtensionField) Value() protoreflect.Value { + if f.lazy != nil { + if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { + f.lazyInit() +@@ -181,7 +181,7 @@ func (f *ExtensionField) Value() pref.Value { + + // Type returns the type of the extension field. + // This may be called concurrently. +-func (f ExtensionField) Type() pref.ExtensionType { ++func (f ExtensionField) Type() protoreflect.ExtensionType { + return f.typ + } + +@@ -193,7 +193,7 @@ func (f ExtensionField) IsSet() bool { + + // IsLazy reports whether a field is lazily encoded. + // It is exported for testing. +-func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { ++func IsLazy(m protoreflect.Message, fd protoreflect.FieldDescriptor) bool { + var mi *MessageInfo + var p pointer + switch m := m.(type) { +@@ -206,7 +206,7 @@ func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { + default: + return false + } +- xd, ok := fd.(pref.ExtensionTypeDescriptor) ++ xd, ok := fd.(protoreflect.ExtensionTypeDescriptor) + if !ok { + return false + } +diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +index cb4b482..3fadd24 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +@@ -12,9 +12,9 @@ import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" +- pref "google.golang.org/protobuf/reflect/protoreflect" +- preg "google.golang.org/protobuf/reflect/protoregistry" +- piface "google.golang.org/protobuf/runtime/protoiface" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoregistry" ++ "google.golang.org/protobuf/runtime/protoiface" + ) + + type errInvalidUTF8 struct{} +@@ -30,7 +30,7 @@ func (errInvalidUTF8) Unwrap() error { return errors.Error } + // to the appropriate field-specific function as necessary. + // + // The unmarshal function is set on each field individually as usual. 
+-func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) { ++func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si structInfo) { + fs := si.oneofsByName[od.Name()] + ft := fs.Type + oneofFields := make(map[reflect.Type]*coderFieldInfo) +@@ -118,13 +118,13 @@ func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structIn + } + } + +-func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { ++func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { + var once sync.Once +- var messageType pref.MessageType ++ var messageType protoreflect.MessageType + lazyInit := func() { + once.Do(func() { + messageName := fd.Message().FullName() +- messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) ++ messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) + }) + } + +@@ -190,7 +190,7 @@ func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { + } + } + +-func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { ++func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeMessageInfo, +@@ -280,7 +280,7 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh + if n < 0 { + return out, errDecode + } +- o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ ++ o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ + Buf: v, + Message: m.ProtoReflect(), + }) +@@ -288,27 +288,27 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh + return out, err + } + out.n = n +- out.initialized = o.Flags&piface.UnmarshalInitialized != 0 ++ out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 + return out, nil + } + +-func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int { ++func sizeMessageValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + m := v.Message().Interface() + return sizeMessage(m, tagsize, opts) + } + +-func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { ++func appendMessageValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + m := v.Message().Interface() + return appendMessage(b, m, wiretag, opts) + } + +-func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { ++func consumeMessageValue(b []byte, v protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) { + m := v.Message().Interface() + out, err := consumeMessage(b, m, wtyp, opts) + return v, out, err + } + +-func isInitMessageValue(v pref.Value) error { ++func isInitMessageValue(v protoreflect.Value) error { + m := v.Message().Interface() + return proto.CheckInitialized(m) + } +@@ -321,17 +321,17 @@ var coderMessageValue = valueCoderFuncs{ + merge: mergeMessageValue, + } + +-func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int { ++func sizeGroupValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + m := v.Message().Interface() + return sizeGroup(m, tagsize, opts) + } + +-func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { ++func appendGroupValue(b []byte, v protoreflect.Value, 
wiretag uint64, opts marshalOptions) ([]byte, error) { + m := v.Message().Interface() + return appendGroup(b, m, wiretag, opts) + } + +-func consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { ++func consumeGroupValue(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) { + m := v.Message().Interface() + out, err := consumeGroup(b, m, num, wtyp, opts) + return v, out, err +@@ -345,7 +345,7 @@ var coderGroupValue = valueCoderFuncs{ + merge: mergeMessageValue, + } + +-func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { ++func makeGroupFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + num := fd.Number() + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ +@@ -424,7 +424,7 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir + if n < 0 { + return out, errDecode + } +- o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ ++ o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: m.ProtoReflect(), + }) +@@ -432,11 +432,11 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir + return out, err + } + out.n = n +- out.initialized = o.Flags&piface.UnmarshalInitialized != 0 ++ out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 + return out, nil + } + +-func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { ++func makeMessageSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeMessageSliceInfo, +@@ -555,7 +555,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir + return out, errDecode + } + mp := reflect.New(goType.Elem()) +- o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ ++ o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ + Buf: v, + Message: asMessage(mp).ProtoReflect(), + }) +@@ -564,7 +564,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir + } + p.AppendPointerSlice(pointerOfValue(mp)) + out.n = n +- out.initialized = o.Flags&piface.UnmarshalInitialized != 0 ++ out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 + return out, nil + } + +@@ -581,7 +581,7 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error { + + // Slices of messages + +-func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { ++func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + list := listv.List() + n := 0 + for i, llen := 0, list.Len(); i < llen; i++ { +@@ -591,7 +591,7 @@ func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) i + return n + } + +-func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { ++func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + mopts := opts.Options() + for i, llen := 0, list.Len(); i < llen; i++ { +@@ -608,30 +608,30 @@ func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts ma + return b, nil + } + +-func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, 
wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { ++func consumeMessageSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { +- return pref.Value{}, out, errUnknown ++ return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { +- return pref.Value{}, out, errDecode ++ return protoreflect.Value{}, out, errDecode + } + m := list.NewElement() +- o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ ++ o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ + Buf: v, + Message: m.Message(), + }) + if err != nil { +- return pref.Value{}, out, err ++ return protoreflect.Value{}, out, err + } + list.Append(m) + out.n = n +- out.initialized = o.Flags&piface.UnmarshalInitialized != 0 ++ out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 + return listv, out, nil + } + +-func isInitMessageSliceValue(listv pref.Value) error { ++func isInitMessageSliceValue(listv protoreflect.Value) error { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() +@@ -650,7 +650,7 @@ var coderMessageSliceValue = valueCoderFuncs{ + merge: mergeMessageListValue, + } + +-func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { ++func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + list := listv.List() + n := 0 + for i, llen := 0, list.Len(); i < llen; i++ { +@@ -660,7 +660,7 @@ func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int + return n + } + +-func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { ++func appendGroupSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + mopts := opts.Options() + for i, llen := 0, list.Len(); i < llen; i++ { +@@ -676,26 +676,26 @@ func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts mars + return b, nil + } + +-func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { ++func consumeGroupSliceValue(b []byte, listv protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.StartGroupType { +- return pref.Value{}, out, errUnknown ++ return protoreflect.Value{}, out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { +- return pref.Value{}, out, errDecode ++ return protoreflect.Value{}, out, errDecode + } + m := list.NewElement() +- o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ ++ o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: m.Message(), + }) + if err != nil { +- return pref.Value{}, out, err ++ return protoreflect.Value{}, out, err + } + list.Append(m) + out.n = n +- out.initialized = o.Flags&piface.UnmarshalInitialized != 0 ++ out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 + return listv, out, nil + } + +@@ -707,7 +707,7 @@ var coderGroupSliceValue = valueCoderFuncs{ + merge: mergeMessageListValue, + } + +-func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) 
pointerCoderFuncs { ++func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + num := fd.Number() + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ +@@ -772,7 +772,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire + return out, errDecode + } + mp := reflect.New(goType.Elem()) +- o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ ++ o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: asMessage(mp).ProtoReflect(), + }) +@@ -781,7 +781,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire + } + p.AppendPointerSlice(pointerOfValue(mp)) + out.n = n +- out.initialized = o.Flags&piface.UnmarshalInitialized != 0 ++ out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 + return out, nil + } + +@@ -822,8 +822,8 @@ func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFie + return out, nil + } + +-func asMessage(v reflect.Value) pref.ProtoMessage { +- if m, ok := v.Interface().(pref.ProtoMessage); ok { ++func asMessage(v reflect.Value) protoreflect.ProtoMessage { ++ if m, ok := v.Interface().(protoreflect.ProtoMessage); ok { + return m + } + return legacyWrapMessage(v).Interface() +diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +index c1245fe..111b9d1 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +@@ -10,7 +10,7 @@ import ( + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + type mapInfo struct { +@@ -19,12 +19,12 @@ type mapInfo struct { + valWiretag uint64 + keyFuncs valueCoderFuncs + valFuncs valueCoderFuncs +- keyZero pref.Value +- keyKind pref.Kind ++ keyZero protoreflect.Value ++ keyKind protoreflect.Kind + conv *mapConverter + } + +-func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { ++func encoderFuncsForMap(fd protoreflect.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { + // TODO: Consider generating specialized map coders. 
+ keyField := fd.MapKey() + valField := fd.MapValue() +@@ -44,7 +44,7 @@ func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage + keyKind: keyField.Kind(), + conv: conv, + } +- if valField.Kind() == pref.MessageKind { ++ if valField.Kind() == protoreflect.MessageKind { + valueMessage = getMessageInfo(ft.Elem()) + } + +@@ -68,9 +68,9 @@ func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage + }, + } + switch valField.Kind() { +- case pref.MessageKind: ++ case protoreflect.MessageKind: + funcs.merge = mergeMapOfMessage +- case pref.BytesKind: ++ case protoreflect.BytesKind: + funcs.merge = mergeMapOfBytes + default: + funcs.merge = mergeMap +@@ -135,7 +135,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo + err := errUnknown + switch num { + case genid.MapEntry_Key_field_number: +- var v pref.Value ++ var v protoreflect.Value + var o unmarshalOutput + v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) + if err != nil { +@@ -144,7 +144,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo + key = v + n = o.n + case genid.MapEntry_Value_field_number: +- var v pref.Value ++ var v protoreflect.Value + var o unmarshalOutput + v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) + if err != nil { +@@ -192,7 +192,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi + err := errUnknown + switch num { + case 1: +- var v pref.Value ++ var v protoreflect.Value + var o unmarshalOutput + v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) + if err != nil { +diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +index cd40527..6b2fdbb 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +@@ -12,15 +12,15 @@ import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/order" +- pref "google.golang.org/protobuf/reflect/protoreflect" +- piface "google.golang.org/protobuf/runtime/protoiface" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/runtime/protoiface" + ) + + // coderMessageInfo contains per-message information used by the fast-path functions. + // This is a different type from MessageInfo to keep MessageInfo as general-purpose as + // possible. 
+ type coderMessageInfo struct { +- methods piface.Methods ++ methods protoiface.Methods + + orderedCoderFields []*coderFieldInfo + denseCoderFields []*coderFieldInfo +@@ -38,13 +38,13 @@ type coderFieldInfo struct { + funcs pointerCoderFuncs // fast-path per-field functions + mi *MessageInfo // field's message + ft reflect.Type +- validation validationInfo // information used by message validation +- num pref.FieldNumber // field number +- offset offset // struct field offset +- wiretag uint64 // field tag (number + wire type) +- tagsize int // size of the varint-encoded tag +- isPointer bool // true if IsNil may be called on the struct field +- isRequired bool // true if field is required ++ validation validationInfo // information used by message validation ++ num protoreflect.FieldNumber // field number ++ offset offset // struct field offset ++ wiretag uint64 // field tag (number + wire type) ++ tagsize int // size of the varint-encoded tag ++ isPointer bool // true if IsNil may be called on the struct field ++ isRequired bool // true if field is required + } + + func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { +@@ -125,8 +125,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { + funcs: funcs, + mi: childMessage, + validation: newFieldValidationInfo(mi, si, fd, ft), +- isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(), +- isRequired: fd.Cardinality() == pref.Required, ++ isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(), ++ isRequired: fd.Cardinality() == protoreflect.Required, + } + mi.orderedCoderFields = append(mi.orderedCoderFields, cf) + mi.coderFields[cf.num] = cf +@@ -149,7 +149,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { + return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num + }) + +- var maxDense pref.FieldNumber ++ var maxDense protoreflect.FieldNumber + for _, cf := range mi.orderedCoderFields { + if cf.num >= 16 && cf.num >= 2*maxDense { + break +@@ -175,12 +175,12 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { + + mi.needsInitCheck = needsInitCheck(mi.Desc) + if mi.methods.Marshal == nil && mi.methods.Size == nil { +- mi.methods.Flags |= piface.SupportMarshalDeterministic ++ mi.methods.Flags |= protoiface.SupportMarshalDeterministic + mi.methods.Marshal = mi.marshal + mi.methods.Size = mi.size + } + if mi.methods.Unmarshal == nil { +- mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown ++ mi.methods.Flags |= protoiface.SupportUnmarshalDiscardUnknown + mi.methods.Unmarshal = mi.unmarshal + } + if mi.methods.CheckInitialized == nil { +diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +index e899712..576dcf3 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +@@ -10,7 +10,7 @@ import ( + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/strs" +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + // pointerCoderFuncs is a set of pointer encoding functions. +@@ -25,83 +25,83 @@ type pointerCoderFuncs struct { + + // valueCoderFuncs is a set of protoreflect.Value encoding functions. 
+ type valueCoderFuncs struct { +- size func(v pref.Value, tagsize int, opts marshalOptions) int +- marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) +- unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) +- isInit func(v pref.Value) error +- merge func(dst, src pref.Value, opts mergeOptions) pref.Value ++ size func(v protoreflect.Value, tagsize int, opts marshalOptions) int ++ marshal func(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) ++ unmarshal func(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) ++ isInit func(v protoreflect.Value) error ++ merge func(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value + } + + // fieldCoder returns pointer functions for a field, used for operating on + // struct fields. +-func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { ++func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + switch { + case fd.IsMap(): + return encoderFuncsForMap(fd, ft) +- case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): ++ case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked(): + // Repeated fields (not packed). + if ft.Kind() != reflect.Slice { + break + } + ft := ft.Elem() + switch fd.Kind() { +- case pref.BoolKind: ++ case protoreflect.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolSlice + } +- case pref.EnumKind: ++ case protoreflect.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumSlice + } +- case pref.Int32Kind: ++ case protoreflect.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32Slice + } +- case pref.Sint32Kind: ++ case protoreflect.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32Slice + } +- case pref.Uint32Kind: ++ case protoreflect.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32Slice + } +- case pref.Int64Kind: ++ case protoreflect.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64Slice + } +- case pref.Sint64Kind: ++ case protoreflect.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64Slice + } +- case pref.Uint64Kind: ++ case protoreflect.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64Slice + } +- case pref.Sfixed32Kind: ++ case protoreflect.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32Slice + } +- case pref.Fixed32Kind: ++ case protoreflect.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32Slice + } +- case pref.FloatKind: ++ case protoreflect.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatSlice + } +- case pref.Sfixed64Kind: ++ case protoreflect.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64Slice + } +- case pref.Fixed64Kind: ++ case protoreflect.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64Slice + } +- case pref.DoubleKind: ++ case protoreflect.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoubleSlice + } +- case pref.StringKind: ++ case protoreflect.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringSliceValidateUTF8 + } +@@ -114,19 +114,19 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, 
pointer + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesSlice + } +- case pref.BytesKind: ++ case protoreflect.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringSlice + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesSlice + } +- case pref.MessageKind: ++ case protoreflect.MessageKind: + return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft) +- case pref.GroupKind: ++ case protoreflect.GroupKind: + return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft) + } +- case fd.Cardinality() == pref.Repeated && fd.IsPacked(): ++ case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked(): + // Packed repeated fields. + // + // Only repeated fields of primitive numeric types +@@ -136,128 +136,128 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer + } + ft := ft.Elem() + switch fd.Kind() { +- case pref.BoolKind: ++ case protoreflect.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolPackedSlice + } +- case pref.EnumKind: ++ case protoreflect.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumPackedSlice + } +- case pref.Int32Kind: ++ case protoreflect.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32PackedSlice + } +- case pref.Sint32Kind: ++ case protoreflect.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32PackedSlice + } +- case pref.Uint32Kind: ++ case protoreflect.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32PackedSlice + } +- case pref.Int64Kind: ++ case protoreflect.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64PackedSlice + } +- case pref.Sint64Kind: ++ case protoreflect.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64PackedSlice + } +- case pref.Uint64Kind: ++ case protoreflect.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64PackedSlice + } +- case pref.Sfixed32Kind: ++ case protoreflect.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32PackedSlice + } +- case pref.Fixed32Kind: ++ case protoreflect.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32PackedSlice + } +- case pref.FloatKind: ++ case protoreflect.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatPackedSlice + } +- case pref.Sfixed64Kind: ++ case protoreflect.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64PackedSlice + } +- case pref.Fixed64Kind: ++ case protoreflect.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64PackedSlice + } +- case pref.DoubleKind: ++ case protoreflect.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoublePackedSlice + } + } +- case fd.Kind() == pref.MessageKind: ++ case fd.Kind() == protoreflect.MessageKind: + return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) +- case fd.Kind() == pref.GroupKind: ++ case fd.Kind() == protoreflect.GroupKind: + return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) +- case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil: ++ case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: + // Populated oneof fields always encode even if set to the zero value, + // which normally are not encoded in proto3. 
+ switch fd.Kind() { +- case pref.BoolKind: ++ case protoreflect.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolNoZero + } +- case pref.EnumKind: ++ case protoreflect.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumNoZero + } +- case pref.Int32Kind: ++ case protoreflect.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32NoZero + } +- case pref.Sint32Kind: ++ case protoreflect.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32NoZero + } +- case pref.Uint32Kind: ++ case protoreflect.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32NoZero + } +- case pref.Int64Kind: ++ case protoreflect.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64NoZero + } +- case pref.Sint64Kind: ++ case protoreflect.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64NoZero + } +- case pref.Uint64Kind: ++ case protoreflect.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64NoZero + } +- case pref.Sfixed32Kind: ++ case protoreflect.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32NoZero + } +- case pref.Fixed32Kind: ++ case protoreflect.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32NoZero + } +- case pref.FloatKind: ++ case protoreflect.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatNoZero + } +- case pref.Sfixed64Kind: ++ case protoreflect.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64NoZero + } +- case pref.Fixed64Kind: ++ case protoreflect.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64NoZero + } +- case pref.DoubleKind: ++ case protoreflect.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoubleNoZero + } +- case pref.StringKind: ++ case protoreflect.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringNoZeroValidateUTF8 + } +@@ -270,7 +270,7 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesNoZero + } +- case pref.BytesKind: ++ case protoreflect.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringNoZero + } +@@ -281,133 +281,133 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer + case ft.Kind() == reflect.Ptr: + ft := ft.Elem() + switch fd.Kind() { +- case pref.BoolKind: ++ case protoreflect.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolPtr + } +- case pref.EnumKind: ++ case protoreflect.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumPtr + } +- case pref.Int32Kind: ++ case protoreflect.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32Ptr + } +- case pref.Sint32Kind: ++ case protoreflect.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32Ptr + } +- case pref.Uint32Kind: ++ case protoreflect.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32Ptr + } +- case pref.Int64Kind: ++ case protoreflect.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64Ptr + } +- case pref.Sint64Kind: ++ case protoreflect.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64Ptr + } +- case pref.Uint64Kind: ++ case protoreflect.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64Ptr + } +- case pref.Sfixed32Kind: ++ case protoreflect.Sfixed32Kind: + if ft.Kind() 
== reflect.Int32 { + return nil, coderSfixed32Ptr + } +- case pref.Fixed32Kind: ++ case protoreflect.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32Ptr + } +- case pref.FloatKind: ++ case protoreflect.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatPtr + } +- case pref.Sfixed64Kind: ++ case protoreflect.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64Ptr + } +- case pref.Fixed64Kind: ++ case protoreflect.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64Ptr + } +- case pref.DoubleKind: ++ case protoreflect.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoublePtr + } +- case pref.StringKind: ++ case protoreflect.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringPtrValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringPtr + } +- case pref.BytesKind: ++ case protoreflect.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringPtr + } + } + default: + switch fd.Kind() { +- case pref.BoolKind: ++ case protoreflect.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBool + } +- case pref.EnumKind: ++ case protoreflect.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnum + } +- case pref.Int32Kind: ++ case protoreflect.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32 + } +- case pref.Sint32Kind: ++ case protoreflect.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32 + } +- case pref.Uint32Kind: ++ case protoreflect.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32 + } +- case pref.Int64Kind: ++ case protoreflect.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64 + } +- case pref.Sint64Kind: ++ case protoreflect.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64 + } +- case pref.Uint64Kind: ++ case protoreflect.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64 + } +- case pref.Sfixed32Kind: ++ case protoreflect.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32 + } +- case pref.Fixed32Kind: ++ case protoreflect.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32 + } +- case pref.FloatKind: ++ case protoreflect.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloat + } +- case pref.Sfixed64Kind: ++ case protoreflect.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64 + } +- case pref.Fixed64Kind: ++ case protoreflect.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64 + } +- case pref.DoubleKind: ++ case protoreflect.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDouble + } +- case pref.StringKind: ++ case protoreflect.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringValidateUTF8 + } +@@ -420,7 +420,7 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytes + } +- case pref.BytesKind: ++ case protoreflect.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderString + } +@@ -434,122 +434,122 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer + + // encoderFuncsForValue returns value functions for a field, used for + // extension values and map encoding. 
+-func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs { ++func encoderFuncsForValue(fd protoreflect.FieldDescriptor) valueCoderFuncs { + switch { +- case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): ++ case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked(): + switch fd.Kind() { +- case pref.BoolKind: ++ case protoreflect.BoolKind: + return coderBoolSliceValue +- case pref.EnumKind: ++ case protoreflect.EnumKind: + return coderEnumSliceValue +- case pref.Int32Kind: ++ case protoreflect.Int32Kind: + return coderInt32SliceValue +- case pref.Sint32Kind: ++ case protoreflect.Sint32Kind: + return coderSint32SliceValue +- case pref.Uint32Kind: ++ case protoreflect.Uint32Kind: + return coderUint32SliceValue +- case pref.Int64Kind: ++ case protoreflect.Int64Kind: + return coderInt64SliceValue +- case pref.Sint64Kind: ++ case protoreflect.Sint64Kind: + return coderSint64SliceValue +- case pref.Uint64Kind: ++ case protoreflect.Uint64Kind: + return coderUint64SliceValue +- case pref.Sfixed32Kind: ++ case protoreflect.Sfixed32Kind: + return coderSfixed32SliceValue +- case pref.Fixed32Kind: ++ case protoreflect.Fixed32Kind: + return coderFixed32SliceValue +- case pref.FloatKind: ++ case protoreflect.FloatKind: + return coderFloatSliceValue +- case pref.Sfixed64Kind: ++ case protoreflect.Sfixed64Kind: + return coderSfixed64SliceValue +- case pref.Fixed64Kind: ++ case protoreflect.Fixed64Kind: + return coderFixed64SliceValue +- case pref.DoubleKind: ++ case protoreflect.DoubleKind: + return coderDoubleSliceValue +- case pref.StringKind: ++ case protoreflect.StringKind: + // We don't have a UTF-8 validating coder for repeated string fields. + // Value coders are used for extensions and maps. + // Extensions are never proto3, and maps never contain lists. 
+ return coderStringSliceValue +- case pref.BytesKind: ++ case protoreflect.BytesKind: + return coderBytesSliceValue +- case pref.MessageKind: ++ case protoreflect.MessageKind: + return coderMessageSliceValue +- case pref.GroupKind: ++ case protoreflect.GroupKind: + return coderGroupSliceValue + } +- case fd.Cardinality() == pref.Repeated && fd.IsPacked(): ++ case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked(): + switch fd.Kind() { +- case pref.BoolKind: ++ case protoreflect.BoolKind: + return coderBoolPackedSliceValue +- case pref.EnumKind: ++ case protoreflect.EnumKind: + return coderEnumPackedSliceValue +- case pref.Int32Kind: ++ case protoreflect.Int32Kind: + return coderInt32PackedSliceValue +- case pref.Sint32Kind: ++ case protoreflect.Sint32Kind: + return coderSint32PackedSliceValue +- case pref.Uint32Kind: ++ case protoreflect.Uint32Kind: + return coderUint32PackedSliceValue +- case pref.Int64Kind: ++ case protoreflect.Int64Kind: + return coderInt64PackedSliceValue +- case pref.Sint64Kind: ++ case protoreflect.Sint64Kind: + return coderSint64PackedSliceValue +- case pref.Uint64Kind: ++ case protoreflect.Uint64Kind: + return coderUint64PackedSliceValue +- case pref.Sfixed32Kind: ++ case protoreflect.Sfixed32Kind: + return coderSfixed32PackedSliceValue +- case pref.Fixed32Kind: ++ case protoreflect.Fixed32Kind: + return coderFixed32PackedSliceValue +- case pref.FloatKind: ++ case protoreflect.FloatKind: + return coderFloatPackedSliceValue +- case pref.Sfixed64Kind: ++ case protoreflect.Sfixed64Kind: + return coderSfixed64PackedSliceValue +- case pref.Fixed64Kind: ++ case protoreflect.Fixed64Kind: + return coderFixed64PackedSliceValue +- case pref.DoubleKind: ++ case protoreflect.DoubleKind: + return coderDoublePackedSliceValue + } + default: + switch fd.Kind() { + default: +- case pref.BoolKind: ++ case protoreflect.BoolKind: + return coderBoolValue +- case pref.EnumKind: ++ case protoreflect.EnumKind: + return coderEnumValue +- case pref.Int32Kind: ++ case protoreflect.Int32Kind: + return coderInt32Value +- case pref.Sint32Kind: ++ case protoreflect.Sint32Kind: + return coderSint32Value +- case pref.Uint32Kind: ++ case protoreflect.Uint32Kind: + return coderUint32Value +- case pref.Int64Kind: ++ case protoreflect.Int64Kind: + return coderInt64Value +- case pref.Sint64Kind: ++ case protoreflect.Sint64Kind: + return coderSint64Value +- case pref.Uint64Kind: ++ case protoreflect.Uint64Kind: + return coderUint64Value +- case pref.Sfixed32Kind: ++ case protoreflect.Sfixed32Kind: + return coderSfixed32Value +- case pref.Fixed32Kind: ++ case protoreflect.Fixed32Kind: + return coderFixed32Value +- case pref.FloatKind: ++ case protoreflect.FloatKind: + return coderFloatValue +- case pref.Sfixed64Kind: ++ case protoreflect.Sfixed64Kind: + return coderSfixed64Value +- case pref.Fixed64Kind: ++ case protoreflect.Fixed64Kind: + return coderFixed64Value +- case pref.DoubleKind: ++ case protoreflect.DoubleKind: + return coderDoubleValue +- case pref.StringKind: ++ case protoreflect.StringKind: + if strs.EnforceUTF8(fd) { + return coderStringValueValidateUTF8 + } + return coderStringValue +- case pref.BytesKind: ++ case protoreflect.BytesKind: + return coderBytesValue +- case pref.MessageKind: ++ case protoreflect.MessageKind: + return coderMessageValue +- case pref.GroupKind: ++ case protoreflect.GroupKind: + return coderGroupValue + } + } +diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go +index 
acd61bb..185ef2e 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go +@@ -8,7 +8,7 @@ import ( + "fmt" + "reflect" + +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + // unwrapper unwraps the value to the underlying value. +@@ -20,13 +20,13 @@ type unwrapper interface { + // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. + type Converter interface { + // PBValueOf converts a reflect.Value to a protoreflect.Value. +- PBValueOf(reflect.Value) pref.Value ++ PBValueOf(reflect.Value) protoreflect.Value + + // GoValueOf converts a protoreflect.Value to a reflect.Value. +- GoValueOf(pref.Value) reflect.Value ++ GoValueOf(protoreflect.Value) reflect.Value + + // IsValidPB returns whether a protoreflect.Value is compatible with this type. +- IsValidPB(pref.Value) bool ++ IsValidPB(protoreflect.Value) bool + + // IsValidGo returns whether a reflect.Value is compatible with this type. + IsValidGo(reflect.Value) bool +@@ -34,12 +34,12 @@ type Converter interface { + // New returns a new field value. + // For scalars, it returns the default value of the field. + // For composite types, it returns a new mutable value. +- New() pref.Value ++ New() protoreflect.Value + + // Zero returns a new field value. + // For scalars, it returns the default value of the field. + // For composite types, it returns an immutable, empty value. +- Zero() pref.Value ++ Zero() protoreflect.Value + } + + // NewConverter matches a Go type with a protobuf field and returns a Converter +@@ -50,7 +50,7 @@ type Converter interface { + // This matcher deliberately supports a wider range of Go types than what + // protoc-gen-go historically generated to be able to automatically wrap some + // v1 messages generated by other forks of protoc-gen-go. 
+-func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { ++func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { + switch { + case fd.IsList(): + return newListConverter(t, fd) +@@ -59,7 +59,6 @@ func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + default: + return newSingularConverter(t, fd) + } +- panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) + } + + var ( +@@ -76,68 +75,68 @@ var ( + ) + + var ( +- boolZero = pref.ValueOfBool(false) +- int32Zero = pref.ValueOfInt32(0) +- int64Zero = pref.ValueOfInt64(0) +- uint32Zero = pref.ValueOfUint32(0) +- uint64Zero = pref.ValueOfUint64(0) +- float32Zero = pref.ValueOfFloat32(0) +- float64Zero = pref.ValueOfFloat64(0) +- stringZero = pref.ValueOfString("") +- bytesZero = pref.ValueOfBytes(nil) ++ boolZero = protoreflect.ValueOfBool(false) ++ int32Zero = protoreflect.ValueOfInt32(0) ++ int64Zero = protoreflect.ValueOfInt64(0) ++ uint32Zero = protoreflect.ValueOfUint32(0) ++ uint64Zero = protoreflect.ValueOfUint64(0) ++ float32Zero = protoreflect.ValueOfFloat32(0) ++ float64Zero = protoreflect.ValueOfFloat64(0) ++ stringZero = protoreflect.ValueOfString("") ++ bytesZero = protoreflect.ValueOfBytes(nil) + ) + +-func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { +- defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value { +- if fd.Cardinality() == pref.Repeated { ++func newSingularConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { ++ defVal := func(fd protoreflect.FieldDescriptor, zero protoreflect.Value) protoreflect.Value { ++ if fd.Cardinality() == protoreflect.Repeated { + // Default isn't defined for repeated fields. + return zero + } + return fd.Default() + } + switch fd.Kind() { +- case pref.BoolKind: ++ case protoreflect.BoolKind: + if t.Kind() == reflect.Bool { + return &boolConverter{t, defVal(fd, boolZero)} + } +- case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: ++ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if t.Kind() == reflect.Int32 { + return &int32Converter{t, defVal(fd, int32Zero)} + } +- case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: ++ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if t.Kind() == reflect.Int64 { + return &int64Converter{t, defVal(fd, int64Zero)} + } +- case pref.Uint32Kind, pref.Fixed32Kind: ++ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if t.Kind() == reflect.Uint32 { + return &uint32Converter{t, defVal(fd, uint32Zero)} + } +- case pref.Uint64Kind, pref.Fixed64Kind: ++ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if t.Kind() == reflect.Uint64 { + return &uint64Converter{t, defVal(fd, uint64Zero)} + } +- case pref.FloatKind: ++ case protoreflect.FloatKind: + if t.Kind() == reflect.Float32 { + return &float32Converter{t, defVal(fd, float32Zero)} + } +- case pref.DoubleKind: ++ case protoreflect.DoubleKind: + if t.Kind() == reflect.Float64 { + return &float64Converter{t, defVal(fd, float64Zero)} + } +- case pref.StringKind: ++ case protoreflect.StringKind: + if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { + return &stringConverter{t, defVal(fd, stringZero)} + } +- case pref.BytesKind: ++ case protoreflect.BytesKind: + if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { + return &bytesConverter{t, defVal(fd, bytesZero)} + } +- case pref.EnumKind: ++ case protoreflect.EnumKind: + // 
Handle enums, which must be a named int32 type. + if t.Kind() == reflect.Int32 { + return newEnumConverter(t, fd) + } +- case pref.MessageKind, pref.GroupKind: ++ case protoreflect.MessageKind, protoreflect.GroupKind: + return newMessageConverter(t) + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +@@ -145,184 +144,184 @@ func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + + type boolConverter struct { + goType reflect.Type +- def pref.Value ++ def protoreflect.Value + } + +-func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value { ++func (c *boolConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } +- return pref.ValueOfBool(v.Bool()) ++ return protoreflect.ValueOfBool(v.Bool()) + } +-func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value { ++func (c *boolConverter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(v.Bool()).Convert(c.goType) + } +-func (c *boolConverter) IsValidPB(v pref.Value) bool { ++func (c *boolConverter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(bool) + return ok + } + func (c *boolConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType + } +-func (c *boolConverter) New() pref.Value { return c.def } +-func (c *boolConverter) Zero() pref.Value { return c.def } ++func (c *boolConverter) New() protoreflect.Value { return c.def } ++func (c *boolConverter) Zero() protoreflect.Value { return c.def } + + type int32Converter struct { + goType reflect.Type +- def pref.Value ++ def protoreflect.Value + } + +-func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value { ++func (c *int32Converter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } +- return pref.ValueOfInt32(int32(v.Int())) ++ return protoreflect.ValueOfInt32(int32(v.Int())) + } +-func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value { ++func (c *int32Converter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(int32(v.Int())).Convert(c.goType) + } +-func (c *int32Converter) IsValidPB(v pref.Value) bool { ++func (c *int32Converter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(int32) + return ok + } + func (c *int32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType + } +-func (c *int32Converter) New() pref.Value { return c.def } +-func (c *int32Converter) Zero() pref.Value { return c.def } ++func (c *int32Converter) New() protoreflect.Value { return c.def } ++func (c *int32Converter) Zero() protoreflect.Value { return c.def } + + type int64Converter struct { + goType reflect.Type +- def pref.Value ++ def protoreflect.Value + } + +-func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value { ++func (c *int64Converter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } +- return pref.ValueOfInt64(int64(v.Int())) ++ return protoreflect.ValueOfInt64(int64(v.Int())) + } +-func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value { ++func (c *int64Converter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(int64(v.Int())).Convert(c.goType) + } +-func (c *int64Converter) IsValidPB(v pref.Value) bool { ++func (c *int64Converter) 
IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(int64) + return ok + } + func (c *int64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType + } +-func (c *int64Converter) New() pref.Value { return c.def } +-func (c *int64Converter) Zero() pref.Value { return c.def } ++func (c *int64Converter) New() protoreflect.Value { return c.def } ++func (c *int64Converter) Zero() protoreflect.Value { return c.def } + + type uint32Converter struct { + goType reflect.Type +- def pref.Value ++ def protoreflect.Value + } + +-func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value { ++func (c *uint32Converter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } +- return pref.ValueOfUint32(uint32(v.Uint())) ++ return protoreflect.ValueOfUint32(uint32(v.Uint())) + } +-func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value { ++func (c *uint32Converter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType) + } +-func (c *uint32Converter) IsValidPB(v pref.Value) bool { ++func (c *uint32Converter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(uint32) + return ok + } + func (c *uint32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType + } +-func (c *uint32Converter) New() pref.Value { return c.def } +-func (c *uint32Converter) Zero() pref.Value { return c.def } ++func (c *uint32Converter) New() protoreflect.Value { return c.def } ++func (c *uint32Converter) Zero() protoreflect.Value { return c.def } + + type uint64Converter struct { + goType reflect.Type +- def pref.Value ++ def protoreflect.Value + } + +-func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value { ++func (c *uint64Converter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } +- return pref.ValueOfUint64(uint64(v.Uint())) ++ return protoreflect.ValueOfUint64(uint64(v.Uint())) + } +-func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value { ++func (c *uint64Converter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType) + } +-func (c *uint64Converter) IsValidPB(v pref.Value) bool { ++func (c *uint64Converter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(uint64) + return ok + } + func (c *uint64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType + } +-func (c *uint64Converter) New() pref.Value { return c.def } +-func (c *uint64Converter) Zero() pref.Value { return c.def } ++func (c *uint64Converter) New() protoreflect.Value { return c.def } ++func (c *uint64Converter) Zero() protoreflect.Value { return c.def } + + type float32Converter struct { + goType reflect.Type +- def pref.Value ++ def protoreflect.Value + } + +-func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value { ++func (c *float32Converter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } +- return pref.ValueOfFloat32(float32(v.Float())) ++ return protoreflect.ValueOfFloat32(float32(v.Float())) + } +-func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value { ++func (c *float32Converter) GoValueOf(v protoreflect.Value) reflect.Value { + return 
reflect.ValueOf(float32(v.Float())).Convert(c.goType) + } +-func (c *float32Converter) IsValidPB(v pref.Value) bool { ++func (c *float32Converter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(float32) + return ok + } + func (c *float32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType + } +-func (c *float32Converter) New() pref.Value { return c.def } +-func (c *float32Converter) Zero() pref.Value { return c.def } ++func (c *float32Converter) New() protoreflect.Value { return c.def } ++func (c *float32Converter) Zero() protoreflect.Value { return c.def } + + type float64Converter struct { + goType reflect.Type +- def pref.Value ++ def protoreflect.Value + } + +-func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value { ++func (c *float64Converter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } +- return pref.ValueOfFloat64(float64(v.Float())) ++ return protoreflect.ValueOfFloat64(float64(v.Float())) + } +-func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value { ++func (c *float64Converter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(float64(v.Float())).Convert(c.goType) + } +-func (c *float64Converter) IsValidPB(v pref.Value) bool { ++func (c *float64Converter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(float64) + return ok + } + func (c *float64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType + } +-func (c *float64Converter) New() pref.Value { return c.def } +-func (c *float64Converter) Zero() pref.Value { return c.def } ++func (c *float64Converter) New() protoreflect.Value { return c.def } ++func (c *float64Converter) Zero() protoreflect.Value { return c.def } + + type stringConverter struct { + goType reflect.Type +- def pref.Value ++ def protoreflect.Value + } + +-func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value { ++func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } +- return pref.ValueOfString(v.Convert(stringType).String()) ++ return protoreflect.ValueOfString(v.Convert(stringType).String()) + } +-func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { ++func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { + // pref.Value.String never panics, so we go through an interface + // conversion here to check the type. 
+ s := v.Interface().(string) +@@ -331,71 +330,71 @@ func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { + } + return reflect.ValueOf(s).Convert(c.goType) + } +-func (c *stringConverter) IsValidPB(v pref.Value) bool { ++func (c *stringConverter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(string) + return ok + } + func (c *stringConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType + } +-func (c *stringConverter) New() pref.Value { return c.def } +-func (c *stringConverter) Zero() pref.Value { return c.def } ++func (c *stringConverter) New() protoreflect.Value { return c.def } ++func (c *stringConverter) Zero() protoreflect.Value { return c.def } + + type bytesConverter struct { + goType reflect.Type +- def pref.Value ++ def protoreflect.Value + } + +-func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value { ++func (c *bytesConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + if c.goType.Kind() == reflect.String && v.Len() == 0 { +- return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil) ++ return protoreflect.ValueOfBytes(nil) // ensure empty string is []byte(nil) + } +- return pref.ValueOfBytes(v.Convert(bytesType).Bytes()) ++ return protoreflect.ValueOfBytes(v.Convert(bytesType).Bytes()) + } +-func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value { ++func (c *bytesConverter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(v.Bytes()).Convert(c.goType) + } +-func (c *bytesConverter) IsValidPB(v pref.Value) bool { ++func (c *bytesConverter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().([]byte) + return ok + } + func (c *bytesConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType + } +-func (c *bytesConverter) New() pref.Value { return c.def } +-func (c *bytesConverter) Zero() pref.Value { return c.def } ++func (c *bytesConverter) New() protoreflect.Value { return c.def } ++func (c *bytesConverter) Zero() protoreflect.Value { return c.def } + + type enumConverter struct { + goType reflect.Type +- def pref.Value ++ def protoreflect.Value + } + +-func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter { +- var def pref.Value +- if fd.Cardinality() == pref.Repeated { +- def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number()) ++func newEnumConverter(goType reflect.Type, fd protoreflect.FieldDescriptor) Converter { ++ var def protoreflect.Value ++ if fd.Cardinality() == protoreflect.Repeated { ++ def = protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number()) + } else { + def = fd.Default() + } + return &enumConverter{goType, def} + } + +-func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value { ++func (c *enumConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } +- return pref.ValueOfEnum(pref.EnumNumber(v.Int())) ++ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v.Int())) + } + +-func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value { ++func (c *enumConverter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(v.Enum()).Convert(c.goType) + } + +-func (c *enumConverter) IsValidPB(v pref.Value) bool { +- _, ok := v.Interface().(pref.EnumNumber) ++func (c *enumConverter) IsValidPB(v protoreflect.Value) bool { ++ _, ok := 
v.Interface().(protoreflect.EnumNumber) + return ok + } + +@@ -403,11 +402,11 @@ func (c *enumConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType + } + +-func (c *enumConverter) New() pref.Value { ++func (c *enumConverter) New() protoreflect.Value { + return c.def + } + +-func (c *enumConverter) Zero() pref.Value { ++func (c *enumConverter) Zero() protoreflect.Value { + return c.def + } + +@@ -419,7 +418,7 @@ func newMessageConverter(goType reflect.Type) Converter { + return &messageConverter{goType} + } + +-func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { ++func (c *messageConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } +@@ -430,13 +429,13 @@ func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { + v = reflect.Zero(reflect.PtrTo(v.Type())) + } + } +- if m, ok := v.Interface().(pref.ProtoMessage); ok { +- return pref.ValueOfMessage(m.ProtoReflect()) ++ if m, ok := v.Interface().(protoreflect.ProtoMessage); ok { ++ return protoreflect.ValueOfMessage(m.ProtoReflect()) + } +- return pref.ValueOfMessage(legacyWrapMessage(v)) ++ return protoreflect.ValueOfMessage(legacyWrapMessage(v)) + } + +-func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { ++func (c *messageConverter) GoValueOf(v protoreflect.Value) reflect.Value { + m := v.Message() + var rv reflect.Value + if u, ok := m.(unwrapper); ok { +@@ -460,7 +459,7 @@ func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { + return rv + } + +-func (c *messageConverter) IsValidPB(v pref.Value) bool { ++func (c *messageConverter) IsValidPB(v protoreflect.Value) bool { + m := v.Message() + var rv reflect.Value + if u, ok := m.(unwrapper); ok { +@@ -478,14 +477,14 @@ func (c *messageConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType + } + +-func (c *messageConverter) New() pref.Value { ++func (c *messageConverter) New() protoreflect.Value { + if c.isNonPointer() { + return c.PBValueOf(reflect.New(c.goType).Elem()) + } + return c.PBValueOf(reflect.New(c.goType.Elem())) + } + +-func (c *messageConverter) Zero() pref.Value { ++func (c *messageConverter) Zero() protoreflect.Value { + return c.PBValueOf(reflect.Zero(c.goType)) + } + +diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +index 6fccab5..f891365 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +@@ -8,10 +8,10 @@ import ( + "fmt" + "reflect" + +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + +-func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { ++func newListConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { + switch { + case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice: + return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)} +@@ -26,16 +26,16 @@ type listConverter struct { + c Converter + } + +-func (c *listConverter) PBValueOf(v reflect.Value) pref.Value { ++func (c *listConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + pv := reflect.New(c.goType) + pv.Elem().Set(v) +- return 
pref.ValueOfList(&listReflect{pv, c.c}) ++ return protoreflect.ValueOfList(&listReflect{pv, c.c}) + } + +-func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { ++func (c *listConverter) GoValueOf(v protoreflect.Value) reflect.Value { + rv := v.List().(*listReflect).v + if rv.IsNil() { + return reflect.Zero(c.goType) +@@ -43,7 +43,7 @@ func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { + return rv.Elem() + } + +-func (c *listConverter) IsValidPB(v pref.Value) bool { ++func (c *listConverter) IsValidPB(v protoreflect.Value) bool { + list, ok := v.Interface().(*listReflect) + if !ok { + return false +@@ -55,12 +55,12 @@ func (c *listConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType + } + +-func (c *listConverter) New() pref.Value { +- return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) ++func (c *listConverter) New() protoreflect.Value { ++ return protoreflect.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) + } + +-func (c *listConverter) Zero() pref.Value { +- return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) ++func (c *listConverter) Zero() protoreflect.Value { ++ return protoreflect.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) + } + + type listPtrConverter struct { +@@ -68,18 +68,18 @@ type listPtrConverter struct { + c Converter + } + +-func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value { ++func (c *listPtrConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } +- return pref.ValueOfList(&listReflect{v, c.c}) ++ return protoreflect.ValueOfList(&listReflect{v, c.c}) + } + +-func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value { ++func (c *listPtrConverter) GoValueOf(v protoreflect.Value) reflect.Value { + return v.List().(*listReflect).v + } + +-func (c *listPtrConverter) IsValidPB(v pref.Value) bool { ++func (c *listPtrConverter) IsValidPB(v protoreflect.Value) bool { + list, ok := v.Interface().(*listReflect) + if !ok { + return false +@@ -91,11 +91,11 @@ func (c *listPtrConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType + } + +-func (c *listPtrConverter) New() pref.Value { ++func (c *listPtrConverter) New() protoreflect.Value { + return c.PBValueOf(reflect.New(c.goType.Elem())) + } + +-func (c *listPtrConverter) Zero() pref.Value { ++func (c *listPtrConverter) Zero() protoreflect.Value { + return c.PBValueOf(reflect.Zero(c.goType)) + } + +@@ -110,16 +110,16 @@ func (ls *listReflect) Len() int { + } + return ls.v.Elem().Len() + } +-func (ls *listReflect) Get(i int) pref.Value { ++func (ls *listReflect) Get(i int) protoreflect.Value { + return ls.conv.PBValueOf(ls.v.Elem().Index(i)) + } +-func (ls *listReflect) Set(i int, v pref.Value) { ++func (ls *listReflect) Set(i int, v protoreflect.Value) { + ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v)) + } +-func (ls *listReflect) Append(v pref.Value) { ++func (ls *listReflect) Append(v protoreflect.Value) { + ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v))) + } +-func (ls *listReflect) AppendMutable() pref.Value { ++func (ls *listReflect) AppendMutable() protoreflect.Value { + if _, ok := ls.conv.(*messageConverter); !ok { + panic("invalid AppendMutable on list with non-message type") + } +@@ -130,7 +130,7 @@ func (ls *listReflect) AppendMutable() pref.Value { + func (ls *listReflect) Truncate(i int) { + 
ls.v.Elem().Set(ls.v.Elem().Slice(0, i)) + } +-func (ls *listReflect) NewElement() pref.Value { ++func (ls *listReflect) NewElement() protoreflect.Value { + return ls.conv.New() + } + func (ls *listReflect) IsValid() bool { +diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +index de06b25..f30b0a0 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +@@ -8,7 +8,7 @@ import ( + "fmt" + "reflect" + +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + type mapConverter struct { +@@ -16,7 +16,7 @@ type mapConverter struct { + keyConv, valConv Converter + } + +-func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { ++func newMapConverter(t reflect.Type, fd protoreflect.FieldDescriptor) *mapConverter { + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) + } +@@ -27,18 +27,18 @@ func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { + } + } + +-func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value { ++func (c *mapConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } +- return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) ++ return protoreflect.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) + } + +-func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value { ++func (c *mapConverter) GoValueOf(v protoreflect.Value) reflect.Value { + return v.Map().(*mapReflect).v + } + +-func (c *mapConverter) IsValidPB(v pref.Value) bool { ++func (c *mapConverter) IsValidPB(v protoreflect.Value) bool { + mapv, ok := v.Interface().(*mapReflect) + if !ok { + return false +@@ -50,11 +50,11 @@ func (c *mapConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType + } + +-func (c *mapConverter) New() pref.Value { ++func (c *mapConverter) New() protoreflect.Value { + return c.PBValueOf(reflect.MakeMap(c.goType)) + } + +-func (c *mapConverter) Zero() pref.Value { ++func (c *mapConverter) Zero() protoreflect.Value { + return c.PBValueOf(reflect.Zero(c.goType)) + } + +@@ -67,29 +67,29 @@ type mapReflect struct { + func (ms *mapReflect) Len() int { + return ms.v.Len() + } +-func (ms *mapReflect) Has(k pref.MapKey) bool { ++func (ms *mapReflect) Has(k protoreflect.MapKey) bool { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.v.MapIndex(rk) + return rv.IsValid() + } +-func (ms *mapReflect) Get(k pref.MapKey) pref.Value { ++func (ms *mapReflect) Get(k protoreflect.MapKey) protoreflect.Value { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.v.MapIndex(rk) + if !rv.IsValid() { +- return pref.Value{} ++ return protoreflect.Value{} + } + return ms.valConv.PBValueOf(rv) + } +-func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) { ++func (ms *mapReflect) Set(k protoreflect.MapKey, v protoreflect.Value) { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.valConv.GoValueOf(v) + ms.v.SetMapIndex(rk, rv) + } +-func (ms *mapReflect) Clear(k pref.MapKey) { ++func (ms *mapReflect) Clear(k protoreflect.MapKey) { + rk := ms.keyConv.GoValueOf(k.Value()) + ms.v.SetMapIndex(rk, reflect.Value{}) + } +-func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { ++func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { + 
if _, ok := ms.valConv.(*messageConverter); !ok { + panic("invalid Mutable on map with non-message value type") + } +@@ -100,7 +100,7 @@ func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { + } + return v + } +-func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { ++func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { + iter := mapRange(ms.v) + for iter.Next() { + k := ms.keyConv.PBValueOf(iter.Key()).MapKey() +@@ -110,7 +110,7 @@ func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { + } + } + } +-func (ms *mapReflect) NewValue() pref.Value { ++func (ms *mapReflect) NewValue() protoreflect.Value { + return ms.valConv.New() + } + func (ms *mapReflect) IsValid() bool { +diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go +index c65b032..cda0520 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/decode.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go +@@ -12,9 +12,8 @@ import ( + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +- preg "google.golang.org/protobuf/reflect/protoregistry" ++ "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" +- piface "google.golang.org/protobuf/runtime/protoiface" + ) + + var errDecode = errors.New("cannot parse invalid wire-format data") +@@ -38,14 +37,16 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions { + } + } + +-func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 } ++func (o unmarshalOptions) DiscardUnknown() bool { ++ return o.flags&protoiface.UnmarshalDiscardUnknown != 0 ++} + + func (o unmarshalOptions) IsDefault() bool { +- return o.flags == 0 && o.resolver == preg.GlobalTypes ++ return o.flags == 0 && o.resolver == protoregistry.GlobalTypes + } + + var lazyUnmarshalOptions = unmarshalOptions{ +- resolver: preg.GlobalTypes, ++ resolver: protoregistry.GlobalTypes, + depth: protowire.DefaultRecursionLimit, + } + +@@ -55,7 +56,7 @@ type unmarshalOutput struct { + } + + // unmarshal is protoreflect.Methods.Unmarshal. 
+-func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { ++func (mi *MessageInfo) unmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() +@@ -67,11 +68,11 @@ func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutp + resolver: in.Resolver, + depth: in.Depth, + }) +- var flags piface.UnmarshalOutputFlags ++ var flags protoiface.UnmarshalOutputFlags + if out.initialized { +- flags |= piface.UnmarshalInitialized ++ flags |= protoiface.UnmarshalInitialized + } +- return piface.UnmarshalOutput{ ++ return protoiface.UnmarshalOutput{ + Flags: flags, + }, err + } +@@ -210,7 +211,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p + var err error + xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num) + if err != nil { +- if err == preg.NotFound { ++ if err == protoregistry.NotFound { + return out, errUnknown + } + return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err) +diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go +index 8c1eab4..5f3ef5a 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/enum.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go +@@ -7,15 +7,15 @@ package impl + import ( + "reflect" + +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + type EnumInfo struct { + GoReflectType reflect.Type // int32 kind +- Desc pref.EnumDescriptor ++ Desc protoreflect.EnumDescriptor + } + +-func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum { +- return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum) ++func (t *EnumInfo) New(n protoreflect.EnumNumber) protoreflect.Enum { ++ return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(protoreflect.Enum) + } +-func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc } ++func (t *EnumInfo) Descriptor() protoreflect.EnumDescriptor { return t.Desc } +diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go +index e904fd9..cb25b0b 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/extension.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go +@@ -9,8 +9,8 @@ import ( + "sync" + "sync/atomic" + +- pref "google.golang.org/protobuf/reflect/protoreflect" +- piface "google.golang.org/protobuf/runtime/protoiface" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/runtime/protoiface" + ) + + // ExtensionInfo implements ExtensionType. +@@ -45,7 +45,7 @@ type ExtensionInfo struct { + // since the message may no longer implement the MessageV1 interface. + // + // Deprecated: Use the ExtendedType method instead. +- ExtendedType piface.MessageV1 ++ ExtendedType protoiface.MessageV1 + + // ExtensionType is the zero value of the extension type. 
+ // +@@ -83,31 +83,31 @@ const ( + extensionInfoFullInit = 2 + ) + +-func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) { ++func InitExtensionInfo(xi *ExtensionInfo, xd protoreflect.ExtensionDescriptor, goType reflect.Type) { + xi.goType = goType + xi.desc = extensionTypeDescriptor{xd, xi} + xi.init = extensionInfoDescInit + } + +-func (xi *ExtensionInfo) New() pref.Value { ++func (xi *ExtensionInfo) New() protoreflect.Value { + return xi.lazyInit().New() + } +-func (xi *ExtensionInfo) Zero() pref.Value { ++func (xi *ExtensionInfo) Zero() protoreflect.Value { + return xi.lazyInit().Zero() + } +-func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value { ++func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { + return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) + } +-func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} { ++func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { + return xi.lazyInit().GoValueOf(v).Interface() + } +-func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool { ++func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { + return xi.lazyInit().IsValidPB(v) + } + func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { + return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) + } +-func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor { ++func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { + if atomic.LoadUint32(&xi.init) < extensionInfoDescInit { + xi.lazyInitSlow() + } +@@ -144,13 +144,13 @@ func (xi *ExtensionInfo) lazyInitSlow() { + } + + type extensionTypeDescriptor struct { +- pref.ExtensionDescriptor ++ protoreflect.ExtensionDescriptor + xi *ExtensionInfo + } + +-func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType { ++func (xtd *extensionTypeDescriptor) Type() protoreflect.ExtensionType { + return xtd.xi + } +-func (xtd *extensionTypeDescriptor) Descriptor() pref.ExtensionDescriptor { ++func (xtd *extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor { + return xtd.ExtensionDescriptor + } +diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +index f7d7ffb..c2a803b 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +@@ -13,13 +13,12 @@ import ( + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +- pref "google.golang.org/protobuf/reflect/protoreflect" + ) + + // legacyEnumName returns the name of enums used in legacy code. + // It is neither the protobuf full name nor the qualified Go name, + // but rather an odd hybrid of both. +-func legacyEnumName(ed pref.EnumDescriptor) string { ++func legacyEnumName(ed protoreflect.EnumDescriptor) string { + var protoPkg string + enumName := string(ed.FullName()) + if fd := ed.ParentFile(); fd != nil { +@@ -34,68 +33,68 @@ func legacyEnumName(ed pref.EnumDescriptor) string { + + // legacyWrapEnum wraps v as a protoreflect.Enum, + // where v must be a int32 kind and not implement the v2 API already. 
+-func legacyWrapEnum(v reflect.Value) pref.Enum { ++func legacyWrapEnum(v reflect.Value) protoreflect.Enum { + et := legacyLoadEnumType(v.Type()) +- return et.New(pref.EnumNumber(v.Int())) ++ return et.New(protoreflect.EnumNumber(v.Int())) + } + + var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType + + // legacyLoadEnumType dynamically loads a protoreflect.EnumType for t, + // where t must be an int32 kind and not implement the v2 API already. +-func legacyLoadEnumType(t reflect.Type) pref.EnumType { ++func legacyLoadEnumType(t reflect.Type) protoreflect.EnumType { + // Fast-path: check if a EnumType is cached for this concrete type. + if et, ok := legacyEnumTypeCache.Load(t); ok { +- return et.(pref.EnumType) ++ return et.(protoreflect.EnumType) + } + + // Slow-path: derive enum descriptor and initialize EnumType. +- var et pref.EnumType ++ var et protoreflect.EnumType + ed := LegacyLoadEnumDesc(t) + et = &legacyEnumType{ + desc: ed, + goType: t, + } + if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok { +- return et.(pref.EnumType) ++ return et.(protoreflect.EnumType) + } + return et + } + + type legacyEnumType struct { +- desc pref.EnumDescriptor ++ desc protoreflect.EnumDescriptor + goType reflect.Type + m sync.Map // map[protoreflect.EnumNumber]proto.Enum + } + +-func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum { ++func (t *legacyEnumType) New(n protoreflect.EnumNumber) protoreflect.Enum { + if e, ok := t.m.Load(n); ok { +- return e.(pref.Enum) ++ return e.(protoreflect.Enum) + } + e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType} + t.m.Store(n, e) + return e + } +-func (t *legacyEnumType) Descriptor() pref.EnumDescriptor { ++func (t *legacyEnumType) Descriptor() protoreflect.EnumDescriptor { + return t.desc + } + + type legacyEnumWrapper struct { +- num pref.EnumNumber +- pbTyp pref.EnumType ++ num protoreflect.EnumNumber ++ pbTyp protoreflect.EnumType + goTyp reflect.Type + } + +-func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor { ++func (e *legacyEnumWrapper) Descriptor() protoreflect.EnumDescriptor { + return e.pbTyp.Descriptor() + } +-func (e *legacyEnumWrapper) Type() pref.EnumType { ++func (e *legacyEnumWrapper) Type() protoreflect.EnumType { + return e.pbTyp + } +-func (e *legacyEnumWrapper) Number() pref.EnumNumber { ++func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { + return e.num + } +-func (e *legacyEnumWrapper) ProtoReflect() pref.Enum { ++func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { + return e + } + func (e *legacyEnumWrapper) protoUnwrap() interface{} { +@@ -105,8 +104,8 @@ func (e *legacyEnumWrapper) protoUnwrap() interface{} { + } + + var ( +- _ pref.Enum = (*legacyEnumWrapper)(nil) +- _ unwrapper = (*legacyEnumWrapper)(nil) ++ _ protoreflect.Enum = (*legacyEnumWrapper)(nil) ++ _ unwrapper = (*legacyEnumWrapper)(nil) + ) + + var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor +@@ -115,15 +114,15 @@ var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor + // which must be an int32 kind and not implement the v2 API already. + // + // This is exported for testing purposes. +-func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { ++func LegacyLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { + // Fast-path: check if an EnumDescriptor is cached for this concrete type. 
+ if ed, ok := legacyEnumDescCache.Load(t); ok { +- return ed.(pref.EnumDescriptor) ++ return ed.(protoreflect.EnumDescriptor) + } + + // Slow-path: initialize EnumDescriptor from the raw descriptor. + ev := reflect.Zero(t).Interface() +- if _, ok := ev.(pref.Enum); ok { ++ if _, ok := ev.(protoreflect.Enum); ok { + panic(fmt.Sprintf("%v already implements proto.Enum", t)) + } + edV1, ok := ev.(enumV1) +@@ -132,7 +131,7 @@ func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { + } + b, idxs := edV1.EnumDescriptor() + +- var ed pref.EnumDescriptor ++ var ed protoreflect.EnumDescriptor + if len(idxs) == 1 { + ed = legacyLoadFileDesc(b).Enums().Get(idxs[0]) + } else { +@@ -158,10 +157,10 @@ var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescript + // We are unable to use the global enum registry since it is + // unfortunately keyed by the protobuf full name, which we also do not know. + // Thus, this produces some bogus enum descriptor based on the Go type name. +-func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { ++func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { + // Fast-path: check if an EnumDescriptor is cached for this concrete type. + if ed, ok := aberrantEnumDescCache.Load(t); ok { +- return ed.(pref.EnumDescriptor) ++ return ed.(protoreflect.EnumDescriptor) + } + + // Slow-path: construct a bogus, but unique EnumDescriptor. +@@ -182,7 +181,7 @@ func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { + // An exhaustive query is clearly impractical, but can be best-effort. + + if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok { +- return ed.(pref.EnumDescriptor) ++ return ed.(protoreflect.EnumDescriptor) + } + return ed + } +@@ -192,7 +191,7 @@ func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { + // It should be sufficiently unique within a program. + // + // This is exported for testing purposes. +-func AberrantDeriveFullName(t reflect.Type) pref.FullName { ++func AberrantDeriveFullName(t reflect.Type) protoreflect.FullName { + sanitize := func(r rune) rune { + switch { + case r == '/': +@@ -215,5 +214,5 @@ func AberrantDeriveFullName(t reflect.Type) pref.FullName { + ss[i] = "x" + s + } + } +- return pref.FullName(strings.Join(ss, ".")) ++ return protoreflect.FullName(strings.Join(ss, ".")) + } +diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go +index e3fb0b5..9b64ad5 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go +@@ -12,21 +12,21 @@ import ( + "reflect" + + "google.golang.org/protobuf/internal/errors" +- pref "google.golang.org/protobuf/reflect/protoreflect" +- piface "google.golang.org/protobuf/runtime/protoiface" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/runtime/protoiface" + ) + + // These functions exist to support exported APIs in generated protobufs. + // While these are deprecated, they cannot be removed for compatibility reasons. + + // LegacyEnumName returns the name of enums used in legacy code. +-func (Export) LegacyEnumName(ed pref.EnumDescriptor) string { ++func (Export) LegacyEnumName(ed protoreflect.EnumDescriptor) string { + return legacyEnumName(ed) + } + + // LegacyMessageTypeOf returns the protoreflect.MessageType for m, + // with name used as the message name if necessary. 
+-func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType { ++func (Export) LegacyMessageTypeOf(m protoiface.MessageV1, name protoreflect.FullName) protoreflect.MessageType { + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Type() + } +@@ -36,9 +36,9 @@ func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.M + // UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. + // The input can either be a string representing the enum value by name, + // or a number representing the enum number itself. +-func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) { ++func (Export) UnmarshalJSONEnum(ed protoreflect.EnumDescriptor, b []byte) (protoreflect.EnumNumber, error) { + if b[0] == '"' { +- var name pref.Name ++ var name protoreflect.Name + if err := json.Unmarshal(b, &name); err != nil { + return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) + } +@@ -48,7 +48,7 @@ func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumb + } + return ev.Number(), nil + } else { +- var num pref.EnumNumber ++ var num protoreflect.EnumNumber + if err := json.Unmarshal(b, &num); err != nil { + return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) + } +@@ -81,8 +81,8 @@ func (Export) CompressGZIP(in []byte) (out []byte) { + blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3. + blockSize = len(in) + } +- binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000) +- binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff) ++ binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)) ++ binary.LittleEndian.PutUint16(blockHeader[3:5], ^uint16(blockSize)) + out = append(out, blockHeader[:]...) + out = append(out, in[:blockSize]...) + in = in[blockSize:] +diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +index 49e7231..87b30d0 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +@@ -12,16 +12,16 @@ import ( + ptag "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" +- pref "google.golang.org/protobuf/reflect/protoreflect" +- preg "google.golang.org/protobuf/reflect/protoregistry" +- piface "google.golang.org/protobuf/runtime/protoiface" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoregistry" ++ "google.golang.org/protobuf/runtime/protoiface" + ) + + func (xi *ExtensionInfo) initToLegacy() { + xd := xi.desc +- var parent piface.MessageV1 ++ var parent protoiface.MessageV1 + messageName := xd.ContainingMessage().FullName() +- if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil { ++ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(messageName); mt != nil { + // Create a new parent message and unwrap it if possible. + mv := mt.New().Interface() + t := reflect.TypeOf(mv) +@@ -31,7 +31,7 @@ func (xi *ExtensionInfo) initToLegacy() { + + // Check whether the message implements the legacy v1 Message interface. 
+ mz := reflect.Zero(t).Interface() +- if mz, ok := mz.(piface.MessageV1); ok { ++ if mz, ok := mz.(protoiface.MessageV1); ok { + parent = mz + } + } +@@ -46,7 +46,7 @@ func (xi *ExtensionInfo) initToLegacy() { + + // Reconstruct the legacy enum full name. + var enumName string +- if xd.Kind() == pref.EnumKind { ++ if xd.Kind() == protoreflect.EnumKind { + enumName = legacyEnumName(xd.Enum()) + } + +@@ -77,16 +77,16 @@ func (xi *ExtensionInfo) initFromLegacy() { + // field number is specified. In such a case, use a placeholder. + if xi.ExtendedType == nil || xi.ExtensionType == nil { + xd := placeholderExtension{ +- name: pref.FullName(xi.Name), +- number: pref.FieldNumber(xi.Field), ++ name: protoreflect.FullName(xi.Name), ++ number: protoreflect.FieldNumber(xi.Field), + } + xi.desc = extensionTypeDescriptor{xd, xi} + return + } + + // Resolve enum or message dependencies. +- var ed pref.EnumDescriptor +- var md pref.MessageDescriptor ++ var ed protoreflect.EnumDescriptor ++ var md protoreflect.MessageDescriptor + t := reflect.TypeOf(xi.ExtensionType) + isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct + isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +@@ -94,18 +94,18 @@ func (xi *ExtensionInfo) initFromLegacy() { + t = t.Elem() + } + switch v := reflect.Zero(t).Interface().(type) { +- case pref.Enum: ++ case protoreflect.Enum: + ed = v.Descriptor() + case enumV1: + ed = LegacyLoadEnumDesc(t) +- case pref.ProtoMessage: ++ case protoreflect.ProtoMessage: + md = v.ProtoReflect().Descriptor() + case messageV1: + md = LegacyLoadMessageDesc(t) + } + + // Derive basic field information from the struct tag. +- var evs pref.EnumValueDescriptors ++ var evs protoreflect.EnumValueDescriptors + if ed != nil { + evs = ed.Values() + } +@@ -114,8 +114,8 @@ func (xi *ExtensionInfo) initFromLegacy() { + // Construct a v2 ExtensionType. 
+ xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)} + xd.L0.ParentFile = filedesc.SurrogateProto2 +- xd.L0.FullName = pref.FullName(xi.Name) +- xd.L1.Number = pref.FieldNumber(xi.Field) ++ xd.L0.FullName = protoreflect.FullName(xi.Name) ++ xd.L1.Number = protoreflect.FieldNumber(xi.Field) + xd.L1.Cardinality = fd.L1.Cardinality + xd.L1.Kind = fd.L1.Kind + xd.L2.IsPacked = fd.L1.IsPacked +@@ -138,39 +138,39 @@ func (xi *ExtensionInfo) initFromLegacy() { + } + + type placeholderExtension struct { +- name pref.FullName +- number pref.FieldNumber ++ name protoreflect.FullName ++ number protoreflect.FieldNumber + } + +-func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil } +-func (x placeholderExtension) Parent() pref.Descriptor { return nil } +-func (x placeholderExtension) Index() int { return 0 } +-func (x placeholderExtension) Syntax() pref.Syntax { return 0 } +-func (x placeholderExtension) Name() pref.Name { return x.name.Name() } +-func (x placeholderExtension) FullName() pref.FullName { return x.name } +-func (x placeholderExtension) IsPlaceholder() bool { return true } +-func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field } +-func (x placeholderExtension) Number() pref.FieldNumber { return x.number } +-func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } +-func (x placeholderExtension) Kind() pref.Kind { return 0 } +-func (x placeholderExtension) HasJSONName() bool { return false } +-func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } +-func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } +-func (x placeholderExtension) HasPresence() bool { return false } +-func (x placeholderExtension) HasOptionalKeyword() bool { return false } +-func (x placeholderExtension) IsExtension() bool { return true } +-func (x placeholderExtension) IsWeak() bool { return false } +-func (x placeholderExtension) IsPacked() bool { return false } +-func (x placeholderExtension) IsList() bool { return false } +-func (x placeholderExtension) IsMap() bool { return false } +-func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil } +-func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil } +-func (x placeholderExtension) HasDefault() bool { return false } +-func (x placeholderExtension) Default() pref.Value { return pref.Value{} } +-func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil } +-func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil } +-func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil } +-func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil } +-func (x placeholderExtension) Message() pref.MessageDescriptor { return nil } +-func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return } +-func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } ++func (x placeholderExtension) ParentFile() protoreflect.FileDescriptor { return nil } ++func (x placeholderExtension) Parent() protoreflect.Descriptor { return nil } ++func (x placeholderExtension) Index() int { return 0 } ++func (x placeholderExtension) Syntax() protoreflect.Syntax { return 0 } ++func (x placeholderExtension) Name() protoreflect.Name { return x.name.Name() } ++func (x placeholderExtension) FullName() protoreflect.FullName { return x.name } ++func (x placeholderExtension) IsPlaceholder() bool { return true } ++func (x 
placeholderExtension) Options() protoreflect.ProtoMessage { return descopts.Field } ++func (x placeholderExtension) Number() protoreflect.FieldNumber { return x.number } ++func (x placeholderExtension) Cardinality() protoreflect.Cardinality { return 0 } ++func (x placeholderExtension) Kind() protoreflect.Kind { return 0 } ++func (x placeholderExtension) HasJSONName() bool { return false } ++func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } ++func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } ++func (x placeholderExtension) HasPresence() bool { return false } ++func (x placeholderExtension) HasOptionalKeyword() bool { return false } ++func (x placeholderExtension) IsExtension() bool { return true } ++func (x placeholderExtension) IsWeak() bool { return false } ++func (x placeholderExtension) IsPacked() bool { return false } ++func (x placeholderExtension) IsList() bool { return false } ++func (x placeholderExtension) IsMap() bool { return false } ++func (x placeholderExtension) MapKey() protoreflect.FieldDescriptor { return nil } ++func (x placeholderExtension) MapValue() protoreflect.FieldDescriptor { return nil } ++func (x placeholderExtension) HasDefault() bool { return false } ++func (x placeholderExtension) Default() protoreflect.Value { return protoreflect.Value{} } ++func (x placeholderExtension) DefaultEnumValue() protoreflect.EnumValueDescriptor { return nil } ++func (x placeholderExtension) ContainingOneof() protoreflect.OneofDescriptor { return nil } ++func (x placeholderExtension) ContainingMessage() protoreflect.MessageDescriptor { return nil } ++func (x placeholderExtension) Enum() protoreflect.EnumDescriptor { return nil } ++func (x placeholderExtension) Message() protoreflect.MessageDescriptor { return nil } ++func (x placeholderExtension) ProtoType(protoreflect.FieldDescriptor) { return } ++func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } +diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +index 029feee..61c483f 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +@@ -16,14 +16,12 @@ import ( + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +- pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +- piface "google.golang.org/protobuf/runtime/protoiface" + ) + + // legacyWrapMessage wraps v as a protoreflect.Message, + // where v must be a *struct kind and not implement the v2 API already. +-func legacyWrapMessage(v reflect.Value) pref.Message { ++func legacyWrapMessage(v reflect.Value) protoreflect.Message { + t := v.Type() + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return aberrantMessage{v: v} +@@ -35,7 +33,7 @@ func legacyWrapMessage(v reflect.Value) pref.Message { + // legacyLoadMessageType dynamically loads a protoreflect.Type for t, + // where t must be not implement the v2 API already. + // The provided name is used if it cannot be determined from the message. 
+-func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType { ++func legacyLoadMessageType(t reflect.Type, name protoreflect.FullName) protoreflect.MessageType { + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return aberrantMessageType{t} + } +@@ -47,7 +45,7 @@ var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo + // legacyLoadMessageInfo dynamically loads a *MessageInfo for t, + // where t must be a *struct kind and not implement the v2 API already. + // The provided name is used if it cannot be determined from the message. +-func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { ++func legacyLoadMessageInfo(t reflect.Type, name protoreflect.FullName) *MessageInfo { + // Fast-path: check if a MessageInfo is cached for this concrete type. + if mt, ok := legacyMessageTypeCache.Load(t); ok { + return mt.(*MessageInfo) +@@ -68,7 +66,7 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { + // supports deterministic serialization or not, but this + // preserves the v1 implementation's behavior of always + // calling Marshal methods when present. +- mi.methods.Flags |= piface.SupportMarshalDeterministic ++ mi.methods.Flags |= protoiface.SupportMarshalDeterministic + } + if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { + mi.methods.Unmarshal = legacyUnmarshal +@@ -89,18 +87,18 @@ var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDesc + // which should be a *struct kind and must not implement the v2 API already. + // + // This is exported for testing purposes. +-func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { ++func LegacyLoadMessageDesc(t reflect.Type) protoreflect.MessageDescriptor { + return legacyLoadMessageDesc(t, "") + } +-func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { ++func legacyLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { + // Fast-path: check if a MessageDescriptor is cached for this concrete type. + if mi, ok := legacyMessageDescCache.Load(t); ok { +- return mi.(pref.MessageDescriptor) ++ return mi.(protoreflect.MessageDescriptor) + } + + // Slow-path: initialize MessageDescriptor from the raw descriptor. + mv := reflect.Zero(t).Interface() +- if _, ok := mv.(pref.ProtoMessage); ok { ++ if _, ok := mv.(protoreflect.ProtoMessage); ok { + panic(fmt.Sprintf("%v already implements proto.Message", t)) + } + mdV1, ok := mv.(messageV1) +@@ -164,7 +162,7 @@ var ( + // + // This is a best-effort derivation of the message descriptor using the protobuf + // tags on the struct fields. +-func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { ++func aberrantLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { + aberrantMessageDescLock.Lock() + defer aberrantMessageDescLock.Unlock() + if aberrantMessageDescCache == nil { +@@ -172,7 +170,7 @@ func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDes + } + return aberrantLoadMessageDescReentrant(t, name) + } +-func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor { ++func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { + // Fast-path: check if an MessageDescriptor is cached for this concrete type. 
+ if md, ok := aberrantMessageDescCache[t]; ok { + return md +@@ -225,9 +223,9 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M + vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0] + for i := 0; i < vs.Len(); i++ { + v := vs.Index(i) +- md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{ +- pref.FieldNumber(v.FieldByName("Start").Int()), +- pref.FieldNumber(v.FieldByName("End").Int() + 1), ++ md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ ++ protoreflect.FieldNumber(v.FieldByName("Start").Int()), ++ protoreflect.FieldNumber(v.FieldByName("End").Int() + 1), + }) + md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil) + } +@@ -245,7 +243,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M + n := len(md.L2.Oneofs.List) + md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{}) + od := &md.L2.Oneofs.List[n] +- od.L0.FullName = md.FullName().Append(pref.Name(tag)) ++ od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) + od.L0.ParentFile = md.L0.ParentFile + od.L0.Parent = md + od.L0.Index = n +@@ -267,14 +265,14 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M + return md + } + +-func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName { ++func aberrantDeriveMessageName(t reflect.Type, name protoreflect.FullName) protoreflect.FullName { + if name.IsValid() { + return name + } + func() { + defer func() { recover() }() // swallow possible nil panics + if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok { +- name = pref.FullName(m.XXX_MessageName()) ++ name = protoreflect.FullName(m.XXX_MessageName()) + } + }() + if name.IsValid() { +@@ -305,7 +303,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, + fd.L0.Index = n + + if fd.L1.IsWeak || fd.L1.HasPacked { +- fd.L1.Options = func() pref.ProtoMessage { ++ fd.L1.Options = func() protoreflect.ProtoMessage { + opts := descopts.Field.ProtoReflect().New() + if fd.L1.IsWeak { + opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) +@@ -318,17 +316,17 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, + } + + // Populate Enum and Message. 
+- if fd.Enum() == nil && fd.Kind() == pref.EnumKind { ++ if fd.Enum() == nil && fd.Kind() == protoreflect.EnumKind { + switch v := reflect.Zero(t).Interface().(type) { +- case pref.Enum: ++ case protoreflect.Enum: + fd.L1.Enum = v.Descriptor() + default: + fd.L1.Enum = LegacyLoadEnumDesc(t) + } + } +- if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) { ++ if fd.Message() == nil && (fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind) { + switch v := reflect.Zero(t).Interface().(type) { +- case pref.ProtoMessage: ++ case protoreflect.ProtoMessage: + fd.L1.Message = v.ProtoReflect().Descriptor() + case messageV1: + fd.L1.Message = LegacyLoadMessageDesc(t) +@@ -337,13 +335,13 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, + n := len(md.L1.Messages.List) + md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)}) + md2 := &md.L1.Messages.List[n] +- md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name())))) ++ md2.L0.FullName = md.FullName().Append(protoreflect.Name(strs.MapEntryName(string(fd.Name())))) + md2.L0.ParentFile = md.L0.ParentFile + md2.L0.Parent = md + md2.L0.Index = n + + md2.L1.IsMapEntry = true +- md2.L2.Options = func() pref.ProtoMessage { ++ md2.L2.Options = func() protoreflect.ProtoMessage { + opts := descopts.Message.ProtoReflect().New() + opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true)) + return opts.Interface() +@@ -364,8 +362,8 @@ type placeholderEnumValues struct { + protoreflect.EnumValueDescriptors + } + +-func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor { +- return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n))) ++func (placeholderEnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { ++ return filedesc.PlaceholderEnumValue(protoreflect.FullName(fmt.Sprintf("UNKNOWN_%d", n))) + } + + // legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder. +@@ -383,7 +381,7 @@ type legacyMerger interface { + Merge(protoiface.MessageV1) + } + +-var aberrantProtoMethods = &piface.Methods{ ++var aberrantProtoMethods = &protoiface.Methods{ + Marshal: legacyMarshal, + Unmarshal: legacyUnmarshal, + Merge: legacyMerge, +@@ -392,40 +390,40 @@ var aberrantProtoMethods = &piface.Methods{ + // supports deterministic serialization or not, but this + // preserves the v1 implementation's behavior of always + // calling Marshal methods when present. +- Flags: piface.SupportMarshalDeterministic, ++ Flags: protoiface.SupportMarshalDeterministic, + } + +-func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) { ++func legacyMarshal(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + v := in.Message.(unwrapper).protoUnwrap() + marshaler, ok := v.(legacyMarshaler) + if !ok { +- return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) ++ return protoiface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) + } + out, err := marshaler.Marshal() + if in.Buf != nil { + out = append(in.Buf, out...) 
+ } +- return piface.MarshalOutput{ ++ return protoiface.MarshalOutput{ + Buf: out, + }, err + } + +-func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { ++func legacyUnmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + v := in.Message.(unwrapper).protoUnwrap() + unmarshaler, ok := v.(legacyUnmarshaler) + if !ok { +- return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) ++ return protoiface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) + } +- return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) ++ return protoiface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) + } + +-func legacyMerge(in piface.MergeInput) piface.MergeOutput { ++func legacyMerge(in protoiface.MergeInput) protoiface.MergeOutput { + // Check whether this supports the legacy merger. + dstv := in.Destination.(unwrapper).protoUnwrap() + merger, ok := dstv.(legacyMerger) + if ok { + merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) +- return piface.MergeOutput{Flags: piface.MergeComplete} ++ return protoiface.MergeOutput{Flags: protoiface.MergeComplete} + } + + // If legacy merger is unavailable, implement merge in terms of +@@ -433,29 +431,29 @@ func legacyMerge(in piface.MergeInput) piface.MergeOutput { + srcv := in.Source.(unwrapper).protoUnwrap() + marshaler, ok := srcv.(legacyMarshaler) + if !ok { +- return piface.MergeOutput{} ++ return protoiface.MergeOutput{} + } + dstv = in.Destination.(unwrapper).protoUnwrap() + unmarshaler, ok := dstv.(legacyUnmarshaler) + if !ok { +- return piface.MergeOutput{} ++ return protoiface.MergeOutput{} + } + if !in.Source.IsValid() { + // Legacy Marshal methods may not function on nil messages. + // Check for a typed nil source only after we confirm that + // legacy Marshal/Unmarshal methods are present, for + // consistency. +- return piface.MergeOutput{Flags: piface.MergeComplete} ++ return protoiface.MergeOutput{Flags: protoiface.MergeComplete} + } + b, err := marshaler.Marshal() + if err != nil { +- return piface.MergeOutput{} ++ return protoiface.MergeOutput{} + } + err = unmarshaler.Unmarshal(b) + if err != nil { +- return piface.MergeOutput{} ++ return protoiface.MergeOutput{} + } +- return piface.MergeOutput{Flags: piface.MergeComplete} ++ return protoiface.MergeOutput{Flags: protoiface.MergeComplete} + } + + // aberrantMessageType implements MessageType for all types other than pointer-to-struct. 
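
The hunks above (and most of the hunks that follow) are a mechanical rename: the short import aliases pref and piface used throughout the vendored protobuf runtime are replaced with the full package names protoreflect and protoiface, with no behavioural change. As a rough hand-written illustration, not part of the patch itself (the package name impl and both helper functions are invented here), the renamed identifiers are used like this:

package impl

import (
	"google.golang.org/protobuf/reflect/protoreflect" // previously imported as pref
	"google.golang.org/protobuf/runtime/protoiface"   // previously imported as piface
)

// completeMerge shows the post-rename spelling of what the old code wrote
// as piface.MergeOutput{Flags: piface.MergeComplete}.
func completeMerge() protoiface.MergeOutput {
	return protoiface.MergeOutput{Flags: protoiface.MergeComplete}
}

// fullNameOf shows the protoreflect side of the same rename.
func fullNameOf(m protoreflect.ProtoMessage) protoreflect.FullName {
	return m.ProtoReflect().Descriptor().FullName()
}
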
+@@ -463,19 +461,19 @@ type aberrantMessageType struct { + t reflect.Type + } + +-func (mt aberrantMessageType) New() pref.Message { ++func (mt aberrantMessageType) New() protoreflect.Message { + if mt.t.Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(mt.t.Elem())} + } + return aberrantMessage{reflect.Zero(mt.t)} + } +-func (mt aberrantMessageType) Zero() pref.Message { ++func (mt aberrantMessageType) Zero() protoreflect.Message { + return aberrantMessage{reflect.Zero(mt.t)} + } + func (mt aberrantMessageType) GoType() reflect.Type { + return mt.t + } +-func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor { ++func (mt aberrantMessageType) Descriptor() protoreflect.MessageDescriptor { + return LegacyLoadMessageDesc(mt.t) + } + +@@ -499,56 +497,56 @@ func (m aberrantMessage) Reset() { + } + } + +-func (m aberrantMessage) ProtoReflect() pref.Message { ++func (m aberrantMessage) ProtoReflect() protoreflect.Message { + return m + } + +-func (m aberrantMessage) Descriptor() pref.MessageDescriptor { ++func (m aberrantMessage) Descriptor() protoreflect.MessageDescriptor { + return LegacyLoadMessageDesc(m.v.Type()) + } +-func (m aberrantMessage) Type() pref.MessageType { ++func (m aberrantMessage) Type() protoreflect.MessageType { + return aberrantMessageType{m.v.Type()} + } +-func (m aberrantMessage) New() pref.Message { ++func (m aberrantMessage) New() protoreflect.Message { + if m.v.Type().Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(m.v.Type().Elem())} + } + return aberrantMessage{reflect.Zero(m.v.Type())} + } +-func (m aberrantMessage) Interface() pref.ProtoMessage { ++func (m aberrantMessage) Interface() protoreflect.ProtoMessage { + return m + } +-func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { ++func (m aberrantMessage) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + return + } +-func (m aberrantMessage) Has(pref.FieldDescriptor) bool { ++func (m aberrantMessage) Has(protoreflect.FieldDescriptor) bool { + return false + } +-func (m aberrantMessage) Clear(pref.FieldDescriptor) { ++func (m aberrantMessage) Clear(protoreflect.FieldDescriptor) { + panic("invalid Message.Clear on " + string(m.Descriptor().FullName())) + } +-func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value { ++func (m aberrantMessage) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + if fd.Default().IsValid() { + return fd.Default() + } + panic("invalid Message.Get on " + string(m.Descriptor().FullName())) + } +-func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { ++func (m aberrantMessage) Set(protoreflect.FieldDescriptor, protoreflect.Value) { + panic("invalid Message.Set on " + string(m.Descriptor().FullName())) + } +-func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { ++func (m aberrantMessage) Mutable(protoreflect.FieldDescriptor) protoreflect.Value { + panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) + } +-func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { ++func (m aberrantMessage) NewField(protoreflect.FieldDescriptor) protoreflect.Value { + panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) + } +-func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { ++func (m aberrantMessage) WhichOneof(protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) + } +-func (m aberrantMessage) GetUnknown() 
pref.RawFields { ++func (m aberrantMessage) GetUnknown() protoreflect.RawFields { + return nil + } +-func (m aberrantMessage) SetUnknown(pref.RawFields) { ++func (m aberrantMessage) SetUnknown(protoreflect.RawFields) { + // SetUnknown discards its input on messages which don't support unknown field storage. + } + func (m aberrantMessage) IsValid() bool { +@@ -557,7 +555,7 @@ func (m aberrantMessage) IsValid() bool { + } + return false + } +-func (m aberrantMessage) ProtoMethods() *piface.Methods { ++func (m aberrantMessage) ProtoMethods() *protoiface.Methods { + return aberrantProtoMethods + } + func (m aberrantMessage) protoUnwrap() interface{} { +diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go +index c65bbc0..7e65f64 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/merge.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go +@@ -9,8 +9,8 @@ import ( + "reflect" + + "google.golang.org/protobuf/proto" +- pref "google.golang.org/protobuf/reflect/protoreflect" +- piface "google.golang.org/protobuf/runtime/protoiface" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/runtime/protoiface" + ) + + type mergeOptions struct{} +@@ -20,17 +20,17 @@ func (o mergeOptions) Merge(dst, src proto.Message) { + } + + // merge is protoreflect.Methods.Merge. +-func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput { ++func (mi *MessageInfo) merge(in protoiface.MergeInput) protoiface.MergeOutput { + dp, ok := mi.getPointer(in.Destination) + if !ok { +- return piface.MergeOutput{} ++ return protoiface.MergeOutput{} + } + sp, ok := mi.getPointer(in.Source) + if !ok { +- return piface.MergeOutput{} ++ return protoiface.MergeOutput{} + } + mi.mergePointer(dp, sp, mergeOptions{}) +- return piface.MergeOutput{Flags: piface.MergeComplete} ++ return protoiface.MergeOutput{Flags: protoiface.MergeComplete} + } + + func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { +@@ -64,7 +64,7 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { + continue + } + dx := (*dext)[num] +- var dv pref.Value ++ var dv protoreflect.Value + if dx.Type() == sx.Type() { + dv = dx.Value() + } +@@ -85,15 +85,15 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { + } + } + +-func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value { ++func mergeScalarValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + return src + } + +-func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value { +- return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) ++func mergeBytesValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { ++ return protoreflect.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) + } + +-func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { ++func mergeListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { +@@ -102,29 +102,29 @@ func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + return dst + } + +-func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value { ++func mergeBytesListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + sb := srcl.Get(i).Bytes() + db 
:= append(emptyBuf[:], sb...) +- dstl.Append(pref.ValueOfBytes(db)) ++ dstl.Append(protoreflect.ValueOfBytes(db)) + } + return dst + } + +-func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value { ++func mergeMessageListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + sm := srcl.Get(i).Message() + dm := proto.Clone(sm.Interface()).ProtoReflect() +- dstl.Append(pref.ValueOfMessage(dm)) ++ dstl.Append(protoreflect.ValueOfMessage(dm)) + } + return dst + } + +-func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value { ++func mergeMessageValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + opts.Merge(dst.Message().Interface(), src.Message().Interface()) + return dst + } +diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go +index a104e28..4f5fb67 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/message.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/message.go +@@ -14,8 +14,7 @@ import ( + + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" +- pref "google.golang.org/protobuf/reflect/protoreflect" +- preg "google.golang.org/protobuf/reflect/protoregistry" ++ "google.golang.org/protobuf/reflect/protoregistry" + ) + + // MessageInfo provides protobuf related functionality for a given Go type +@@ -29,7 +28,7 @@ type MessageInfo struct { + GoReflectType reflect.Type // pointer to struct + + // Desc is the underlying message descriptor type and must be populated. +- Desc pref.MessageDescriptor ++ Desc protoreflect.MessageDescriptor + + // Exporter must be provided in a purego environment in order to provide + // access to unexported fields. +@@ -54,7 +53,7 @@ type exporter func(v interface{}, i int) interface{} + // is generated by our implementation of protoc-gen-go (for v2 and on). + // If it is unable to obtain a MessageInfo, it returns nil. + func getMessageInfo(mt reflect.Type) *MessageInfo { +- m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage) ++ m, ok := reflect.Zero(mt).Interface().(protoreflect.ProtoMessage) + if !ok { + return nil + } +@@ -97,7 +96,7 @@ func (mi *MessageInfo) initOnce() { + // getPointer returns the pointer for a message, which should be of + // the type of the MessageInfo. If the message is of a different type, + // it returns ok==false. 
+-func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { ++func (mi *MessageInfo) getPointer(m protoreflect.Message) (p pointer, ok bool) { + switch m := m.(type) { + case *messageState: + return m.pointer(), m.messageInfo() == mi +@@ -134,10 +133,10 @@ type structInfo struct { + extensionOffset offset + extensionType reflect.Type + +- fieldsByNumber map[pref.FieldNumber]reflect.StructField +- oneofsByName map[pref.Name]reflect.StructField +- oneofWrappersByType map[reflect.Type]pref.FieldNumber +- oneofWrappersByNumber map[pref.FieldNumber]reflect.Type ++ fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField ++ oneofsByName map[protoreflect.Name]reflect.StructField ++ oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber ++ oneofWrappersByNumber map[protoreflect.FieldNumber]reflect.Type + } + + func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { +@@ -147,10 +146,10 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { + unknownOffset: invalidOffset, + extensionOffset: invalidOffset, + +- fieldsByNumber: map[pref.FieldNumber]reflect.StructField{}, +- oneofsByName: map[pref.Name]reflect.StructField{}, +- oneofWrappersByType: map[reflect.Type]pref.FieldNumber{}, +- oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{}, ++ fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{}, ++ oneofsByName: map[protoreflect.Name]reflect.StructField{}, ++ oneofWrappersByType: map[reflect.Type]protoreflect.FieldNumber{}, ++ oneofWrappersByNumber: map[protoreflect.FieldNumber]reflect.Type{}, + } + + fieldLoop: +@@ -180,12 +179,12 @@ fieldLoop: + for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { + if len(s) > 0 && strings.Trim(s, "0123456789") == "" { + n, _ := strconv.ParseUint(s, 10, 64) +- si.fieldsByNumber[pref.FieldNumber(n)] = f ++ si.fieldsByNumber[protoreflect.FieldNumber(n)] = f + continue fieldLoop + } + } + if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 { +- si.oneofsByName[pref.Name(s)] = f ++ si.oneofsByName[protoreflect.Name(s)] = f + continue fieldLoop + } + } +@@ -208,8 +207,8 @@ fieldLoop: + for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { + if len(s) > 0 && strings.Trim(s, "0123456789") == "" { + n, _ := strconv.ParseUint(s, 10, 64) +- si.oneofWrappersByType[tf] = pref.FieldNumber(n) +- si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf ++ si.oneofWrappersByType[tf] = protoreflect.FieldNumber(n) ++ si.oneofWrappersByNumber[protoreflect.FieldNumber(n)] = tf + break + } + } +@@ -219,7 +218,11 @@ fieldLoop: + } + + func (mi *MessageInfo) New() protoreflect.Message { +- return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface()) ++ m := reflect.New(mi.GoReflectType.Elem()).Interface() ++ if r, ok := m.(protoreflect.ProtoMessage); ok { ++ return r.ProtoReflect() ++ } ++ return mi.MessageOf(m) + } + func (mi *MessageInfo) Zero() protoreflect.Message { + return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) +@@ -237,7 +240,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { + fd := mi.Desc.Fields().Get(i) + switch { + case fd.IsWeak(): +- mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName()) ++ mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) + return mt + case fd.IsMap(): + return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} +diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +index 9488b72..d9ea010 
100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +@@ -10,17 +10,17 @@ import ( + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/pragma" +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + type reflectMessageInfo struct { +- fields map[pref.FieldNumber]*fieldInfo +- oneofs map[pref.Name]*oneofInfo ++ fields map[protoreflect.FieldNumber]*fieldInfo ++ oneofs map[protoreflect.Name]*oneofInfo + + // fieldTypes contains the zero value of an enum or message field. + // For lists, it contains the element type. + // For maps, it contains the entry value type. +- fieldTypes map[pref.FieldNumber]interface{} ++ fieldTypes map[protoreflect.FieldNumber]interface{} + + // denseFields is a subset of fields where: + // 0 < fieldDesc.Number() < len(denseFields) +@@ -30,8 +30,8 @@ type reflectMessageInfo struct { + // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. + rangeInfos []interface{} // either *fieldInfo or *oneofInfo + +- getUnknown func(pointer) pref.RawFields +- setUnknown func(pointer, pref.RawFields) ++ getUnknown func(pointer) protoreflect.RawFields ++ setUnknown func(pointer, protoreflect.RawFields) + extensionMap func(pointer) *extensionMap + + nilMessage atomicNilMessage +@@ -52,7 +52,7 @@ func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { + // This code assumes that the struct is well-formed and panics if there are + // any discrepancies. + func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { +- mi.fields = map[pref.FieldNumber]*fieldInfo{} ++ mi.fields = map[protoreflect.FieldNumber]*fieldInfo{} + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { +@@ -82,7 +82,7 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { + mi.fields[fd.Number()] = &fi + } + +- mi.oneofs = map[pref.Name]*oneofInfo{} ++ mi.oneofs = map[protoreflect.Name]*oneofInfo{} + for i := 0; i < md.Oneofs().Len(); i++ { + od := md.Oneofs().Get(i) + mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter) +@@ -117,13 +117,13 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { + switch { + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: + // Handle as []byte. +- mi.getUnknown = func(p pointer) pref.RawFields { ++ mi.getUnknown = func(p pointer) protoreflect.RawFields { + if p.IsNil() { + return nil + } + return *p.Apply(mi.unknownOffset).Bytes() + } +- mi.setUnknown = func(p pointer, b pref.RawFields) { ++ mi.setUnknown = func(p pointer, b protoreflect.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } +@@ -131,7 +131,7 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { + } + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: + // Handle as *[]byte. 
+- mi.getUnknown = func(p pointer) pref.RawFields { ++ mi.getUnknown = func(p pointer) protoreflect.RawFields { + if p.IsNil() { + return nil + } +@@ -141,7 +141,7 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { + } + return **bp + } +- mi.setUnknown = func(p pointer, b pref.RawFields) { ++ mi.setUnknown = func(p pointer, b protoreflect.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } +@@ -152,10 +152,10 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { + **bp = b + } + default: +- mi.getUnknown = func(pointer) pref.RawFields { ++ mi.getUnknown = func(pointer) protoreflect.RawFields { + return nil + } +- mi.setUnknown = func(p pointer, _ pref.RawFields) { ++ mi.setUnknown = func(p pointer, _ protoreflect.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } +@@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { + } + if ft != nil { + if mi.fieldTypes == nil { +- mi.fieldTypes = make(map[pref.FieldNumber]interface{}) ++ mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) + } + mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() + } +@@ -233,7 +233,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { + + type extensionMap map[int32]ExtensionField + +-func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { ++func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if m != nil { + for _, x := range *m { + xd := x.Type().TypeDescriptor() +@@ -247,7 +247,7 @@ func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + } + } + } +-func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { ++func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { + if m == nil { + return false + } +@@ -266,10 +266,10 @@ func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { + } + return true + } +-func (m *extensionMap) Clear(xt pref.ExtensionType) { ++func (m *extensionMap) Clear(xt protoreflect.ExtensionType) { + delete(*m, int32(xt.TypeDescriptor().Number())) + } +-func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { ++func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value { + xd := xt.TypeDescriptor() + if m != nil { + if x, ok := (*m)[int32(xd.Number())]; ok { +@@ -278,7 +278,7 @@ func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { + } + return xt.Zero() + } +-func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { ++func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) { + xd := xt.TypeDescriptor() + isValid := true + switch { +@@ -302,9 +302,9 @@ func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { + x.Set(xt, v) + (*m)[int32(xd.Number())] = x + } +-func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { ++func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value { + xd := xt.TypeDescriptor() +- if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() { ++ if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() { + panic("invalid Mutable on field with non-composite type") + } + if x, ok := (*m)[int32(xd.Number())]; ok { +@@ -320,7 +320,6 @@ func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { + // in an allocation-free way without needing to have a shadow Go type generated + // for every message type. 
This technique only works using unsafe. + // +-// + // Example generated code: + // + // type M struct { +@@ -351,12 +350,11 @@ func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { + // It has access to the message info as its first field, and a pointer to the + // MessageState is identical to a pointer to the concrete message value. + // +-// + // Requirements: +-// • The type M must implement protoreflect.ProtoMessage. +-// • The address of m must not be nil. +-// • The address of m and the address of m.state must be equal, +-// even though they are different Go types. ++// - The type M must implement protoreflect.ProtoMessage. ++// - The address of m must not be nil. ++// - The address of m and the address of m.state must be equal, ++// even though they are different Go types. + type MessageState struct { + pragma.NoUnkeyedLiterals + pragma.DoNotCompare +@@ -368,8 +366,8 @@ type MessageState struct { + type messageState MessageState + + var ( +- _ pref.Message = (*messageState)(nil) +- _ unwrapper = (*messageState)(nil) ++ _ protoreflect.Message = (*messageState)(nil) ++ _ unwrapper = (*messageState)(nil) + ) + + // messageDataType is a tuple of a pointer to the message data and +@@ -387,16 +385,16 @@ type ( + ) + + var ( +- _ pref.Message = (*messageReflectWrapper)(nil) +- _ unwrapper = (*messageReflectWrapper)(nil) +- _ pref.ProtoMessage = (*messageIfaceWrapper)(nil) +- _ unwrapper = (*messageIfaceWrapper)(nil) ++ _ protoreflect.Message = (*messageReflectWrapper)(nil) ++ _ unwrapper = (*messageReflectWrapper)(nil) ++ _ protoreflect.ProtoMessage = (*messageIfaceWrapper)(nil) ++ _ unwrapper = (*messageIfaceWrapper)(nil) + ) + + // MessageOf returns a reflective view over a message. The input must be a + // pointer to a named Go struct. If the provided type has a ProtoReflect method, + // it must be implemented by calling this method. +-func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { ++func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { + if reflect.TypeOf(m) != mi.GoReflectType { + panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) + } +@@ -421,7 +419,7 @@ func (m *messageIfaceWrapper) Reset() { + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + } + } +-func (m *messageIfaceWrapper) ProtoReflect() pref.Message { ++func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { + return (*messageReflectWrapper)(m) + } + func (m *messageIfaceWrapper) protoUnwrap() interface{} { +@@ -430,7 +428,7 @@ func (m *messageIfaceWrapper) protoUnwrap() interface{} { + + // checkField verifies that the provided field descriptor is valid. + // Exactly one of the returned values is populated. 
+-func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) { ++func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) { + var fi *fieldInfo + if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { + fi = mi.denseFields[n] +@@ -455,7 +453,7 @@ func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.Ext + if !mi.Desc.ExtensionRanges().Has(fd.Number()) { + panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName())) + } +- xtd, ok := fd.(pref.ExtensionTypeDescriptor) ++ xtd, ok := fd.(protoreflect.ExtensionTypeDescriptor) + if !ok { + panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) + } +diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +index 343cf87..5e736c6 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +@@ -11,24 +11,24 @@ import ( + "sync" + + "google.golang.org/protobuf/internal/flags" +- pref "google.golang.org/protobuf/reflect/protoreflect" +- preg "google.golang.org/protobuf/reflect/protoregistry" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoregistry" + ) + + type fieldInfo struct { +- fieldDesc pref.FieldDescriptor ++ fieldDesc protoreflect.FieldDescriptor + + // These fields are used for protobuf reflection support. + has func(pointer) bool + clear func(pointer) +- get func(pointer) pref.Value +- set func(pointer, pref.Value) +- mutable func(pointer) pref.Value +- newMessage func() pref.Message +- newField func() pref.Value ++ get func(pointer) protoreflect.Value ++ set func(pointer, protoreflect.Value) ++ mutable func(pointer) protoreflect.Value ++ newMessage func() protoreflect.Message ++ newField func() protoreflect.Value + } + +-func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { ++func fieldInfoForMissing(fd protoreflect.FieldDescriptor) fieldInfo { + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. 
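
The fieldInfo type touched above is a table of per-field accessor closures (has/clear/get/set/mutable/newField) built once per message type, so the reflective Message implementation can dispatch without re-inspecting the field on every call. A minimal stand-alone sketch of that closure-table idea, using only the standard reflect package and a stand-in accessor type rather than the library's real fieldInfo, might look like this:

package main

import (
	"fmt"
	"reflect"
)

// accessor pairs a getter and setter closure for one struct field,
// loosely mirroring how fieldInfo binds closures per protobuf field.
type accessor struct {
	get func(msg reflect.Value) interface{}
	set func(msg reflect.Value, v interface{})
}

// accessorFor builds the closures once for a given struct field.
func accessorFor(field reflect.StructField) accessor {
	idx := field.Index
	return accessor{
		get: func(msg reflect.Value) interface{} { return msg.FieldByIndex(idx).Interface() },
		set: func(msg reflect.Value, v interface{}) { msg.FieldByIndex(idx).Set(reflect.ValueOf(v)) },
	}
}

type example struct{ Name string }

func main() {
	f, _ := reflect.TypeOf(example{}).FieldByName("Name")
	acc := accessorFor(f)
	m := reflect.ValueOf(&example{}).Elem() // addressable value, as Set requires
	acc.set(m, "hello")
	fmt.Println(acc.get(m)) // hello
}
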
+@@ -40,19 +40,19 @@ func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { + clear: func(p pointer) { + panic("missing Go struct field for " + string(fd.FullName())) + }, +- get: func(p pointer) pref.Value { ++ get: func(p pointer) protoreflect.Value { + return fd.Default() + }, +- set: func(p pointer, v pref.Value) { ++ set: func(p pointer, v protoreflect.Value) { + panic("missing Go struct field for " + string(fd.FullName())) + }, +- mutable: func(p pointer) pref.Value { ++ mutable: func(p pointer) protoreflect.Value { + panic("missing Go struct field for " + string(fd.FullName())) + }, +- newMessage: func() pref.Message { ++ newMessage: func() protoreflect.Message { + panic("missing Go struct field for " + string(fd.FullName())) + }, +- newField: func() pref.Value { ++ newField: func() protoreflect.Value { + if v := fd.Default(); v.IsValid() { + return v + } +@@ -61,7 +61,7 @@ func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { + } + } + +-func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { ++func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Interface { + panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft)) +@@ -102,7 +102,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export + } + rv.Set(reflect.Zero(rv.Type())) + }, +- get: func(p pointer) pref.Value { ++ get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } +@@ -113,7 +113,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export + rv = rv.Elem().Elem().Field(0) + return conv.PBValueOf(rv) + }, +- set: func(p pointer, v pref.Value) { ++ set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + rv.Set(reflect.New(ot)) +@@ -121,7 +121,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export + rv = rv.Elem().Elem().Field(0) + rv.Set(conv.GoValueOf(v)) + }, +- mutable: func(p pointer) pref.Value { ++ mutable: func(p pointer) protoreflect.Value { + if !isMessage { + panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName())) + } +@@ -131,20 +131,20 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export + } + rv = rv.Elem().Elem().Field(0) + if rv.Kind() == reflect.Ptr && rv.IsNil() { +- rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) ++ rv.Set(conv.GoValueOf(protoreflect.ValueOfMessage(conv.New().Message()))) + } + return conv.PBValueOf(rv) + }, +- newMessage: func() pref.Message { ++ newMessage: func() protoreflect.Message { + return conv.New().Message() + }, +- newField: func() pref.Value { ++ newField: func() protoreflect.Value { + return conv.New() + }, + } + } + +-func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ++func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Map { + panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft)) +@@ -166,7 +166,7 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) 
+ }, +- get: func(p pointer) pref.Value { ++ get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } +@@ -176,7 +176,7 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter + } + return conv.PBValueOf(rv) + }, +- set: func(p pointer, v pref.Value) { ++ set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + pv := conv.GoValueOf(v) + if pv.IsNil() { +@@ -184,20 +184,20 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter + } + rv.Set(pv) + }, +- mutable: func(p pointer) pref.Value { ++ mutable: func(p pointer) protoreflect.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if v.IsNil() { + v.Set(reflect.MakeMap(fs.Type)) + } + return conv.PBValueOf(v) + }, +- newField: func() pref.Value { ++ newField: func() protoreflect.Value { + return conv.New() + }, + } + } + +-func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ++func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft)) +@@ -219,7 +219,7 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, +- get: func(p pointer) pref.Value { ++ get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } +@@ -229,7 +229,7 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte + } + return conv.PBValueOf(rv) + }, +- set: func(p pointer, v pref.Value) { ++ set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + pv := conv.GoValueOf(v) + if pv.IsNil() { +@@ -237,11 +237,11 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte + } + rv.Set(pv.Elem()) + }, +- mutable: func(p pointer) pref.Value { ++ mutable: func(p pointer) protoreflect.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type) + return conv.PBValueOf(v) + }, +- newField: func() pref.Value { ++ newField: func() protoreflect.Value { + return conv.New() + }, + } +@@ -252,7 +252,7 @@ var ( + emptyBytes = reflect.ValueOf([]byte{}) + ) + +-func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ++func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + nullable := fd.HasPresence() + isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 +@@ -300,7 +300,7 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, +- get: func(p pointer) pref.Value { ++ get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } +@@ -315,7 +315,7 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor + } + return conv.PBValueOf(rv) + }, +- set: func(p pointer, v pref.Value) { ++ set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable && rv.Kind() == reflect.Ptr { + if rv.IsNil() { +@@ -332,23 +332,23 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor + } + } + }, +- newField: func() pref.Value { ++ newField: 
func() protoreflect.Value { + return conv.New() + }, + } + } + +-func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo { ++func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { + if !flags.ProtoLegacy { + panic("no support for proto1 weak fields") + } + + var once sync.Once +- var messageType pref.MessageType ++ var messageType protoreflect.MessageType + lazyInit := func() { + once.Do(func() { + messageName := fd.Message().FullName() +- messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) ++ messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) + if messageType == nil { + panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) + } +@@ -368,18 +368,18 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn + clear: func(p pointer) { + p.Apply(weakOffset).WeakFields().clear(num) + }, +- get: func(p pointer) pref.Value { ++ get: func(p pointer) protoreflect.Value { + lazyInit() + if p.IsNil() { +- return pref.ValueOfMessage(messageType.Zero()) ++ return protoreflect.ValueOfMessage(messageType.Zero()) + } + m, ok := p.Apply(weakOffset).WeakFields().get(num) + if !ok { +- return pref.ValueOfMessage(messageType.Zero()) ++ return protoreflect.ValueOfMessage(messageType.Zero()) + } +- return pref.ValueOfMessage(m.ProtoReflect()) ++ return protoreflect.ValueOfMessage(m.ProtoReflect()) + }, +- set: func(p pointer, v pref.Value) { ++ set: func(p pointer, v protoreflect.Value) { + lazyInit() + m := v.Message() + if m.Descriptor() != messageType.Descriptor() { +@@ -390,7 +390,7 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn + } + p.Apply(weakOffset).WeakFields().set(num, m.Interface()) + }, +- mutable: func(p pointer) pref.Value { ++ mutable: func(p pointer) protoreflect.Value { + lazyInit() + fs := p.Apply(weakOffset).WeakFields() + m, ok := fs.get(num) +@@ -398,20 +398,20 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn + m = messageType.New().Interface() + fs.set(num, m) + } +- return pref.ValueOfMessage(m.ProtoReflect()) ++ return protoreflect.ValueOfMessage(m.ProtoReflect()) + }, +- newMessage: func() pref.Message { ++ newMessage: func() protoreflect.Message { + lazyInit() + return messageType.New() + }, +- newField: func() pref.Value { ++ newField: func() protoreflect.Value { + lazyInit() +- return pref.ValueOfMessage(messageType.New()) ++ return protoreflect.ValueOfMessage(messageType.New()) + }, + } + } + +-func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ++func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + conv := NewConverter(ft, fd) + +@@ -433,47 +433,47 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, +- get: func(p pointer) pref.Value { ++ get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + }, +- set: func(p pointer, v pref.Value) { ++ set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(conv.GoValueOf(v)) + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { + panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) + } + }, +- 
mutable: func(p pointer) pref.Value { ++ mutable: func(p pointer) protoreflect.Value { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { + rv.Set(conv.GoValueOf(conv.New())) + } + return conv.PBValueOf(rv) + }, +- newMessage: func() pref.Message { ++ newMessage: func() protoreflect.Message { + return conv.New().Message() + }, +- newField: func() pref.Value { ++ newField: func() protoreflect.Value { + return conv.New() + }, + } + } + + type oneofInfo struct { +- oneofDesc pref.OneofDescriptor +- which func(pointer) pref.FieldNumber ++ oneofDesc protoreflect.OneofDescriptor ++ which func(pointer) protoreflect.FieldNumber + } + +-func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo { ++func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fs := si.fieldsByNumber[od.Fields().Get(0).Number()] + fieldOffset := offsetOf(fs, x) +- oi.which = func(p pointer) pref.FieldNumber { ++ oi.which = func(p pointer) protoreflect.FieldNumber { + if p.IsNil() { + return 0 + } +@@ -486,7 +486,7 @@ func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInf + } else { + fs := si.oneofsByName[od.Name()] + fieldOffset := offsetOf(fs, x) +- oi.which = func(p pointer) pref.FieldNumber { ++ oi.which = func(p pointer) protoreflect.FieldNumber { + if p.IsNil() { + return 0 + } +diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go +index 08cfb60..a24e6bb 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go +@@ -16,9 +16,9 @@ import ( + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" +- pref "google.golang.org/protobuf/reflect/protoreflect" +- preg "google.golang.org/protobuf/reflect/protoregistry" +- piface "google.golang.org/protobuf/runtime/protoiface" ++ "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoregistry" ++ "google.golang.org/protobuf/runtime/protoiface" + ) + + // ValidationStatus is the result of validating the wire-format encoding of a message. +@@ -56,20 +56,20 @@ func (v ValidationStatus) String() string { + // of the message type. + // + // This function is exposed for testing. 
+-func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) { ++func Validate(mt protoreflect.MessageType, in protoiface.UnmarshalInput) (out protoiface.UnmarshalOutput, _ ValidationStatus) { + mi, ok := mt.(*MessageInfo) + if !ok { + return out, ValidationUnknown + } + if in.Resolver == nil { +- in.Resolver = preg.GlobalTypes ++ in.Resolver = protoregistry.GlobalTypes + } + o, st := mi.validate(in.Buf, 0, unmarshalOptions{ + flags: in.Flags, + resolver: in.Resolver, + }) + if o.initialized { +- out.Flags |= piface.UnmarshalInitialized ++ out.Flags |= protoiface.UnmarshalInitialized + } + return out, st + } +@@ -106,22 +106,22 @@ const ( + validationTypeMessageSetItem + ) + +-func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo { ++func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo { + var vi validationInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + switch fd.Kind() { +- case pref.MessageKind: ++ case protoreflect.MessageKind: + vi.typ = validationTypeMessage + if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { + vi.mi = getMessageInfo(ot.Field(0).Type) + } +- case pref.GroupKind: ++ case protoreflect.GroupKind: + vi.typ = validationTypeGroup + if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { + vi.mi = getMessageInfo(ot.Field(0).Type) + } +- case pref.StringKind: ++ case protoreflect.StringKind: + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } +@@ -129,7 +129,7 @@ func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescrip + default: + vi = newValidationInfo(fd, ft) + } +- if fd.Cardinality() == pref.Required { ++ if fd.Cardinality() == protoreflect.Required { + // Avoid overflow. 
The required field check is done with a 64-bit mask, with + // any message containing more than 64 required fields always reported as + // potentially uninitialized, so it is not important to get a precise count +@@ -142,22 +142,22 @@ func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescrip + return vi + } + +-func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo { ++func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo { + var vi validationInfo + switch { + case fd.IsList(): + switch fd.Kind() { +- case pref.MessageKind: ++ case protoreflect.MessageKind: + vi.typ = validationTypeMessage + if ft.Kind() == reflect.Slice { + vi.mi = getMessageInfo(ft.Elem()) + } +- case pref.GroupKind: ++ case protoreflect.GroupKind: + vi.typ = validationTypeGroup + if ft.Kind() == reflect.Slice { + vi.mi = getMessageInfo(ft.Elem()) + } +- case pref.StringKind: ++ case protoreflect.StringKind: + vi.typ = validationTypeBytes + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String +@@ -175,33 +175,33 @@ func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo + case fd.IsMap(): + vi.typ = validationTypeMap + switch fd.MapKey().Kind() { +- case pref.StringKind: ++ case protoreflect.StringKind: + if strs.EnforceUTF8(fd) { + vi.keyType = validationTypeUTF8String + } + } + switch fd.MapValue().Kind() { +- case pref.MessageKind: ++ case protoreflect.MessageKind: + vi.valType = validationTypeMessage + if ft.Kind() == reflect.Map { + vi.mi = getMessageInfo(ft.Elem()) + } +- case pref.StringKind: ++ case protoreflect.StringKind: + if strs.EnforceUTF8(fd) { + vi.valType = validationTypeUTF8String + } + } + default: + switch fd.Kind() { +- case pref.MessageKind: ++ case protoreflect.MessageKind: + vi.typ = validationTypeMessage + if !fd.IsWeak() { + vi.mi = getMessageInfo(ft) + } +- case pref.GroupKind: ++ case protoreflect.GroupKind: + vi.typ = validationTypeGroup + vi.mi = getMessageInfo(ft) +- case pref.StringKind: ++ case protoreflect.StringKind: + vi.typ = validationTypeBytes + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String +@@ -314,11 +314,11 @@ State: + break + } + messageName := fd.Message().FullName() +- messageType, err := preg.GlobalTypes.FindMessageByName(messageName) ++ messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) + switch err { + case nil: + vi.mi, _ = messageType.(*MessageInfo) +- case preg.NotFound: ++ case protoregistry.NotFound: + vi.typ = validationTypeBytes + default: + return out, ValidationUnknown +@@ -335,7 +335,7 @@ State: + // unmarshaling to begin failing. Supporting this requires some way to + // determine if the resolver is frozen. 
+ xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num) +- if err != nil && err != preg.NotFound { ++ if err != nil && err != protoregistry.NotFound { + return out, ValidationUnknown + } + if err == nil { +@@ -513,7 +513,7 @@ State: + } + xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid) + switch { +- case err == preg.NotFound: ++ case err == protoregistry.NotFound: + b = b[n:] + case err != nil: + return out, ValidationUnknown +diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go +index 009cbef..eb79a7b 100644 +--- a/vendor/google.golang.org/protobuf/internal/impl/weak.go ++++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go +@@ -7,7 +7,7 @@ package impl + import ( + "fmt" + +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + ) + +@@ -17,32 +17,32 @@ import ( + // defined directly on it. + type weakFields WeakFields + +-func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) { ++func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { + m, ok := w[int32(num)] + return m, ok + } + +-func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) { ++func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { + if *w == nil { + *w = make(weakFields) + } + (*w)[int32(num)] = m + } + +-func (w *weakFields) clear(num pref.FieldNumber) { ++func (w *weakFields) clear(num protoreflect.FieldNumber) { + delete(*w, int32(num)) + } + +-func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool { ++func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { + _, ok := w[int32(num)] + return ok + } + +-func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) { ++func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { + delete(*w, int32(num)) + } + +-func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage { ++func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { + if m, ok := w[int32(num)]; ok { + return m + } +@@ -53,7 +53,7 @@ func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pr + return mt.Zero().Interface() + } + +-func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) { ++func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { + if m != nil { + mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) + if mt == nil { +diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go +index 2a24953..33745ed 100644 +--- a/vendor/google.golang.org/protobuf/internal/order/order.go ++++ b/vendor/google.golang.org/protobuf/internal/order/order.go +@@ -5,12 +5,12 @@ + package order + + import ( +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + // FieldOrder specifies the ordering to visit message fields. + // It is a function that reports whether x is ordered before y. +-type FieldOrder func(x, y pref.FieldDescriptor) bool ++type FieldOrder func(x, y protoreflect.FieldDescriptor) bool + + var ( + // AnyFieldOrder specifies no specific field ordering. 
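
FieldOrder (and KeyOrder below) are plain comparator functions reporting whether x sorts before y; RangeFields and RangeEntries in range.go collect the visited items and sort them with such a comparator before invoking the visitor callback. A small self-contained sketch of that pattern, with a stand-in fieldDesc type instead of protoreflect.FieldDescriptor, is:

package main

import (
	"fmt"
	"sort"
)

// fieldDesc stands in for protoreflect.FieldDescriptor in this sketch.
type fieldDesc struct{ number int32 }

// fieldOrder mirrors the comparator shape: reports whether x sorts before y.
type fieldOrder func(x, y fieldDesc) bool

func main() {
	fields := []fieldDesc{{number: 3}, {number: 1}, {number: 2}}
	var numberOrder fieldOrder = func(x, y fieldDesc) bool { return x.number < y.number }
	sort.Slice(fields, func(i, j int) bool { return numberOrder(fields[i], fields[j]) })
	fmt.Println(fields) // [{1} {2} {3}]
}
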
+@@ -18,9 +18,9 @@ var ( + + // LegacyFieldOrder sorts fields in the same ordering as emitted by + // wire serialization in the github.com/golang/protobuf implementation. +- LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { ++ LegacyFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { + ox, oy := x.ContainingOneof(), y.ContainingOneof() +- inOneof := func(od pref.OneofDescriptor) bool { ++ inOneof := func(od protoreflect.OneofDescriptor) bool { + return od != nil && !od.IsSynthetic() + } + +@@ -41,14 +41,14 @@ var ( + } + + // NumberFieldOrder sorts fields by their field number. +- NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { ++ NumberFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { + return x.Number() < y.Number() + } + + // IndexNameFieldOrder sorts non-extension fields before extension fields. + // Non-extensions are sorted according to their declaration index. + // Extensions are sorted according to their full name. +- IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { ++ IndexNameFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { + // Non-extension fields sort before extension fields. + if x.IsExtension() != y.IsExtension() { + return !x.IsExtension() && y.IsExtension() +@@ -64,7 +64,7 @@ var ( + + // KeyOrder specifies the ordering to visit map entries. + // It is a function that reports whether x is ordered before y. +-type KeyOrder func(x, y pref.MapKey) bool ++type KeyOrder func(x, y protoreflect.MapKey) bool + + var ( + // AnyKeyOrder specifies no specific key ordering. +@@ -72,7 +72,7 @@ var ( + + // GenericKeyOrder sorts false before true, numeric keys in ascending order, + // and strings in lexicographical ordering according to UTF-8 codepoints. +- GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool { ++ GenericKeyOrder KeyOrder = func(x, y protoreflect.MapKey) bool { + switch x.Interface().(type) { + case bool: + return !x.Bool() && y.Bool() +diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go +index c8090e0..1665a68 100644 +--- a/vendor/google.golang.org/protobuf/internal/order/range.go ++++ b/vendor/google.golang.org/protobuf/internal/order/range.go +@@ -9,12 +9,12 @@ import ( + "sort" + "sync" + +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + type messageField struct { +- fd pref.FieldDescriptor +- v pref.Value ++ fd protoreflect.FieldDescriptor ++ v protoreflect.Value + } + + var messageFieldPool = sync.Pool{ +@@ -25,8 +25,8 @@ type ( + // FieldRnger is an interface for visiting all fields in a message. + // The protoreflect.Message type implements this interface. + FieldRanger interface{ Range(VisitField) } +- // VisitField is called everytime a message field is visited. +- VisitField = func(pref.FieldDescriptor, pref.Value) bool ++ // VisitField is called every time a message field is visited. ++ VisitField = func(protoreflect.FieldDescriptor, protoreflect.Value) bool + ) + + // RangeFields iterates over the fields of fs according to the specified order. +@@ -47,7 +47,7 @@ func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { + }() + + // Collect all fields in the message and sort them. 
+- fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { ++ fs.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + fields = append(fields, messageField{fd, v}) + return true + }) +@@ -64,8 +64,8 @@ func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { + } + + type mapEntry struct { +- k pref.MapKey +- v pref.Value ++ k protoreflect.MapKey ++ v protoreflect.Value + } + + var mapEntryPool = sync.Pool{ +@@ -76,8 +76,8 @@ type ( + // EntryRanger is an interface for visiting all fields in a message. + // The protoreflect.Map type implements this interface. + EntryRanger interface{ Range(VisitEntry) } +- // VisitEntry is called everytime a map entry is visited. +- VisitEntry = func(pref.MapKey, pref.Value) bool ++ // VisitEntry is called every time a map entry is visited. ++ VisitEntry = func(protoreflect.MapKey, protoreflect.Value) bool + ) + + // RangeEntries iterates over the entries of es according to the specified order. +@@ -98,7 +98,7 @@ func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) { + }() + + // Collect all entries in the map and sort them. +- es.Range(func(k pref.MapKey, v pref.Value) bool { ++ es.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, mapEntry{k, v}) + return true + }) +diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +index 56a8a4e..61a84d3 100644 +--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go ++++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +@@ -10,7 +10,7 @@ package strs + import ( + "unsafe" + +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + + type ( +@@ -59,7 +59,7 @@ type Builder struct { + + // AppendFullName is equivalent to protoreflect.FullName.Append, + // but optimized for large batches where each name has a shared lifetime. +-func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { ++func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") +@@ -68,7 +68,7 @@ func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.Ful + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) +- return pref.FullName(sb.last(n)) ++ return protoreflect.FullName(sb.last(n)) + } + + // MakeString is equivalent to string(b), but optimized for large batches +@@ -87,7 +87,7 @@ func (sb *Builder) grow(n int) { + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. +- sb.buf = make([]byte, 2*(cap(sb.buf)+n)) ++ sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) + } + + func (sb *Builder) last(n int) string { +diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go +index 3d40d52..f7014cd 100644 +--- a/vendor/google.golang.org/protobuf/internal/version/version.go ++++ b/vendor/google.golang.org/protobuf/internal/version/version.go +@@ -12,47 +12,46 @@ import ( + + // These constants determine the current version of this module. 
+ // +-// + // For our release process, we enforce the following rules: +-// * Tagged releases use a tag that is identical to String. +-// * Tagged releases never reference a commit where the String +-// contains "devel". +-// * The set of all commits in this repository where String +-// does not contain "devel" must have a unique String. +-// ++// - Tagged releases use a tag that is identical to String. ++// - Tagged releases never reference a commit where the String ++// contains "devel". ++// - The set of all commits in this repository where String ++// does not contain "devel" must have a unique String. + // + // Steps for tagging a new release: +-// 1. Create a new CL. + // +-// 2. Update Minor, Patch, and/or PreRelease as necessary. +-// PreRelease must not contain the string "devel". ++// 1. Create a new CL. + // +-// 3. Since the last released minor version, have there been any changes to +-// generator that relies on new functionality in the runtime? +-// If yes, then increment RequiredGenerated. ++// 2. Update Minor, Patch, and/or PreRelease as necessary. ++// PreRelease must not contain the string "devel". + // +-// 4. Since the last released minor version, have there been any changes to +-// the runtime that removes support for old .pb.go source code? +-// If yes, then increment SupportMinimum. ++// 3. Since the last released minor version, have there been any changes to ++// generator that relies on new functionality in the runtime? ++// If yes, then increment RequiredGenerated. + // +-// 5. Send out the CL for review and submit it. +-// Note that the next CL in step 8 must be submitted after this CL +-// without any other CLs in-between. ++// 4. Since the last released minor version, have there been any changes to ++// the runtime that removes support for old .pb.go source code? ++// If yes, then increment SupportMinimum. + // +-// 6. Tag a new version, where the tag is is the current String. ++// 5. Send out the CL for review and submit it. ++// Note that the next CL in step 8 must be submitted after this CL ++// without any other CLs in-between. + // +-// 7. Write release notes for all notable changes +-// between this release and the last release. ++// 6. Tag a new version, where the tag is is the current String. + // +-// 8. Create a new CL. ++// 7. Write release notes for all notable changes ++// between this release and the last release. + // +-// 9. Update PreRelease to include the string "devel". +-// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" ++// 8. Create a new CL. + // +-// 10. Send out the CL for review and submit it. ++// 9. Update PreRelease to include the string "devel". ++// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" ++// ++// 10. Send out the CL for review and submit it. + const ( + Major = 1 +- Minor = 28 ++ Minor = 30 + Patch = 0 + PreRelease = "" + ) +@@ -60,6 +59,7 @@ const ( + // String formats the version string for this module in semver format. + // + // Examples: ++// + // v1.20.1 + // v1.21.0-rc.1 + func String() string { +diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go +index 11bf717..48d4794 100644 +--- a/vendor/google.golang.org/protobuf/proto/decode.go ++++ b/vendor/google.golang.org/protobuf/proto/decode.go +@@ -19,7 +19,8 @@ import ( + // UnmarshalOptions configures the unmarshaler. 
+ // + // Example usage: +-// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) ++// ++// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) + type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + +diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go +index c52d8c4..ec71e71 100644 +--- a/vendor/google.golang.org/protobuf/proto/doc.go ++++ b/vendor/google.golang.org/protobuf/proto/doc.go +@@ -5,19 +5,15 @@ + // Package proto provides functions operating on protocol buffer messages. + // + // For documentation on protocol buffers in general, see: +-// +-// https://developers.google.com/protocol-buffers ++// https://protobuf.dev. + // + // For a tutorial on using protocol buffers with Go, see: +-// +-// https://developers.google.com/protocol-buffers/docs/gotutorial ++// https://protobuf.dev/getting-started/gotutorial. + // + // For a guide to generated Go protocol buffer code, see: ++// https://protobuf.dev/reference/go/go-generated. + // +-// https://developers.google.com/protocol-buffers/docs/reference/go-generated +-// +-// +-// Binary serialization ++// # Binary serialization + // + // This package contains functions to convert to and from the wire format, + // an efficient binary serialization of protocol buffers. +@@ -30,8 +26,7 @@ + // • Unmarshal converts a message from the wire format. + // The UnmarshalOptions type provides more control over wire unmarshaling. + // +-// +-// Basic message operations ++// # Basic message operations + // + // • Clone makes a deep copy of a message. + // +@@ -45,8 +40,7 @@ + // + // • CheckInitialized reports whether all required fields in a message are set. + // +-// +-// Optional scalar constructors ++// # Optional scalar constructors + // + // The API for some generated messages represents optional scalar fields + // as pointers to a value. For example, an optional string field has the +@@ -61,16 +55,14 @@ + // + // Optional scalar fields are only supported in proto2. + // +-// +-// Extension accessors ++// # Extension accessors + // + // • HasExtension, GetExtension, SetExtension, and ClearExtension + // access extension field values in a protocol buffer message. + // + // Extension fields are only supported in proto2. + // +-// +-// Related packages ++// # Related packages + // + // • Package "google.golang.org/protobuf/encoding/protojson" converts messages to + // and from JSON. +diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go +index d18239c..bf7f816 100644 +--- a/vendor/google.golang.org/protobuf/proto/encode.go ++++ b/vendor/google.golang.org/protobuf/proto/encode.go +@@ -16,7 +16,8 @@ import ( + // MarshalOptions configures the marshaler. + // + // Example usage: +-// b, err := MarshalOptions{Deterministic: true}.Marshal(m) ++// ++// b, err := MarshalOptions{Deterministic: true}.Marshal(m) + type MarshalOptions struct { + pragma.NoUnkeyedLiterals + +@@ -101,7 +102,9 @@ func (o MarshalOptions) Marshal(m Message) ([]byte, error) { + // otherwise it returns a non-nil empty buffer. + // + // This is to assist the edge-case where user-code does the following: ++// + // m1.OptionalBytes, _ = proto.Marshal(m2) ++// + // where they expect the proto2 "optional_bytes" field to be populated + // if any only if m2 is a valid message. 
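
As context for the UnmarshalOptions and MarshalOptions comments above, a minimal round-trip sketch using this module's well-known wrapper types (illustrative only; not taken from the vendored sources):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        in := wrapperspb.String("hello")

        // Deterministic marshal, mirroring the encode.go "Example usage" comment.
        b, err := proto.MarshalOptions{Deterministic: true}.Marshal(in)
        if err != nil {
            panic(err)
        }

        // Unmarshal that discards unknown fields, mirroring the decode.go comment.
        out := &wrapperspb.StringValue{}
        if err := (proto.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(b, out); err != nil {
            panic(err)
        }
        fmt.Println(out.GetValue()) // "hello"
    }
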
+ func emptyBytesForMessage(m Message) []byte { +diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go +index 4dba2b9..1a0be1b 100644 +--- a/vendor/google.golang.org/protobuf/proto/equal.go ++++ b/vendor/google.golang.org/protobuf/proto/equal.go +@@ -5,163 +5,53 @@ + package proto + + import ( +- "bytes" +- "math" + "reflect" + +- "google.golang.org/protobuf/encoding/protowire" +- pref "google.golang.org/protobuf/reflect/protoreflect" ++ "google.golang.org/protobuf/reflect/protoreflect" + ) + +-// Equal reports whether two messages are equal. +-// If two messages marshal to the same bytes under deterministic serialization, +-// then Equal is guaranteed to report true. ++// Equal reports whether two messages are equal, ++// by recursively comparing the fields of the message. + // +-// Two messages are equal if they belong to the same message descriptor, +-// have the same set of populated known and extension field values, +-// and the same set of unknown fields values. If either of the top-level +-// messages are invalid, then Equal reports true only if both are invalid. ++// - Bytes fields are equal if they contain identical bytes. ++// Empty bytes (regardless of nil-ness) are considered equal. + // +-// Scalar values are compared with the equivalent of the == operator in Go, +-// except bytes values which are compared using bytes.Equal and +-// floating point values which specially treat NaNs as equal. +-// Message values are compared by recursively calling Equal. +-// Lists are equal if each element value is also equal. +-// Maps are equal if they have the same set of keys, where the pair of values +-// for each key is also equal. ++// - Floating-point fields are equal if they contain the same value. ++// Unlike the == operator, a NaN is equal to another NaN. ++// ++// - Other scalar fields are equal if they contain the same value. ++// ++// - Message fields are equal if they have ++// the same set of populated known and extension field values, and ++// the same set of unknown fields values. ++// ++// - Lists are equal if they are the same length and ++// each corresponding element is equal. ++// ++// - Maps are equal if they have the same set of keys and ++// the corresponding value for each key is equal. ++// ++// An invalid message is not equal to a valid message. ++// An invalid message is only equal to another invalid message of the ++// same type. An invalid message often corresponds to a nil pointer ++// of the concrete message type. For example, (*pb.M)(nil) is not equal ++// to &pb.M{}. ++// If two valid messages marshal to the same bytes under deterministic ++// serialization, then Equal is guaranteed to report true. + func Equal(x, y Message) bool { + if x == nil || y == nil { + return x == nil && y == nil + } ++ if reflect.TypeOf(x).Kind() == reflect.Ptr && x == y { ++ // Avoid an expensive comparison if both inputs are identical pointers. ++ return true ++ } + mx := x.ProtoReflect() + my := y.ProtoReflect() + if mx.IsValid() != my.IsValid() { + return false + } +- return equalMessage(mx, my) +-} +- +-// equalMessage compares two messages. 
+-func equalMessage(mx, my pref.Message) bool { +- if mx.Descriptor() != my.Descriptor() { +- return false +- } +- +- nx := 0 +- equal := true +- mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { +- nx++ +- vy := my.Get(fd) +- equal = my.Has(fd) && equalField(fd, vx, vy) +- return equal +- }) +- if !equal { +- return false +- } +- ny := 0 +- my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { +- ny++ +- return true +- }) +- if nx != ny { +- return false +- } +- +- return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +-} +- +-// equalField compares two fields. +-func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { +- switch { +- case fd.IsList(): +- return equalList(fd, x.List(), y.List()) +- case fd.IsMap(): +- return equalMap(fd, x.Map(), y.Map()) +- default: +- return equalValue(fd, x, y) +- } +-} +- +-// equalMap compares two maps. +-func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { +- if x.Len() != y.Len() { +- return false +- } +- equal := true +- x.Range(func(k pref.MapKey, vx pref.Value) bool { +- vy := y.Get(k) +- equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) +- return equal +- }) +- return equal +-} +- +-// equalList compares two lists. +-func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { +- if x.Len() != y.Len() { +- return false +- } +- for i := x.Len() - 1; i >= 0; i-- { +- if !equalValue(fd, x.Get(i), y.Get(i)) { +- return false +- } +- } +- return true +-} +- +-// equalValue compares two singular values. +-func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { +- switch fd.Kind() { +- case pref.BoolKind: +- return x.Bool() == y.Bool() +- case pref.EnumKind: +- return x.Enum() == y.Enum() +- case pref.Int32Kind, pref.Sint32Kind, +- pref.Int64Kind, pref.Sint64Kind, +- pref.Sfixed32Kind, pref.Sfixed64Kind: +- return x.Int() == y.Int() +- case pref.Uint32Kind, pref.Uint64Kind, +- pref.Fixed32Kind, pref.Fixed64Kind: +- return x.Uint() == y.Uint() +- case pref.FloatKind, pref.DoubleKind: +- fx := x.Float() +- fy := y.Float() +- if math.IsNaN(fx) || math.IsNaN(fy) { +- return math.IsNaN(fx) && math.IsNaN(fy) +- } +- return fx == fy +- case pref.StringKind: +- return x.String() == y.String() +- case pref.BytesKind: +- return bytes.Equal(x.Bytes(), y.Bytes()) +- case pref.MessageKind, pref.GroupKind: +- return equalMessage(x.Message(), y.Message()) +- default: +- return x.Interface() == y.Interface() +- } +-} +- +-// equalUnknown compares unknown fields by direct comparison on the raw bytes +-// of each individual field number. +-func equalUnknown(x, y pref.RawFields) bool { +- if len(x) != len(y) { +- return false +- } +- if bytes.Equal([]byte(x), []byte(y)) { +- return true +- } +- +- mx := make(map[pref.FieldNumber]pref.RawFields) +- my := make(map[pref.FieldNumber]pref.RawFields) +- for len(x) > 0 { +- fnum, _, n := protowire.ConsumeField(x) +- mx[fnum] = append(mx[fnum], x[:n]...) +- x = x[n:] +- } +- for len(y) > 0 { +- fnum, _, n := protowire.ConsumeField(y) +- my[fnum] = append(my[fnum], y[:n]...) 
+- y = y[n:] +- } +- return reflect.DeepEqual(mx, my) ++ vx := protoreflect.ValueOfMessage(mx) ++ vy := protoreflect.ValueOfMessage(my) ++ return vx.Equal(vy) + } +diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +index cebb36c..27d7e35 100644 +--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go ++++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +@@ -155,9 +155,9 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, + // + // Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", + // then the following full names are searched: +-// * fizz.buzz.Foo.Bar +-// * fizz.Foo.Bar +-// * Foo.Bar ++// - fizz.buzz.Foo.Bar ++// - fizz.Foo.Bar ++// - Foo.Bar + func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { + if !ref.IsValid() { + return nil, errors.New("invalid name reference: %q", ref) +diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +index dd85915..55aa149 100644 +--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go ++++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +@@ -8,8 +8,7 @@ + // defined in proto source files and value interfaces which provide the + // ability to examine and manipulate the contents of messages. + // +-// +-// Protocol Buffer Descriptors ++// # Protocol Buffer Descriptors + // + // Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) + // are immutable objects that represent protobuf type information. +@@ -26,8 +25,7 @@ + // The "google.golang.org/protobuf/reflect/protodesc" package converts between + // google.protobuf.DescriptorProto messages and protobuf descriptors. + // +-// +-// Go Type Descriptors ++// # Go Type Descriptors + // + // A type descriptor (e.g., EnumType or MessageType) is a constructor for + // a concrete Go type that represents the associated protobuf descriptor. +@@ -41,8 +39,7 @@ + // The "google.golang.org/protobuf/types/dynamicpb" package can be used to + // create Go type descriptors from protobuf descriptors. + // +-// +-// Value Interfaces ++// # Value Interfaces + // + // The Enum and Message interfaces provide a reflective view over an + // enum or message instance. For enums, it provides the ability to retrieve +@@ -55,13 +52,11 @@ + // The "github.com/golang/protobuf/proto".MessageReflect function can be used + // to obtain a reflective view on older messages. + // +-// +-// Relationships ++// # Relationships + // + // The following diagrams demonstrate the relationships between + // various types declared in this package. + // +-// + // ┌───────────────────────────────────┐ + // V │ + // ┌────────────── New(n) ─────────────┐ │ +@@ -83,7 +78,6 @@ + // + // • An Enum is a concrete enum instance. Generated enums implement Enum. + // +-// + // ┌──────────────── New() ─────────────────┐ + // │ │ + // │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐ +@@ -98,12 +92,22 @@ + // + // • A MessageType describes a concrete Go message type. + // It has a MessageDescriptor and can construct a Message instance. ++// Just as how Go's reflect.Type is a reflective description of a Go type, ++// a MessageType is a reflective description of a Go type for a protobuf message. + // + // • A MessageDescriptor describes an abstract protobuf message type. 
+-// +-// • A Message is a concrete message instance. Generated messages implement +-// ProtoMessage, which can convert to/from a Message. +-// ++// It has no understanding of Go types. In order to construct a MessageType ++// from just a MessageDescriptor, you can consider looking up the message type ++// in the global registry using protoregistry.GlobalTypes.FindMessageByName ++// or constructing a dynamic MessageType using dynamicpb.NewMessageType. ++// ++// • A Message is a reflective view over a concrete message instance. ++// Generated messages implement ProtoMessage, which can convert to a Message. ++// Just as how Go's reflect.Value is a reflective view over a Go value, ++// a Message is a reflective view over a concrete protobuf message instance. ++// Using Go reflection as an analogy, the ProtoReflect method is similar to ++// calling reflect.ValueOf, and the Message.Interface method is similar to ++// calling reflect.Value.Interface. + // + // ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ + // │ V │ V +diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go +index 121ba3a..0b99428 100644 +--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go ++++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go +@@ -87,6 +87,7 @@ func (p1 SourcePath) Equal(p2 SourcePath) bool { + // in a future version of this module. + // + // Example output: ++// + // .message_type[6].nested_type[15].field[3] + func (p SourcePath) String() string { + b := p.appendFileDescriptorProto(nil) +diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +index b03c122..54ce326 100644 +--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go ++++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +@@ -35,6 +35,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { + b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) + case 12: + b = p.appendSingularField(b, "syntax", nil) ++ case 13: ++ b = p.appendSingularField(b, "edition", nil) + } + return b + } +@@ -236,6 +238,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { + b = p.appendSingularField(b, "deprecated", nil) + case 7: + b = p.appendSingularField(b, "map_entry", nil) ++ case 11: ++ b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } +@@ -279,6 +283,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { + b = p.appendSingularField(b, "allow_alias", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) ++ case 6: ++ b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } +@@ -345,10 +351,18 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { + b = p.appendSingularField(b, "jstype", nil) + case 5: + b = p.appendSingularField(b, "lazy", nil) ++ case 15: ++ b = p.appendSingularField(b, "unverified_lazy", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 10: + b = p.appendSingularField(b, "weak", nil) ++ case 16: ++ b = p.appendSingularField(b, "debug_redact", nil) ++ case 17: ++ b = p.appendSingularField(b, "retention", nil) ++ case 
18: ++ b = p.appendSingularField(b, "target", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } +diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +index 8e53c44..3867470 100644 +--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go ++++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +@@ -480,6 +480,7 @@ type ExtensionDescriptors interface { + // relative to the parent that it is declared within. + // + // For example: ++// + // syntax = "proto2"; + // package example; + // message FooMessage { +diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +index f319810..37601b7 100644 +--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go ++++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +@@ -148,7 +148,7 @@ type Message interface { + // be preserved in marshaling or other operations. + IsValid() bool + +- // ProtoMethods returns optional fast-path implementions of various operations. ++ // ProtoMethods returns optional fast-path implementations of various operations. + // This method may return nil. + // + // The returned methods type is identical to +diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go +new file mode 100644 +index 0000000..5916525 +--- /dev/null ++++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go +@@ -0,0 +1,168 @@ ++// Copyright 2022 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package protoreflect ++ ++import ( ++ "bytes" ++ "fmt" ++ "math" ++ "reflect" ++ ++ "google.golang.org/protobuf/encoding/protowire" ++) ++ ++// Equal reports whether v1 and v2 are recursively equal. ++// ++// - Values of different types are always unequal. ++// ++// - Bytes values are equal if they contain identical bytes. ++// Empty bytes (regardless of nil-ness) are considered equal. ++// ++// - Floating point values are equal if they contain the same value. ++// Unlike the == operator, a NaN is equal to another NaN. ++// ++// - Enums are equal if they contain the same number. ++// Since Value does not contain an enum descriptor, ++// enum values do not consider the type of the enum. ++// ++// - Other scalar values are equal if they contain the same value. ++// ++// - Message values are equal if they belong to the same message descriptor, ++// have the same set of populated known and extension field values, ++// and the same set of unknown fields values. ++// ++// - Lists are equal if they are the same length and ++// each corresponding element is equal. ++// ++// - Maps are equal if they have the same set of keys and ++// the corresponding value for each key is equal. 
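
The equal.go and value_equal.go hunks above move proto.Equal onto the new protoreflect Value.Equal. A minimal sketch of how the two entry points line up after this bump, using a well-known wrapper type (illustrative only; not taken from the vendored sources):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/reflect/protoreflect"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        x := wrapperspb.String("hello")
        y := wrapperspb.String("hello")

        // Top-level API, unchanged for callers.
        fmt.Println(proto.Equal(x, y)) // true

        // After this change, proto.Equal defers to Value.Equal for valid messages.
        vx := protoreflect.ValueOfMessage(x.ProtoReflect())
        vy := protoreflect.ValueOfMessage(y.ProtoReflect())
        fmt.Println(vx.Equal(vy)) // true
    }
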
++func (v1 Value) Equal(v2 Value) bool { ++ return equalValue(v1, v2) ++} ++ ++func equalValue(x, y Value) bool { ++ eqType := x.typ == y.typ ++ switch x.typ { ++ case nilType: ++ return eqType ++ case boolType: ++ return eqType && x.Bool() == y.Bool() ++ case int32Type, int64Type: ++ return eqType && x.Int() == y.Int() ++ case uint32Type, uint64Type: ++ return eqType && x.Uint() == y.Uint() ++ case float32Type, float64Type: ++ return eqType && equalFloat(x.Float(), y.Float()) ++ case stringType: ++ return eqType && x.String() == y.String() ++ case bytesType: ++ return eqType && bytes.Equal(x.Bytes(), y.Bytes()) ++ case enumType: ++ return eqType && x.Enum() == y.Enum() ++ default: ++ switch x := x.Interface().(type) { ++ case Message: ++ y, ok := y.Interface().(Message) ++ return ok && equalMessage(x, y) ++ case List: ++ y, ok := y.Interface().(List) ++ return ok && equalList(x, y) ++ case Map: ++ y, ok := y.Interface().(Map) ++ return ok && equalMap(x, y) ++ default: ++ panic(fmt.Sprintf("unknown type: %T", x)) ++ } ++ } ++} ++ ++// equalFloat compares two floats, where NaNs are treated as equal. ++func equalFloat(x, y float64) bool { ++ if math.IsNaN(x) || math.IsNaN(y) { ++ return math.IsNaN(x) && math.IsNaN(y) ++ } ++ return x == y ++} ++ ++// equalMessage compares two messages. ++func equalMessage(mx, my Message) bool { ++ if mx.Descriptor() != my.Descriptor() { ++ return false ++ } ++ ++ nx := 0 ++ equal := true ++ mx.Range(func(fd FieldDescriptor, vx Value) bool { ++ nx++ ++ vy := my.Get(fd) ++ equal = my.Has(fd) && equalValue(vx, vy) ++ return equal ++ }) ++ if !equal { ++ return false ++ } ++ ny := 0 ++ my.Range(func(fd FieldDescriptor, vx Value) bool { ++ ny++ ++ return true ++ }) ++ if nx != ny { ++ return false ++ } ++ ++ return equalUnknown(mx.GetUnknown(), my.GetUnknown()) ++} ++ ++// equalList compares two lists. ++func equalList(x, y List) bool { ++ if x.Len() != y.Len() { ++ return false ++ } ++ for i := x.Len() - 1; i >= 0; i-- { ++ if !equalValue(x.Get(i), y.Get(i)) { ++ return false ++ } ++ } ++ return true ++} ++ ++// equalMap compares two maps. ++func equalMap(x, y Map) bool { ++ if x.Len() != y.Len() { ++ return false ++ } ++ equal := true ++ x.Range(func(k MapKey, vx Value) bool { ++ vy := y.Get(k) ++ equal = y.Has(k) && equalValue(vx, vy) ++ return equal ++ }) ++ return equal ++} ++ ++// equalUnknown compares unknown fields by direct comparison on the raw bytes ++// of each individual field number. ++func equalUnknown(x, y RawFields) bool { ++ if len(x) != len(y) { ++ return false ++ } ++ if bytes.Equal([]byte(x), []byte(y)) { ++ return true ++ } ++ ++ mx := make(map[FieldNumber]RawFields) ++ my := make(map[FieldNumber]RawFields) ++ for len(x) > 0 { ++ fnum, _, n := protowire.ConsumeField(x) ++ mx[fnum] = append(mx[fnum], x[:n]...) ++ x = x[n:] ++ } ++ for len(y) > 0 { ++ fnum, _, n := protowire.ConsumeField(y) ++ my[fnum] = append(my[fnum], y[:n]...) ++ y = y[n:] ++ } ++ return reflect.DeepEqual(mx, my) ++} +diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +index eb7764c..08e5ef7 100644 +--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go ++++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +@@ -50,14 +50,15 @@ import ( + // always references the source object. + // + // For example: ++// + // // Append a 0 to a "repeated int32" field. 
+ // // Since the Value returned by Mutable is guaranteed to alias + // // the source message, modifying the Value modifies the message. +-// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) ++// message.Mutable(fieldDesc).List().Append(protoreflect.ValueOfInt32(0)) + // + // // Assign [0] to a "repeated int32" field by creating a new Value, + // // modifying it, and assigning it. +-// list := message.NewField(fieldDesc).(List) ++// list := message.NewField(fieldDesc).List() + // list.Append(protoreflect.ValueOfInt32(0)) + // message.Set(fieldDesc, list) + // // ERROR: Since it is not defined whether Set aliases the source, +@@ -392,6 +393,7 @@ func (v Value) MapKey() MapKey { + // ╚═════════╧═════════════════════════════════════╝ + // + // A MapKey is constructed and accessed through a Value: ++// + // k := ValueOf("hash").MapKey() // convert string to MapKey + // s := k.String() // convert MapKey to string + // +diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +index 59f024c..aeb5597 100644 +--- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go ++++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +@@ -30,9 +30,11 @@ import ( + // conflictPolicy configures the policy for handling registration conflicts. + // + // It can be over-written at compile time with a linker-initialized variable: ++// + // go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" + // + // It can be over-written at program execution with an environment variable: ++// + // GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main + // + // Neither of the above are covered by the compatibility promise and +@@ -44,7 +46,7 @@ var conflictPolicy = "panic" // "panic" | "warn" | "ignore" + // It is a variable so that the behavior is easily overridden in another file. + var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { + const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" +- const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" ++ const faq = "https://protobuf.dev/reference/go/faq#namespace-conflict" + policy := conflictPolicy + if v := os.Getenv(env); v != "" { + policy = v +diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go +index ff094e1..a105cb2 100644 +--- a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go ++++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go +@@ -26,16 +26,19 @@ const ( + // EnforceVersion is used by code generated by protoc-gen-go + // to statically enforce minimum and maximum versions of this package. + // A compilation failure implies either that: +-// * the runtime package is too old and needs to be updated OR +-// * the generated code is too old and needs to be regenerated. ++// - the runtime package is too old and needs to be updated OR ++// - the generated code is too old and needs to be regenerated. + // + // The runtime package can be upgraded by running: ++// + // go get google.golang.org/protobuf + // + // The generated code can be regenerated by running: ++// + // protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES} + // + // Example usage by generated code: ++// + // const ( + // // Verify that this generated code is sufficiently up-to-date. 
+ // _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion) +@@ -49,6 +52,7 @@ const ( + type EnforceVersion uint + + // This enforces the following invariant: ++// + // MinVersion ≤ GenVersion ≤ MaxVersion + const ( + _ = EnforceVersion(GenVersion - MinVersion) +diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +index abe4ab5..dac5671 100644 +--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go ++++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +@@ -406,6 +406,152 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} + } + ++// If set to RETENTION_SOURCE, the option will be omitted from the binary. ++// Note: as of January 2023, support for this is in progress and does not yet ++// have an effect (b/264593489). ++type FieldOptions_OptionRetention int32 ++ ++const ( ++ FieldOptions_RETENTION_UNKNOWN FieldOptions_OptionRetention = 0 ++ FieldOptions_RETENTION_RUNTIME FieldOptions_OptionRetention = 1 ++ FieldOptions_RETENTION_SOURCE FieldOptions_OptionRetention = 2 ++) ++ ++// Enum value maps for FieldOptions_OptionRetention. ++var ( ++ FieldOptions_OptionRetention_name = map[int32]string{ ++ 0: "RETENTION_UNKNOWN", ++ 1: "RETENTION_RUNTIME", ++ 2: "RETENTION_SOURCE", ++ } ++ FieldOptions_OptionRetention_value = map[string]int32{ ++ "RETENTION_UNKNOWN": 0, ++ "RETENTION_RUNTIME": 1, ++ "RETENTION_SOURCE": 2, ++ } ++) ++ ++func (x FieldOptions_OptionRetention) Enum() *FieldOptions_OptionRetention { ++ p := new(FieldOptions_OptionRetention) ++ *p = x ++ return p ++} ++ ++func (x FieldOptions_OptionRetention) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { ++ return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() ++} ++ ++func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { ++ return &file_google_protobuf_descriptor_proto_enumTypes[5] ++} ++ ++func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Do not use. ++func (x *FieldOptions_OptionRetention) UnmarshalJSON(b []byte) error { ++ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) ++ if err != nil { ++ return err ++ } ++ *x = FieldOptions_OptionRetention(num) ++ return nil ++} ++ ++// Deprecated: Use FieldOptions_OptionRetention.Descriptor instead. ++func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) { ++ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 2} ++} ++ ++// This indicates the types of entities that the field may apply to when used ++// as an option. If it is unset, then the field may be freely used as an ++// option on any kind of entity. Note: as of January 2023, support for this is ++// in progress and does not yet have an effect (b/264593489). 
++type FieldOptions_OptionTargetType int32 ++ ++const ( ++ FieldOptions_TARGET_TYPE_UNKNOWN FieldOptions_OptionTargetType = 0 ++ FieldOptions_TARGET_TYPE_FILE FieldOptions_OptionTargetType = 1 ++ FieldOptions_TARGET_TYPE_EXTENSION_RANGE FieldOptions_OptionTargetType = 2 ++ FieldOptions_TARGET_TYPE_MESSAGE FieldOptions_OptionTargetType = 3 ++ FieldOptions_TARGET_TYPE_FIELD FieldOptions_OptionTargetType = 4 ++ FieldOptions_TARGET_TYPE_ONEOF FieldOptions_OptionTargetType = 5 ++ FieldOptions_TARGET_TYPE_ENUM FieldOptions_OptionTargetType = 6 ++ FieldOptions_TARGET_TYPE_ENUM_ENTRY FieldOptions_OptionTargetType = 7 ++ FieldOptions_TARGET_TYPE_SERVICE FieldOptions_OptionTargetType = 8 ++ FieldOptions_TARGET_TYPE_METHOD FieldOptions_OptionTargetType = 9 ++) ++ ++// Enum value maps for FieldOptions_OptionTargetType. ++var ( ++ FieldOptions_OptionTargetType_name = map[int32]string{ ++ 0: "TARGET_TYPE_UNKNOWN", ++ 1: "TARGET_TYPE_FILE", ++ 2: "TARGET_TYPE_EXTENSION_RANGE", ++ 3: "TARGET_TYPE_MESSAGE", ++ 4: "TARGET_TYPE_FIELD", ++ 5: "TARGET_TYPE_ONEOF", ++ 6: "TARGET_TYPE_ENUM", ++ 7: "TARGET_TYPE_ENUM_ENTRY", ++ 8: "TARGET_TYPE_SERVICE", ++ 9: "TARGET_TYPE_METHOD", ++ } ++ FieldOptions_OptionTargetType_value = map[string]int32{ ++ "TARGET_TYPE_UNKNOWN": 0, ++ "TARGET_TYPE_FILE": 1, ++ "TARGET_TYPE_EXTENSION_RANGE": 2, ++ "TARGET_TYPE_MESSAGE": 3, ++ "TARGET_TYPE_FIELD": 4, ++ "TARGET_TYPE_ONEOF": 5, ++ "TARGET_TYPE_ENUM": 6, ++ "TARGET_TYPE_ENUM_ENTRY": 7, ++ "TARGET_TYPE_SERVICE": 8, ++ "TARGET_TYPE_METHOD": 9, ++ } ++) ++ ++func (x FieldOptions_OptionTargetType) Enum() *FieldOptions_OptionTargetType { ++ p := new(FieldOptions_OptionTargetType) ++ *p = x ++ return p ++} ++ ++func (x FieldOptions_OptionTargetType) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { ++ return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() ++} ++ ++func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { ++ return &file_google_protobuf_descriptor_proto_enumTypes[6] ++} ++ ++func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Do not use. ++func (x *FieldOptions_OptionTargetType) UnmarshalJSON(b []byte) error { ++ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) ++ if err != nil { ++ return err ++ } ++ *x = FieldOptions_OptionTargetType(num) ++ return nil ++} ++ ++// Deprecated: Use FieldOptions_OptionTargetType.Descriptor instead. ++func (FieldOptions_OptionTargetType) EnumDescriptor() ([]byte, []int) { ++ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 3} ++} ++ + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. 
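
The descriptor.pb.go hunks above introduce debug_redact, retention, and target on FieldOptions, along with the OptionRetention and OptionTargetType enums. A minimal sketch of reading those options back through reflection (illustrative only; the describeFieldOptions helper is hypothetical and not part of the vendored sources):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoreflect"
        "google.golang.org/protobuf/types/descriptorpb"
    )

    // describeFieldOptions reads the new FieldOptions members
    // (debug_redact, retention) off a field descriptor.
    func describeFieldOptions(fd protoreflect.FieldDescriptor) {
        opts, ok := fd.Options().(*descriptorpb.FieldOptions)
        if !ok {
            return
        }
        if opts.GetDebugRedact() {
            fmt.Printf("%s: redacted in debug output\n", fd.FullName())
        }
        if opts.GetRetention() == descriptorpb.FieldOptions_RETENTION_SOURCE {
            fmt.Printf("%s: option omitted from the binary descriptor\n", fd.FullName())
        }
    }

    func main() {
        // Walk the fields of a message from this very package as demo input.
        fields := (&descriptorpb.FieldDescriptorProto{}).ProtoReflect().Descriptor().Fields()
        for i := 0; i < fields.Len(); i++ {
            describeFieldOptions(fields.Get(i))
        }
    }
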
+@@ -442,11 +588,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { + } + + func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { +- return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() ++ return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + } + + func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { +- return &file_google_protobuf_descriptor_proto_enumTypes[5] ++ return &file_google_protobuf_descriptor_proto_enumTypes[7] + } + + func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { +@@ -468,6 +614,70 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} + } + ++// Represents the identified object's effect on the element in the original ++// .proto file. ++type GeneratedCodeInfo_Annotation_Semantic int32 ++ ++const ( ++ // There is no effect or the effect is indescribable. ++ GeneratedCodeInfo_Annotation_NONE GeneratedCodeInfo_Annotation_Semantic = 0 ++ // The element is set or otherwise mutated. ++ GeneratedCodeInfo_Annotation_SET GeneratedCodeInfo_Annotation_Semantic = 1 ++ // An alias to the element is returned. ++ GeneratedCodeInfo_Annotation_ALIAS GeneratedCodeInfo_Annotation_Semantic = 2 ++) ++ ++// Enum value maps for GeneratedCodeInfo_Annotation_Semantic. ++var ( ++ GeneratedCodeInfo_Annotation_Semantic_name = map[int32]string{ ++ 0: "NONE", ++ 1: "SET", ++ 2: "ALIAS", ++ } ++ GeneratedCodeInfo_Annotation_Semantic_value = map[string]int32{ ++ "NONE": 0, ++ "SET": 1, ++ "ALIAS": 2, ++ } ++) ++ ++func (x GeneratedCodeInfo_Annotation_Semantic) Enum() *GeneratedCodeInfo_Annotation_Semantic { ++ p := new(GeneratedCodeInfo_Annotation_Semantic) ++ *p = x ++ return p ++} ++ ++func (x GeneratedCodeInfo_Annotation_Semantic) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { ++ return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() ++} ++ ++func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { ++ return &file_google_protobuf_descriptor_proto_enumTypes[8] ++} ++ ++func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Do not use. ++func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { ++ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) ++ if err != nil { ++ return err ++ } ++ *x = GeneratedCodeInfo_Annotation_Semantic(num) ++ return nil ++} ++ ++// Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. ++func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { ++ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} ++} ++ + // The protocol compiler can output a FileDescriptorSet containing the .proto + // files it parses. + type FileDescriptorSet struct { +@@ -544,8 +754,12 @@ type FileDescriptorProto struct { + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. +- // The supported values are "proto2" and "proto3". ++ // The supported values are "proto2", "proto3", and "editions". ++ // ++ // If `edition` is present, this value must be "editions". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` ++ // The edition of the proto file, which is an opaque string. ++ Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` + } + + func (x *FileDescriptorProto) Reset() { +@@ -664,6 +878,13 @@ func (x *FileDescriptorProto) GetSyntax() string { + return "" + } + ++func (x *FileDescriptorProto) GetEdition() string { ++ if x != nil && x.Edition != nil { ++ return *x.Edition ++ } ++ return "" ++} ++ + // Describes a message type. + type DescriptorProto struct { + state protoimpl.MessageState +@@ -860,7 +1081,6 @@ type FieldDescriptorProto struct { + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. +- // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. +@@ -1382,22 +1602,22 @@ type FileOptions struct { + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` +- // If set, all the classes from the .proto file are wrapped in a single +- // outer class with the given name. This applies to both Proto1 +- // (equivalent to the old "--one_java_file" option) and Proto2 (where +- // a .proto always translates to a single class, but you may want to +- // explicitly choose the class name). ++ // Controls the name of the wrapper Java class generated for the .proto file. ++ // That class will always contain the .proto file's getDescriptor() method as ++ // well as any top-level extensions defined in the .proto file. ++ // If java_multiple_files is disabled, then all the other classes from the ++ // .proto file will be nested inside the single wrapper outer class. + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` +- // If set true, then the Java code generator will generate a separate .java ++ // If enabled, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto +- // file. Thus, these types will *not* be nested inside the outer class +- // named by java_outer_classname. However, the outer class will still be ++ // file. Thus, these types will *not* be nested inside the wrapper class ++ // named by java_outer_classname. However, the wrapper class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + // +- // Deprecated: Do not use. ++ // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
+ JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 +@@ -1531,7 +1751,7 @@ func (x *FileOptions) GetJavaMultipleFiles() bool { + return Default_FileOptions_JavaMultipleFiles + } + +-// Deprecated: Do not use. ++// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if x != nil && x.JavaGenerateEqualsAndHash != nil { + return *x.JavaGenerateEqualsAndHash +@@ -1670,10 +1890,12 @@ type MessageOptions struct { + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: +- // message Foo { +- // option message_set_wire_format = true; +- // extensions 4 to max; +- // } ++ // ++ // message Foo { ++ // option message_set_wire_format = true; ++ // extensions 4 to max; ++ // } ++ // + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // +@@ -1692,28 +1914,44 @@ type MessageOptions struct { + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` ++ // NOTE: Do not set the option in .proto files. Always use the maps syntax ++ // instead. The option should only be implicitly set by the proto compiler ++ // parser. ++ // + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: +- // map map_field = 1; ++ // ++ // map map_field = 1; ++ // + // The parsed descriptor looks like: +- // message MapFieldEntry { +- // option map_entry = true; +- // optional KeyType key = 1; +- // optional ValueType value = 2; +- // } +- // repeated MapFieldEntry map_field = 1; ++ // ++ // message MapFieldEntry { ++ // option map_entry = true; ++ // optional KeyType key = 1; ++ // optional ValueType value = 2; ++ // } ++ // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. +- // +- // NOTE: Do not set the option in .proto files. Always use the maps syntax +- // instead. The option should only be implicitly set by the proto compiler +- // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` ++ // Enable the legacy handling of JSON field name conflicts. This lowercases ++ // and strips underscored from the fields before comparison in proto3 only. ++ // The new behavior takes `json_name` into account and applies to proto2 as ++ // well. ++ // ++ // This should only be used as a temporary measure against broken builds due ++ // to the change in behavior for JSON field name conflicts. ++ // ++ // TODO(b/261750190) This is legacy behavior we plan to remove once downstream ++ // teams have had time to migrate. ++ // ++ // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
++ DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + } +@@ -1785,6 +2023,14 @@ func (x *MessageOptions) GetMapEntry() bool { + return false + } + ++// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. ++func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { ++ if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { ++ return *x.DeprecatedLegacyJsonFieldConflicts ++ } ++ return false ++} ++ + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption +@@ -1838,7 +2084,6 @@ type FieldOptions struct { + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // +- // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. +@@ -1849,7 +2094,14 @@ type FieldOptions struct { + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. ++ // ++ // As of May 2022, lazy verifies the contents of the byte stream during ++ // parsing. An invalid byte stream will cause the overall parsing to fail. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` ++ // unverified_lazy does no correctness checks on the byte stream. This should ++ // only be used where lazy with verification is prohibitive for performance ++ // reasons. ++ UnverifiedLazy *bool `protobuf:"varint,15,opt,name=unverified_lazy,json=unverifiedLazy,def=0" json:"unverified_lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this +@@ -1857,17 +2109,24 @@ type FieldOptions struct { + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` ++ // Indicate that the field value should not be printed out when using debug ++ // formats, e.g. when the field contains sensitive credentials. ++ DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` ++ Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` ++ Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + } + + // Default values for FieldOptions fields. 
+ const ( +- Default_FieldOptions_Ctype = FieldOptions_STRING +- Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL +- Default_FieldOptions_Lazy = bool(false) +- Default_FieldOptions_Deprecated = bool(false) +- Default_FieldOptions_Weak = bool(false) ++ Default_FieldOptions_Ctype = FieldOptions_STRING ++ Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL ++ Default_FieldOptions_Lazy = bool(false) ++ Default_FieldOptions_UnverifiedLazy = bool(false) ++ Default_FieldOptions_Deprecated = bool(false) ++ Default_FieldOptions_Weak = bool(false) ++ Default_FieldOptions_DebugRedact = bool(false) + ) + + func (x *FieldOptions) Reset() { +@@ -1930,6 +2189,13 @@ func (x *FieldOptions) GetLazy() bool { + return Default_FieldOptions_Lazy + } + ++func (x *FieldOptions) GetUnverifiedLazy() bool { ++ if x != nil && x.UnverifiedLazy != nil { ++ return *x.UnverifiedLazy ++ } ++ return Default_FieldOptions_UnverifiedLazy ++} ++ + func (x *FieldOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated +@@ -1944,6 +2210,27 @@ func (x *FieldOptions) GetWeak() bool { + return Default_FieldOptions_Weak + } + ++func (x *FieldOptions) GetDebugRedact() bool { ++ if x != nil && x.DebugRedact != nil { ++ return *x.DebugRedact ++ } ++ return Default_FieldOptions_DebugRedact ++} ++ ++func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { ++ if x != nil && x.Retention != nil { ++ return *x.Retention ++ } ++ return FieldOptions_RETENTION_UNKNOWN ++} ++ ++func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { ++ if x != nil && x.Target != nil { ++ return *x.Target ++ } ++ return FieldOptions_TARGET_TYPE_UNKNOWN ++} ++ + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption +@@ -2014,6 +2301,15 @@ type EnumOptions struct { + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` ++ // Enable the legacy handling of JSON field name conflicts. This lowercases ++ // and strips underscored from the fields before comparison in proto3 only. ++ // The new behavior takes `json_name` into account and applies to proto2 as ++ // well. ++ // TODO(b/261750190) Remove this legacy behavior once downstream teams have ++ // had time to migrate. ++ // ++ // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. ++ DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + } +@@ -2069,6 +2365,14 @@ func (x *EnumOptions) GetDeprecated() bool { + return Default_EnumOptions_Deprecated + } + ++// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. ++func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { ++ if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { ++ return *x.DeprecatedLegacyJsonFieldConflicts ++ } ++ return false ++} ++ + func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption +@@ -2399,43 +2703,48 @@ type SourceCodeInfo struct { + // tools. 
+ // + // For example, say we have a file like: +- // message Foo { +- // optional string foo = 1; +- // } ++ // ++ // message Foo { ++ // optional string foo = 1; ++ // } ++ // + // Let's look at just the field definition: +- // optional string foo = 1; +- // ^ ^^ ^^ ^ ^^^ +- // a bc de f ghi ++ // ++ // optional string foo = 1; ++ // ^ ^^ ^^ ^ ^^^ ++ // a bc de f ghi ++ // + // We have the following locations: +- // span path represents +- // [a,i) [ 4, 0, 2, 0 ] The whole field definition. +- // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). +- // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). +- // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). +- // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). ++ // ++ // span path represents ++ // [a,i) [ 4, 0, 2, 0 ] The whole field definition. ++ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). ++ // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). ++ // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). ++ // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: +- // - A location may refer to a repeated field itself (i.e. not to any +- // particular index within it). This is used whenever a set of elements are +- // logically enclosed in a single code segment. For example, an entire +- // extend block (possibly containing multiple extension definitions) will +- // have an outer location whose path refers to the "extensions" repeated +- // field without an index. +- // - Multiple locations may have the same path. This happens when a single +- // logical declaration is spread out across multiple places. The most +- // obvious example is the "extend" block again -- there may be multiple +- // extend blocks in the same scope, each of which will have the same path. +- // - A location's span is not always a subset of its parent's span. For +- // example, the "extendee" of an extension declaration appears at the +- // beginning of the "extend" block and is shared by all extensions within +- // the block. +- // - Just because a location's span is a subset of some other location's span +- // does not mean that it is a descendant. For example, a "group" defines +- // both a type and a field in a single declaration. Thus, the locations +- // corresponding to the type and field and their components will overlap. +- // - Code which tries to interpret locations should probably be designed to +- // ignore those that it doesn't understand, as more types of locations could +- // be recorded in the future. ++ // - A location may refer to a repeated field itself (i.e. not to any ++ // particular index within it). This is used whenever a set of elements are ++ // logically enclosed in a single code segment. For example, an entire ++ // extend block (possibly containing multiple extension definitions) will ++ // have an outer location whose path refers to the "extensions" repeated ++ // field without an index. ++ // - Multiple locations may have the same path. This happens when a single ++ // logical declaration is spread out across multiple places. The most ++ // obvious example is the "extend" block again -- there may be multiple ++ // extend blocks in the same scope, each of which will have the same path. ++ // - A location's span is not always a subset of its parent's span. For ++ // example, the "extendee" of an extension declaration appears at the ++ // beginning of the "extend" block and is shared by all extensions within ++ // the block. ++ // - Just because a location's span is a subset of some other location's span ++ // does not mean that it is a descendant. 
For example, a "group" defines ++ // both a type and a field in a single declaration. Thus, the locations ++ // corresponding to the type and field and their components will overlap. ++ // - Code which tries to interpret locations should probably be designed to ++ // ignore those that it doesn't understand, as more types of locations could ++ // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + } + +@@ -2715,8 +3024,8 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). +-// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +-// "foo.(bar.baz).qux". ++// E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents ++// "foo.(bar.baz).moo". + type UninterpretedOption_NamePart struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache +@@ -2781,23 +3090,34 @@ type SourceCodeInfo_Location struct { + // location. + // + // Each element is a field number or an index. They form a path from +- // the root FileDescriptorProto to the place where the definition. For +- // example, this path: +- // [ 4, 3, 2, 7, 1 ] ++ // the root FileDescriptorProto to the place where the definition occurs. ++ // For example, this path: ++ // ++ // [ 4, 3, 2, 7, 1 ] ++ // + // refers to: +- // file.message_type(3) // 4, 3 +- // .field(7) // 2, 7 +- // .name() // 1 ++ // ++ // file.message_type(3) // 4, 3 ++ // .field(7) // 2, 7 ++ // .name() // 1 ++ // + // This is because FileDescriptorProto.message_type has field number 4: +- // repeated DescriptorProto message_type = 4; ++ // ++ // repeated DescriptorProto message_type = 4; ++ // + // and DescriptorProto.field has field number 2: +- // repeated FieldDescriptorProto field = 2; ++ // ++ // repeated FieldDescriptorProto field = 2; ++ // + // and FieldDescriptorProto.name has field number 1: +- // optional string name = 1; ++ // ++ // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: +- // [ 4, 3, 2, 7 ] ++ // ++ // [ 4, 3, 2, 7 ] ++ // + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` +@@ -2826,34 +3146,34 @@ type SourceCodeInfo_Location struct { + // + // Examples: + // +- // optional int32 foo = 1; // Comment attached to foo. +- // // Comment attached to bar. +- // optional int32 bar = 2; ++ // optional int32 foo = 1; // Comment attached to foo. ++ // // Comment attached to bar. ++ // optional int32 bar = 2; + // +- // optional string baz = 3; +- // // Comment attached to baz. +- // // Another line attached to baz. ++ // optional string baz = 3; ++ // // Comment attached to baz. ++ // // Another line attached to baz. + // +- // // Comment attached to qux. +- // // +- // // Another line attached to qux. +- // optional double qux = 4; ++ // // Comment attached to moo. ++ // // ++ // // Another line attached to moo. ++ // optional double moo = 4; + // +- // // Detached comment for corge. This is not leading or trailing comments +- // // to qux or corge because there are blank lines separating it from +- // // both. ++ // // Detached comment for corge. 
This is not leading or trailing comments ++ // // to moo or corge because there are blank lines separating it from ++ // // both. + // +- // // Detached comment for corge paragraph 2. ++ // // Detached comment for corge paragraph 2. + // +- // optional string corge = 5; +- // /* Block comment attached +- // * to corge. Leading asterisks +- // * will be removed. */ +- // /* Block comment attached to +- // * grault. */ +- // optional int32 grault = 6; ++ // optional string corge = 5; ++ // /* Block comment attached ++ // * to corge. Leading asterisks ++ // * will be removed. */ ++ // /* Block comment attached to ++ // * grault. */ ++ // optional int32 grault = 6; + // +- // // ignored detached comments. ++ // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` +@@ -2940,9 +3260,10 @@ type GeneratedCodeInfo_Annotation struct { + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that +- // relates to the identified offset. The end offset should be one past ++ // relates to the identified object. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). +- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` ++ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` ++ Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` + } + + func (x *GeneratedCodeInfo_Annotation) Reset() { +@@ -3005,6 +3326,13 @@ func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { + return 0 + } + ++func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotation_Semantic { ++ if x != nil && x.Semantic != nil { ++ return *x.Semantic ++ } ++ return GeneratedCodeInfo_Annotation_NONE ++} ++ + var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor + + var file_google_protobuf_descriptor_proto_rawDesc = []byte{ +@@ -3016,7 +3344,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, +- 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, ++ 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, +@@ -3054,330 +3382,391 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ + 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 
0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, +- 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, +- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, +- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, +- 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, +- 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, +- 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, +- 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, +- 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, +- 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, +- 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, +- 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, +- 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, +- 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, +- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, +- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, +- 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, +- 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, +- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, +- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, +- 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, +- 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, +- 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, ++ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, ++ 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, ++ 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, ++ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, ++ 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, ++ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, ++ 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, ++ 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, ++ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, ++ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, ++ 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, ++ 0x74, 0x6f, 0x52, 0x09, 
0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, ++ 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, ++ 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, +- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, +- 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, +- 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, +- 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, +- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, +- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, +- 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, +- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, +- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, +- 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, +- 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, +- 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, ++ 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, ++ 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, ++ 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, ++ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, ++ 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, ++ 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, ++ 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, +- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, +- 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, +- 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, +- 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, +- 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, +- 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, +- 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, +- 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, +- 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, +- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, +- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, +- 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, +- 0x6f, 0x6e, 
0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, +- 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, +- 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, +- 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, +- 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, +- 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, +- 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, +- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, +- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, +- 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, +- 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, +- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, +- 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, +- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, +- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, +- 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, +- 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, +- 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, +- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, +- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, +- 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, +- 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, +- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, +- 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, +- 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, +- 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, +- 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, +- 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, +- 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, +- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, +- 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, +- 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, +- 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, +- 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, +- 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, +- 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, +- 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, +- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, +- 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, +- 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, +- 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, +- 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, +- 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, +- 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, +- 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, +- 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, +- 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, +- 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, +- 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, +- 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, +- 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, +- 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, +- 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, +- 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, +- 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, +- 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, +- 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, +- 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, +- 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, +- 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, +- 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, +- 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, +- 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, +- 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, +- 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, +- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, +- 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, +- 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, +- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, +- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, +- 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, +- 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 
0x50, +- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, +- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, +- 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, +- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, +- 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, +- 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, +- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, +- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, +- 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, +- 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, +- 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, +- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, +- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, +- 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, +- 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, +- 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, +- 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, +- 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, +- 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, +- 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, +- 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, +- 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, +- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, +- 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, +- 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, +- 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, +- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, +- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, +- 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, +- 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, +- 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, +- 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, +- 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, +- 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, +- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, +- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 
0x74, 0x6f, 0x52, +- 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, +- 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, +- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, +- 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, +- 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, +- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, +- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, +- 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, +- 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, +- 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, +- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, +- 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, +- 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, +- 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, +- 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, +- 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, +- 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, +- 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, +- 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, +- 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, +- 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, +- 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, +- 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, +- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, +- 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, +- 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, +- 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, +- 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, +- 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, +- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, +- 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, +- 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, +- 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, +- 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, +- 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, +- 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 
0x72, 0x69, 0x6e, 0x67, 0x5f, +- 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, +- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, +- 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, +- 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, +- 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, +- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, +- 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, +- 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, +- 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, +- 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, +- 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, +- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, +- 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, +- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, +- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, +- 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, +- 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, +- 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, +- 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, +- 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, +- 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, ++ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, ++ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, ++ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, ++ 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, ++ 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, ++ 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, ++ 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, ++ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, ++ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, ++ 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, ++ 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, ++ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, ++ 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 
0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, ++ 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, ++ 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, ++ 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, ++ 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, ++ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, ++ 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, ++ 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, ++ 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, ++ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, ++ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, ++ 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, ++ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, ++ 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, ++ 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, ++ 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, ++ 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, ++ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, ++ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, ++ 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, ++ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, ++ 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, ++ 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, ++ 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, ++ 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, ++ 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, ++ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, ++ 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, ++ 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, ++ 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, ++ 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, ++ 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, ++ 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, ++ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, ++ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, ++ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 
0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, ++ 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, ++ 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, ++ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, ++ 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, ++ 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, ++ 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, ++ 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, ++ 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, ++ 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, ++ 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, ++ 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, ++ 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, ++ 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, ++ 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, ++ 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, ++ 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, ++ 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, ++ 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, ++ 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, ++ 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, ++ 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, ++ 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, ++ 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, ++ 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, ++ 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, ++ 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, ++ 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, ++ 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, ++ 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, ++ 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, ++ 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, ++ 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, ++ 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, ++ 0x4c, 0x61, 0x62, 0x65, 0x6c, 
0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, ++ 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, ++ 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, ++ 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, ++ 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, ++ 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, ++ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, ++ 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, ++ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, ++ 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, ++ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, ++ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, ++ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, ++ 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, ++ 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, ++ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, ++ 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, ++ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, ++ 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, ++ 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, ++ 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, ++ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, ++ 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, ++ 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, ++ 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, ++ 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, ++ 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, ++ 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, ++ 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, ++ 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, ++ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, ++ 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, ++ 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, ++ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, ++ 0x06, 0x6e, 0x75, 
0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, ++ 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, ++ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, ++ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, ++ 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, ++ 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, ++ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, ++ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, ++ 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, ++ 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, ++ 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, ++ 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, ++ 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, ++ 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, ++ 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, ++ 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, ++ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, ++ 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, ++ 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, ++ 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, ++ 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, ++ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, ++ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, ++ 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, ++ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, ++ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, ++ 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, ++ 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, ++ 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, ++ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, ++ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, ++ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, ++ 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, ++ 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, ++ 0x61, 
0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, ++ 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, ++ 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, ++ 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, ++ 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, ++ 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, ++ 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, ++ 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, ++ 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, ++ 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, ++ 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, ++ 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, ++ 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, ++ 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, ++ 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, ++ 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, ++ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, ++ 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, ++ 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, ++ 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, ++ 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, ++ 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, +- 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, +- 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, +- 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, +- 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, +- 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, +- 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, +- 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, +- 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, +- 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, +- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, +- 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, +- 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, +- 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 
+- 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, +- 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, +- 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, +- 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, +- 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, +- 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, +- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, +- 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, +- 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, +- 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, +- 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, +- 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, +- 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, +- 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, +- 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, +- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, +- 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, +- 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, +- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, +- 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, +- 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, +- 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, +- 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, +- 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, +- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, +- 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, +- 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, +- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, +- 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, +- 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, +- 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, +- 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, +- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, +- 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, +- 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, +- 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 
0x70, 0x5f, +- 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, +- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, ++ 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, ++ 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, ++ 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, ++ 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, ++ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, ++ 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, ++ 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, ++ 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, ++ 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, ++ 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, ++ 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, ++ 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, ++ 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, ++ 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, ++ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, ++ 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, ++ 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, ++ 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, ++ 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, ++ 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, ++ 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, ++ 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, ++ 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, ++ 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, ++ 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, ++ 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, ++ 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, ++ 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, ++ 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, ++ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, ++ 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, ++ 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, ++ 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, ++ 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 
0x65, 0x73, 0x70, 0x61, ++ 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, ++ 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, ++ 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, +- 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, +- 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, +- 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, +- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, +- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, ++ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, ++ 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, ++ 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, ++ 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, ++ 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, ++ 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, 0x03, 0x0a, ++ 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, ++ 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, ++ 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, ++ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, ++ 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, ++ 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, ++ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, ++ 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, ++ 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, ++ 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, ++ 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, ++ 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, ++ 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, ++ 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, ++ 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, ++ 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, ++ 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, ++ 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 
0x61, 0x74, 0x65, 0x64, 0x4c, ++ 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, ++ 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, ++ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, ++ 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, ++ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, ++ 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, ++ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, ++ 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, ++ 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, ++ 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xb7, 0x08, 0x0a, 0x0c, 0x46, ++ 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, ++ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, ++ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, ++ 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, ++ 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, ++ 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, ++ 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, ++ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, +- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, +- 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, +- 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, +- 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, +- 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, +- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, +- 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, +- 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, +- 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, +- 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, +- 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, +- 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, +- 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, +- 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, +- 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, +- 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, +- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, +- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, +- 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, +- 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, +- 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, +- 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, +- 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, +- 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, +- 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, +- 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, +- 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, +- 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, ++ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, ++ 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, ++ 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, ++ 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, ++ 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, ++ 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, ++ 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, ++ 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, ++ 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, ++ 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, ++ 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, ++ 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, ++ 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, ++ 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, ++ 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, ++ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, ++ 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, ++ 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, ++ 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x12, 0x20, ++ 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, ++ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, ++ 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, ++ 0x79, 0x70, 0x65, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 
0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, +- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, +- 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, +- 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, +- 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, +- 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, +- 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, +- 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, +- 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, +- 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, +- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, +- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, +- 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, +- 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, +- 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, ++ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, ++ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, ++ 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, ++ 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, ++ 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, ++ 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, ++ 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, ++ 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, ++ 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, ++ 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, ++ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, ++ 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, ++ 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, ++ 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, ++ 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, ++ 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, ++ 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, ++ 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, ++ 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 
0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, ++ 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, ++ 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, ++ 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, ++ 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, ++ 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, ++ 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, ++ 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, ++ 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, ++ 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, ++ 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, ++ 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, ++ 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, ++ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, ++ 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, ++ 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, ++ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, ++ 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, ++ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, ++ 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e, ++ 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, ++ 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, ++ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, ++ 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, ++ 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, ++ 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, ++ 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, ++ 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, ++ 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, ++ 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, ++ 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, ++ 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, ++ 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, ++ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, ++ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, ++ 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, ++ 0x69, 0x6f, 0x6e, 0x2a, 
0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, ++ 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, ++ 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, ++ 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, ++ 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, ++ 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, ++ 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, ++ 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, ++ 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, ++ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, ++ 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, ++ 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, +- 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, ++ 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, +@@ -3385,97 +3774,95 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ + 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, +- 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, +- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, +- 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, +- 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, +- 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, +- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, ++ 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, ++ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, ++ 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, ++ 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, ++ 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, ++ 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, ++ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, ++ 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, ++ 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 
0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, ++ 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, ++ 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, ++ 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, ++ 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, ++ 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, ++ 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, ++ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, ++ 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, ++ 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, ++ 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, ++ 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, ++ 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, ++ 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, ++ 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, ++ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, ++ 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, +- 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, +- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, +- 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, +- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, +- 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, +- 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, +- 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, +- 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, +- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, +- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, +- 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, +- 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, +- 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, +- 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, +- 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, +- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, +- 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, +- 0x74, 0x69, 0x6f, 0x6e, 
0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, +- 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, +- 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, +- 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, +- 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, +- 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, +- 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, +- 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, +- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, +- 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, +- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, +- 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, +- 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, +- 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, +- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, +- 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, +- 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, +- 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, +- 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, +- 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, +- 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, +- 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, +- 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, +- 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, +- 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, +- 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, +- 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, +- 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, +- 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, +- 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, +- 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, +- 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, +- 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, +- 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, +- 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, +- 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, +- 0x65, 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, +- 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, +- 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, +- 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, +- 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, +- 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, +- 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, +- 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, +- 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, +- 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, +- 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, +- 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, +- 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, +- 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, +- 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, +- 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, +- 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, +- 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, +- 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, +- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, +- 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, +- 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, +- 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, +- 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, +- 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, +- 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, +- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, +- 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, +- 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, +- 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, ++ 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, ++ 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, ++ 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, ++ 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, ++ 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, ++ 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, ++ 
0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, ++ 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, ++ 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, ++ 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, ++ 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, ++ 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, ++ 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, ++ 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, ++ 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, ++ 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, ++ 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, ++ 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, ++ 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, ++ 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, ++ 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, ++ 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, ++ 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, ++ 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, ++ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, ++ 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, ++ 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, ++ 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, ++ 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, ++ 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, ++ 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, ++ 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, ++ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, ++ 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, ++ 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, ++ 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, ++ 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, ++ 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, ++ 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, ++ 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, ++ 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, ++ 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 
0x69, ++ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, ++ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, ++ 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, ++ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, ++ 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, ++ 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, ++ 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, ++ 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, ++ 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, ++ 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, ++ 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, ++ 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, ++ 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, ++ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, ++ 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, ++ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, ++ 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, ++ 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, ++ 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, ++ 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, +@@ -3498,7 +3885,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { + return file_google_protobuf_descriptor_proto_rawDescData + } + +-var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6) ++var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 9) + var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) + var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ + (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type +@@ -3506,84 +3893,90 @@ var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ + (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType +- (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel +- (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet +- (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto +- (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto +- (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions +- 
(*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto +- (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto +- (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto +- (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto +- (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto +- (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto +- (*FileOptions)(nil), // 16: google.protobuf.FileOptions +- (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions +- (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions +- (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions +- (*EnumOptions)(nil), // 20: google.protobuf.EnumOptions +- (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions +- (*ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions +- (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions +- (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption +- (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo +- (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo +- (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange +- (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange +- (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange +- (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart +- (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location +- (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation ++ (FieldOptions_OptionRetention)(0), // 5: google.protobuf.FieldOptions.OptionRetention ++ (FieldOptions_OptionTargetType)(0), // 6: google.protobuf.FieldOptions.OptionTargetType ++ (MethodOptions_IdempotencyLevel)(0), // 7: google.protobuf.MethodOptions.IdempotencyLevel ++ (GeneratedCodeInfo_Annotation_Semantic)(0), // 8: google.protobuf.GeneratedCodeInfo.Annotation.Semantic ++ (*FileDescriptorSet)(nil), // 9: google.protobuf.FileDescriptorSet ++ (*FileDescriptorProto)(nil), // 10: google.protobuf.FileDescriptorProto ++ (*DescriptorProto)(nil), // 11: google.protobuf.DescriptorProto ++ (*ExtensionRangeOptions)(nil), // 12: google.protobuf.ExtensionRangeOptions ++ (*FieldDescriptorProto)(nil), // 13: google.protobuf.FieldDescriptorProto ++ (*OneofDescriptorProto)(nil), // 14: google.protobuf.OneofDescriptorProto ++ (*EnumDescriptorProto)(nil), // 15: google.protobuf.EnumDescriptorProto ++ (*EnumValueDescriptorProto)(nil), // 16: google.protobuf.EnumValueDescriptorProto ++ (*ServiceDescriptorProto)(nil), // 17: google.protobuf.ServiceDescriptorProto ++ (*MethodDescriptorProto)(nil), // 18: google.protobuf.MethodDescriptorProto ++ (*FileOptions)(nil), // 19: google.protobuf.FileOptions ++ (*MessageOptions)(nil), // 20: google.protobuf.MessageOptions ++ (*FieldOptions)(nil), // 21: google.protobuf.FieldOptions ++ (*OneofOptions)(nil), // 22: google.protobuf.OneofOptions ++ (*EnumOptions)(nil), // 23: google.protobuf.EnumOptions ++ (*EnumValueOptions)(nil), // 24: google.protobuf.EnumValueOptions ++ (*ServiceOptions)(nil), // 25: google.protobuf.ServiceOptions ++ (*MethodOptions)(nil), // 26: google.protobuf.MethodOptions ++ (*UninterpretedOption)(nil), // 27: google.protobuf.UninterpretedOption ++ (*SourceCodeInfo)(nil), // 28: google.protobuf.SourceCodeInfo ++ 
(*GeneratedCodeInfo)(nil), // 29: google.protobuf.GeneratedCodeInfo ++ (*DescriptorProto_ExtensionRange)(nil), // 30: google.protobuf.DescriptorProto.ExtensionRange ++ (*DescriptorProto_ReservedRange)(nil), // 31: google.protobuf.DescriptorProto.ReservedRange ++ (*EnumDescriptorProto_EnumReservedRange)(nil), // 32: google.protobuf.EnumDescriptorProto.EnumReservedRange ++ (*UninterpretedOption_NamePart)(nil), // 33: google.protobuf.UninterpretedOption.NamePart ++ (*SourceCodeInfo_Location)(nil), // 34: google.protobuf.SourceCodeInfo.Location ++ (*GeneratedCodeInfo_Annotation)(nil), // 35: google.protobuf.GeneratedCodeInfo.Annotation + } + var file_google_protobuf_descriptor_proto_depIdxs = []int32{ +- 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto +- 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto +- 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto +- 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto +- 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto +- 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions +- 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo +- 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto +- 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto +- 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto +- 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto +- 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange +- 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto +- 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions +- 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange +- 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption ++ 10, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto ++ 11, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto ++ 15, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto ++ 17, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto ++ 13, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto ++ 19, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions ++ 28, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo ++ 13, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto ++ 13, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto ++ 11, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto ++ 15, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> 
google.protobuf.EnumDescriptorProto ++ 30, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange ++ 14, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto ++ 20, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions ++ 31, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange ++ 27, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type +- 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions +- 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions +- 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto +- 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions +- 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange +- 21, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions +- 15, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto +- 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions +- 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions ++ 21, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions ++ 22, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions ++ 16, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto ++ 23, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions ++ 32, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange ++ 24, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions ++ 18, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto ++ 25, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions ++ 26, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode +- 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption +- 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption ++ 27, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption ++ 27, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType +- 24, // 32: 
google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption +- 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption +- 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption +- 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption +- 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption +- 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel +- 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption +- 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart +- 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location +- 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation +- 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions +- 43, // [43:43] is the sub-list for method output_type +- 43, // [43:43] is the sub-list for method input_type +- 43, // [43:43] is the sub-list for extension type_name +- 43, // [43:43] is the sub-list for extension extendee +- 0, // [0:43] is the sub-list for field type_name ++ 5, // 32: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention ++ 6, // 33: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType ++ 27, // 34: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption ++ 27, // 35: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption ++ 27, // 36: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption ++ 27, // 37: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption ++ 27, // 38: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption ++ 7, // 39: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel ++ 27, // 40: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption ++ 33, // 41: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart ++ 34, // 42: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location ++ 35, // 43: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation ++ 12, // 44: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions ++ 8, // 45: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic ++ 46, // [46:46] is the sub-list for method output_type ++ 46, // [46:46] is the sub-list for method input_type ++ 46, // [46:46] is the sub-list for extension type_name ++ 46, // [46:46] is the sub-list for extension extendee ++ 0, // [0:46] is the sub-list for field type_name + } + + func init() { file_google_protobuf_descriptor_proto_init() } +@@ -3940,7 +4333,7 @@ func 
file_google_protobuf_descriptor_proto_init() { + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, +- NumEnums: 6, ++ NumEnums: 9, + NumMessages: 27, + NumExtensions: 0, + NumServices: 0, +diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +index 8c10797..a6c7a33 100644 +--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go ++++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +@@ -37,8 +37,7 @@ + // It is functionally a tuple of the full name of the remote message type and + // the serialized bytes of the remote message value. + // +-// +-// Constructing an Any ++// # Constructing an Any + // + // An Any message containing another message value is constructed using New: + // +@@ -48,8 +47,7 @@ + // } + // ... // make use of any + // +-// +-// Unmarshaling an Any ++// # Unmarshaling an Any + // + // With a populated Any message, the underlying message can be serialized into + // a remote concrete message value in a few ways. +@@ -95,8 +93,7 @@ + // listed in the case clauses are linked into the Go binary and therefore also + // registered in the global registry. + // +-// +-// Type checking an Any ++// # Type checking an Any + // + // In order to type check whether an Any message represents some other message, + // then use the MessageIs method: +@@ -115,7 +112,6 @@ + // } + // ... // make use of m + // } +-// + package anypb + + import ( +@@ -136,45 +132,49 @@ import ( + // + // Example 1: Pack and unpack a message in C++. + // +-// Foo foo = ...; +-// Any any; +-// any.PackFrom(foo); +-// ... +-// if (any.UnpackTo(&foo)) { +-// ... +-// } ++// Foo foo = ...; ++// Any any; ++// any.PackFrom(foo); ++// ... ++// if (any.UnpackTo(&foo)) { ++// ... ++// } + // + // Example 2: Pack and unpack a message in Java. + // +-// Foo foo = ...; +-// Any any = Any.pack(foo); +-// ... +-// if (any.is(Foo.class)) { +-// foo = any.unpack(Foo.class); +-// } +-// +-// Example 3: Pack and unpack a message in Python. +-// +-// foo = Foo(...) +-// any = Any() +-// any.Pack(foo) +-// ... +-// if any.Is(Foo.DESCRIPTOR): +-// any.Unpack(foo) +-// ... +-// +-// Example 4: Pack and unpack a message in Go +-// +-// foo := &pb.Foo{...} +-// any, err := anypb.New(foo) +-// if err != nil { +-// ... +-// } +-// ... +-// foo := &pb.Foo{} +-// if err := any.UnmarshalTo(foo); err != nil { +-// ... +-// } ++// Foo foo = ...; ++// Any any = Any.pack(foo); ++// ... ++// if (any.is(Foo.class)) { ++// foo = any.unpack(Foo.class); ++// } ++// // or ... ++// if (any.isSameTypeAs(Foo.getDefaultInstance())) { ++// foo = any.unpack(Foo.getDefaultInstance()); ++// } ++// ++// Example 3: Pack and unpack a message in Python. ++// ++// foo = Foo(...) ++// any = Any() ++// any.Pack(foo) ++// ... ++// if any.Is(Foo.DESCRIPTOR): ++// any.Unpack(foo) ++// ... ++// ++// Example 4: Pack and unpack a message in Go ++// ++// foo := &pb.Foo{...} ++// any, err := anypb.New(foo) ++// if err != nil { ++// ... ++// } ++// ... ++// foo := &pb.Foo{} ++// if err := any.UnmarshalTo(foo); err != nil { ++// ... ++// } + // + // The pack methods provided by protobuf library will by default use + // 'type.googleapis.com/full.type.name' as the type URL and the unpack +@@ -182,35 +182,33 @@ import ( + // in the type URL, for example "foo.bar.com/x/y.z" will yield type + // name "y.z". 
+ // ++// # JSON + // +-// JSON +-// ==== + // The JSON representation of an `Any` value uses the regular + // representation of the deserialized, embedded message, with an + // additional field `@type` which contains the type URL. Example: + // +-// package google.profile; +-// message Person { +-// string first_name = 1; +-// string last_name = 2; +-// } ++// package google.profile; ++// message Person { ++// string first_name = 1; ++// string last_name = 2; ++// } + // +-// { +-// "@type": "type.googleapis.com/google.profile.Person", +-// "firstName": , +-// "lastName": +-// } ++// { ++// "@type": "type.googleapis.com/google.profile.Person", ++// "firstName": , ++// "lastName": ++// } + // + // If the embedded message type is well-known and has a custom JSON + // representation, that representation will be embedded adding a field + // `value` which holds the custom JSON in addition to the `@type` + // field. Example (for message [google.protobuf.Duration][]): + // +-// { +-// "@type": "type.googleapis.com/google.protobuf.Duration", +-// "value": "1.212s" +-// } +-// ++// { ++// "@type": "type.googleapis.com/google.protobuf.Duration", ++// "value": "1.212s" ++// } + type Any struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache +@@ -228,14 +226,14 @@ type Any struct { + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // +- // * If no scheme is provided, `https` is assumed. +- // * An HTTP GET on the URL must yield a [google.protobuf.Type][] +- // value in binary format, or produce an error. +- // * Applications are allowed to cache lookup results based on the +- // URL, or have them precompiled into a binary to avoid any +- // lookup. Therefore, binary compatibility needs to be preserved +- // on changes to types. (Use versioned type names to manage +- // breaking changes.) ++ // - If no scheme is provided, `https` is assumed. ++ // - An HTTP GET on the URL must yield a [google.protobuf.Type][] ++ // value in binary format, or produce an error. ++ // - Applications are allowed to cache lookup results based on the ++ // URL, or have them precompiled into a binary to avoid any ++ // lookup. Therefore, binary compatibility needs to be preserved ++ // on changes to types. (Use versioned type names to manage ++ // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with +@@ -243,7 +241,6 @@ type Any struct { + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. +- // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +index a583ca2..df709a8 100644 +--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go ++++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +@@ -35,8 +35,7 @@ + // + // The Duration message represents a signed span of time. 
+ // +-// +-// Conversion to a Go Duration ++// # Conversion to a Go Duration + // + // The AsDuration method can be used to convert a Duration message to a + // standard Go time.Duration value: +@@ -65,15 +64,13 @@ + // the resulting value to the closest representable value (e.g., math.MaxInt64 + // for positive overflow and math.MinInt64 for negative overflow). + // +-// +-// Conversion from a Go Duration ++// # Conversion from a Go Duration + // + // The durationpb.New function can be used to construct a Duration message + // from a standard Go time.Duration value: + // + // dur := durationpb.New(d) + // ... // make use of d as a *durationpb.Duration +-// + package durationpb + + import ( +@@ -96,43 +93,43 @@ import ( + // + // Example 1: Compute Duration from two Timestamps in pseudo code. + // +-// Timestamp start = ...; +-// Timestamp end = ...; +-// Duration duration = ...; ++// Timestamp start = ...; ++// Timestamp end = ...; ++// Duration duration = ...; + // +-// duration.seconds = end.seconds - start.seconds; +-// duration.nanos = end.nanos - start.nanos; ++// duration.seconds = end.seconds - start.seconds; ++// duration.nanos = end.nanos - start.nanos; + // +-// if (duration.seconds < 0 && duration.nanos > 0) { +-// duration.seconds += 1; +-// duration.nanos -= 1000000000; +-// } else if (duration.seconds > 0 && duration.nanos < 0) { +-// duration.seconds -= 1; +-// duration.nanos += 1000000000; +-// } ++// if (duration.seconds < 0 && duration.nanos > 0) { ++// duration.seconds += 1; ++// duration.nanos -= 1000000000; ++// } else if (duration.seconds > 0 && duration.nanos < 0) { ++// duration.seconds -= 1; ++// duration.nanos += 1000000000; ++// } + // + // Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. + // +-// Timestamp start = ...; +-// Duration duration = ...; +-// Timestamp end = ...; ++// Timestamp start = ...; ++// Duration duration = ...; ++// Timestamp end = ...; + // +-// end.seconds = start.seconds + duration.seconds; +-// end.nanos = start.nanos + duration.nanos; ++// end.seconds = start.seconds + duration.seconds; ++// end.nanos = start.nanos + duration.nanos; + // +-// if (end.nanos < 0) { +-// end.seconds -= 1; +-// end.nanos += 1000000000; +-// } else if (end.nanos >= 1000000000) { +-// end.seconds += 1; +-// end.nanos -= 1000000000; +-// } ++// if (end.nanos < 0) { ++// end.seconds -= 1; ++// end.nanos += 1000000000; ++// } else if (end.nanos >= 1000000000) { ++// end.seconds += 1; ++// end.nanos -= 1000000000; ++// } + // + // Example 3: Compute Duration from datetime.timedelta in Python. + // +-// td = datetime.timedelta(days=3, minutes=10) +-// duration = Duration() +-// duration.FromTimedelta(td) ++// td = datetime.timedelta(days=3, minutes=10) ++// duration = Duration() ++// duration.FromTimedelta(td) + // + // # JSON Mapping + // +@@ -143,8 +140,6 @@ import ( + // encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should + // be expressed in JSON format as "3.000000001s", and 3 seconds and 1 + // microsecond should be expressed in JSON format as "3.000001s". 
+-// +-// + type Duration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache +diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +index c9ae921..61f69fc 100644 +--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go ++++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +@@ -36,8 +36,7 @@ + // The Timestamp message represents a timestamp, + // an instant in time since the Unix epoch (January 1st, 1970). + // +-// +-// Conversion to a Go Time ++// # Conversion to a Go Time + // + // The AsTime method can be used to convert a Timestamp message to a + // standard Go time.Time value in UTC: +@@ -59,8 +58,7 @@ + // ... // handle error + // } + // +-// +-// Conversion from a Go Time ++// # Conversion from a Go Time + // + // The timestamppb.New function can be used to construct a Timestamp message + // from a standard Go time.Time value: +@@ -72,7 +70,6 @@ + // + // ts := timestamppb.Now() + // ... // make use of ts as a *timestamppb.Timestamp +-// + package timestamppb + + import ( +@@ -101,52 +98,50 @@ import ( + // + // Example 1: Compute Timestamp from POSIX `time()`. + // +-// Timestamp timestamp; +-// timestamp.set_seconds(time(NULL)); +-// timestamp.set_nanos(0); ++// Timestamp timestamp; ++// timestamp.set_seconds(time(NULL)); ++// timestamp.set_nanos(0); + // + // Example 2: Compute Timestamp from POSIX `gettimeofday()`. + // +-// struct timeval tv; +-// gettimeofday(&tv, NULL); ++// struct timeval tv; ++// gettimeofday(&tv, NULL); + // +-// Timestamp timestamp; +-// timestamp.set_seconds(tv.tv_sec); +-// timestamp.set_nanos(tv.tv_usec * 1000); ++// Timestamp timestamp; ++// timestamp.set_seconds(tv.tv_sec); ++// timestamp.set_nanos(tv.tv_usec * 1000); + // + // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. + // +-// FILETIME ft; +-// GetSystemTimeAsFileTime(&ft); +-// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; ++// FILETIME ft; ++// GetSystemTimeAsFileTime(&ft); ++// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; + // +-// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +-// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +-// Timestamp timestamp; +-// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +-// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); ++// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z ++// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. ++// Timestamp timestamp; ++// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); ++// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); + // + // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. + // +-// long millis = System.currentTimeMillis(); +-// +-// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +-// .setNanos((int) ((millis % 1000) * 1000000)).build(); ++// long millis = System.currentTimeMillis(); + // ++// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) ++// .setNanos((int) ((millis % 1000) * 1000000)).build(); + // + // Example 5: Compute Timestamp from Java `Instant.now()`. 
+ // +-// Instant now = Instant.now(); +-// +-// Timestamp timestamp = +-// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +-// .setNanos(now.getNano()).build(); ++// Instant now = Instant.now(); + // ++// Timestamp timestamp = ++// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) ++// .setNanos(now.getNano()).build(); + // + // Example 6: Compute Timestamp from current time in Python. + // +-// timestamp = Timestamp() +-// timestamp.GetCurrentTime() ++// timestamp = Timestamp() ++// timestamp.GetCurrentTime() + // + // # JSON Mapping + // +@@ -174,8 +169,6 @@ import ( + // the Joda Time's [`ISODateTimeFormat.dateTime()`]( + // http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D + // ) to obtain a formatter capable of generating timestamps in this format. +-// +-// + type Timestamp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache +diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go +index 83ec8a9..51e34dd 100644 +--- a/vendor/k8s.io/client-go/pkg/version/base.go ++++ b/vendor/k8s.io/client-go/pkg/version/base.go +@@ -55,8 +55,8 @@ var ( + // NOTE: The $Format strings are replaced during 'git archive' thanks to the + // companion .gitattributes file containing 'export-subst' in this same + // directory. See also https://git-scm.com/docs/gitattributes +- gitVersion string = "v0.0.0-master+f03765681fe81ee1e0633ee1734bf48ab3bccf2b" +- gitCommit string = "f03765681fe81ee1e0633ee1734bf48ab3bccf2b" // sha1 from git, output of $(git rev-parse HEAD) ++ gitVersion string = "v0.0.0-master+$Format:%H$" ++ gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) + gitTreeState string = "" // state of git tree, either "clean" or "dirty" + + buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') +diff --git a/vendor/modules.txt b/vendor/modules.txt +index 9f9fcc2..7ef0ab8 100644 +--- a/vendor/modules.txt ++++ b/vendor/modules.txt +@@ -4,7 +4,7 @@ github.com/beorn7/perks/quantile + # github.com/blang/semver v3.5.1+incompatible + ## explicit + github.com/blang/semver +-# github.com/cespare/xxhash/v2 v2.1.2 ++# github.com/cespare/xxhash/v2 v2.2.0 + ## explicit; go 1.11 + github.com/cespare/xxhash/v2 + # github.com/containernetworking/cni v1.1.2 +@@ -46,8 +46,9 @@ github.com/gogo/protobuf/sortkeys + # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da + ## explicit + github.com/golang/groupcache/lru +-# github.com/golang/protobuf v1.5.2 ++# github.com/golang/protobuf v1.5.3 + ## explicit; go 1.9 ++github.com/golang/protobuf/jsonpb + github.com/golang/protobuf/proto + github.com/golang/protobuf/ptypes + github.com/golang/protobuf/ptypes/any +@@ -149,10 +150,9 @@ github.com/vishvananda/netlink/nl + # github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f + ## explicit; go 1.12 + github.com/vishvananda/netns +-# golang.org/x/net v0.7.0 ++# golang.org/x/net v0.17.0 + ## explicit; go 1.17 + golang.org/x/net/context +-golang.org/x/net/context/ctxhttp + golang.org/x/net/html + golang.org/x/net/html/atom + golang.org/x/net/html/charset +@@ -162,20 +162,19 @@ golang.org/x/net/http2/hpack + golang.org/x/net/idna + golang.org/x/net/internal/timeseries + golang.org/x/net/trace +-# golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f +-## explicit; go 1.11 ++# golang.org/x/oauth2 v0.7.0 ++## explicit; go 1.17 + golang.org/x/oauth2 + golang.org/x/oauth2/internal +-# 
golang.org/x/sys v0.5.0 ++# golang.org/x/sys v0.13.0 + ## explicit; go 1.17 +-golang.org/x/sys/internal/unsafeheader + golang.org/x/sys/plan9 + golang.org/x/sys/unix + golang.org/x/sys/windows +-# golang.org/x/term v0.5.0 ++# golang.org/x/term v0.13.0 + ## explicit; go 1.17 + golang.org/x/term +-# golang.org/x/text v0.7.0 ++# golang.org/x/text v0.13.0 + ## explicit; go 1.17 + golang.org/x/text/encoding + golang.org/x/text/encoding/charmap +@@ -209,11 +208,11 @@ google.golang.org/appengine/internal/log + google.golang.org/appengine/internal/remote_api + google.golang.org/appengine/internal/urlfetch + google.golang.org/appengine/urlfetch +-# google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 +-## explicit; go 1.11 ++# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 ++## explicit; go 1.19 + google.golang.org/genproto/googleapis/rpc/status +-# google.golang.org/grpc v1.40.0 +-## explicit; go 1.11 ++# google.golang.org/grpc v1.56.3 ++## explicit; go 1.17 + google.golang.org/grpc + google.golang.org/grpc/attributes + google.golang.org/grpc/backoff +@@ -222,6 +221,7 @@ google.golang.org/grpc/balancer/base + google.golang.org/grpc/balancer/grpclb/state + google.golang.org/grpc/balancer/roundrobin + google.golang.org/grpc/binarylog/grpc_binarylog_v1 ++google.golang.org/grpc/channelz + google.golang.org/grpc/codes + google.golang.org/grpc/connectivity + google.golang.org/grpc/credentials +@@ -231,6 +231,7 @@ google.golang.org/grpc/encoding/proto + google.golang.org/grpc/grpclog + google.golang.org/grpc/internal + google.golang.org/grpc/internal/backoff ++google.golang.org/grpc/internal/balancer/gracefulswitch + google.golang.org/grpc/internal/balancerload + google.golang.org/grpc/internal/binarylog + google.golang.org/grpc/internal/buffer +@@ -242,6 +243,7 @@ google.golang.org/grpc/internal/grpcrand + google.golang.org/grpc/internal/grpcsync + google.golang.org/grpc/internal/grpcutil + google.golang.org/grpc/internal/metadata ++google.golang.org/grpc/internal/pretty + google.golang.org/grpc/internal/resolver + google.golang.org/grpc/internal/resolver/dns + google.golang.org/grpc/internal/resolver/passthrough +@@ -259,14 +261,16 @@ google.golang.org/grpc/serviceconfig + google.golang.org/grpc/stats + google.golang.org/grpc/status + google.golang.org/grpc/tap +-# google.golang.org/protobuf v1.28.0 ++# google.golang.org/protobuf v1.30.0 + ## explicit; go 1.11 ++google.golang.org/protobuf/encoding/protojson + google.golang.org/protobuf/encoding/prototext + google.golang.org/protobuf/encoding/protowire + google.golang.org/protobuf/internal/descfmt + google.golang.org/protobuf/internal/descopts + google.golang.org/protobuf/internal/detrand + google.golang.org/protobuf/internal/encoding/defval ++google.golang.org/protobuf/internal/encoding/json + google.golang.org/protobuf/internal/encoding/messageset + google.golang.org/protobuf/internal/encoding/tag + google.golang.org/protobuf/internal/encoding/text +-- +2.34.1 + diff --git a/SPECS/multus/CVE-2023-45288.patch b/SPECS/multus/CVE-2023-45288.patch new file mode 100644 index 00000000000..09942176fe8 --- /dev/null +++ b/SPECS/multus/CVE-2023-45288.patch @@ -0,0 +1,8790 @@ +From 319e6f6e683cc82da402207a4d616505dd133b8c Mon Sep 17 00:00:00 2001 +From: xiaohongdeng <“worldsky86rough@gmail.com”> +Date: Tue, 26 Nov 2024 23:52:44 +0000 +Subject: [PATCH] upgrade golang.org/x/net to 0.23.0 + +--- + go.mod | 8 +- + go.sum | 16 +- + vendor/golang.org/x/net/context/go17.go | 1 - + vendor/golang.org/x/net/context/go19.go | 1 - + 
vendor/golang.org/x/net/context/pre_go17.go | 1 - + vendor/golang.org/x/net/context/pre_go19.go | 1 - + vendor/golang.org/x/net/html/token.go | 12 +- + vendor/golang.org/x/net/http2/databuffer.go | 59 +-- + vendor/golang.org/x/net/http2/frame.go | 42 ++- + vendor/golang.org/x/net/http2/go111.go | 30 -- + vendor/golang.org/x/net/http2/go115.go | 27 -- + vendor/golang.org/x/net/http2/go118.go | 17 - + vendor/golang.org/x/net/http2/not_go111.go | 21 -- + vendor/golang.org/x/net/http2/not_go115.go | 31 -- + vendor/golang.org/x/net/http2/not_go118.go | 17 - + vendor/golang.org/x/net/http2/pipe.go | 11 +- + vendor/golang.org/x/net/http2/server.go | 37 +- + vendor/golang.org/x/net/http2/testsync.go | 331 +++++++++++++++++ + vendor/golang.org/x/net/http2/transport.go | 340 ++++++++++++++---- + vendor/golang.org/x/net/idna/go118.go | 1 - + vendor/golang.org/x/net/idna/idna10.0.0.go | 1 - + vendor/golang.org/x/net/idna/idna9.0.0.go | 1 - + vendor/golang.org/x/net/idna/pre_go118.go | 1 - + vendor/golang.org/x/net/idna/tables10.0.0.go | 1 - + vendor/golang.org/x/net/idna/tables11.0.0.go | 1 - + vendor/golang.org/x/net/idna/tables12.0.0.go | 1 - + vendor/golang.org/x/net/idna/tables13.0.0.go | 1 - + vendor/golang.org/x/net/idna/tables15.0.0.go | 1 - + vendor/golang.org/x/net/idna/tables9.0.0.go | 1 - + vendor/golang.org/x/net/idna/trie12.0.0.go | 1 - + vendor/golang.org/x/net/idna/trie13.0.0.go | 1 - + .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 1 - + vendor/golang.org/x/sys/plan9/pwd_plan9.go | 1 - + vendor/golang.org/x/sys/plan9/race.go | 1 - + vendor/golang.org/x/sys/plan9/race0.go | 1 - + vendor/golang.org/x/sys/plan9/str.go | 1 - + vendor/golang.org/x/sys/plan9/syscall.go | 1 - + .../x/sys/plan9/zsyscall_plan9_386.go | 1 - + .../x/sys/plan9/zsyscall_plan9_amd64.go | 1 - + .../x/sys/plan9/zsyscall_plan9_arm.go | 1 - + vendor/golang.org/x/sys/unix/aliases.go | 4 +- + vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 1 - + vendor/golang.org/x/sys/unix/asm_bsd_386.s | 2 - + vendor/golang.org/x/sys/unix/asm_bsd_amd64.s | 2 - + vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 2 - + vendor/golang.org/x/sys/unix/asm_bsd_arm64.s | 2 - + vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s | 2 - + .../golang.org/x/sys/unix/asm_bsd_riscv64.s | 2 - + vendor/golang.org/x/sys/unix/asm_linux_386.s | 1 - + .../golang.org/x/sys/unix/asm_linux_amd64.s | 1 - + vendor/golang.org/x/sys/unix/asm_linux_arm.s | 1 - + .../golang.org/x/sys/unix/asm_linux_arm64.s | 3 - + .../golang.org/x/sys/unix/asm_linux_loong64.s | 3 - + .../golang.org/x/sys/unix/asm_linux_mips64x.s | 3 - + .../golang.org/x/sys/unix/asm_linux_mipsx.s | 3 - + .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 3 - + .../golang.org/x/sys/unix/asm_linux_riscv64.s | 2 - + .../golang.org/x/sys/unix/asm_linux_s390x.s | 3 - + .../x/sys/unix/asm_openbsd_mips64.s | 1 - + .../golang.org/x/sys/unix/asm_solaris_amd64.s | 1 - + vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 3 - + vendor/golang.org/x/sys/unix/cap_freebsd.go | 1 - + vendor/golang.org/x/sys/unix/constants.go | 1 - + vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 1 - + vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 1 - + vendor/golang.org/x/sys/unix/dev_zos.go | 1 - + vendor/golang.org/x/sys/unix/dirent.go | 1 - + vendor/golang.org/x/sys/unix/endian_big.go | 1 - + vendor/golang.org/x/sys/unix/endian_little.go | 1 - + vendor/golang.org/x/sys/unix/env_unix.go | 1 - + vendor/golang.org/x/sys/unix/epoll_zos.go | 1 - + vendor/golang.org/x/sys/unix/fcntl.go | 3 +- + .../x/sys/unix/fcntl_linux_32bit.go | 1 - + 
vendor/golang.org/x/sys/unix/fdset.go | 1 - + vendor/golang.org/x/sys/unix/fstatfs_zos.go | 1 - + vendor/golang.org/x/sys/unix/gccgo.go | 1 - + vendor/golang.org/x/sys/unix/gccgo_c.c | 1 - + .../x/sys/unix/gccgo_linux_amd64.go | 1 - + vendor/golang.org/x/sys/unix/ifreq_linux.go | 1 - + vendor/golang.org/x/sys/unix/ioctl_linux.go | 5 + + vendor/golang.org/x/sys/unix/ioctl_signed.go | 1 - + .../golang.org/x/sys/unix/ioctl_unsigned.go | 1 - + vendor/golang.org/x/sys/unix/ioctl_zos.go | 1 - + vendor/golang.org/x/sys/unix/mkerrors.sh | 43 ++- + vendor/golang.org/x/sys/unix/mmap_nomremap.go | 1 - + vendor/golang.org/x/sys/unix/mremap.go | 1 - + vendor/golang.org/x/sys/unix/pagesize_unix.go | 1 - + .../golang.org/x/sys/unix/pledge_openbsd.go | 92 ++--- + vendor/golang.org/x/sys/unix/ptrace_darwin.go | 1 - + vendor/golang.org/x/sys/unix/ptrace_ios.go | 1 - + vendor/golang.org/x/sys/unix/race.go | 1 - + vendor/golang.org/x/sys/unix/race0.go | 1 - + .../x/sys/unix/readdirent_getdents.go | 1 - + .../x/sys/unix/readdirent_getdirentries.go | 1 - + vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 1 - + .../x/sys/unix/sockcmsg_unix_other.go | 1 - + vendor/golang.org/x/sys/unix/syscall.go | 1 - + vendor/golang.org/x/sys/unix/syscall_aix.go | 4 +- + .../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 - + .../x/sys/unix/syscall_aix_ppc64.go | 1 - + vendor/golang.org/x/sys/unix/syscall_bsd.go | 3 +- + .../x/sys/unix/syscall_darwin_amd64.go | 1 - + .../x/sys/unix/syscall_darwin_arm64.go | 1 - + .../x/sys/unix/syscall_darwin_libSystem.go | 3 +- + .../x/sys/unix/syscall_dragonfly_amd64.go | 1 - + .../golang.org/x/sys/unix/syscall_freebsd.go | 12 +- + .../x/sys/unix/syscall_freebsd_386.go | 1 - + .../x/sys/unix/syscall_freebsd_amd64.go | 1 - + .../x/sys/unix/syscall_freebsd_arm.go | 1 - + .../x/sys/unix/syscall_freebsd_arm64.go | 1 - + .../x/sys/unix/syscall_freebsd_riscv64.go | 1 - + vendor/golang.org/x/sys/unix/syscall_hurd.go | 1 - + .../golang.org/x/sys/unix/syscall_hurd_386.go | 1 - + .../golang.org/x/sys/unix/syscall_illumos.go | 1 - + vendor/golang.org/x/sys/unix/syscall_linux.go | 132 ++++++- + .../x/sys/unix/syscall_linux_386.go | 1 - + .../x/sys/unix/syscall_linux_alarm.go | 2 - + .../x/sys/unix/syscall_linux_amd64.go | 1 - + .../x/sys/unix/syscall_linux_amd64_gc.go | 1 - + .../x/sys/unix/syscall_linux_arm.go | 1 - + .../x/sys/unix/syscall_linux_arm64.go | 1 - + .../golang.org/x/sys/unix/syscall_linux_gc.go | 1 - + .../x/sys/unix/syscall_linux_gc_386.go | 1 - + .../x/sys/unix/syscall_linux_gc_arm.go | 1 - + .../x/sys/unix/syscall_linux_gccgo_386.go | 1 - + .../x/sys/unix/syscall_linux_gccgo_arm.go | 1 - + .../x/sys/unix/syscall_linux_loong64.go | 1 - + .../x/sys/unix/syscall_linux_mips64x.go | 2 - + .../x/sys/unix/syscall_linux_mipsx.go | 2 - + .../x/sys/unix/syscall_linux_ppc.go | 1 - + .../x/sys/unix/syscall_linux_ppc64x.go | 2 - + .../x/sys/unix/syscall_linux_riscv64.go | 1 - + .../x/sys/unix/syscall_linux_s390x.go | 1 - + .../x/sys/unix/syscall_linux_sparc64.go | 1 - + .../x/sys/unix/syscall_netbsd_386.go | 1 - + .../x/sys/unix/syscall_netbsd_amd64.go | 1 - + .../x/sys/unix/syscall_netbsd_arm.go | 1 - + .../x/sys/unix/syscall_netbsd_arm64.go | 1 - + .../golang.org/x/sys/unix/syscall_openbsd.go | 28 +- + .../x/sys/unix/syscall_openbsd_386.go | 1 - + .../x/sys/unix/syscall_openbsd_amd64.go | 1 - + .../x/sys/unix/syscall_openbsd_arm.go | 1 - + .../x/sys/unix/syscall_openbsd_arm64.go | 1 - + .../x/sys/unix/syscall_openbsd_libc.go | 1 - + .../x/sys/unix/syscall_openbsd_ppc64.go | 1 - + 
.../x/sys/unix/syscall_openbsd_riscv64.go | 1 - + .../golang.org/x/sys/unix/syscall_solaris.go | 5 +- + .../x/sys/unix/syscall_solaris_amd64.go | 1 - + vendor/golang.org/x/sys/unix/syscall_unix.go | 1 - + .../golang.org/x/sys/unix/syscall_unix_gc.go | 2 - + .../x/sys/unix/syscall_unix_gc_ppc64x.go | 3 - + .../x/sys/unix/syscall_zos_s390x.go | 3 +- + vendor/golang.org/x/sys/unix/sysvshm_linux.go | 1 - + vendor/golang.org/x/sys/unix/sysvshm_unix.go | 1 - + .../x/sys/unix/sysvshm_unix_other.go | 1 - + vendor/golang.org/x/sys/unix/timestruct.go | 1 - + .../golang.org/x/sys/unix/unveil_openbsd.go | 41 ++- + vendor/golang.org/x/sys/unix/xattr_bsd.go | 1 - + .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1 - + .../x/sys/unix/zerrors_aix_ppc64.go | 1 - + .../x/sys/unix/zerrors_darwin_amd64.go | 1 - + .../x/sys/unix/zerrors_darwin_arm64.go | 1 - + .../x/sys/unix/zerrors_dragonfly_amd64.go | 1 - + .../x/sys/unix/zerrors_freebsd_386.go | 1 - + .../x/sys/unix/zerrors_freebsd_amd64.go | 1 - + .../x/sys/unix/zerrors_freebsd_arm.go | 1 - + .../x/sys/unix/zerrors_freebsd_arm64.go | 1 - + .../x/sys/unix/zerrors_freebsd_riscv64.go | 1 - + vendor/golang.org/x/sys/unix/zerrors_linux.go | 104 +++++- + .../x/sys/unix/zerrors_linux_386.go | 4 +- + .../x/sys/unix/zerrors_linux_amd64.go | 4 +- + .../x/sys/unix/zerrors_linux_arm.go | 4 +- + .../x/sys/unix/zerrors_linux_arm64.go | 4 +- + .../x/sys/unix/zerrors_linux_loong64.go | 5 +- + .../x/sys/unix/zerrors_linux_mips.go | 4 +- + .../x/sys/unix/zerrors_linux_mips64.go | 4 +- + .../x/sys/unix/zerrors_linux_mips64le.go | 4 +- + .../x/sys/unix/zerrors_linux_mipsle.go | 4 +- + .../x/sys/unix/zerrors_linux_ppc.go | 4 +- + .../x/sys/unix/zerrors_linux_ppc64.go | 4 +- + .../x/sys/unix/zerrors_linux_ppc64le.go | 4 +- + .../x/sys/unix/zerrors_linux_riscv64.go | 7 +- + .../x/sys/unix/zerrors_linux_s390x.go | 4 +- + .../x/sys/unix/zerrors_linux_sparc64.go | 4 +- + .../x/sys/unix/zerrors_netbsd_386.go | 1 - + .../x/sys/unix/zerrors_netbsd_amd64.go | 1 - + .../x/sys/unix/zerrors_netbsd_arm.go | 1 - + .../x/sys/unix/zerrors_netbsd_arm64.go | 1 - + .../x/sys/unix/zerrors_openbsd_386.go | 1 - + .../x/sys/unix/zerrors_openbsd_amd64.go | 1 - + .../x/sys/unix/zerrors_openbsd_arm.go | 1 - + .../x/sys/unix/zerrors_openbsd_arm64.go | 1 - + .../x/sys/unix/zerrors_openbsd_mips64.go | 1 - + .../x/sys/unix/zerrors_openbsd_ppc64.go | 1 - + .../x/sys/unix/zerrors_openbsd_riscv64.go | 1 - + .../x/sys/unix/zerrors_solaris_amd64.go | 1 - + .../x/sys/unix/zerrors_zos_s390x.go | 1 - + .../x/sys/unix/zptrace_armnn_linux.go | 2 - + .../x/sys/unix/zptrace_mipsnn_linux.go | 2 - + .../x/sys/unix/zptrace_mipsnnle_linux.go | 2 - + .../x/sys/unix/zptrace_x86_linux.go | 2 - + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1 - + .../x/sys/unix/zsyscall_aix_ppc64.go | 1 - + .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1 - + .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1 - + .../x/sys/unix/zsyscall_darwin_amd64.go | 1 - + .../x/sys/unix/zsyscall_darwin_arm64.go | 1 - + .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1 - + .../x/sys/unix/zsyscall_freebsd_386.go | 1 - + .../x/sys/unix/zsyscall_freebsd_amd64.go | 1 - + .../x/sys/unix/zsyscall_freebsd_arm.go | 1 - + .../x/sys/unix/zsyscall_freebsd_arm64.go | 1 - + .../x/sys/unix/zsyscall_freebsd_riscv64.go | 1 - + .../x/sys/unix/zsyscall_illumos_amd64.go | 1 - + .../golang.org/x/sys/unix/zsyscall_linux.go | 36 +- + .../x/sys/unix/zsyscall_linux_386.go | 1 - + .../x/sys/unix/zsyscall_linux_amd64.go | 1 - + .../x/sys/unix/zsyscall_linux_arm.go | 1 - + 
.../x/sys/unix/zsyscall_linux_arm64.go | 1 - + .../x/sys/unix/zsyscall_linux_loong64.go | 1 - + .../x/sys/unix/zsyscall_linux_mips.go | 1 - + .../x/sys/unix/zsyscall_linux_mips64.go | 1 - + .../x/sys/unix/zsyscall_linux_mips64le.go | 1 - + .../x/sys/unix/zsyscall_linux_mipsle.go | 1 - + .../x/sys/unix/zsyscall_linux_ppc.go | 1 - + .../x/sys/unix/zsyscall_linux_ppc64.go | 1 - + .../x/sys/unix/zsyscall_linux_ppc64le.go | 1 - + .../x/sys/unix/zsyscall_linux_riscv64.go | 1 - + .../x/sys/unix/zsyscall_linux_s390x.go | 1 - + .../x/sys/unix/zsyscall_linux_sparc64.go | 1 - + .../x/sys/unix/zsyscall_netbsd_386.go | 1 - + .../x/sys/unix/zsyscall_netbsd_amd64.go | 1 - + .../x/sys/unix/zsyscall_netbsd_arm.go | 1 - + .../x/sys/unix/zsyscall_netbsd_arm64.go | 1 - + .../x/sys/unix/zsyscall_openbsd_386.go | 70 +++- + .../x/sys/unix/zsyscall_openbsd_386.s | 20 ++ + .../x/sys/unix/zsyscall_openbsd_amd64.go | 70 +++- + .../x/sys/unix/zsyscall_openbsd_amd64.s | 20 ++ + .../x/sys/unix/zsyscall_openbsd_arm.go | 70 +++- + .../x/sys/unix/zsyscall_openbsd_arm.s | 20 ++ + .../x/sys/unix/zsyscall_openbsd_arm64.go | 70 +++- + .../x/sys/unix/zsyscall_openbsd_arm64.s | 20 ++ + .../x/sys/unix/zsyscall_openbsd_mips64.go | 70 +++- + .../x/sys/unix/zsyscall_openbsd_mips64.s | 20 ++ + .../x/sys/unix/zsyscall_openbsd_ppc64.go | 70 +++- + .../x/sys/unix/zsyscall_openbsd_ppc64.s | 24 ++ + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 70 +++- + .../x/sys/unix/zsyscall_openbsd_riscv64.s | 20 ++ + .../x/sys/unix/zsyscall_solaris_amd64.go | 1 - + .../x/sys/unix/zsyscall_zos_s390x.go | 1 - + .../x/sys/unix/zsysctl_openbsd_386.go | 1 - + .../x/sys/unix/zsysctl_openbsd_amd64.go | 1 - + .../x/sys/unix/zsysctl_openbsd_arm.go | 1 - + .../x/sys/unix/zsysctl_openbsd_arm64.go | 1 - + .../x/sys/unix/zsysctl_openbsd_mips64.go | 1 - + .../x/sys/unix/zsysctl_openbsd_ppc64.go | 1 - + .../x/sys/unix/zsysctl_openbsd_riscv64.go | 1 - + .../x/sys/unix/zsysnum_darwin_amd64.go | 1 - + .../x/sys/unix/zsysnum_darwin_arm64.go | 1 - + .../x/sys/unix/zsysnum_dragonfly_amd64.go | 1 - + .../x/sys/unix/zsysnum_freebsd_386.go | 1 - + .../x/sys/unix/zsysnum_freebsd_amd64.go | 1 - + .../x/sys/unix/zsysnum_freebsd_arm.go | 1 - + .../x/sys/unix/zsysnum_freebsd_arm64.go | 1 - + .../x/sys/unix/zsysnum_freebsd_riscv64.go | 1 - + .../x/sys/unix/zsysnum_linux_386.go | 6 +- + .../x/sys/unix/zsysnum_linux_amd64.go | 6 +- + .../x/sys/unix/zsysnum_linux_arm.go | 6 +- + .../x/sys/unix/zsysnum_linux_arm64.go | 6 +- + .../x/sys/unix/zsysnum_linux_loong64.go | 6 +- + .../x/sys/unix/zsysnum_linux_mips.go | 6 +- + .../x/sys/unix/zsysnum_linux_mips64.go | 6 +- + .../x/sys/unix/zsysnum_linux_mips64le.go | 6 +- + .../x/sys/unix/zsysnum_linux_mipsle.go | 6 +- + .../x/sys/unix/zsysnum_linux_ppc.go | 6 +- + .../x/sys/unix/zsysnum_linux_ppc64.go | 6 +- + .../x/sys/unix/zsysnum_linux_ppc64le.go | 6 +- + .../x/sys/unix/zsysnum_linux_riscv64.go | 6 +- + .../x/sys/unix/zsysnum_linux_s390x.go | 6 +- + .../x/sys/unix/zsysnum_linux_sparc64.go | 6 +- + .../x/sys/unix/zsysnum_netbsd_386.go | 1 - + .../x/sys/unix/zsysnum_netbsd_amd64.go | 1 - + .../x/sys/unix/zsysnum_netbsd_arm.go | 1 - + .../x/sys/unix/zsysnum_netbsd_arm64.go | 1 - + .../x/sys/unix/zsysnum_openbsd_386.go | 1 - + .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 - + .../x/sys/unix/zsysnum_openbsd_arm.go | 1 - + .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 - + .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 - + .../x/sys/unix/zsysnum_openbsd_ppc64.go | 1 - + .../x/sys/unix/zsysnum_openbsd_riscv64.go | 1 - + 
.../x/sys/unix/zsysnum_zos_s390x.go | 1 - + .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 1 - + .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 1 - + .../x/sys/unix/ztypes_darwin_amd64.go | 1 - + .../x/sys/unix/ztypes_darwin_arm64.go | 1 - + .../x/sys/unix/ztypes_dragonfly_amd64.go | 1 - + .../x/sys/unix/ztypes_freebsd_386.go | 1 - + .../x/sys/unix/ztypes_freebsd_amd64.go | 1 - + .../x/sys/unix/ztypes_freebsd_arm.go | 1 - + .../x/sys/unix/ztypes_freebsd_arm64.go | 1 - + .../x/sys/unix/ztypes_freebsd_riscv64.go | 1 - + vendor/golang.org/x/sys/unix/ztypes_linux.go | 230 ++++++++---- + .../golang.org/x/sys/unix/ztypes_linux_386.go | 1 - + .../x/sys/unix/ztypes_linux_amd64.go | 1 - + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 1 - + .../x/sys/unix/ztypes_linux_arm64.go | 1 - + .../x/sys/unix/ztypes_linux_loong64.go | 1 - + .../x/sys/unix/ztypes_linux_mips.go | 1 - + .../x/sys/unix/ztypes_linux_mips64.go | 1 - + .../x/sys/unix/ztypes_linux_mips64le.go | 1 - + .../x/sys/unix/ztypes_linux_mipsle.go | 1 - + .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 1 - + .../x/sys/unix/ztypes_linux_ppc64.go | 1 - + .../x/sys/unix/ztypes_linux_ppc64le.go | 1 - + .../x/sys/unix/ztypes_linux_riscv64.go | 1 - + .../x/sys/unix/ztypes_linux_s390x.go | 1 - + .../x/sys/unix/ztypes_linux_sparc64.go | 1 - + .../x/sys/unix/ztypes_netbsd_386.go | 1 - + .../x/sys/unix/ztypes_netbsd_amd64.go | 1 - + .../x/sys/unix/ztypes_netbsd_arm.go | 1 - + .../x/sys/unix/ztypes_netbsd_arm64.go | 1 - + .../x/sys/unix/ztypes_openbsd_386.go | 1 - + .../x/sys/unix/ztypes_openbsd_amd64.go | 1 - + .../x/sys/unix/ztypes_openbsd_arm.go | 1 - + .../x/sys/unix/ztypes_openbsd_arm64.go | 1 - + .../x/sys/unix/ztypes_openbsd_mips64.go | 1 - + .../x/sys/unix/ztypes_openbsd_ppc64.go | 1 - + .../x/sys/unix/ztypes_openbsd_riscv64.go | 1 - + .../x/sys/unix/ztypes_solaris_amd64.go | 1 - + .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 1 - + vendor/golang.org/x/sys/windows/aliases.go | 1 - + vendor/golang.org/x/sys/windows/empty.s | 1 - + .../golang.org/x/sys/windows/env_windows.go | 17 +- + vendor/golang.org/x/sys/windows/eventlog.go | 1 - + vendor/golang.org/x/sys/windows/mksyscall.go | 1 - + vendor/golang.org/x/sys/windows/race.go | 1 - + vendor/golang.org/x/sys/windows/race0.go | 1 - + vendor/golang.org/x/sys/windows/service.go | 1 - + vendor/golang.org/x/sys/windows/str.go | 1 - + vendor/golang.org/x/sys/windows/syscall.go | 1 - + .../x/sys/windows/syscall_windows.go | 10 +- + .../golang.org/x/sys/windows/types_windows.go | 28 +- + .../x/sys/windows/zsyscall_windows.go | 37 ++ + vendor/golang.org/x/term/term_unix.go | 1 - + vendor/golang.org/x/term/term_unix_bsd.go | 1 - + vendor/golang.org/x/term/term_unix_other.go | 1 - + vendor/golang.org/x/term/term_unsupported.go | 1 - + .../x/text/secure/bidirule/bidirule10.0.0.go | 1 - + .../x/text/secure/bidirule/bidirule9.0.0.go | 1 - + .../x/text/unicode/bidi/tables10.0.0.go | 1 - + .../x/text/unicode/bidi/tables11.0.0.go | 1 - + .../x/text/unicode/bidi/tables12.0.0.go | 1 - + .../x/text/unicode/bidi/tables13.0.0.go | 1 - + .../x/text/unicode/bidi/tables15.0.0.go | 1 - + .../x/text/unicode/bidi/tables9.0.0.go | 1 - + .../x/text/unicode/norm/tables10.0.0.go | 1 - + .../x/text/unicode/norm/tables11.0.0.go | 1 - + .../x/text/unicode/norm/tables12.0.0.go | 1 - + .../x/text/unicode/norm/tables13.0.0.go | 1 - + .../x/text/unicode/norm/tables15.0.0.go | 1 - + .../x/text/unicode/norm/tables9.0.0.go | 1 - + vendor/modules.txt | 16 +- + 363 files changed, 2094 insertions(+), 863 deletions(-) + delete mode 100644 
vendor/golang.org/x/net/http2/go111.go + delete mode 100644 vendor/golang.org/x/net/http2/go115.go + delete mode 100644 vendor/golang.org/x/net/http2/go118.go + delete mode 100644 vendor/golang.org/x/net/http2/not_go111.go + delete mode 100644 vendor/golang.org/x/net/http2/not_go115.go + delete mode 100644 vendor/golang.org/x/net/http2/not_go118.go + create mode 100644 vendor/golang.org/x/net/http2/testsync.go + +diff --git a/go.mod b/go.mod +index 192b65c..f35bcd8 100644 +--- a/go.mod ++++ b/go.mod +@@ -13,8 +13,8 @@ require ( + github.com/onsi/gomega v1.24.0 + github.com/pkg/errors v0.9.1 // indirect + github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 +- golang.org/x/net v0.17.0 +- golang.org/x/sys v0.13.0 ++ golang.org/x/net v0.23.0 ++ golang.org/x/sys v0.18.0 + google.golang.org/grpc v1.56.3 + gopkg.in/natefinch/lumberjack.v2 v2.0.0 + k8s.io/api v0.22.8 +@@ -53,8 +53,8 @@ require ( + github.com/prometheus/procfs v0.7.3 // indirect + github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect + golang.org/x/oauth2 v0.7.0 // indirect +- golang.org/x/term v0.13.0 // indirect +- golang.org/x/text v0.13.0 // indirect ++ golang.org/x/term v0.18.0 // indirect ++ golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect +diff --git a/go.sum b/go.sum +index 0dabda6..844eeb8 100644 +--- a/go.sum ++++ b/go.sum +@@ -501,8 +501,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b + golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= + golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +-golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +-golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= ++golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= ++golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= + golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +@@ -576,13 +576,13 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc + golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +-golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +-golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= ++golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= + golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= + golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= + golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +-golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +-golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= ++golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= ++golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= + golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +@@ -590,8 +590,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= + golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= ++golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= ++golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= + golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= + golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= + golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go +index 2cb9c40..0c1b867 100644 +--- a/vendor/golang.org/x/net/context/go17.go ++++ b/vendor/golang.org/x/net/context/go17.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build go1.7 +-// +build go1.7 + + package context + +diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go +index 64d31ec..e31e35a 100644 +--- a/vendor/golang.org/x/net/context/go19.go ++++ b/vendor/golang.org/x/net/context/go19.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build go1.9 +-// +build go1.9 + + package context + +diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go +index 7b6b685..065ff3d 100644 +--- a/vendor/golang.org/x/net/context/pre_go17.go ++++ b/vendor/golang.org/x/net/context/pre_go17.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build !go1.7 +-// +build !go1.7 + + package context + +diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go +index 1f97153..ec5a638 100644 +--- a/vendor/golang.org/x/net/context/pre_go19.go ++++ b/vendor/golang.org/x/net/context/pre_go19.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build !go1.9 +-// +build !go1.9 + + package context + +diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go +index de67f93..3c57880 100644 +--- a/vendor/golang.org/x/net/html/token.go ++++ b/vendor/golang.org/x/net/html/token.go +@@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() { + return + } + switch c { +- case ' ', '\n', '\r', '\t', '\f', '/': +- z.pendingAttr[0].end = z.raw.end - 1 +- return + case '=': + if z.pendingAttr[0].start+1 == z.raw.end { + // WHATWG 13.2.5.32, if we see an equals sign before the attribute name +@@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() { + continue + } + fallthrough +- case '>': ++ case ' ', '\n', '\r', '\t', '\f', '/', '>': ++ // WHATWG 13.2.5.33 Attribute name state ++ // We need to reconsume the char in the after attribute name state to support the / character + z.raw.end-- + z.pendingAttr[0].end = z.raw.end + return +@@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() { + if z.err != nil { + return + } ++ if c == '/' { ++ // WHATWG 13.2.5.34 After attribute name state ++ // U+002F SOLIDUS (/) - Switch to the self-closing start tag state. ++ return ++ } + if c != '=' { + z.raw.end-- + return +diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go +index a3067f8..e6f55cb 100644 +--- a/vendor/golang.org/x/net/http2/databuffer.go ++++ b/vendor/golang.org/x/net/http2/databuffer.go +@@ -20,41 +20,44 @@ import ( + // TODO: Benchmark to determine if the pools are necessary. The GC may have + // improved enough that we can instead allocate chunks like this: + // make([]byte, max(16<<10, expectedBytesRemaining)) +-var ( +- dataChunkSizeClasses = []int{ +- 1 << 10, +- 2 << 10, +- 4 << 10, +- 8 << 10, +- 16 << 10, +- } +- dataChunkPools = [...]sync.Pool{ +- {New: func() interface{} { return make([]byte, 1<<10) }}, +- {New: func() interface{} { return make([]byte, 2<<10) }}, +- {New: func() interface{} { return make([]byte, 4<<10) }}, +- {New: func() interface{} { return make([]byte, 8<<10) }}, +- {New: func() interface{} { return make([]byte, 16<<10) }}, +- } +-) ++var dataChunkPools = [...]sync.Pool{ ++ {New: func() interface{} { return new([1 << 10]byte) }}, ++ {New: func() interface{} { return new([2 << 10]byte) }}, ++ {New: func() interface{} { return new([4 << 10]byte) }}, ++ {New: func() interface{} { return new([8 << 10]byte) }}, ++ {New: func() interface{} { return new([16 << 10]byte) }}, ++} + + func getDataBufferChunk(size int64) []byte { +- i := 0 +- for ; i < len(dataChunkSizeClasses)-1; i++ { +- if size <= int64(dataChunkSizeClasses[i]) { +- break +- } ++ switch { ++ case size <= 1<<10: ++ return dataChunkPools[0].Get().(*[1 << 10]byte)[:] ++ case size <= 2<<10: ++ return dataChunkPools[1].Get().(*[2 << 10]byte)[:] ++ case size <= 4<<10: ++ return dataChunkPools[2].Get().(*[4 << 10]byte)[:] ++ case size <= 8<<10: ++ return dataChunkPools[3].Get().(*[8 << 10]byte)[:] ++ default: ++ return dataChunkPools[4].Get().(*[16 << 10]byte)[:] + } +- return dataChunkPools[i].Get().([]byte) + } + + func putDataBufferChunk(p []byte) { +- for i, n := range dataChunkSizeClasses { +- if len(p) == n { +- dataChunkPools[i].Put(p) +- return +- } ++ switch len(p) { ++ case 1 << 10: ++ dataChunkPools[0].Put((*[1 << 10]byte)(p)) ++ case 2 << 10: ++ dataChunkPools[1].Put((*[2 << 10]byte)(p)) ++ case 4 << 10: ++ dataChunkPools[2].Put((*[4 << 10]byte)(p)) ++ case 8 << 10: ++ dataChunkPools[3].Put((*[8 << 10]byte)(p)) ++ case 16 << 10: ++ 
dataChunkPools[4].Put((*[16 << 10]byte)(p)) ++ default: ++ panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) + } +- panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) + } + + // dataBuffer is an io.ReadWriter backed by a list of data chunks. +diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go +index c1f6b90..43557ab 100644 +--- a/vendor/golang.org/x/net/http2/frame.go ++++ b/vendor/golang.org/x/net/http2/frame.go +@@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error { + } + + func (fr *Framer) maxHeaderStringLen() int { +- v := fr.maxHeaderListSize() +- if uint32(int(v)) == v { +- return int(v) ++ v := int(fr.maxHeaderListSize()) ++ if v < 0 { ++ // If maxHeaderListSize overflows an int, use no limit (0). ++ return 0 + } +- // They had a crazy big number for MaxHeaderBytes anyway, +- // so give them unlimited header lengths: +- return 0 ++ return v + } + + // readMetaFrame returns 0 or more CONTINUATION frames from fr and +@@ -1565,6 +1564,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { + if size > remainSize { + hdec.SetEmitEnabled(false) + mh.Truncated = true ++ remainSize = 0 + return + } + remainSize -= size +@@ -1577,6 +1577,36 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { + var hc headersOrContinuation = hf + for { + frag := hc.HeaderBlockFragment() ++ ++ // Avoid parsing large amounts of headers that we will then discard. ++ // If the sender exceeds the max header list size by too much, ++ // skip parsing the fragment and close the connection. ++ // ++ // "Too much" is either any CONTINUATION frame after we've already ++ // exceeded the max header list size (in which case remainSize is 0), ++ // or a frame whose encoded size is more than twice the remaining ++ // header list bytes we're willing to accept. ++ if int64(len(frag)) > int64(2*remainSize) { ++ if VerboseLogs { ++ log.Printf("http2: header list too large") ++ } ++ // It would be nice to send a RST_STREAM before sending the GOAWAY, ++ // but the structure of the server's frame writer makes this difficult. ++ return nil, ConnectionError(ErrCodeProtocol) ++ } ++ ++ // Also close the connection after any CONTINUATION frame following an ++ // invalid header, since we stop tracking the size of the headers after ++ // an invalid one. ++ if invalid != nil { ++ if VerboseLogs { ++ log.Printf("http2: invalid header: %v", invalid) ++ } ++ // It would be nice to send a RST_STREAM before sending the GOAWAY, ++ // but the structure of the server's frame writer makes this difficult. ++ return nil, ConnectionError(ErrCodeProtocol) ++ } ++ + if _, err := hdec.Write(frag); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } +diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go +deleted file mode 100644 +index 5bf62b0..0000000 +--- a/vendor/golang.org/x/net/http2/go111.go ++++ /dev/null +@@ -1,30 +0,0 @@ +-// Copyright 2018 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. 
+- +-//go:build go1.11 +-// +build go1.11 +- +-package http2 +- +-import ( +- "net/http/httptrace" +- "net/textproto" +-) +- +-func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { +- return trace != nil && trace.WroteHeaderField != nil +-} +- +-func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { +- if trace != nil && trace.WroteHeaderField != nil { +- trace.WroteHeaderField(k, []string{v}) +- } +-} +- +-func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { +- if trace != nil { +- return trace.Got1xxResponse +- } +- return nil +-} +diff --git a/vendor/golang.org/x/net/http2/go115.go b/vendor/golang.org/x/net/http2/go115.go +deleted file mode 100644 +index 908af1a..0000000 +--- a/vendor/golang.org/x/net/http2/go115.go ++++ /dev/null +@@ -1,27 +0,0 @@ +-// Copyright 2021 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-//go:build go1.15 +-// +build go1.15 +- +-package http2 +- +-import ( +- "context" +- "crypto/tls" +-) +- +-// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +-// connection. +-func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { +- dialer := &tls.Dialer{ +- Config: cfg, +- } +- cn, err := dialer.DialContext(ctx, network, addr) +- if err != nil { +- return nil, err +- } +- tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed +- return tlsCn, nil +-} +diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go +deleted file mode 100644 +index aca4b2b..0000000 +--- a/vendor/golang.org/x/net/http2/go118.go ++++ /dev/null +@@ -1,17 +0,0 @@ +-// Copyright 2021 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-//go:build go1.18 +-// +build go1.18 +- +-package http2 +- +-import ( +- "crypto/tls" +- "net" +-) +- +-func tlsUnderlyingConn(tc *tls.Conn) net.Conn { +- return tc.NetConn() +-} +diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go +deleted file mode 100644 +index cc0baa8..0000000 +--- a/vendor/golang.org/x/net/http2/not_go111.go ++++ /dev/null +@@ -1,21 +0,0 @@ +-// Copyright 2018 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-//go:build !go1.11 +-// +build !go1.11 +- +-package http2 +- +-import ( +- "net/http/httptrace" +- "net/textproto" +-) +- +-func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } +- +-func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} +- +-func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { +- return nil +-} +diff --git a/vendor/golang.org/x/net/http2/not_go115.go b/vendor/golang.org/x/net/http2/not_go115.go +deleted file mode 100644 +index e6c04cf..0000000 +--- a/vendor/golang.org/x/net/http2/not_go115.go ++++ /dev/null +@@ -1,31 +0,0 @@ +-// Copyright 2021 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-//go:build !go1.15 +-// +build !go1.15 +- +-package http2 +- +-import ( +- "context" +- "crypto/tls" +-) +- +-// dialTLSWithContext opens a TLS connection. 
+-func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { +- cn, err := tls.Dial(network, addr, cfg) +- if err != nil { +- return nil, err +- } +- if err := cn.Handshake(); err != nil { +- return nil, err +- } +- if cfg.InsecureSkipVerify { +- return cn, nil +- } +- if err := cn.VerifyHostname(cfg.ServerName); err != nil { +- return nil, err +- } +- return cn, nil +-} +diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go +deleted file mode 100644 +index eab532c..0000000 +--- a/vendor/golang.org/x/net/http2/not_go118.go ++++ /dev/null +@@ -1,17 +0,0 @@ +-// Copyright 2021 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-//go:build !go1.18 +-// +build !go1.18 +- +-package http2 +- +-import ( +- "crypto/tls" +- "net" +-) +- +-func tlsUnderlyingConn(tc *tls.Conn) net.Conn { +- return nil +-} +diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go +index 684d984..3b9f06b 100644 +--- a/vendor/golang.org/x/net/http2/pipe.go ++++ b/vendor/golang.org/x/net/http2/pipe.go +@@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) { + } + } + +-var errClosedPipeWrite = errors.New("write on closed buffer") ++var ( ++ errClosedPipeWrite = errors.New("write on closed buffer") ++ errUninitializedPipeWrite = errors.New("write on uninitialized buffer") ++) + + // Write copies bytes from p into the buffer and wakes a reader. + // It is an error to write more data than the buffer can hold. +@@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) { + if p.err != nil || p.breakErr != nil { + return 0, errClosedPipeWrite + } ++ // pipe.setBuffer is never invoked, leaving the buffer uninitialized. ++ // We shouldn't try to write to an uninitialized pipe, ++ // but returning an error is better than panicking. ++ if p.b == nil { ++ return 0, errUninitializedPipeWrite ++ } + return p.b.Write(d) + } + +diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go +index 02c88b6..ce2e8b4 100644 +--- a/vendor/golang.org/x/net/http2/server.go ++++ b/vendor/golang.org/x/net/http2/server.go +@@ -124,6 +124,7 @@ type Server struct { + // IdleTimeout specifies how long until idle clients should be + // closed with a GOAWAY frame. PING frames are not considered + // activity for the purposes of IdleTimeout. ++ // If zero or negative, there is no timeout. + IdleTimeout time.Duration + + // MaxUploadBufferPerConnection is the size of the initial flow +@@ -434,7 +435,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. + // Disarm the net.Conn write deadline here. 
+- if sc.hs.WriteTimeout != 0 { ++ if sc.hs.WriteTimeout > 0 { + sc.conn.SetWriteDeadline(time.Time{}) + } + +@@ -924,7 +925,7 @@ func (sc *serverConn) serve() { + sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) + +- if sc.srv.IdleTimeout != 0 { ++ if sc.srv.IdleTimeout > 0 { + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + defer sc.idleTimer.Stop() + } +@@ -1637,7 +1638,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { + delete(sc.streams, st.id) + if len(sc.streams) == 0 { + sc.setConnState(http.StateIdle) +- if sc.srv.IdleTimeout != 0 { ++ if sc.srv.IdleTimeout > 0 { + sc.idleTimer.Reset(sc.srv.IdleTimeout) + } + if h1ServerKeepAlivesDisabled(sc.hs) { +@@ -2017,7 +2018,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { + // similar to how the http1 server works. Here it's + // technically more like the http1 Server's ReadHeaderTimeout + // (in Go 1.8), though. That's a more sane option anyway. +- if sc.hs.ReadTimeout != 0 { ++ if sc.hs.ReadTimeout > 0 { + sc.conn.SetReadDeadline(time.Time{}) + st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + } +@@ -2038,7 +2039,7 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { + + // Disable any read deadline set by the net/http package + // prior to the upgrade. +- if sc.hs.ReadTimeout != 0 { ++ if sc.hs.ReadTimeout > 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + +@@ -2116,7 +2117,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.init(sc.srv.initialStreamRecvWindowSize()) +- if sc.hs.WriteTimeout != 0 { ++ if sc.hs.WriteTimeout > 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + } + +@@ -2549,7 +2550,6 @@ type responseWriterState struct { + wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. + sentHeader bool // have we sent the header frame? + handlerDone bool // handler has finished +- dirty bool // a Write failed; don't reuse this responseWriterState + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 +@@ -2669,7 +2669,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + date: date, + }) + if err != nil { +- rws.dirty = true + return 0, err + } + if endStream { +@@ -2690,7 +2689,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. 
+ if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { +- rws.dirty = true + return 0, err + } + } +@@ -2702,9 +2700,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + trailers: rws.trailers, + endStream: true, + }) +- if err != nil { +- rws.dirty = true +- } + return len(p), err + } + return len(p), nil +@@ -2920,14 +2915,12 @@ func (rws *responseWriterState) writeHeader(code int) { + h.Del("Transfer-Encoding") + } + +- if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ ++ rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: code, + h: h, + endStream: rws.handlerDone && !rws.hasTrailers(), +- }) != nil { +- rws.dirty = true +- } ++ }) + + return + } +@@ -2992,19 +2985,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, + + func (w *responseWriter) handlerDone() { + rws := w.rws +- dirty := rws.dirty + rws.handlerDone = true + w.Flush() + w.rws = nil +- if !dirty { +- // Only recycle the pool if all prior Write calls to +- // the serverConn goroutine completed successfully. If +- // they returned earlier due to resets from the peer +- // there might still be write goroutines outstanding +- // from the serverConn referencing the rws memory. See +- // issue 20704. +- responseWriterStatePool.Put(rws) +- } ++ responseWriterStatePool.Put(rws) + } + + // Push errors. +@@ -3187,6 +3171,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) { + panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) + } + ++ sc.curHandlers++ + go sc.runHandler(rw, req, sc.handler.ServeHTTP) + return promisedID, nil + } +diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go +new file mode 100644 +index 0000000..61075bd +--- /dev/null ++++ b/vendor/golang.org/x/net/http2/testsync.go +@@ -0,0 +1,331 @@ ++// Copyright 2024 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++package http2 ++ ++import ( ++ "context" ++ "sync" ++ "time" ++) ++ ++// testSyncHooks coordinates goroutines in tests. ++// ++// For example, a call to ClientConn.RoundTrip involves several goroutines, including: ++// - the goroutine running RoundTrip; ++// - the clientStream.doRequest goroutine, which writes the request; and ++// - the clientStream.readLoop goroutine, which reads the response. ++// ++// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines ++// are blocked waiting for some condition such as reading the Request.Body or waiting for ++// flow control to become available. ++// ++// The testSyncHooks also manage timers and synthetic time in tests. ++// This permits us to, for example, start a request and cause it to time out waiting for ++// response headers without resorting to time.Sleep calls. ++type testSyncHooks struct { ++ // active/inactive act as a mutex and condition variable. ++ // ++ // - neither chan contains a value: testSyncHooks is locked. 
++ // - active contains a value: unlocked, and at least one goroutine is not blocked ++ // - inactive contains a value: unlocked, and all goroutines are blocked ++ active chan struct{} ++ inactive chan struct{} ++ ++ // goroutine counts ++ total int // total goroutines ++ condwait map[*sync.Cond]int // blocked in sync.Cond.Wait ++ blocked []*testBlockedGoroutine // otherwise blocked ++ ++ // fake time ++ now time.Time ++ timers []*fakeTimer ++ ++ // Transport testing: Report various events. ++ newclientconn func(*ClientConn) ++ newstream func(*clientStream) ++} ++ ++// testBlockedGoroutine is a blocked goroutine. ++type testBlockedGoroutine struct { ++ f func() bool // blocked until f returns true ++ ch chan struct{} // closed when unblocked ++} ++ ++func newTestSyncHooks() *testSyncHooks { ++ h := &testSyncHooks{ ++ active: make(chan struct{}, 1), ++ inactive: make(chan struct{}, 1), ++ condwait: map[*sync.Cond]int{}, ++ } ++ h.inactive <- struct{}{} ++ h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) ++ return h ++} ++ ++// lock acquires the testSyncHooks mutex. ++func (h *testSyncHooks) lock() { ++ select { ++ case <-h.active: ++ case <-h.inactive: ++ } ++} ++ ++// waitInactive waits for all goroutines to become inactive. ++func (h *testSyncHooks) waitInactive() { ++ for { ++ <-h.inactive ++ if !h.unlock() { ++ break ++ } ++ } ++} ++ ++// unlock releases the testSyncHooks mutex. ++// It reports whether any goroutines are active. ++func (h *testSyncHooks) unlock() (active bool) { ++ // Look for a blocked goroutine which can be unblocked. ++ blocked := h.blocked[:0] ++ unblocked := false ++ for _, b := range h.blocked { ++ if !unblocked && b.f() { ++ unblocked = true ++ close(b.ch) ++ } else { ++ blocked = append(blocked, b) ++ } ++ } ++ h.blocked = blocked ++ ++ // Count goroutines blocked on condition variables. ++ condwait := 0 ++ for _, count := range h.condwait { ++ condwait += count ++ } ++ ++ if h.total > condwait+len(blocked) { ++ h.active <- struct{}{} ++ return true ++ } else { ++ h.inactive <- struct{}{} ++ return false ++ } ++} ++ ++// goRun starts a new goroutine. ++func (h *testSyncHooks) goRun(f func()) { ++ h.lock() ++ h.total++ ++ h.unlock() ++ go func() { ++ defer func() { ++ h.lock() ++ h.total-- ++ h.unlock() ++ }() ++ f() ++ }() ++} ++ ++// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. ++// It waits until f returns true before proceeding. ++// ++// Example usage: ++// ++// h.blockUntil(func() bool { ++// // Is the context done yet? ++// select { ++// case <-ctx.Done(): ++// default: ++// return false ++// } ++// return true ++// }) ++// // Wait for the context to become done. ++// <-ctx.Done() ++// ++// The function f passed to blockUntil must be non-blocking and idempotent. ++func (h *testSyncHooks) blockUntil(f func() bool) { ++ if f() { ++ return ++ } ++ ch := make(chan struct{}) ++ h.lock() ++ h.blocked = append(h.blocked, &testBlockedGoroutine{ ++ f: f, ++ ch: ch, ++ }) ++ h.unlock() ++ <-ch ++} ++ ++// broadcast is sync.Cond.Broadcast. ++func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { ++ h.lock() ++ delete(h.condwait, cond) ++ h.unlock() ++ cond.Broadcast() ++} ++ ++// broadcast is sync.Cond.Wait. ++func (h *testSyncHooks) condWait(cond *sync.Cond) { ++ h.lock() ++ h.condwait[cond]++ ++ h.unlock() ++} ++ ++// newTimer creates a new fake timer. 
++func (h *testSyncHooks) newTimer(d time.Duration) timer { ++ h.lock() ++ defer h.unlock() ++ t := &fakeTimer{ ++ hooks: h, ++ when: h.now.Add(d), ++ c: make(chan time.Time), ++ } ++ h.timers = append(h.timers, t) ++ return t ++} ++ ++// afterFunc creates a new fake AfterFunc timer. ++func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { ++ h.lock() ++ defer h.unlock() ++ t := &fakeTimer{ ++ hooks: h, ++ when: h.now.Add(d), ++ f: f, ++ } ++ h.timers = append(h.timers, t) ++ return t ++} ++ ++func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { ++ ctx, cancel := context.WithCancel(ctx) ++ t := h.afterFunc(d, cancel) ++ return ctx, func() { ++ t.Stop() ++ cancel() ++ } ++} ++ ++func (h *testSyncHooks) timeUntilEvent() time.Duration { ++ h.lock() ++ defer h.unlock() ++ var next time.Time ++ for _, t := range h.timers { ++ if next.IsZero() || t.when.Before(next) { ++ next = t.when ++ } ++ } ++ if d := next.Sub(h.now); d > 0 { ++ return d ++ } ++ return 0 ++} ++ ++// advance advances time and causes synthetic timers to fire. ++func (h *testSyncHooks) advance(d time.Duration) { ++ h.lock() ++ defer h.unlock() ++ h.now = h.now.Add(d) ++ timers := h.timers[:0] ++ for _, t := range h.timers { ++ t := t // remove after go.mod depends on go1.22 ++ t.mu.Lock() ++ switch { ++ case t.when.After(h.now): ++ timers = append(timers, t) ++ case t.when.IsZero(): ++ // stopped timer ++ default: ++ t.when = time.Time{} ++ if t.c != nil { ++ close(t.c) ++ } ++ if t.f != nil { ++ h.total++ ++ go func() { ++ defer func() { ++ h.lock() ++ h.total-- ++ h.unlock() ++ }() ++ t.f() ++ }() ++ } ++ } ++ t.mu.Unlock() ++ } ++ h.timers = timers ++} ++ ++// A timer wraps a time.Timer, or a synthetic equivalent in tests. ++// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. ++type timer interface { ++ C() <-chan time.Time ++ Stop() bool ++ Reset(d time.Duration) bool ++} ++ ++// timeTimer implements timer using real time. ++type timeTimer struct { ++ t *time.Timer ++ c chan time.Time ++} ++ ++// newTimeTimer creates a new timer using real time. ++func newTimeTimer(d time.Duration) timer { ++ ch := make(chan time.Time) ++ t := time.AfterFunc(d, func() { ++ close(ch) ++ }) ++ return &timeTimer{t, ch} ++} ++ ++// newTimeAfterFunc creates an AfterFunc timer using real time. ++func newTimeAfterFunc(d time.Duration, f func()) timer { ++ return &timeTimer{ ++ t: time.AfterFunc(d, f), ++ } ++} ++ ++func (t timeTimer) C() <-chan time.Time { return t.c } ++func (t timeTimer) Stop() bool { return t.t.Stop() } ++func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } ++ ++// fakeTimer implements timer using fake time. 
++type fakeTimer struct { ++ hooks *testSyncHooks ++ ++ mu sync.Mutex ++ when time.Time // when the timer will fire ++ c chan time.Time // closed when the timer fires; mutually exclusive with f ++ f func() // called when the timer fires; mutually exclusive with c ++} ++ ++func (t *fakeTimer) C() <-chan time.Time { return t.c } ++ ++func (t *fakeTimer) Stop() bool { ++ t.mu.Lock() ++ defer t.mu.Unlock() ++ stopped := t.when.IsZero() ++ t.when = time.Time{} ++ return stopped ++} ++ ++func (t *fakeTimer) Reset(d time.Duration) bool { ++ if t.c != nil || t.f == nil { ++ panic("fakeTimer only supports Reset on AfterFunc timers") ++ } ++ t.mu.Lock() ++ defer t.mu.Unlock() ++ t.hooks.lock() ++ defer t.hooks.unlock() ++ active := !t.when.IsZero() ++ t.when = t.hooks.now.Add(d) ++ if !active { ++ t.hooks.timers = append(t.hooks.timers, t) ++ } ++ return active ++} +diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go +index 4515b22..ce375c8 100644 +--- a/vendor/golang.org/x/net/http2/transport.go ++++ b/vendor/golang.org/x/net/http2/transport.go +@@ -147,6 +147,12 @@ type Transport struct { + // waiting for their turn. + StrictMaxConcurrentStreams bool + ++ // IdleConnTimeout is the maximum amount of time an idle ++ // (keep-alive) connection will remain idle before closing ++ // itself. ++ // Zero means no limit. ++ IdleConnTimeout time.Duration ++ + // ReadIdleTimeout is the timeout after which a health check using ping + // frame will be carried out if no frame is received on the connection. + // Note that a ping response will is considered a received frame, so if +@@ -178,6 +184,8 @@ type Transport struct { + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool ++ ++ syncHooks *testSyncHooks + } + + func (t *Transport) maxHeaderListSize() uint32 { +@@ -302,7 +310,7 @@ type ClientConn struct { + readerErr error // set before readerDone is closed + + idleTimeout time.Duration // or 0 for never +- idleTimer *time.Timer ++ idleTimer timer + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes +@@ -344,6 +352,60 @@ type ClientConn struct { + werr error // first write error that has occurred + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder ++ ++ syncHooks *testSyncHooks // can be nil ++} ++ ++// Hook points used for testing. ++// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. ++// Inside tests, see the testSyncHooks function docs. ++ ++// goRun starts a new goroutine. ++func (cc *ClientConn) goRun(f func()) { ++ if cc.syncHooks != nil { ++ cc.syncHooks.goRun(f) ++ return ++ } ++ go f() ++} ++ ++// condBroadcast is cc.cond.Broadcast. ++func (cc *ClientConn) condBroadcast() { ++ if cc.syncHooks != nil { ++ cc.syncHooks.condBroadcast(cc.cond) ++ } ++ cc.cond.Broadcast() ++} ++ ++// condWait is cc.cond.Wait. ++func (cc *ClientConn) condWait() { ++ if cc.syncHooks != nil { ++ cc.syncHooks.condWait(cc.cond) ++ } ++ cc.cond.Wait() ++} ++ ++// newTimer creates a new time.Timer, or a synthetic timer in tests. ++func (cc *ClientConn) newTimer(d time.Duration) timer { ++ if cc.syncHooks != nil { ++ return cc.syncHooks.newTimer(d) ++ } ++ return newTimeTimer(d) ++} ++ ++// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. 
++func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { ++ if cc.syncHooks != nil { ++ return cc.syncHooks.afterFunc(d, f) ++ } ++ return newTimeAfterFunc(d, f) ++} ++ ++func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { ++ if cc.syncHooks != nil { ++ return cc.syncHooks.contextWithTimeout(ctx, d) ++ } ++ return context.WithTimeout(ctx, d) + } + + // clientStream is the state for a single HTTP/2 stream. One of these +@@ -425,7 +487,7 @@ func (cs *clientStream) abortStreamLocked(err error) { + // TODO(dneil): Clean up tests where cs.cc.cond is nil. + if cs.cc.cond != nil { + // Wake up writeRequestBody if it is waiting on flow control. +- cs.cc.cond.Broadcast() ++ cs.cc.condBroadcast() + } + } + +@@ -435,7 +497,7 @@ func (cs *clientStream) abortRequestBodyWrite() { + defer cc.mu.Unlock() + if cs.reqBody != nil && cs.reqBodyClosed == nil { + cs.closeReqBodyLocked() +- cc.cond.Broadcast() ++ cc.condBroadcast() + } + } + +@@ -445,10 +507,10 @@ func (cs *clientStream) closeReqBodyLocked() { + } + cs.reqBodyClosed = make(chan struct{}) + reqBodyClosed := cs.reqBodyClosed +- go func() { ++ cs.cc.goRun(func() { + cs.reqBody.Close() + close(reqBodyClosed) +- }() ++ }) + } + + type stickyErrWriter struct { +@@ -537,15 +599,6 @@ func authorityAddr(scheme string, authority string) (addr string) { + return net.JoinHostPort(host, port) + } + +-var retryBackoffHook func(time.Duration) *time.Timer +- +-func backoffNewTimer(d time.Duration) *time.Timer { +- if retryBackoffHook != nil { +- return retryBackoffHook(d) +- } +- return time.NewTimer(d) +-} +- + // RoundTripOpt is like RoundTrip, but takes options. + func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { +@@ -573,13 +626,27 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res + backoff := float64(uint(1) << (uint(retry) - 1)) + backoff += backoff * (0.1 * mathrand.Float64()) + d := time.Second * time.Duration(backoff) +- timer := backoffNewTimer(d) ++ var tm timer ++ if t.syncHooks != nil { ++ tm = t.syncHooks.newTimer(d) ++ t.syncHooks.blockUntil(func() bool { ++ select { ++ case <-tm.C(): ++ case <-req.Context().Done(): ++ default: ++ return false ++ } ++ return true ++ }) ++ } else { ++ tm = newTimeTimer(d) ++ } + select { +- case <-timer.C: ++ case <-tm.C(): + t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) + continue + case <-req.Context().Done(): +- timer.Stop() ++ tm.Stop() + err = req.Context().Err() + } + } +@@ -658,6 +725,9 @@ func canRetryError(err error) bool { + } + + func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { ++ if t.syncHooks != nil { ++ return t.newClientConn(nil, singleUse, t.syncHooks) ++ } + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err +@@ -666,7 +736,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b + if err != nil { + return nil, err + } +- return t.newClientConn(tconn, singleUse) ++ return t.newClientConn(tconn, singleUse, nil) + } + + func (t *Transport) newTLSConfig(host string) *tls.Config { +@@ -732,10 +802,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { + } + + func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { +- return t.newClientConn(c, t.disableKeepAlives()) ++ return t.newClientConn(c, t.disableKeepAlives(), 
nil) + } + +-func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { ++func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { + cc := &ClientConn{ + t: t, + tconn: c, +@@ -750,10 +820,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), ++ syncHooks: hooks, ++ } ++ if hooks != nil { ++ hooks.newclientconn(cc) ++ c = cc.tconn + } + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d +- cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) ++ cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) +@@ -818,7 +893,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro + return nil, cc.werr + } + +- go cc.readLoop() ++ cc.goRun(cc.readLoop) + return cc, nil + } + +@@ -826,7 +901,7 @@ func (cc *ClientConn) healthCheck() { + pingTimeout := cc.t.pingTimeout() + // We don't need to periodically ping in the health check, because the readLoop of ClientConn will + // trigger the healthCheck again if there is no frame received. +- ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) ++ ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) + defer cancel() + cc.vlogf("http2: Transport sending health check") + err := cc.Ping(ctx) +@@ -1018,7 +1093,7 @@ func (cc *ClientConn) forceCloseConn() { + if !ok { + return + } +- if nc := tlsUnderlyingConn(tc); nc != nil { ++ if nc := tc.NetConn(); nc != nil { + nc.Close() + } + } +@@ -1056,7 +1131,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { + // Wait for all in-flight streams to complete or connection to close + done := make(chan struct{}) + cancelled := false // guarded by cc.mu +- go func() { ++ cc.goRun(func() { + cc.mu.Lock() + defer cc.mu.Unlock() + for { +@@ -1068,9 +1143,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { + if cancelled { + break + } +- cc.cond.Wait() ++ cc.condWait() + } +- }() ++ }) + shutdownEnterWaitStateHook() + select { + case <-done: +@@ -1080,7 +1155,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { + cc.mu.Lock() + // Free the goroutine above + cancelled = true +- cc.cond.Broadcast() ++ cc.condBroadcast() + cc.mu.Unlock() + return ctx.Err() + } +@@ -1118,7 +1193,7 @@ func (cc *ClientConn) closeForError(err error) { + for _, cs := range cc.streams { + cs.abortStreamLocked(err) + } +- cc.cond.Broadcast() ++ cc.condBroadcast() + cc.mu.Unlock() + cc.closeConn() + } +@@ -1215,6 +1290,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() { + } + + func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { ++ return cc.roundTrip(req, nil) ++} ++ ++func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { + ctx := req.Context() + cs := &clientStream{ + cc: cc, +@@ -1229,9 +1308,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + respHeaderRecv: make(chan struct{}), + donec: make(chan struct{}), + } +- go cs.doRequest(req) ++ cc.goRun(func() { ++ cs.doRequest(req) ++ }) + + waitDone := func() error { ++ if cc.syncHooks != nil { ++ cc.syncHooks.blockUntil(func() bool { ++ select { ++ case <-cs.donec: ++ case <-ctx.Done(): ++ case <-cs.reqCancel: ++ default: ++ return false ++ } ++ return true ++ }) ++ } + 
select { + case <-cs.donec: + return nil +@@ -1292,7 +1385,24 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return err + } + ++ if streamf != nil { ++ streamf(cs) ++ } ++ + for { ++ if cc.syncHooks != nil { ++ cc.syncHooks.blockUntil(func() bool { ++ select { ++ case <-cs.respHeaderRecv: ++ case <-cs.abort: ++ case <-ctx.Done(): ++ case <-cs.reqCancel: ++ default: ++ return false ++ } ++ return true ++ }) ++ } + select { + case <-cs.respHeaderRecv: + return handleResponseHeaders() +@@ -1348,6 +1458,21 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { + if cc.reqHeaderMu == nil { + panic("RoundTrip on uninitialized ClientConn") // for tests + } ++ var newStreamHook func(*clientStream) ++ if cc.syncHooks != nil { ++ newStreamHook = cc.syncHooks.newstream ++ cc.syncHooks.blockUntil(func() bool { ++ select { ++ case cc.reqHeaderMu <- struct{}{}: ++ <-cc.reqHeaderMu ++ case <-cs.reqCancel: ++ case <-ctx.Done(): ++ default: ++ return false ++ } ++ return true ++ }) ++ } + select { + case cc.reqHeaderMu <- struct{}{}: + case <-cs.reqCancel: +@@ -1372,6 +1497,10 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { + } + cc.mu.Unlock() + ++ if newStreamHook != nil { ++ newStreamHook(cs) ++ } ++ + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && +@@ -1452,15 +1581,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { + var respHeaderTimer <-chan time.Time + var respHeaderRecv chan struct{} + if d := cc.responseHeaderTimeout(); d != 0 { +- timer := time.NewTimer(d) ++ timer := cc.newTimer(d) + defer timer.Stop() +- respHeaderTimer = timer.C ++ respHeaderTimer = timer.C() + respHeaderRecv = cs.respHeaderRecv + } + // Wait until the peer half-closes its end of the stream, + // or until the request is aborted (via context, error, or otherwise), + // whichever comes first. + for { ++ if cc.syncHooks != nil { ++ cc.syncHooks.blockUntil(func() bool { ++ select { ++ case <-cs.peerClosed: ++ case <-respHeaderTimer: ++ case <-respHeaderRecv: ++ case <-cs.abort: ++ case <-ctx.Done(): ++ case <-cs.reqCancel: ++ default: ++ return false ++ } ++ return true ++ }) ++ } + select { + case <-cs.peerClosed: + return nil +@@ -1609,7 +1753,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { + return nil + } + cc.pendingRequests++ +- cc.cond.Wait() ++ cc.condWait() + cc.pendingRequests-- + select { + case <-cs.abort: +@@ -1871,10 +2015,26 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) + cs.flow.take(take) + return take, nil + } +- cc.cond.Wait() ++ cc.condWait() + } + } + ++func validateHeaders(hdrs http.Header) string { ++ for k, vv := range hdrs { ++ if !httpguts.ValidHeaderFieldName(k) { ++ return fmt.Sprintf("name %q", k) ++ } ++ for _, v := range vv { ++ if !httpguts.ValidHeaderFieldValue(v) { ++ // Don't include the value in the error, ++ // because it may be sensitive. ++ return fmt.Sprintf("value for header %q", k) ++ } ++ } ++ } ++ return "" ++} ++ + var errNilRequestURL = errors.New("http2: Request.URI is nil") + + // requires cc.wmu be held. +@@ -1912,19 +2072,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail + } + } + +- // Check for any invalid headers and return an error before we ++ // Check for any invalid headers+trailers and return an error before we + // potentially pollute our hpack state. 
(We want to be able to + // continue to reuse the hpack encoder for future requests) +- for k, vv := range req.Header { +- if !httpguts.ValidHeaderFieldName(k) { +- return nil, fmt.Errorf("invalid HTTP header name %q", k) +- } +- for _, v := range vv { +- if !httpguts.ValidHeaderFieldValue(v) { +- // Don't include the value in the error, because it may be sensitive. +- return nil, fmt.Errorf("invalid HTTP header value for header %q", k) +- } +- } ++ if err := validateHeaders(req.Header); err != "" { ++ return nil, fmt.Errorf("invalid HTTP header %s", err) ++ } ++ if err := validateHeaders(req.Trailer); err != "" { ++ return nil, fmt.Errorf("invalid HTTP trailer %s", err) + } + + enumerateHeaders := func(f func(name, value string)) { +@@ -2143,7 +2298,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { + } + // Wake up writeRequestBody via clientStream.awaitFlowControl and + // wake up RoundTrip if there is a pending request. +- cc.cond.Broadcast() ++ cc.condBroadcast() + + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil + if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { +@@ -2231,7 +2386,7 @@ func (rl *clientConnReadLoop) cleanup() { + cs.abortStreamLocked(err) + } + } +- cc.cond.Broadcast() ++ cc.condBroadcast() + cc.mu.Unlock() + } + +@@ -2266,10 +2421,9 @@ func (rl *clientConnReadLoop) run() error { + cc := rl.cc + gotSettings := false + readIdleTimeout := cc.t.ReadIdleTimeout +- var t *time.Timer ++ var t timer + if readIdleTimeout != 0 { +- t = time.AfterFunc(readIdleTimeout, cc.healthCheck) +- defer t.Stop() ++ t = cc.afterFunc(readIdleTimeout, cc.healthCheck) + } + for { + f, err := cc.fr.ReadFrame() +@@ -2684,7 +2838,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { + }) + return nil + } +- if !cs.firstByte { ++ if !cs.pastHeaders { + cc.logf("protocol error: received DATA before a HEADERS frame") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, +@@ -2867,7 +3021,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { + for _, cs := range cc.streams { + cs.flow.add(delta) + } +- cc.cond.Broadcast() ++ cc.condBroadcast() + + cc.initialWindowSize = s.Val + case SettingHeaderTableSize: +@@ -2911,9 +3065,18 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { + fl = &cs.flow + } + if !fl.add(int32(f.Increment)) { ++ // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR ++ if cs != nil { ++ rl.endStreamError(cs, StreamError{ ++ StreamID: f.StreamID, ++ Code: ErrCodeFlowControl, ++ }) ++ return nil ++ } ++ + return ConnectionError(ErrCodeFlowControl) + } +- cc.cond.Broadcast() ++ cc.condBroadcast() + return nil + } + +@@ -2955,24 +3118,38 @@ func (cc *ClientConn) Ping(ctx context.Context) error { + } + cc.mu.Unlock() + } +- errc := make(chan error, 1) +- go func() { ++ var pingError error ++ errc := make(chan struct{}) ++ cc.goRun(func() { + cc.wmu.Lock() + defer cc.wmu.Unlock() +- if err := cc.fr.WritePing(false, p); err != nil { +- errc <- err ++ if pingError = cc.fr.WritePing(false, p); pingError != nil { ++ close(errc) + return + } +- if err := cc.bw.Flush(); err != nil { +- errc <- err ++ if pingError = cc.bw.Flush(); pingError != nil { ++ close(errc) + return + } +- }() ++ }) ++ if cc.syncHooks != nil { ++ cc.syncHooks.blockUntil(func() bool { ++ select { ++ case <-c: ++ case <-errc: ++ case <-ctx.Done(): ++ case <-cc.readerDone: ++ default: ++ return false ++ } ++ return true ++ }) ++ } + select { 
+ case <-c: + return nil +- case err := <-errc: +- return err ++ case <-errc: ++ return pingError + case <-ctx.Done(): + return ctx.Err() + case <-cc.readerDone: +@@ -3141,9 +3318,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err + } + + func (t *Transport) idleConnTimeout() time.Duration { ++ // to keep things backwards compatible, we use non-zero values of ++ // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying ++ // http1 transport, followed by 0 ++ if t.IdleConnTimeout != 0 { ++ return t.IdleConnTimeout ++ } ++ + if t.t1 != nil { + return t.t1.IdleConnTimeout + } ++ + return 0 + } + +@@ -3201,3 +3386,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { + trace.GotFirstResponseByte() + } + } ++ ++func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { ++ return trace != nil && trace.WroteHeaderField != nil ++} ++ ++func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { ++ if trace != nil && trace.WroteHeaderField != nil { ++ trace.WroteHeaderField(k, []string{v}) ++ } ++} ++ ++func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { ++ if trace != nil { ++ return trace.Got1xxResponse ++ } ++ return nil ++} ++ ++// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS ++// connection. ++func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { ++ dialer := &tls.Dialer{ ++ Config: cfg, ++ } ++ cn, err := dialer.DialContext(ctx, network, addr) ++ if err != nil { ++ return nil, err ++ } ++ tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed ++ return tlsCn, nil ++} +diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go +index c5c4338..712f1ad 100644 +--- a/vendor/golang.org/x/net/idna/go118.go ++++ b/vendor/golang.org/x/net/idna/go118.go +@@ -5,7 +5,6 @@ + // license that can be found in the LICENSE file. + + //go:build go1.18 +-// +build go1.18 + + package idna + +diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go +index 64ccf85..7b37178 100644 +--- a/vendor/golang.org/x/net/idna/idna10.0.0.go ++++ b/vendor/golang.org/x/net/idna/idna10.0.0.go +@@ -5,7 +5,6 @@ + // license that can be found in the LICENSE file. + + //go:build go1.10 +-// +build go1.10 + + // Package idna implements IDNA2008 using the compatibility processing + // defined by UTS (Unicode Technical Standard) #46, which defines a standard to +diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go +index ee1698c..cc6a892 100644 +--- a/vendor/golang.org/x/net/idna/idna9.0.0.go ++++ b/vendor/golang.org/x/net/idna/idna9.0.0.go +@@ -5,7 +5,6 @@ + // license that can be found in the LICENSE file. + + //go:build !go1.10 +-// +build !go1.10 + + // Package idna implements IDNA2008 using the compatibility processing + // defined by UTS (Unicode Technical Standard) #46, which defines a standard to +diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go +index 3aaccab..40e74bb 100644 +--- a/vendor/golang.org/x/net/idna/pre_go118.go ++++ b/vendor/golang.org/x/net/idna/pre_go118.go +@@ -5,7 +5,6 @@ + // license that can be found in the LICENSE file. 
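The new Transport.idleConnTimeout precedence in the hunk above (explicit HTTP/2 value first, then the wrapped http1 transport, then no limit) can be exercised in isolation. This is an illustrative re-implementation of that lookup, not the vendored code itself:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// idleConnTimeout mirrors the precedence added by the patch.
func idleConnTimeout(h2Timeout time.Duration, t1 *http.Transport) time.Duration {
	if h2Timeout != 0 {
		return h2Timeout
	}
	if t1 != nil {
		return t1.IdleConnTimeout
	}
	return 0
}

func main() {
	t1 := &http.Transport{IdleConnTimeout: 90 * time.Second}
	fmt.Println(idleConnTimeout(0, t1))              // 1m30s: falls back to the http1 transport
	fmt.Println(idleConnTimeout(30*time.Second, t1)) // 30s: the explicit HTTP/2 value wins
}
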
+ + //go:build !go1.18 +-// +build !go1.18 + + package idna + +diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go +index d1d62ef..c6c2bf1 100644 +--- a/vendor/golang.org/x/net/idna/tables10.0.0.go ++++ b/vendor/golang.org/x/net/idna/tables10.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build go1.10 && !go1.13 +-// +build go1.10,!go1.13 + + package idna + +diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go +index 167efba..7678939 100644 +--- a/vendor/golang.org/x/net/idna/tables11.0.0.go ++++ b/vendor/golang.org/x/net/idna/tables11.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build go1.13 && !go1.14 +-// +build go1.13,!go1.14 + + package idna + +diff --git a/vendor/golang.org/x/net/idna/tables12.0.0.go b/vendor/golang.org/x/net/idna/tables12.0.0.go +index ab40f7b..0600cd2 100644 +--- a/vendor/golang.org/x/net/idna/tables12.0.0.go ++++ b/vendor/golang.org/x/net/idna/tables12.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build go1.14 && !go1.16 +-// +build go1.14,!go1.16 + + package idna + +diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go +index 66701ea..2fb768e 100644 +--- a/vendor/golang.org/x/net/idna/tables13.0.0.go ++++ b/vendor/golang.org/x/net/idna/tables13.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build go1.16 && !go1.21 +-// +build go1.16,!go1.21 + + package idna + +diff --git a/vendor/golang.org/x/net/idna/tables15.0.0.go b/vendor/golang.org/x/net/idna/tables15.0.0.go +index 4003377..5ff05fe 100644 +--- a/vendor/golang.org/x/net/idna/tables15.0.0.go ++++ b/vendor/golang.org/x/net/idna/tables15.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build go1.21 +-// +build go1.21 + + package idna + +diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go +index 4074b53..0f25e84 100644 +--- a/vendor/golang.org/x/net/idna/tables9.0.0.go ++++ b/vendor/golang.org/x/net/idna/tables9.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build !go1.10 +-// +build !go1.10 + + package idna + +diff --git a/vendor/golang.org/x/net/idna/trie12.0.0.go b/vendor/golang.org/x/net/idna/trie12.0.0.go +index bb63f90..8a75b96 100644 +--- a/vendor/golang.org/x/net/idna/trie12.0.0.go ++++ b/vendor/golang.org/x/net/idna/trie12.0.0.go +@@ -5,7 +5,6 @@ + // license that can be found in the LICENSE file. + + //go:build !go1.16 +-// +build !go1.16 + + package idna + +diff --git a/vendor/golang.org/x/net/idna/trie13.0.0.go b/vendor/golang.org/x/net/idna/trie13.0.0.go +index 7d68a8d..fa45bb9 100644 +--- a/vendor/golang.org/x/net/idna/trie13.0.0.go ++++ b/vendor/golang.org/x/net/idna/trie13.0.0.go +@@ -5,7 +5,6 @@ + // license that can be found in the LICENSE file. 
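Most of the remaining hunks in this vendor bump only delete the legacy "// +build" constraint lines: since Go 1.17 the "//go:build" form is canonical and the old syntax is redundant. For reference, a hypothetical file constrained the new way needs just the single directive:

//go:build linux && amd64

// The //go:build line above replaces the pre-Go-1.17 pair that also carried
// "// +build linux,amd64"; the file is compiled only on linux/amd64 targets.
package main

import "fmt"

func main() { fmt.Println("built only on linux/amd64") }
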
+ + //go:build go1.16 +-// +build go1.16 + + package idna + +diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +index c9b6993..73687de 100644 +--- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go ++++ b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build go1.5 +-// +build go1.5 + + package plan9 + +diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go +index 98bf56b..fb94582 100644 +--- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go ++++ b/vendor/golang.org/x/sys/plan9/pwd_plan9.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build !go1.5 +-// +build !go1.5 + + package plan9 + +diff --git a/vendor/golang.org/x/sys/plan9/race.go b/vendor/golang.org/x/sys/plan9/race.go +index 62377d2..c02d9ed 100644 +--- a/vendor/golang.org/x/sys/plan9/race.go ++++ b/vendor/golang.org/x/sys/plan9/race.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build plan9 && race +-// +build plan9,race + + package plan9 + +diff --git a/vendor/golang.org/x/sys/plan9/race0.go b/vendor/golang.org/x/sys/plan9/race0.go +index f8da308..7b15e15 100644 +--- a/vendor/golang.org/x/sys/plan9/race0.go ++++ b/vendor/golang.org/x/sys/plan9/race0.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build plan9 && !race +-// +build plan9,!race + + package plan9 + +diff --git a/vendor/golang.org/x/sys/plan9/str.go b/vendor/golang.org/x/sys/plan9/str.go +index 55fa8d0..ba3e8ff 100644 +--- a/vendor/golang.org/x/sys/plan9/str.go ++++ b/vendor/golang.org/x/sys/plan9/str.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build plan9 +-// +build plan9 + + package plan9 + +diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go +index 67e5b01..d631fd6 100644 +--- a/vendor/golang.org/x/sys/plan9/syscall.go ++++ b/vendor/golang.org/x/sys/plan9/syscall.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build plan9 +-// +build plan9 + + // Package plan9 contains an interface to the low-level operating system + // primitives. OS details vary depending on the underlying system, and +diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go +index 3f40b9b..f780d5c 100644 +--- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go ++++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build plan9 && 386 +-// +build plan9,386 + + package plan9 + +diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go +index 0e6a96a..7de6106 100644 +--- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go ++++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build plan9 && amd64 +-// +build plan9,amd64 + + package plan9 + +diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go +index 244c501..ea85780 100644 +--- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go ++++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. 
DO NOT EDIT. + + //go:build plan9 && arm +-// +build plan9,arm + + package plan9 + +diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go +index abc89c1..b0e4198 100644 +--- a/vendor/golang.org/x/sys/unix/aliases.go ++++ b/vendor/golang.org/x/sys/unix/aliases.go +@@ -2,9 +2,7 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos +-// +build go1.9 ++//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +index db9171c..269e173 100644 +--- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s ++++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build gc +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s +index e0fcd9b..a4fcef0 100644 +--- a/vendor/golang.org/x/sys/unix/asm_bsd_386.s ++++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s +@@ -3,8 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build (freebsd || netbsd || openbsd) && gc +-// +build freebsd netbsd openbsd +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s +index 2b99c34..1e63615 100644 +--- a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s ++++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s +@@ -3,8 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc +-// +build darwin dragonfly freebsd netbsd openbsd +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s +index d702d4a..6496c31 100644 +--- a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s ++++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s +@@ -3,8 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build (freebsd || netbsd || openbsd) && gc +-// +build freebsd netbsd openbsd +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s +index fe36a73..4fd1f54 100644 +--- a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s ++++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s +@@ -3,8 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build (darwin || freebsd || netbsd || openbsd) && gc +-// +build darwin freebsd netbsd openbsd +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s +index e5b9a84..42f7eb9 100644 +--- a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s ++++ b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s +@@ -3,8 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build (darwin || freebsd || netbsd || openbsd) && gc +-// +build darwin freebsd netbsd openbsd +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s +index d560019..f890266 100644 +--- a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s ++++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s +@@ -3,8 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build (darwin || freebsd || netbsd || openbsd) && gc +-// +build darwin freebsd netbsd openbsd +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s +index 8fd101d..3b47348 100644 +--- a/vendor/golang.org/x/sys/unix/asm_linux_386.s ++++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build gc +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +index 7ed38e4..67e29f3 100644 +--- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s ++++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build gc +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s +index 8ef1d51..d6ae269 100644 +--- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s ++++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build gc +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +index 98ae027..01e5e25 100644 +--- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s ++++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +@@ -3,9 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux && arm64 && gc +-// +build linux +-// +build arm64 +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +index 5653572..2abf12f 100644 +--- a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s ++++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +@@ -3,9 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux && loong64 && gc +-// +build linux +-// +build loong64 +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +index 21231d2..f84bae7 100644 +--- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s ++++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +@@ -3,9 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux && (mips64 || mips64le) && gc +-// +build linux +-// +build mips64 mips64le +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +index 6783b26..f08f628 100644 +--- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s ++++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +@@ -3,9 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build linux && (mips || mipsle) && gc +-// +build linux +-// +build mips mipsle +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +index 19d4989..bdfc024 100644 +--- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s ++++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +@@ -3,9 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux && (ppc64 || ppc64le) && gc +-// +build linux +-// +build ppc64 ppc64le +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +index e42eb81..2e8c996 100644 +--- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s ++++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +@@ -3,8 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build riscv64 && gc +-// +build riscv64 +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +index c46aab3..2c394b1 100644 +--- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s ++++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +@@ -3,9 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux && s390x && gc +-// +build linux +-// +build s390x +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +index 5e7a116..fab586a 100644 +--- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s ++++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build gc +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +index f8c5394..f949ec5 100644 +--- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s ++++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build gc +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +index 3b54e18..2f67ba8 100644 +--- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s ++++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +@@ -3,9 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build zos && s390x && gc +-// +build zos +-// +build s390x +-// +build gc + + #include "textflag.h" + +diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go +index 0b7c6ad..a086578 100644 +--- a/vendor/golang.org/x/sys/unix/cap_freebsd.go ++++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build freebsd +-// +build freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go +index 394a396..6fb7cb7 100644 +--- a/vendor/golang.org/x/sys/unix/constants.go ++++ b/vendor/golang.org/x/sys/unix/constants.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go +index 65a9985..d785134 100644 +--- a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go ++++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix && ppc +-// +build aix,ppc + + // Functions to access/create device major and minor numbers matching the + // encoding used by AIX. +diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go +index 8fc08ad..623a5e6 100644 +--- a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix && ppc64 +-// +build aix,ppc64 + + // Functions to access/create device major and minor numbers matching the + // encoding used AIX. +diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go +index a388e59..bb6a64f 100644 +--- a/vendor/golang.org/x/sys/unix/dev_zos.go ++++ b/vendor/golang.org/x/sys/unix/dev_zos.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build zos && s390x +-// +build zos,s390x + + // Functions to access/create device major and minor numbers matching the + // encoding used by z/OS. +diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go +index 2499f97..1ebf117 100644 +--- a/vendor/golang.org/x/sys/unix/dirent.go ++++ b/vendor/golang.org/x/sys/unix/dirent.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go +index a520265..1095fd3 100644 +--- a/vendor/golang.org/x/sys/unix/endian_big.go ++++ b/vendor/golang.org/x/sys/unix/endian_big.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + // + //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 +-// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go +index b0f2bc4..b9f0e27 100644 +--- a/vendor/golang.org/x/sys/unix/endian_little.go ++++ b/vendor/golang.org/x/sys/unix/endian_little.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + // + //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh +-// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go +index 29ccc4d..a96da71 100644 +--- a/vendor/golang.org/x/sys/unix/env_unix.go ++++ b/vendor/golang.org/x/sys/unix/env_unix.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + + // Unix environment variables. + +diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go +index cedaf7e..7753fdd 100644 +--- a/vendor/golang.org/x/sys/unix/epoll_zos.go ++++ b/vendor/golang.org/x/sys/unix/epoll_zos.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build zos && s390x +-// +build zos,s390x + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go +index e9b9912..6200876 100644 +--- a/vendor/golang.org/x/sys/unix/fcntl.go ++++ b/vendor/golang.org/x/sys/unix/fcntl.go +@@ -2,8 +2,7 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build dragonfly || freebsd || linux || netbsd || openbsd +-// +build dragonfly freebsd linux netbsd openbsd ++//go:build dragonfly || freebsd || linux || netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +index 29d4480..13b4acd 100644 +--- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go ++++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc) +-// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go +index a8068f9..9e83d18 100644 +--- a/vendor/golang.org/x/sys/unix/fdset.go ++++ b/vendor/golang.org/x/sys/unix/fdset.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go +index e377cc9..c8bde60 100644 +--- a/vendor/golang.org/x/sys/unix/fstatfs_zos.go ++++ b/vendor/golang.org/x/sys/unix/fstatfs_zos.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build zos && s390x +-// +build zos,s390x + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go +index b06f52d..aca5721 100644 +--- a/vendor/golang.org/x/sys/unix/gccgo.go ++++ b/vendor/golang.org/x/sys/unix/gccgo.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build gccgo && !aix && !hurd +-// +build gccgo,!aix,!hurd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c +index f98a1c5..d468b7b 100644 +--- a/vendor/golang.org/x/sys/unix/gccgo_c.c ++++ b/vendor/golang.org/x/sys/unix/gccgo_c.c +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build gccgo && !aix && !hurd +-// +build gccgo,!aix,!hurd + + #include + #include +diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +index e60e49a..972d61b 100644 +--- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go ++++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build gccgo && linux && amd64 +-// +build gccgo,linux,amd64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go +index 15721a5..848840a 100644 +--- a/vendor/golang.org/x/sys/unix/ifreq_linux.go ++++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux +-// +build linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go +index 0d12c08..dbe680e 100644 +--- a/vendor/golang.org/x/sys/unix/ioctl_linux.go ++++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go +@@ -231,3 +231,8 @@ func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) { + func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error { + return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value)) + } ++ ++// IoctlLoopConfigure configures all loop device parameters in a single step ++func IoctlLoopConfigure(fd int, value *LoopConfig) error { ++ return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value)) ++} +diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go +index 7def958..5b0759b 100644 +--- a/vendor/golang.org/x/sys/unix/ioctl_signed.go ++++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix || solaris +-// +build aix solaris + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go +index 649913d..20f470b 100644 +--- a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go ++++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd +-// +build darwin dragonfly freebsd hurd linux netbsd openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go +index cdc21bf..c8b2a75 100644 +--- a/vendor/golang.org/x/sys/unix/ioctl_zos.go ++++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build zos && s390x +-// +build zos,s390x + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh +index 47fa6a7..fdcaa97 100644 +--- a/vendor/golang.org/x/sys/unix/mkerrors.sh ++++ b/vendor/golang.org/x/sys/unix/mkerrors.sh +@@ -248,6 +248,7 @@ struct ltchars { + #include + #include + #include ++#include + #include + #include + #include +@@ -283,10 +284,6 @@ struct ltchars { + #include + #endif + +-#ifndef MSG_FASTOPEN +-#define MSG_FASTOPEN 0x20000000 +-#endif +- + #ifndef PTRACE_GETREGS + #define PTRACE_GETREGS 0xc + #endif +@@ -295,14 +292,6 @@ struct ltchars { + #define PTRACE_SETREGS 0xd + #endif + +-#ifndef SOL_NETLINK +-#define SOL_NETLINK 270 +-#endif +- +-#ifndef SOL_SMC +-#define SOL_SMC 286 +-#endif +- + #ifdef SOL_BLUETOOTH + // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h + // but it is already in bluetooth_linux.go +@@ -319,10 +308,23 @@ struct ltchars { + #undef TIPC_WAIT_FOREVER + #define TIPC_WAIT_FOREVER 0xffffffff + +-// Copied from linux/l2tp.h +-// Including linux/l2tp.h here causes conflicts between linux/in.h +-// and netinet/in.h included via net/route.h above. 
+-#define IPPROTO_L2TP 115 ++// Copied from linux/netfilter/nf_nat.h ++// Including linux/netfilter/nf_nat.h here causes conflicts between linux/in.h ++// and netinet/in.h. ++#define NF_NAT_RANGE_MAP_IPS (1 << 0) ++#define NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1) ++#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2) ++#define NF_NAT_RANGE_PERSISTENT (1 << 3) ++#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4) ++#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5) ++#define NF_NAT_RANGE_NETMAP (1 << 6) ++#define NF_NAT_RANGE_PROTO_RANDOM_ALL \ ++ (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) ++#define NF_NAT_RANGE_MASK \ ++ (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ ++ NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ ++ NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \ ++ NF_NAT_RANGE_NETMAP) + + // Copied from linux/hid.h. + // Keep in sync with the size of the referenced fields. +@@ -519,6 +521,7 @@ ccflags="$@" + $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || + $2 ~ /^LO_(KEY|NAME)_SIZE$/ || + $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || ++ $2 == "LOOP_CONFIGURE" || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || + $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || + $2 ~ /^NFC_.*_(MAX)?SIZE$/ || +@@ -560,7 +563,7 @@ ccflags="$@" + $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || + $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || + $2 ~ /^CLONE_[A-Z_]+/ || +- $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && ++ $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+|BPF_F_LINK)$/ && + $2 ~ /^(BPF|DLT)_/ || + $2 ~ /^AUDIT_/ || + $2 ~ /^(CLOCK|TIMER)_/ || +@@ -581,7 +584,7 @@ ccflags="$@" + $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || + $2 ~ /^KEYCTL_/ || + $2 ~ /^PERF_/ || +- $2 ~ /^SECCOMP_MODE_/ || ++ $2 ~ /^SECCOMP_/ || + $2 ~ /^SEEK_/ || + $2 ~ /^SCHED_/ || + $2 ~ /^SPLICE_/ || +@@ -602,6 +605,9 @@ ccflags="$@" + $2 ~ /^FSOPT_/ || + $2 ~ /^WDIO[CFS]_/ || + $2 ~ /^NFN/ || ++ $2 !~ /^NFT_META_IIFTYPE/ && ++ $2 ~ /^NFT_/ || ++ $2 ~ /^NF_NAT_/ || + $2 ~ /^XDP_/ || + $2 ~ /^RWF_/ || + $2 ~ /^(HDIO|WIN|SMART)_/ || +@@ -663,7 +669,6 @@ echo '// mkerrors.sh' "$@" + echo '// Code generated by the command above; see README.md. DO NOT EDIT.' + echo + echo "//go:build ${GOARCH} && ${GOOS}" +-echo "// +build ${GOARCH},${GOOS}" + echo + go tool cgo -godefs -- "$@" _const.go >_error.out + cat _error.out | grep -vf _error.grep | grep -vf _signal.grep +diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go +index ca05136..4b68e59 100644 +--- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go ++++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +-// +build aix darwin dragonfly freebsd openbsd solaris + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go +index fa93d0a..fd45fe5 100644 +--- a/vendor/golang.org/x/sys/unix/mremap.go ++++ b/vendor/golang.org/x/sys/unix/mremap.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build linux || netbsd +-// +build linux netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go +index 53f1b4c..4d0a343 100644 +--- a/vendor/golang.org/x/sys/unix/pagesize_unix.go ++++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + + // For Unix, get the pagesize from the runtime. + +diff --git a/vendor/golang.org/x/sys/unix/pledge_openbsd.go b/vendor/golang.org/x/sys/unix/pledge_openbsd.go +index eb48294..6a09af5 100644 +--- a/vendor/golang.org/x/sys/unix/pledge_openbsd.go ++++ b/vendor/golang.org/x/sys/unix/pledge_openbsd.go +@@ -8,54 +8,31 @@ import ( + "errors" + "fmt" + "strconv" +- "syscall" +- "unsafe" + ) + + // Pledge implements the pledge syscall. + // +-// The pledge syscall does not accept execpromises on OpenBSD releases +-// before 6.3. +-// +-// execpromises must be empty when Pledge is called on OpenBSD +-// releases predating 6.3, otherwise an error will be returned. ++// This changes both the promises and execpromises; use PledgePromises or ++// PledgeExecpromises to only change the promises or execpromises ++// respectively. + // + // For more information see pledge(2). + func Pledge(promises, execpromises string) error { +- maj, min, err := majmin() +- if err != nil { ++ if err := pledgeAvailable(); err != nil { + return err + } + +- err = pledgeAvailable(maj, min, execpromises) ++ pptr, err := BytePtrFromString(promises) + if err != nil { + return err + } + +- pptr, err := syscall.BytePtrFromString(promises) ++ exptr, err := BytePtrFromString(execpromises) + if err != nil { + return err + } + +- // This variable will hold either a nil unsafe.Pointer or +- // an unsafe.Pointer to a string (execpromises). +- var expr unsafe.Pointer +- +- // If we're running on OpenBSD > 6.2, pass execpromises to the syscall. +- if maj > 6 || (maj == 6 && min > 2) { +- exptr, err := syscall.BytePtrFromString(execpromises) +- if err != nil { +- return err +- } +- expr = unsafe.Pointer(exptr) +- } +- +- _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) +- if e != 0 { +- return e +- } +- +- return nil ++ return pledge(pptr, exptr) + } + + // PledgePromises implements the pledge syscall. +@@ -64,30 +41,16 @@ func Pledge(promises, execpromises string) error { + // + // For more information see pledge(2). + func PledgePromises(promises string) error { +- maj, min, err := majmin() +- if err != nil { +- return err +- } +- +- err = pledgeAvailable(maj, min, "") +- if err != nil { ++ if err := pledgeAvailable(); err != nil { + return err + } + +- // This variable holds the execpromises and is always nil. +- var expr unsafe.Pointer +- +- pptr, err := syscall.BytePtrFromString(promises) ++ pptr, err := BytePtrFromString(promises) + if err != nil { + return err + } + +- _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) +- if e != 0 { +- return e +- } +- +- return nil ++ return pledge(pptr, nil) + } + + // PledgeExecpromises implements the pledge syscall. +@@ -96,30 +59,16 @@ func PledgePromises(promises string) error { + // + // For more information see pledge(2). 
+ func PledgeExecpromises(execpromises string) error { +- maj, min, err := majmin() +- if err != nil { ++ if err := pledgeAvailable(); err != nil { + return err + } + +- err = pledgeAvailable(maj, min, execpromises) ++ exptr, err := BytePtrFromString(execpromises) + if err != nil { + return err + } + +- // This variable holds the promises and is always nil. +- var pptr unsafe.Pointer +- +- exptr, err := syscall.BytePtrFromString(execpromises) +- if err != nil { +- return err +- } +- +- _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(pptr), uintptr(unsafe.Pointer(exptr)), 0) +- if e != 0 { +- return e +- } +- +- return nil ++ return pledge(nil, exptr) + } + + // majmin returns major and minor version number for an OpenBSD system. +@@ -147,16 +96,15 @@ func majmin() (major int, minor int, err error) { + + // pledgeAvailable checks for availability of the pledge(2) syscall + // based on the running OpenBSD version. +-func pledgeAvailable(maj, min int, execpromises string) error { +- // If OpenBSD <= 5.9, pledge is not available. +- if (maj == 5 && min != 9) || maj < 5 { +- return fmt.Errorf("pledge syscall is not available on OpenBSD %d.%d", maj, min) ++func pledgeAvailable() error { ++ maj, min, err := majmin() ++ if err != nil { ++ return err + } + +- // If OpenBSD <= 6.2 and execpromises is not empty, +- // return an error - execpromises is not available before 6.3 +- if (maj < 6 || (maj == 6 && min <= 2)) && execpromises != "" { +- return fmt.Errorf("cannot use execpromises on OpenBSD %d.%d", maj, min) ++ // Require OpenBSD 6.4 as a minimum. ++ if maj < 6 || (maj == 6 && min <= 3) { ++ return fmt.Errorf("cannot call Pledge on OpenBSD %d.%d", maj, min) + } + + return nil +diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go +index 463c3ef..3f0975f 100644 +--- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go ++++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build darwin && !ios +-// +build darwin,!ios + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go +index ed0509a..a4d35db 100644 +--- a/vendor/golang.org/x/sys/unix/ptrace_ios.go ++++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build ios +-// +build ios + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go +index 6f6c5fe..714d2aa 100644 +--- a/vendor/golang.org/x/sys/unix/race.go ++++ b/vendor/golang.org/x/sys/unix/race.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build (darwin && race) || (linux && race) || (freebsd && race) +-// +build darwin,race linux,race freebsd,race + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go +index 706e132..4a9f663 100644 +--- a/vendor/golang.org/x/sys/unix/race0.go ++++ b/vendor/golang.org/x/sys/unix/race0.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
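The rewritten pledge wrappers above drop the pre-6.3 compatibility paths and simply require OpenBSD 6.4 or newer before passing the promise strings to pledge(2). A hedged usage sketch (OpenBSD-only; the promise string is chosen purely for illustration):

//go:build openbsd

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// With the rewritten wrappers, this returns an error on OpenBSD releases
	// older than 6.4 instead of silently taking a degraded path.
	if err := unix.PledgePromises("stdio rpath"); err != nil {
		log.Fatal(err)
	}
	log.Println("process restricted to the stdio and rpath promises")
}
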
+ + //go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos +-// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/vendor/golang.org/x/sys/unix/readdirent_getdents.go +index 4d62575..dbd2b6c 100644 +--- a/vendor/golang.org/x/sys/unix/readdirent_getdents.go ++++ b/vendor/golang.org/x/sys/unix/readdirent_getdents.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd +-// +build aix dragonfly freebsd linux netbsd openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +index 2a4ba47..130398b 100644 +--- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go ++++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build darwin +-// +build darwin + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +index 3865943..c3a62db 100644 +--- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go ++++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + + // Socket control messages + +diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +index 0840fe4..4a1eab3 100644 +--- a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go ++++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos +-// +build aix darwin freebsd linux netbsd openbsd solaris zos + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go +index 63e8c83..5ea74da 100644 +--- a/vendor/golang.org/x/sys/unix/syscall.go ++++ b/vendor/golang.org/x/sys/unix/syscall.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + + // Package unix contains an interface to the low-level operating system + // primitives. OS details vary depending on the underlying system, and +diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go +index e94e6cd..67ce6ce 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_aix.go ++++ b/vendor/golang.org/x/sys/unix/syscall_aix.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix +-// +build aix + + // Aix system calls. + // This file is compiled as ordinary Go code, +@@ -107,7 +106,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { + if n > 0 { + sl += _Socklen(n) + 1 + } +- if sa.raw.Path[0] == '@' { ++ if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { ++ // Check sl > 3 so we don't change unnamed socket behavior. 
+ sa.raw.Path[0] = 0 + // Don't count trailing NUL for abstract address. + sl-- +diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +index f2871fa..1fdaa47 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go ++++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix && ppc +-// +build aix,ppc + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +index 75718ec..c87f9a9 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix && ppc64 +-// +build aix,ppc64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go +index 4217de5..a00c3e5 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_bsd.go ++++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build darwin || dragonfly || freebsd || netbsd || openbsd +-// +build darwin dragonfly freebsd netbsd openbsd + + // BSD system call wrappers shared by *BSD based systems + // including OS X (Darwin) and FreeBSD. Like the other +@@ -317,7 +316,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { + if err != nil { + return "", err + } +- return string(buf[:vallen-1]), nil ++ return ByteSliceToString(buf[:vallen]), nil + } + + //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +index b37310c..0eaecf5 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build amd64 && darwin +-// +build amd64,darwin + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +index d51ec99..f36c670 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build arm64 && darwin +-// +build arm64,darwin + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +index 53c9664..2f0fa76 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go ++++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +@@ -2,8 +2,7 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build darwin && go1.12 +-// +build darwin,go1.12 ++//go:build darwin + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +index 4e2d321..14bab6b 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build amd64 && dragonfly +-// +build amd64,dragonfly + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go +index 64d1bb4..2b57e0f 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go ++++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go +@@ -13,6 +13,7 @@ + package unix + + import ( ++ "errors" + "sync" + "unsafe" + ) +@@ -169,25 +170,26 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { + func Uname(uname *Utsname) error { + mib := []_C_int{CTL_KERN, KERN_OSTYPE} + n := unsafe.Sizeof(uname.Sysname) +- if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { ++ // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. ++ if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { + return err + } + + mib = []_C_int{CTL_KERN, KERN_HOSTNAME} + n = unsafe.Sizeof(uname.Nodename) +- if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { ++ if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { + return err + } + + mib = []_C_int{CTL_KERN, KERN_OSRELEASE} + n = unsafe.Sizeof(uname.Release) +- if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { ++ if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { + return err + } + + mib = []_C_int{CTL_KERN, KERN_VERSION} + n = unsafe.Sizeof(uname.Version) +- if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { ++ if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { + return err + } + +@@ -205,7 +207,7 @@ func Uname(uname *Utsname) error { + + mib = []_C_int{CTL_HW, HW_MACHINE} + n = unsafe.Sizeof(uname.Machine) +- if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { ++ if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { + return err + } + +diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +index b8da510..3967bca 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go ++++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build 386 && freebsd +-// +build 386,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +index 47155c4..eff19ad 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build amd64 && freebsd +-// +build amd64,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +index 0893209..4f24b51 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build arm && freebsd +-// +build arm,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +index d151a0d..ac30759 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build arm64 && freebsd +-// +build arm64,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +index d5cd64b..aab725c 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build riscv64 && freebsd +-// +build riscv64,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go +index 381fd46..ba46651 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_hurd.go ++++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build hurd +-// +build hurd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go +index 7cf54a3..df89f9e 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go ++++ b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build 386 && hurd +-// +build 386,hurd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go +index 87db5a6..a863f70 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_illumos.go ++++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go +@@ -5,7 +5,6 @@ + // illumos system calls not present on Solaris. + + //go:build amd64 && illumos +-// +build amd64,illumos + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go +index fb4e502..5682e26 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux.go +@@ -61,15 +61,23 @@ func FanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname string) ( + } + + //sys fchmodat(dirfd int, path string, mode uint32) (err error) +- +-func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { +- // Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior +- // and check the flags. Otherwise the mode would be applied to the symlink +- // destination which is not what the user expects. +- if flags&^AT_SYMLINK_NOFOLLOW != 0 { +- return EINVAL +- } else if flags&AT_SYMLINK_NOFOLLOW != 0 { +- return EOPNOTSUPP ++//sys fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) ++ ++func Fchmodat(dirfd int, path string, mode uint32, flags int) error { ++ // Linux fchmodat doesn't support the flags parameter, but fchmodat2 does. ++ // Try fchmodat2 if flags are specified. ++ if flags != 0 { ++ err := fchmodat2(dirfd, path, mode, flags) ++ if err == ENOSYS { ++ // fchmodat2 isn't available. If the flags are known to be valid, ++ // return EOPNOTSUPP to indicate that fchmodat doesn't support them. 
++ if flags&^(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { ++ return EINVAL ++ } else if flags&(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { ++ return EOPNOTSUPP ++ } ++ } ++ return err + } + return fchmodat(dirfd, path, mode) + } +@@ -417,7 +425,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { + if n > 0 { + sl += _Socklen(n) + 1 + } +- if sa.raw.Path[0] == '@' { ++ if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { ++ // Check sl > 3 so we don't change unnamed socket behavior. + sa.raw.Path[0] = 0 + // Don't count trailing NUL for abstract address. + sl-- +@@ -1301,7 +1310,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { + return "", err + } + } +- return string(buf[:vallen-1]), nil ++ return ByteSliceToString(buf[:vallen]), nil + } + + func GetsockoptTpacketStats(fd, level, opt int) (*TpacketStats, error) { +@@ -1840,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { + //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) + //sys Fsopen(fsName string, flags int) (fd int, err error) + //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) ++ ++//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) ++ ++func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { ++ var keyp *byte ++ if keyp, err = BytePtrFromString(key); err != nil { ++ return ++ } ++ return fsconfig(fd, cmd, keyp, value, aux) ++} ++ ++// FsconfigSetFlag is equivalent to fsconfig(2) called ++// with cmd == FSCONFIG_SET_FLAG. ++// ++// fd is the filesystem context to act upon. ++// key the parameter key to set. ++func FsconfigSetFlag(fd int, key string) (err error) { ++ return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) ++} ++ ++// FsconfigSetString is equivalent to fsconfig(2) called ++// with cmd == FSCONFIG_SET_STRING. ++// ++// fd is the filesystem context to act upon. ++// key the parameter key to set. ++// value is the parameter value to set. ++func FsconfigSetString(fd int, key string, value string) (err error) { ++ var valuep *byte ++ if valuep, err = BytePtrFromString(value); err != nil { ++ return ++ } ++ return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) ++} ++ ++// FsconfigSetBinary is equivalent to fsconfig(2) called ++// with cmd == FSCONFIG_SET_BINARY. ++// ++// fd is the filesystem context to act upon. ++// key the parameter key to set. ++// value is the parameter value to set. ++func FsconfigSetBinary(fd int, key string, value []byte) (err error) { ++ if len(value) == 0 { ++ return EINVAL ++ } ++ return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) ++} ++ ++// FsconfigSetPath is equivalent to fsconfig(2) called ++// with cmd == FSCONFIG_SET_PATH. ++// ++// fd is the filesystem context to act upon. ++// key the parameter key to set. ++// path is a non-empty path for specified key. ++// atfd is a file descriptor at which to start lookup from or AT_FDCWD. ++func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { ++ var valuep *byte ++ if valuep, err = BytePtrFromString(path); err != nil { ++ return ++ } ++ return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) ++} ++ ++// FsconfigSetPathEmpty is equivalent to fsconfig(2) called ++// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as ++// FconfigSetPath but with AT_PATH_EMPTY implied. 
++func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { ++ var valuep *byte ++ if valuep, err = BytePtrFromString(path); err != nil { ++ return ++ } ++ return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) ++} ++ ++// FsconfigSetFd is equivalent to fsconfig(2) called ++// with cmd == FSCONFIG_SET_FD. ++// ++// fd is the filesystem context to act upon. ++// key the parameter key to set. ++// value is a file descriptor to be assigned to specified key. ++func FsconfigSetFd(fd int, key string, value int) (err error) { ++ return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) ++} ++ ++// FsconfigCreate is equivalent to fsconfig(2) called ++// with cmd == FSCONFIG_CMD_CREATE. ++// ++// fd is the filesystem context to act upon. ++func FsconfigCreate(fd int) (err error) { ++ return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) ++} ++ ++// FsconfigReconfigure is equivalent to fsconfig(2) called ++// with cmd == FSCONFIG_CMD_RECONFIGURE. ++// ++// fd is the filesystem context to act upon. ++func FsconfigReconfigure(fd int) (err error) { ++ return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) ++} ++ + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 + //sysnb Getpgid(pid int) (pgid int, err error) + +@@ -2482,3 +2590,5 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { + } + return attr, nil + } ++ ++//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go +index c7d9945..506dafa 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build 386 && linux +-// +build 386,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go +index 08086ac..38d5564 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go +@@ -3,8 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux && (386 || amd64 || mips || mipsle || mips64 || mipsle || ppc64 || ppc64le || ppc || s390x || sparc64) +-// +build linux +-// +build 386 amd64 mips mipsle mips64 mipsle ppc64 ppc64le ppc s390x sparc64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +index 70601ce..d557cf8 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build amd64 && linux +-// +build amd64,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +index 8b0f0f3..facdb83 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build amd64 && linux && gc +-// +build amd64,linux,gc + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +index da29864..cd2dd79 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build arm && linux +-// +build arm,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +index f526668..cf2ee6c 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build arm64 && linux +-// +build arm64,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +index 2b1168d..ffc4c2b 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux && gc +-// +build linux,gc + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +index 9843fb4..9ebfdcf 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux && gc && 386 +-// +build linux,gc,386 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +index a6008fc..5f2b57c 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build arm && gc && linux +-// +build arm,gc,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go +index 7740af2..d1a3ad8 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux && gccgo && 386 +-// +build linux,gccgo,386 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go +index e16a122..f2f6742 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux && gccgo && arm +-// +build linux,gccgo,arm + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +index f6ab02e..3d0e984 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build loong64 && linux +-// +build loong64,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +index 93fe59d..70963a9 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +@@ -3,8 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux && (mips64 || mips64le) +-// +build linux +-// +build mips64 mips64le + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +index aae7f0f..c218ebd 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +@@ -3,8 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux && (mips || mipsle) +-// +build linux +-// +build mips mipsle + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +index 66eff19..e6c4850 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux && ppc +-// +build linux,ppc + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +index 806aa25..7286a9a 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +@@ -3,8 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux && (ppc64 || ppc64le) +-// +build linux +-// +build ppc64 ppc64le + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +index 5e6ceee..6f5a288 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build riscv64 && linux +-// +build riscv64,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +index 2f89e8f..66f3121 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build s390x && linux +-// +build s390x,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +index 7ca064a..11d1f16 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build sparc64 && linux +-// +build sparc64,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +index 5199d28..7a5eb57 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go ++++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build 386 && netbsd +-// +build 386,netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +index 70a9c52..62d8957 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build amd64 && netbsd +-// +build amd64,netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +index 3eb5942..ce6a068 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build arm && netbsd +-// +build arm,netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +index fc6ccfd..d46d689 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build arm64 && netbsd +-// +build arm64,netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go +index 6f34479..b25343c 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go ++++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go +@@ -137,18 +137,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e + } + + func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { +- var _p0 unsafe.Pointer ++ var bufptr *Statfs_t + var bufsize uintptr + if len(buf) > 0 { +- _p0 = unsafe.Pointer(&buf[0]) ++ bufptr = &buf[0] + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) + } +- r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) +- n = int(r0) +- if e1 != 0 { +- err = e1 +- } +- return ++ return getfsstat(bufptr, bufsize, flags) + } + + //sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) +@@ -171,6 +166,20 @@ func Getresgid() (rgid, egid, sgid int) { + + //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + ++//sys fcntl(fd int, cmd int, arg int) (n int, err error) ++//sys fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) = SYS_FCNTL ++ ++// FcntlInt performs a fcntl syscall on fd with the provided command and argument. ++func FcntlInt(fd uintptr, cmd, arg int) (int, error) { ++ return fcntl(int(fd), cmd, arg) ++} ++ ++// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. 
++func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { ++ _, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk)) ++ return err ++} ++ + //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) + + func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { +@@ -326,4 +335,7 @@ func Uname(uname *Utsname) error { + //sys write(fd int, p []byte) (n int, err error) + //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) + //sys munmap(addr uintptr, length uintptr) (err error) ++//sys getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) + //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) ++//sys pledge(promises *byte, execpromises *byte) (err error) ++//sys unveil(path *byte, flags *byte) (err error) +diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +index 6baabcd..9ddc89f 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go ++++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build 386 && openbsd +-// +build 386,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +index bab2536..70a3c96 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build amd64 && openbsd +-// +build amd64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +index 8eed3c4..265caa8 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build arm && openbsd +-// +build arm,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +index 483dde9..ac4fda1 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build arm64 && openbsd +-// +build arm64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go +index 04aa43f..0a451e6 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go ++++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build openbsd +-// +build openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go +index c279613..30a308c 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build ppc64 && openbsd +-// +build ppc64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go +index 23199a7..ea95433 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build riscv64 && openbsd +-// +build riscv64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go +index b99cfa1..21974af 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go ++++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go +@@ -128,7 +128,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { + if n > 0 { + sl += _Socklen(n) + 1 + } +- if sa.raw.Path[0] == '@' { ++ if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { ++ // Check sl > 3 so we don't change unnamed socket behavior. + sa.raw.Path[0] = 0 + // Don't count trailing NUL for abstract address. + sl-- +@@ -157,7 +158,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { + if err != nil { + return "", err + } +- return string(buf[:vallen-1]), nil ++ return ByteSliceToString(buf[:vallen]), nil + } + + const ImplementsGetwd = true +diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +index 0bd25ef..e02d8ce 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go ++++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build amd64 && solaris +-// +build amd64,solaris + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go +index f6eda27..77081de 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_unix.go ++++ b/vendor/golang.org/x/sys/unix/syscall_unix.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +index b6919ca..05c95bc 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go ++++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +@@ -3,8 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc +-// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris +-// +build gc + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +index f6f707a..23f39b7 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go ++++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +@@ -3,9 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build linux && (ppc64le || ppc64) && gc +-// +build linux +-// +build ppc64le ppc64 +-// +build gc + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +index 4596d04..b473038 100644 +--- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go ++++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build zos && s390x +-// +build zos,s390x + + package unix + +@@ -1105,7 +1104,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { + return "", err + } + +- return string(buf[:vallen-1]), nil ++ return ByteSliceToString(buf[:vallen]), nil + } + + func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { +diff --git a/vendor/golang.org/x/sys/unix/sysvshm_linux.go b/vendor/golang.org/x/sys/unix/sysvshm_linux.go +index 2c3a443..4fcd38d 100644 +--- a/vendor/golang.org/x/sys/unix/sysvshm_linux.go ++++ b/vendor/golang.org/x/sys/unix/sysvshm_linux.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build linux +-// +build linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go +index 5bb41d1..79a84f1 100644 +--- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go ++++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build (darwin && !ios) || linux +-// +build darwin,!ios linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +index 71bddef..9eb0db6 100644 +--- a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go ++++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build darwin && !ios +-// +build darwin,!ios + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go +index 616b1b2..7997b19 100644 +--- a/vendor/golang.org/x/sys/unix/timestruct.go ++++ b/vendor/golang.org/x/sys/unix/timestruct.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/unveil_openbsd.go b/vendor/golang.org/x/sys/unix/unveil_openbsd.go +index 168d5ae..cb7e598 100644 +--- a/vendor/golang.org/x/sys/unix/unveil_openbsd.go ++++ b/vendor/golang.org/x/sys/unix/unveil_openbsd.go +@@ -4,39 +4,48 @@ + + package unix + +-import ( +- "syscall" +- "unsafe" +-) ++import "fmt" + + // Unveil implements the unveil syscall. + // For more information see unveil(2). + // Note that the special case of blocking further + // unveil calls is handled by UnveilBlock. 
+ func Unveil(path string, flags string) error { +- pathPtr, err := syscall.BytePtrFromString(path) +- if err != nil { ++ if err := supportsUnveil(); err != nil { + return err + } +- flagsPtr, err := syscall.BytePtrFromString(flags) ++ pathPtr, err := BytePtrFromString(path) + if err != nil { + return err + } +- _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(flagsPtr)), 0) +- if e != 0 { +- return e ++ flagsPtr, err := BytePtrFromString(flags) ++ if err != nil { ++ return err + } +- return nil ++ return unveil(pathPtr, flagsPtr) + } + + // UnveilBlock blocks future unveil calls. + // For more information see unveil(2). + func UnveilBlock() error { +- // Both pointers must be nil. +- var pathUnsafe, flagsUnsafe unsafe.Pointer +- _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(pathUnsafe), uintptr(flagsUnsafe), 0) +- if e != 0 { +- return e ++ if err := supportsUnveil(); err != nil { ++ return err + } ++ return unveil(nil, nil) ++} ++ ++// supportsUnveil checks for availability of the unveil(2) system call based ++// on the running OpenBSD version. ++func supportsUnveil() error { ++ maj, min, err := majmin() ++ if err != nil { ++ return err ++ } ++ ++ // unveil is not available before 6.4 ++ if maj < 6 || (maj == 6 && min <= 3) { ++ return fmt.Errorf("cannot call Unveil on OpenBSD %d.%d", maj, min) ++ } ++ + return nil + } +diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go +index f5f8e9f..e168793 100644 +--- a/vendor/golang.org/x/sys/unix/xattr_bsd.go ++++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build freebsd || netbsd +-// +build freebsd netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +index ca9799b..2fb219d 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc && aix +-// +build ppc,aix + + // Created by cgo -godefs - DO NOT EDIT + // cgo -godefs -- -maix32 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +index 200c8c2..b0e6f5c 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc64 && aix +-// +build ppc64,aix + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -maix64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +index 1430076..e40fa85 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && darwin +-// +build amd64,darwin + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+ // cgo -godefs -- -m64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +index ab044a7..bb02aa6 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm64 && darwin +-// +build arm64,darwin + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -m64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +index 17bba0e..c0e0f86 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && dragonfly +-// +build amd64,dragonfly + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -m64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +index f8c2c51..6c69239 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build 386 && freebsd +-// +build 386,freebsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -m32 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +index 96310c3..dd9163f 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && freebsd +-// +build amd64,freebsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -m64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +index 777b69d..493a2a7 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm && freebsd +-// +build arm,freebsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +index c557ac2..8b437b3 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm64 && freebsd +-// +build arm64,freebsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -m64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go +index 341b4d9..67c02dd 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build riscv64 && freebsd +-// +build riscv64,freebsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -m64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go +index f9c7f47..36bf839 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go +@@ -1,7 +1,6 @@ + // Code generated by mkmerge; DO NOT EDIT. + + //go:build linux +-// +build linux + + package unix + +@@ -481,10 +480,13 @@ const ( + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 + BPF_FS_MAGIC = 0xcafe4a11 ++ BPF_F_AFTER = 0x10 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 +- BPF_F_KPROBE_MULTI_RETURN = 0x1 ++ BPF_F_BEFORE = 0x8 ++ BPF_F_ID = 0x20 ++ BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_REPLACE = 0x4 + BPF_F_SLEEPABLE = 0x10 +@@ -521,6 +523,7 @@ const ( + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 ++ BPF_MEMSX = 0x80 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 +@@ -776,6 +779,8 @@ const ( + DEVLINK_GENL_MCGRP_CONFIG_NAME = "config" + DEVLINK_GENL_NAME = "devlink" + DEVLINK_GENL_VERSION = 0x1 ++ DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO = 0x4 ++ DEVLINK_PORT_FN_CAP_IPSEC_PACKET = 0x8 + DEVLINK_PORT_FN_CAP_MIGRATABLE = 0x2 + DEVLINK_PORT_FN_CAP_ROCE = 0x1 + DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 +@@ -1698,6 +1703,7 @@ const ( + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 ++ KEXEC_UPDATE_ELFCOREHDR = 0x4 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 +@@ -1779,6 +1785,8 @@ const ( + LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 + LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 + LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 ++ LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 ++ LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 + LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef +@@ -1795,6 +1803,7 @@ const ( + LOCK_SH = 0x1 + LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 ++ LOOP_CONFIGURE = 0x4c0a + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 +@@ -2120,6 +2129,60 @@ const ( + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 ++ NFT_CHAIN_FLAGS = 0x7 ++ NFT_CHAIN_MAXNAMELEN = 0x100 ++ NFT_CT_MAX = 0x17 ++ NFT_DATA_RESERVED_MASK = 0xffffff00 ++ NFT_DATA_VALUE_MAXLEN = 0x40 ++ NFT_EXTHDR_OP_MAX = 0x4 ++ NFT_FIB_RESULT_MAX = 0x3 ++ NFT_INNER_MASK = 0xf ++ NFT_LOGLEVEL_MAX = 0x8 ++ NFT_NAME_MAXLEN = 0x100 ++ NFT_NG_MAX = 0x1 ++ NFT_OBJECT_CONNLIMIT = 0x5 ++ NFT_OBJECT_COUNTER = 0x1 ++ NFT_OBJECT_CT_EXPECT = 0x9 ++ NFT_OBJECT_CT_HELPER = 0x3 ++ NFT_OBJECT_CT_TIMEOUT = 0x7 ++ NFT_OBJECT_LIMIT = 0x4 ++ NFT_OBJECT_MAX = 0xa ++ NFT_OBJECT_QUOTA = 0x2 ++ NFT_OBJECT_SECMARK = 0x8 ++ NFT_OBJECT_SYNPROXY = 0xa ++ NFT_OBJECT_TUNNEL = 0x6 ++ NFT_OBJECT_UNSPEC = 0x0 ++ NFT_OBJ_MAXNAMELEN = 0x100 ++ NFT_OSF_MAXGENRELEN = 0x10 ++ NFT_QUEUE_FLAG_BYPASS = 0x1 ++ NFT_QUEUE_FLAG_CPU_FANOUT = 0x2 ++ NFT_QUEUE_FLAG_MASK = 0x3 ++ NFT_REG32_COUNT = 0x10 ++ NFT_REG32_SIZE = 0x4 ++ NFT_REG_MAX = 0x4 ++ NFT_REG_SIZE = 0x10 ++ NFT_REJECT_ICMPX_MAX = 0x3 ++ NFT_RT_MAX = 0x4 ++ NFT_SECMARK_CTX_MAXLEN = 0x100 ++ NFT_SET_MAXNAMELEN = 0x100 ++ NFT_SOCKET_MAX = 0x3 ++ NFT_TABLE_F_MASK = 0x3 ++ NFT_TABLE_MAXNAMELEN = 0x100 ++ NFT_TRACETYPE_MAX = 0x3 ++ NFT_TUNNEL_F_MASK = 0x7 ++ NFT_TUNNEL_MAX = 0x1 ++ NFT_TUNNEL_MODE_MAX = 0x2 ++ NFT_USERDATA_MAXLEN = 0x100 ++ NFT_XFRM_KEY_MAX = 0x6 ++ 
NF_NAT_RANGE_MAP_IPS = 0x1 ++ NF_NAT_RANGE_MASK = 0x7f ++ NF_NAT_RANGE_NETMAP = 0x40 ++ NF_NAT_RANGE_PERSISTENT = 0x8 ++ NF_NAT_RANGE_PROTO_OFFSET = 0x20 ++ NF_NAT_RANGE_PROTO_RANDOM = 0x4 ++ NF_NAT_RANGE_PROTO_RANDOM_ALL = 0x14 ++ NF_NAT_RANGE_PROTO_RANDOM_FULLY = 0x10 ++ NF_NAT_RANGE_PROTO_SPECIFIED = 0x2 + NILFS_SUPER_MAGIC = 0x3434 + NL0 = 0x0 + NL1 = 0x100 +@@ -2275,6 +2338,7 @@ const ( + PERF_MEM_LVLNUM_PMEM = 0xe + PERF_MEM_LVLNUM_RAM = 0xd + PERF_MEM_LVLNUM_SHIFT = 0x21 ++ PERF_MEM_LVLNUM_UNC = 0x8 + PERF_MEM_LVL_HIT = 0x2 + PERF_MEM_LVL_IO = 0x1000 + PERF_MEM_LVL_L1 = 0x8 +@@ -2403,6 +2467,7 @@ const ( + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 ++ PR_MDWE_NO_INHERIT = 0x2 + PR_MDWE_REFUSE_EXEC_GAIN = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b +@@ -2607,8 +2672,9 @@ const ( + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 +- RTAX_FEATURE_MASK = 0xf ++ RTAX_FEATURE_MASK = 0x1f + RTAX_FEATURE_SACK = 0x2 ++ RTAX_FEATURE_TCP_USEC_TS = 0x10 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb +@@ -2851,9 +2917,38 @@ const ( + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SC_LOG_FLUSH = 0x100000 ++ SECCOMP_ADDFD_FLAG_SEND = 0x2 ++ SECCOMP_ADDFD_FLAG_SETFD = 0x1 ++ SECCOMP_FILTER_FLAG_LOG = 0x2 ++ SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 ++ SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 ++ SECCOMP_FILTER_FLAG_TSYNC = 0x1 ++ SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 ++ SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 ++ SECCOMP_GET_ACTION_AVAIL = 0x2 ++ SECCOMP_GET_NOTIF_SIZES = 0x3 ++ SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 ++ SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 ++ SECCOMP_IOC_MAGIC = '!' + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 ++ SECCOMP_RET_ACTION = 0x7fff0000 ++ SECCOMP_RET_ACTION_FULL = 0xffff0000 ++ SECCOMP_RET_ALLOW = 0x7fff0000 ++ SECCOMP_RET_DATA = 0xffff ++ SECCOMP_RET_ERRNO = 0x50000 ++ SECCOMP_RET_KILL = 0x0 ++ SECCOMP_RET_KILL_PROCESS = 0x80000000 ++ SECCOMP_RET_KILL_THREAD = 0x0 ++ SECCOMP_RET_LOG = 0x7ffc0000 ++ SECCOMP_RET_TRACE = 0x7ff00000 ++ SECCOMP_RET_TRAP = 0x30000 ++ SECCOMP_RET_USER_NOTIF = 0x7fc00000 ++ SECCOMP_SET_MODE_FILTER = 0x1 ++ SECCOMP_SET_MODE_STRICT = 0x0 ++ SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 ++ SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 + SECRETMEM_MAGIC = 0x5345434d + SECURITYFS_MAGIC = 0x73636673 + SEEK_CUR = 0x1 +@@ -3013,6 +3108,7 @@ const ( + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_UDP = 0x11 ++ SOL_VSOCK = 0x11f + SOL_X25 = 0x106 + SOL_XDP = 0x11b + SOMAXCONN = 0x1000 +@@ -3461,6 +3557,7 @@ const ( + XDP_PACKET_HEADROOM = 0x100 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 ++ XDP_PKT_CONTD = 0x1 + XDP_RING_NEED_WAKEUP = 0x1 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 +@@ -3473,6 +3570,7 @@ const ( + XDP_UMEM_REG = 0x4 + XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 + XDP_USE_NEED_WAKEUP = 0x8 ++ XDP_USE_SG = 0x10 + XDP_ZEROCOPY = 0x4 + XENFS_SUPER_MAGIC = 0xabba1974 + XFS_SUPER_MAGIC = 0x58465342 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +index 30aee00..42ff8c3 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build 386 && linux +-// +build 386,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+ // cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go +@@ -282,6 +281,9 @@ const ( + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +index 8ebfa51..dca4360 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && linux +-// +build amd64,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go +@@ -283,6 +282,9 @@ const ( + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +index 271a21c..5cca668 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm && linux +-// +build arm,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go +@@ -289,6 +288,9 @@ const ( + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +index 910c330..d8cae6d 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm64 && linux +-// +build arm64,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go +@@ -279,6 +278,9 @@ const ( + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +index a640798..28e39af 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build loong64 && linux +-// +build loong64,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+ // cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go +@@ -119,6 +118,7 @@ const ( + IXOFF = 0x1000 + IXON = 0x400 + LASX_CTX_MAGIC = 0x41535801 ++ LBT_CTX_MAGIC = 0x42540001 + LSX_CTX_MAGIC = 0x53580001 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 +@@ -275,6 +275,9 @@ const ( + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +index 0d5925d..cd66e92 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build mips && linux +-// +build mips,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go +@@ -282,6 +281,9 @@ const ( + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x80 + SIOCATMARK = 0x40047307 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +index d72a00e..c1595eb 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build mips64 && linux +-// +build mips64,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go +@@ -282,6 +281,9 @@ const ( + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x80 + SIOCATMARK = 0x40047307 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +index 02ba129..ee9456b 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build mips64le && linux +-// +build mips64le,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go +@@ -282,6 +281,9 @@ const ( + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x80 + SIOCATMARK = 0x40047307 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +index 8daa6dd..8cfca81 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build mipsle && linux +-// +build mipsle,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go +@@ -282,6 +281,9 @@ const ( + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x80 + SIOCATMARK = 0x40047307 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +index 63c8fa2..60b0deb 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc && linux +-// +build ppc,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go +@@ -337,6 +336,9 @@ const ( + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +index 930799e..f90aa72 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc64 && linux +-// +build ppc64,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go +@@ -341,6 +340,9 @@ const ( + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +index 8605a7d..ba9e015 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc64le && linux +-// +build ppc64le,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go +@@ -341,6 +340,9 @@ const ( + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +index 95a016f..07cdfd6 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build riscv64 && linux +-// +build riscv64,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+ // cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go +@@ -228,6 +227,9 @@ const ( + PPPIOCUNBRIDGECHAN = 0x7434 + PPPIOCXFERUNIT = 0x744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff ++ PTRACE_GETFDPIC = 0x21 ++ PTRACE_GETFDPIC_EXEC = 0x0 ++ PTRACE_GETFDPIC_INTERP = 0x1 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 +@@ -270,6 +272,9 @@ const ( + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +index 1ae0108..2f1dd21 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build s390x && linux +-// +build s390x,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go +@@ -345,6 +344,9 @@ const ( + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +index 1bb7c63..f40519d 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build sparc64 && linux +-// +build sparc64,linux + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go +@@ -336,6 +335,9 @@ const ( + SCM_TIMESTAMPNS = 0x21 + SCM_TXTIME = 0x3f + SCM_WIFI_STATUS = 0x25 ++ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 ++ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 ++ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 + SFD_CLOEXEC = 0x400000 + SFD_NONBLOCK = 0x4000 + SF_FP = 0x38 +diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +index 72f7420..130085d 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build 386 && netbsd +-// +build 386,netbsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -m32 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +index 8d4eb0c..84769a1 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && netbsd +-// +build amd64,netbsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+ // cgo -godefs -- -m64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +index 9eef974..602ded0 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm && netbsd +-// +build arm,netbsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -marm _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +index 3b62ba1..efc0406 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm64 && netbsd +-// +build arm64,netbsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -m64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +index af20e47..5a6500f 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build 386 && openbsd +-// +build 386,openbsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -m32 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +index 6015fcb..a5aeeb9 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && openbsd +-// +build amd64,openbsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -m64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +index 8d44955..0e9748a 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm && openbsd +-// +build arm,openbsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +index ae16fe7..4f4449a 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm64 && openbsd +-// +build arm64,openbsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -m64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +index 03d90fe..76a363f 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build mips64 && openbsd +-// +build mips64,openbsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+ // cgo -godefs -- -m64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go +index 8e2c51b..43ca0cd 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc64 && openbsd +-// +build ppc64,openbsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -m64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go +index 13d4030..b1b8bb2 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build riscv64 && openbsd +-// +build riscv64,openbsd + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -m64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +index 1afee6a..d2ddd31 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && solaris +-// +build amd64,solaris + + // Code generated by cmd/cgo -godefs; DO NOT EDIT. + // cgo -godefs -- -m64 _const.go +diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +index fc7d050..4dfd2e0 100644 +--- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go ++++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build zos && s390x +-// +build zos,s390x + + // Hand edited based on zerrors_linux_s390x.go + // TODO: auto-generate. +diff --git a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +index 97f20ca..586317c 100644 +--- a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go ++++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +@@ -1,8 +1,6 @@ + // Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. + + //go:build linux && (arm || arm64) +-// +build linux +-// +build arm arm64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +index 0b5f794..d7c881b 100644 +--- a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go ++++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +@@ -1,8 +1,6 @@ + // Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. + + //go:build linux && (mips || mips64) +-// +build linux +-// +build mips mips64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +index 2807f7e..2d2de5d 100644 +--- a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go ++++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +@@ -1,8 +1,6 @@ + // Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. 
+ + //go:build linux && (mipsle || mips64le) +-// +build linux +-// +build mipsle mips64le + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +index 281ea64..5adc79f 100644 +--- a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go ++++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +@@ -1,8 +1,6 @@ + // Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. + + //go:build linux && (386 || amd64) +-// +build linux +-// +build 386 amd64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +index d1d1d23..6ea64a3 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build aix && ppc +-// +build aix,ppc + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +index f99a18a..99ee439 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build aix && ppc64 +-// +build aix,ppc64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +index c4d50ae..b68a783 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build aix && ppc64 && gc +-// +build aix,ppc64,gc + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +index 6903d3b..0a87450 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build aix && ppc64 && gccgo +-// +build aix,ppc64,gccgo + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +index 1cad561..ccb02f2 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build darwin && amd64 +-// +build darwin,amd64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +index b18edbd..1b40b99 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build darwin && arm64 +-// +build darwin,arm64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +index 0c67df6..aad65fc 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. 
DO NOT EDIT. + + //go:build dragonfly && amd64 +-// +build dragonfly,amd64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +index e6e05d1..c009639 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build freebsd && 386 +-// +build freebsd,386 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +index 7508acc..7664df7 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build freebsd && amd64 +-// +build freebsd,amd64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +index 7b56aea..ae09918 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build freebsd && arm +-// +build freebsd,arm + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +index cc623dc..11fd5d4 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build freebsd && arm64 +-// +build freebsd,arm64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +index 5818491..c3d2d65 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build freebsd && riscv64 +-// +build freebsd,riscv64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +index 6be25cd..c698cbc 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build illumos && amd64 +-// +build illumos,amd64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go +index 1ff3aec..87d8612 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go +@@ -1,7 +1,6 @@ + // Code generated by mkmerge; DO NOT EDIT. 
+ + //go:build linux +-// +build linux + + package unix + +@@ -38,6 +37,21 @@ func fchmodat(dirfd int, path string, mode uint32) (err error) { + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_FCHMODAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { +@@ -892,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { ++ _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { +@@ -2195,3 +2219,13 @@ func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { + } + return + } ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) { ++ _, _, e1 := Syscall6(SYS_CACHESTAT, uintptr(fd), uintptr(unsafe.Pointer(crange)), uintptr(unsafe.Pointer(cstat)), uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +index 07b549c..4def3e9 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build linux && 386 +-// +build linux,386 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +index 5f481bf..fef2bc8 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build linux && amd64 +-// +build linux,amd64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +index 824cd52..a9fd76a 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build linux && arm +-// +build linux,arm + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +index e77aecf..4600650 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build linux && arm64 +-// +build linux,arm64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +index 806ffd1..c8987d2 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build linux && loong64 +-// +build linux,loong64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +index 961a3af..921f430 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build linux && mips +-// +build linux,mips + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +index ed05005..44f0678 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build linux && mips64 +-// +build linux,mips64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +index d365b71..e7fa0ab 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build linux && mips64le +-// +build linux,mips64le + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +index c3f1b8b..8c51256 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build linux && mipsle +-// +build linux,mipsle + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +index a6574cf..7392fd4 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build linux && ppc +-// +build linux,ppc + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +index f409902..4118043 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build linux && ppc64 +-// +build linux,ppc64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +index 9dfcc29..40c6ce7 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build linux && ppc64le +-// +build linux,ppc64le + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +index 0ab4f2e..2cfe34a 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build linux && riscv64 +-// +build linux,riscv64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +index 6cde322..61e6f07 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build linux && s390x +-// +build linux,s390x + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +index 5253d65..834b842 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build linux && sparc64 +-// +build linux,sparc64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +index 2df3c5b..e91ebc1 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build netbsd && 386 +-// +build netbsd,386 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +index a60556b..be28bab 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build netbsd && amd64 +-// +build netbsd,amd64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +index 9f78891..fb587e8 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build netbsd && arm +-// +build netbsd,arm + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +index 82a4cb2..d576438 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build netbsd && arm64 +-// +build netbsd,arm64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +index 66b3b64..9dc4241 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build openbsd && 386 +-// +build openbsd,386 + + package unix + +@@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func fcntl(fd int, cmd int, arg int) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_fcntl_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) +@@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_getfsstat_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) +@@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error + var libc_utimensat_trampoline_addr uintptr + + //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func pledge(promises *byte, execpromises *byte) (err error) { ++ _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_pledge_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_pledge pledge "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func unveil(path *byte, flags *byte) (err error) { ++ _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_unveil_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_unveil unveil "libc.so" +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +index 3dcacd3..41b5617 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s ++++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +@@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 + DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) + ++TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP 
libc_fcntl(SB) ++GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 ++DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) ++ + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 +@@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 + DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) + ++TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_getfsstat(SB) ++GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 ++DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) ++ + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 + DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) ++ ++TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_pledge(SB) ++GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 ++DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) ++ ++TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_unveil(SB) ++GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 ++DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +index c5c4cc1..0d3a075 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build openbsd && amd64 +-// +build openbsd,amd64 + + package unix + +@@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func fcntl(fd int, cmd int, arg int) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_fcntl_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) +@@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_getfsstat_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func utimensat(dirfd int, path string, times *[2]Timespec, flags 
int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) +@@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error + var libc_utimensat_trampoline_addr uintptr + + //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func pledge(promises *byte, execpromises *byte) (err error) { ++ _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_pledge_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_pledge pledge "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func unveil(path *byte, flags *byte) (err error) { ++ _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_unveil_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_unveil unveil "libc.so" +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +index 2763620..4019a65 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s ++++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +@@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 + DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + ++TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_fcntl(SB) ++GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) ++ + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +@@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 + DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + ++TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_getfsstat(SB) ++GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) ++ + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 + DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) ++ ++TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_pledge(SB) ++GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) ++ ++TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_unveil(SB) ++GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +index 93bfbb3..c39f777 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build openbsd && arm +-// +build openbsd,arm + + package unix + +@@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func fcntl(fd int, cmd int, arg int) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_fcntl_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) +@@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_getfsstat_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) +@@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error + var libc_utimensat_trampoline_addr uintptr + + //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func pledge(promises *byte, execpromises *byte) (err error) { ++ _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_pledge_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_pledge pledge "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func unveil(path *byte, flags *byte) (err error) { ++ _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_unveil_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_unveil unveil "libc.so" +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +index c922314..ac4af24 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s ++++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +@@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 + DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) + ++TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP 
libc_fcntl(SB) ++GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 ++DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) ++ + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 +@@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 + DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) + ++TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_getfsstat(SB) ++GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 ++DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) ++ + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 + DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) ++ ++TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_pledge(SB) ++GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 ++DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) ++ ++TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_unveil(SB) ++GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 ++DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +index a107b8f..57571d0 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build openbsd && arm64 +-// +build openbsd,arm64 + + package unix + +@@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func fcntl(fd int, cmd int, arg int) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_fcntl_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) +@@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_getfsstat_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func utimensat(dirfd int, path string, times *[2]Timespec, flags 
int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) +@@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error + var libc_utimensat_trampoline_addr uintptr + + //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func pledge(promises *byte, execpromises *byte) (err error) { ++ _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_pledge_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_pledge pledge "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func unveil(path *byte, flags *byte) (err error) { ++ _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_unveil_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_unveil unveil "libc.so" +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +index a6bc32c..f77d532 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s ++++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +@@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 + DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + ++TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_fcntl(SB) ++GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) ++ + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +@@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 + DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + ++TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_getfsstat(SB) ++GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) ++ + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 + DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) ++ ++TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_pledge(SB) ++GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) ++ ++TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_unveil(SB) ++GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +index c427de5..e62963e 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build openbsd && mips64 +-// +build openbsd,mips64 + + package unix + +@@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func fcntl(fd int, cmd int, arg int) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_fcntl_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) +@@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_getfsstat_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) +@@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error + var libc_utimensat_trampoline_addr uintptr + + //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func pledge(promises *byte, execpromises *byte) (err error) { ++ _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_pledge_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_pledge pledge "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func unveil(path *byte, flags *byte) (err error) { ++ _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_unveil_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_unveil unveil "libc.so" +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +index b4e7bce..fae140b 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s ++++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +@@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 + DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + ++TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 
++ JMP libc_fcntl(SB) ++GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) ++ + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +@@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 + DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + ++TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_getfsstat(SB) ++GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) ++ + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 + DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) ++ ++TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_pledge(SB) ++GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) ++ ++TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_unveil(SB) ++GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +index 60c1a99..0083135 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build openbsd && ppc64 +-// +build openbsd,ppc64 + + package unix + +@@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func fcntl(fd int, cmd int, arg int) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_fcntl_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) +@@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_getfsstat_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func utimensat(dirfd int, path string, times *[2]Timespec, 
flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) +@@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error + var libc_utimensat_trampoline_addr uintptr + + //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func pledge(promises *byte, execpromises *byte) (err error) { ++ _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_pledge_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_pledge pledge "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func unveil(path *byte, flags *byte) (err error) { ++ _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_unveil_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_unveil unveil "libc.so" +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +index ca3f766..9d1e0ff 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s ++++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +@@ -213,6 +213,12 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 + DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + ++TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 ++ CALL libc_fcntl(SB) ++ RET ++GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) ++ + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_ppoll(SB) + RET +@@ -801,8 +807,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 + DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + ++TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 ++ CALL libc_getfsstat(SB) ++ RET ++GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) ++ + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_utimensat(SB) + RET + GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 + DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) ++ ++TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 ++ CALL libc_pledge(SB) ++ RET ++GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) ++ ++TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 ++ CALL libc_unveil(SB) ++ RET ++GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +index 52eba36..79029ed 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build openbsd && riscv64 +-// +build openbsd,riscv64 + + package unix + +@@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func fcntl(fd int, cmd int, arg int) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_fcntl_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) +@@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr + + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + ++func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { ++ r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_getfsstat_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) +@@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error + var libc_utimensat_trampoline_addr uintptr + + //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func pledge(promises *byte, execpromises *byte) (err error) { ++ _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_pledge_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_pledge pledge "libc.so" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func unveil(path *byte, flags *byte) (err error) { ++ _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++var libc_unveil_trampoline_addr uintptr ++ ++//go:cgo_import_dynamic libc_unveil unveil "libc.so" +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +index 477a7d5..da115f9 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s ++++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +@@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 + DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + ++TEXT 
libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_fcntl(SB) ++GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) ++ + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +@@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 + DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + ++TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_getfsstat(SB) ++GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) ++ + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 + DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) ++ ++TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_pledge(SB) ++GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) ++ ++TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 ++ JMP libc_unveil(SB) ++GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 ++DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +index b401894..829b87f 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build solaris && amd64 +-// +build solaris,amd64 + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +index 1d8fe1d..94f0112 100644 +--- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go ++++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build zos && s390x +-// +build zos,s390x + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +index 55e0484..3a58ae8 100644 +--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go ++++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; DO NOT EDIT. + + //go:build 386 && openbsd +-// +build 386,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +index d2243cf..dcb7a0e 100644 +--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; DO NOT EDIT. + + //go:build amd64 && openbsd +-// +build amd64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +index 82dc51b..db5a7bf 100644 +--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; DO NOT EDIT. 
+ + //go:build arm && openbsd +-// +build arm,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +index cbdda1a..7be575a 100644 +--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; DO NOT EDIT. + + //go:build arm64 && openbsd +-// +build arm64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +index f55eae1..d6e3174 100644 +--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go ++++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; DO NOT EDIT. + + //go:build mips64 && openbsd +-// +build mips64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go +index e440544..ee97157 100644 +--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; DO NOT EDIT. + + //go:build ppc64 && openbsd +-// +build ppc64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go +index a0db82f..35c3b91 100644 +--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; DO NOT EDIT. + + //go:build riscv64 && openbsd +-// +build riscv64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +index f8298ff..5edda76 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && darwin +-// +build amd64,darwin + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +index 5eb433b..0dc9e8b 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm64 && darwin +-// +build arm64,darwin + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +index 703675c..308ddf3 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && dragonfly +-// +build amd64,dragonfly + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +index 4e0d961..418664e 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build 386 && freebsd +-// +build 386,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +index 01636b8..34d0b86 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && freebsd +-// +build amd64,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +index ad99bc1..b71cf45 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm && freebsd +-// +build arm,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +index 89dcc42..e32df1c 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm64 && freebsd +-// +build arm64,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go +index ee37aaa..15ad611 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build riscv64 && freebsd +-// +build riscv64,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +index 9862853..0cc3ce4 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build 386 && linux +-// +build 386,linux + + package unix + +@@ -448,4 +447,9 @@ const ( + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ++ SYS_FCHMODAT2 = 452 ++ SYS_MAP_SHADOW_STACK = 453 ++ SYS_FUTEX_WAKE = 454 ++ SYS_FUTEX_WAIT = 455 ++ SYS_FUTEX_REQUEUE = 456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +index 8901f0f..856d92d 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && linux +-// +build amd64,linux + + package unix + +@@ -370,4 +369,9 @@ const ( + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ++ SYS_FCHMODAT2 = 452 ++ SYS_MAP_SHADOW_STACK = 453 ++ SYS_FUTEX_WAKE = 454 ++ SYS_FUTEX_WAIT = 455 ++ SYS_FUTEX_REQUEUE = 456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +index 6902c37..8d46709 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build arm && linux +-// +build arm,linux + + package unix + +@@ -412,4 +411,9 @@ const ( + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ++ SYS_FCHMODAT2 = 452 ++ SYS_MAP_SHADOW_STACK = 453 ++ SYS_FUTEX_WAKE = 454 ++ SYS_FUTEX_WAIT = 455 ++ SYS_FUTEX_REQUEUE = 456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +index a6d3dff..edc1732 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm64 && linux +-// +build arm64,linux + + package unix + +@@ -315,4 +314,9 @@ const ( + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ++ SYS_FCHMODAT2 = 452 ++ SYS_MAP_SHADOW_STACK = 453 ++ SYS_FUTEX_WAKE = 454 ++ SYS_FUTEX_WAIT = 455 ++ SYS_FUTEX_REQUEUE = 456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +index b18f3f7..445eba2 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build loong64 && linux +-// +build loong64,linux + + package unix + +@@ -309,4 +308,9 @@ const ( + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ++ SYS_FCHMODAT2 = 452 ++ SYS_MAP_SHADOW_STACK = 453 ++ SYS_FUTEX_WAKE = 454 ++ SYS_FUTEX_WAIT = 455 ++ SYS_FUTEX_REQUEUE = 456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +index 0302e5e..adba01b 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build mips && linux +-// +build mips,linux + + package unix + +@@ -432,4 +431,9 @@ const ( + SYS_FUTEX_WAITV = 4449 + SYS_SET_MEMPOLICY_HOME_NODE = 4450 + SYS_CACHESTAT = 4451 ++ SYS_FCHMODAT2 = 4452 ++ SYS_MAP_SHADOW_STACK = 4453 ++ SYS_FUTEX_WAKE = 4454 ++ SYS_FUTEX_WAIT = 4455 ++ SYS_FUTEX_REQUEUE = 4456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +index 6693ba4..014c4e9 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build mips64 && linux +-// +build mips64,linux + + package unix + +@@ -362,4 +361,9 @@ const ( + SYS_FUTEX_WAITV = 5449 + SYS_SET_MEMPOLICY_HOME_NODE = 5450 + SYS_CACHESTAT = 5451 ++ SYS_FCHMODAT2 = 5452 ++ SYS_MAP_SHADOW_STACK = 5453 ++ SYS_FUTEX_WAKE = 5454 ++ SYS_FUTEX_WAIT = 5455 ++ SYS_FUTEX_REQUEUE = 5456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +index fd93f49..ccc97d7 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build mips64le && linux +-// +build mips64le,linux + + package unix + +@@ -362,4 +361,9 @@ const ( + SYS_FUTEX_WAITV = 5449 + SYS_SET_MEMPOLICY_HOME_NODE = 5450 + SYS_CACHESTAT = 5451 ++ SYS_FCHMODAT2 = 5452 ++ SYS_MAP_SHADOW_STACK = 5453 ++ SYS_FUTEX_WAKE = 5454 ++ SYS_FUTEX_WAIT = 5455 ++ SYS_FUTEX_REQUEUE = 5456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +index 760ddca..ec2b64a 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build mipsle && linux +-// +build mipsle,linux + + package unix + +@@ -432,4 +431,9 @@ const ( + SYS_FUTEX_WAITV = 4449 + SYS_SET_MEMPOLICY_HOME_NODE = 4450 + SYS_CACHESTAT = 4451 ++ SYS_FCHMODAT2 = 4452 ++ SYS_MAP_SHADOW_STACK = 4453 ++ SYS_FUTEX_WAKE = 4454 ++ SYS_FUTEX_WAIT = 4455 ++ SYS_FUTEX_REQUEUE = 4456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +index cff2b25..21a839e 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc && linux +-// +build ppc,linux + + package unix + +@@ -439,4 +438,9 @@ const ( + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ++ SYS_FCHMODAT2 = 452 ++ SYS_MAP_SHADOW_STACK = 453 ++ SYS_FUTEX_WAKE = 454 ++ SYS_FUTEX_WAIT = 455 ++ SYS_FUTEX_REQUEUE = 456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +index a4b2405..c11121e 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc64 && linux +-// +build ppc64,linux + + package unix + +@@ -411,4 +410,9 @@ const ( + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ++ SYS_FCHMODAT2 = 452 ++ SYS_MAP_SHADOW_STACK = 453 ++ SYS_FUTEX_WAKE = 454 ++ SYS_FUTEX_WAIT = 455 ++ SYS_FUTEX_REQUEUE = 456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +index aca54b4..909b631 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc64le && linux +-// +build ppc64le,linux + + package unix + +@@ -411,4 +410,9 @@ const ( + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ++ SYS_FCHMODAT2 = 452 ++ SYS_MAP_SHADOW_STACK = 453 ++ SYS_FUTEX_WAKE = 454 ++ SYS_FUTEX_WAIT = 455 ++ SYS_FUTEX_REQUEUE = 456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +index 9d1738d..e49bed1 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build riscv64 && linux +-// +build riscv64,linux + + package unix + +@@ -316,4 +315,9 @@ const ( + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ++ SYS_FCHMODAT2 = 452 ++ SYS_MAP_SHADOW_STACK = 453 ++ SYS_FUTEX_WAKE = 454 ++ SYS_FUTEX_WAIT = 455 ++ SYS_FUTEX_REQUEUE = 456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +index 022878d..66017d2 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build s390x && linux +-// +build s390x,linux + + package unix + +@@ -377,4 +376,9 @@ const ( + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ++ SYS_FCHMODAT2 = 452 ++ SYS_MAP_SHADOW_STACK = 453 ++ SYS_FUTEX_WAKE = 454 ++ SYS_FUTEX_WAIT = 455 ++ SYS_FUTEX_REQUEUE = 456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +index 4100a76..47bab18 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build sparc64 && linux +-// +build sparc64,linux + + package unix + +@@ -390,4 +389,9 @@ const ( + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ++ SYS_FCHMODAT2 = 452 ++ SYS_MAP_SHADOW_STACK = 453 ++ SYS_FUTEX_WAKE = 454 ++ SYS_FUTEX_WAIT = 455 ++ SYS_FUTEX_REQUEUE = 456 + ) +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +index 3a6699e..b2aa8cd 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build 386 && netbsd +-// +build 386,netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +index 5677cd4..524a1b1 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && netbsd +-// +build amd64,netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +index e784cb6..d59b943 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm && netbsd +-// +build arm,netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go +index bd4952e..31e771d 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; DO NOT EDIT. 
+ + //go:build arm64 && netbsd +-// +build arm64,netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +index 5977338..9fd77c6 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build 386 && openbsd +-// +build 386,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +index 16af291..af10af2 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && openbsd +-// +build amd64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +index f59b18a..cc2028a 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm && openbsd +-// +build arm,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +index 721ef59..c06dd44 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm64 && openbsd +-// +build arm64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +index 01c43a0..9ddbf3e 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build mips64 && openbsd +-// +build mips64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go +index f258cfa..19a6ee4 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc64 && openbsd +-// +build ppc64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go +index 07919e0..05192a7 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build riscv64 && openbsd +-// +build riscv64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +index 073daad..b2e3085 100644 +--- a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go ++++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build zos && s390x +-// +build zos,s390x + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +index 7a8161c..3e6d57c 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc && aix +-// +build ppc,aix + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +index 07ed733..3a219bd 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc64 && aix +-// +build ppc64,aix + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +index 690cefc..091d107 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && darwin +-// +build amd64,darwin + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +index 5bffc10..28ff4ef 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm64 && darwin +-// +build arm64,darwin + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +index d0ba8e9..30e405b 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && dragonfly +-// +build amd64,dragonfly + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +index 29dc483..6cbd094 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build 386 && freebsd +-// +build 386,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +index 0a89b28..7c03b6e 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && freebsd +-// +build amd64,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +index c8666bb..422107e 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build arm && freebsd +-// +build arm,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +index 88fb48a..505a12a 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm64 && freebsd +-// +build arm64,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +index 698dc97..cc986c7 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build riscv64 && freebsd +-// +build riscv64,freebsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go +index 18aa70b..eff6bcd 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go +@@ -1,7 +1,6 @@ + // Code generated by mkmerge; DO NOT EDIT. + + //go:build linux +-// +build linux + + package unix + +@@ -175,7 +174,8 @@ type FscryptPolicyV2 struct { + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 +- _ [4]uint8 ++ Log2_data_unit_size uint8 ++ _ [3]uint8 + Master_key_identifier [16]uint8 + } + +@@ -456,60 +456,63 @@ type Ucred struct { + } + + type TCPInfo struct { +- State uint8 +- Ca_state uint8 +- Retransmits uint8 +- Probes uint8 +- Backoff uint8 +- Options uint8 +- Rto uint32 +- Ato uint32 +- Snd_mss uint32 +- Rcv_mss uint32 +- Unacked uint32 +- Sacked uint32 +- Lost uint32 +- Retrans uint32 +- Fackets uint32 +- Last_data_sent uint32 +- Last_ack_sent uint32 +- Last_data_recv uint32 +- Last_ack_recv uint32 +- Pmtu uint32 +- Rcv_ssthresh uint32 +- Rtt uint32 +- Rttvar uint32 +- Snd_ssthresh uint32 +- Snd_cwnd uint32 +- Advmss uint32 +- Reordering uint32 +- Rcv_rtt uint32 +- Rcv_space uint32 +- Total_retrans uint32 +- Pacing_rate uint64 +- Max_pacing_rate uint64 +- Bytes_acked uint64 +- Bytes_received uint64 +- Segs_out uint32 +- Segs_in uint32 +- Notsent_bytes uint32 +- Min_rtt uint32 +- Data_segs_in uint32 +- Data_segs_out uint32 +- Delivery_rate uint64 +- Busy_time uint64 +- Rwnd_limited uint64 +- Sndbuf_limited uint64 +- Delivered uint32 +- Delivered_ce uint32 +- Bytes_sent uint64 +- Bytes_retrans uint64 +- Dsack_dups uint32 +- Reord_seen uint32 +- Rcv_ooopack uint32 +- Snd_wnd uint32 +- Rcv_wnd uint32 +- Rehash uint32 ++ State uint8 ++ Ca_state uint8 ++ Retransmits uint8 ++ Probes uint8 ++ Backoff uint8 ++ Options uint8 ++ Rto uint32 ++ Ato uint32 ++ Snd_mss uint32 ++ Rcv_mss uint32 ++ Unacked uint32 ++ Sacked uint32 ++ Lost uint32 ++ Retrans uint32 ++ Fackets uint32 ++ Last_data_sent uint32 ++ Last_ack_sent uint32 ++ Last_data_recv uint32 ++ Last_ack_recv uint32 ++ Pmtu uint32 ++ Rcv_ssthresh uint32 ++ Rtt uint32 ++ Rttvar uint32 ++ Snd_ssthresh uint32 ++ Snd_cwnd uint32 ++ Advmss uint32 ++ Reordering uint32 ++ Rcv_rtt uint32 ++ Rcv_space uint32 ++ Total_retrans uint32 ++ Pacing_rate uint64 ++ Max_pacing_rate uint64 ++ Bytes_acked uint64 ++ Bytes_received uint64 ++ Segs_out uint32 ++ Segs_in uint32 ++ Notsent_bytes uint32 ++ Min_rtt uint32 ++ Data_segs_in uint32 ++ Data_segs_out uint32 ++ Delivery_rate uint64 ++ Busy_time uint64 ++ Rwnd_limited uint64 
++ Sndbuf_limited uint64 ++ Delivered uint32 ++ Delivered_ce uint32 ++ Bytes_sent uint64 ++ Bytes_retrans uint64 ++ Dsack_dups uint32 ++ Reord_seen uint32 ++ Rcv_ooopack uint32 ++ Snd_wnd uint32 ++ Rcv_wnd uint32 ++ Rehash uint32 ++ Total_rto uint16 ++ Total_rto_recoveries uint16 ++ Total_rto_time uint32 + } + + type CanFilter struct { +@@ -552,7 +555,7 @@ const ( + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc +- SizeofTCPInfo = 0xf0 ++ SizeofTCPInfo = 0xf8 + SizeofCanFilter = 0x8 + SizeofTCPRepairOpt = 0x8 + ) +@@ -833,6 +836,15 @@ const ( + FSPICK_EMPTY_PATH = 0x8 + + FSMOUNT_CLOEXEC = 0x1 ++ ++ FSCONFIG_SET_FLAG = 0x0 ++ FSCONFIG_SET_STRING = 0x1 ++ FSCONFIG_SET_BINARY = 0x2 ++ FSCONFIG_SET_PATH = 0x3 ++ FSCONFIG_SET_PATH_EMPTY = 0x4 ++ FSCONFIG_SET_FD = 0x5 ++ FSCONFIG_CMD_CREATE = 0x6 ++ FSCONFIG_CMD_RECONFIGURE = 0x7 + ) + + type OpenHow struct { +@@ -1547,6 +1559,7 @@ const ( + IFLA_DEVLINK_PORT = 0x3e + IFLA_GSO_IPV4_MAX_SIZE = 0x3f + IFLA_GRO_IPV4_MAX_SIZE = 0x40 ++ IFLA_DPLL_PIN = 0x41 + IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 + IFLA_PROTO_DOWN_REASON_MASK = 0x1 + IFLA_PROTO_DOWN_REASON_VALUE = 0x2 +@@ -1562,6 +1575,7 @@ const ( + IFLA_INET6_ICMP6STATS = 0x6 + IFLA_INET6_TOKEN = 0x7 + IFLA_INET6_ADDR_GEN_MODE = 0x8 ++ IFLA_INET6_RA_MTU = 0x9 + IFLA_BR_UNSPEC = 0x0 + IFLA_BR_FORWARD_DELAY = 0x1 + IFLA_BR_HELLO_TIME = 0x2 +@@ -1609,6 +1623,9 @@ const ( + IFLA_BR_MCAST_MLD_VERSION = 0x2c + IFLA_BR_VLAN_STATS_PER_PORT = 0x2d + IFLA_BR_MULTI_BOOLOPT = 0x2e ++ IFLA_BR_MCAST_QUERIER_STATE = 0x2f ++ IFLA_BR_FDB_N_LEARNED = 0x30 ++ IFLA_BR_FDB_MAX_LEARNED = 0x31 + IFLA_BRPORT_UNSPEC = 0x0 + IFLA_BRPORT_STATE = 0x1 + IFLA_BRPORT_PRIORITY = 0x2 +@@ -1646,6 +1663,14 @@ const ( + IFLA_BRPORT_BACKUP_PORT = 0x22 + IFLA_BRPORT_MRP_RING_OPEN = 0x23 + IFLA_BRPORT_MRP_IN_OPEN = 0x24 ++ IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 ++ IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 ++ IFLA_BRPORT_LOCKED = 0x27 ++ IFLA_BRPORT_MAB = 0x28 ++ IFLA_BRPORT_MCAST_N_GROUPS = 0x29 ++ IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a ++ IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b ++ IFLA_BRPORT_BACKUP_NHID = 0x2c + IFLA_INFO_UNSPEC = 0x0 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 +@@ -1667,6 +1692,9 @@ const ( + IFLA_MACVLAN_MACADDR = 0x4 + IFLA_MACVLAN_MACADDR_DATA = 0x5 + IFLA_MACVLAN_MACADDR_COUNT = 0x6 ++ IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 ++ IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 ++ IFLA_MACVLAN_BC_CUTOFF = 0x9 + IFLA_VRF_UNSPEC = 0x0 + IFLA_VRF_TABLE = 0x1 + IFLA_VRF_PORT_UNSPEC = 0x0 +@@ -1690,9 +1718,22 @@ const ( + IFLA_XFRM_UNSPEC = 0x0 + IFLA_XFRM_LINK = 0x1 + IFLA_XFRM_IF_ID = 0x2 ++ IFLA_XFRM_COLLECT_METADATA = 0x3 + IFLA_IPVLAN_UNSPEC = 0x0 + IFLA_IPVLAN_MODE = 0x1 + IFLA_IPVLAN_FLAGS = 0x2 ++ NETKIT_NEXT = -0x1 ++ NETKIT_PASS = 0x0 ++ NETKIT_DROP = 0x2 ++ NETKIT_REDIRECT = 0x7 ++ NETKIT_L2 = 0x0 ++ NETKIT_L3 = 0x1 ++ IFLA_NETKIT_UNSPEC = 0x0 ++ IFLA_NETKIT_PEER_INFO = 0x1 ++ IFLA_NETKIT_PRIMARY = 0x2 ++ IFLA_NETKIT_POLICY = 0x3 ++ IFLA_NETKIT_PEER_POLICY = 0x4 ++ IFLA_NETKIT_MODE = 0x5 + IFLA_VXLAN_UNSPEC = 0x0 + IFLA_VXLAN_ID = 0x1 + IFLA_VXLAN_GROUP = 0x2 +@@ -1723,6 +1764,8 @@ const ( + IFLA_VXLAN_GPE = 0x1b + IFLA_VXLAN_TTL_INHERIT = 0x1c + IFLA_VXLAN_DF = 0x1d ++ IFLA_VXLAN_VNIFILTER = 0x1e ++ IFLA_VXLAN_LOCALBYPASS = 0x1f + IFLA_GENEVE_UNSPEC = 0x0 + IFLA_GENEVE_ID = 0x1 + IFLA_GENEVE_REMOTE = 0x2 +@@ -1737,6 +1780,7 @@ const ( + IFLA_GENEVE_LABEL = 0xb + IFLA_GENEVE_TTL_INHERIT = 0xc + IFLA_GENEVE_DF = 0xd ++ IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe + IFLA_BAREUDP_UNSPEC = 0x0 + IFLA_BAREUDP_PORT = 
0x1 + IFLA_BAREUDP_ETHERTYPE = 0x2 +@@ -1749,6 +1793,8 @@ const ( + IFLA_GTP_FD1 = 0x2 + IFLA_GTP_PDP_HASHSIZE = 0x3 + IFLA_GTP_ROLE = 0x4 ++ IFLA_GTP_CREATE_SOCKETS = 0x5 ++ IFLA_GTP_RESTART_COUNT = 0x6 + IFLA_BOND_UNSPEC = 0x0 + IFLA_BOND_MODE = 0x1 + IFLA_BOND_ACTIVE_SLAVE = 0x2 +@@ -1778,6 +1824,9 @@ const ( + IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a + IFLA_BOND_TLB_DYNAMIC_LB = 0x1b + IFLA_BOND_PEER_NOTIF_DELAY = 0x1c ++ IFLA_BOND_AD_LACP_ACTIVE = 0x1d ++ IFLA_BOND_MISSED_MAX = 0x1e ++ IFLA_BOND_NS_IP6_TARGET = 0x1f + IFLA_BOND_AD_INFO_UNSPEC = 0x0 + IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 + IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 +@@ -1793,6 +1842,7 @@ const ( + IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 + IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 + IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 ++ IFLA_BOND_SLAVE_PRIO = 0x9 + IFLA_VF_INFO_UNSPEC = 0x0 + IFLA_VF_INFO = 0x1 + IFLA_VF_UNSPEC = 0x0 +@@ -1851,8 +1901,16 @@ const ( + IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 + IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 + IFLA_STATS_AF_SPEC = 0x5 ++ IFLA_STATS_GETSET_UNSPEC = 0x0 ++ IFLA_STATS_GET_FILTERS = 0x1 ++ IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 + IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 ++ IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 ++ IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 ++ IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 ++ IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 ++ IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 + IFLA_XDP_UNSPEC = 0x0 + IFLA_XDP_FD = 0x1 + IFLA_XDP_ATTACHED = 0x2 +@@ -1882,6 +1940,11 @@ const ( + IFLA_RMNET_UNSPEC = 0x0 + IFLA_RMNET_MUX_ID = 0x1 + IFLA_RMNET_FLAGS = 0x2 ++ IFLA_MCTP_UNSPEC = 0x0 ++ IFLA_MCTP_NET = 0x1 ++ IFLA_DSA_UNSPEC = 0x0 ++ IFLA_DSA_CONDUIT = 0x1 ++ IFLA_DSA_MASTER = 0x1 + ) + + const ( +@@ -2672,6 +2735,7 @@ const ( + BPF_PROG_TYPE_LSM = 0x1d + BPF_PROG_TYPE_SK_LOOKUP = 0x1e + BPF_PROG_TYPE_SYSCALL = 0x1f ++ BPF_PROG_TYPE_NETFILTER = 0x20 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 +@@ -2716,6 +2780,11 @@ const ( + BPF_PERF_EVENT = 0x29 + BPF_TRACE_KPROBE_MULTI = 0x2a + BPF_LSM_CGROUP = 0x2b ++ BPF_STRUCT_OPS = 0x2c ++ BPF_NETFILTER = 0x2d ++ BPF_TCX_INGRESS = 0x2e ++ BPF_TCX_EGRESS = 0x2f ++ BPF_TRACE_UPROBE_MULTI = 0x30 + BPF_LINK_TYPE_UNSPEC = 0x0 + BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 + BPF_LINK_TYPE_TRACING = 0x2 +@@ -2726,6 +2795,18 @@ const ( + BPF_LINK_TYPE_PERF_EVENT = 0x7 + BPF_LINK_TYPE_KPROBE_MULTI = 0x8 + BPF_LINK_TYPE_STRUCT_OPS = 0x9 ++ BPF_LINK_TYPE_NETFILTER = 0xa ++ BPF_LINK_TYPE_TCX = 0xb ++ BPF_LINK_TYPE_UPROBE_MULTI = 0xc ++ BPF_PERF_EVENT_UNSPEC = 0x0 ++ BPF_PERF_EVENT_UPROBE = 0x1 ++ BPF_PERF_EVENT_URETPROBE = 0x2 ++ BPF_PERF_EVENT_KPROBE = 0x3 ++ BPF_PERF_EVENT_KRETPROBE = 0x4 ++ BPF_PERF_EVENT_TRACEPOINT = 0x5 ++ BPF_PERF_EVENT_EVENT = 0x6 ++ BPF_F_KPROBE_MULTI_RETURN = 0x1 ++ BPF_F_UPROBE_MULTI_RETURN = 0x1 + BPF_ANY = 0x0 + BPF_NOEXIST = 0x1 + BPF_EXIST = 0x2 +@@ -2743,6 +2824,8 @@ const ( + BPF_F_MMAPABLE = 0x400 + BPF_F_PRESERVE_ELEMS = 0x800 + BPF_F_INNER_MAP = 0x1000 ++ BPF_F_LINK = 0x2000 ++ BPF_F_PATH_FD = 0x4000 + BPF_STATS_RUN_TIME = 0x0 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 +@@ -2763,6 +2846,7 @@ const ( + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_SEQ_NUMBER = 0x8 ++ BPF_F_NO_TUNNEL_KEY = 0x10 + BPF_F_TUNINFO_FLAGS = 0x10 + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_CURRENT_CPU = 0xffffffff +@@ -2779,6 +2863,8 @@ const ( + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 + BPF_F_ADJ_ROOM_ENCAP_L2_ETH 
= 0x40 ++ BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 0x80 ++ BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 0x100 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 + BPF_F_SYSCTL_BASE_NAME = 0x1 +@@ -2867,6 +2953,8 @@ const ( + BPF_DEVCG_DEV_CHAR = 0x2 + BPF_FIB_LOOKUP_DIRECT = 0x1 + BPF_FIB_LOOKUP_OUTPUT = 0x2 ++ BPF_FIB_LOOKUP_SKIP_NEIGH = 0x4 ++ BPF_FIB_LOOKUP_TBID = 0x8 + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 +@@ -2902,6 +2990,7 @@ const ( + BPF_CORE_ENUMVAL_EXISTS = 0xa + BPF_CORE_ENUMVAL_VALUE = 0xb + BPF_CORE_TYPE_MATCHES = 0xc ++ BPF_F_TIMER_ABS = 0x1 + ) + + const ( +@@ -2980,6 +3069,12 @@ type LoopInfo64 struct { + Encrypt_key [32]uint8 + Init [2]uint64 + } ++type LoopConfig struct { ++ Fd uint32 ++ Size uint32 ++ Info LoopInfo64 ++ _ [8]uint64 ++} + + type TIPCSocketAddr struct { + Ref uint32 +@@ -3368,7 +3463,7 @@ const ( + DEVLINK_PORT_FN_ATTR_STATE = 0x2 + DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 + DEVLINK_PORT_FN_ATTR_CAPS = 0x4 +- DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 ++ DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 + ) + + type FsverityDigest struct { +@@ -4152,7 +4247,8 @@ const ( + ) + + type LandlockRulesetAttr struct { +- Access_fs uint64 ++ Access_fs uint64 ++ Access_net uint64 + } + + type LandlockPathBeneathAttr struct { +@@ -5103,7 +5199,7 @@ const ( + NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf + NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe + NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf +- NL80211_FREQUENCY_ATTR_MAX = 0x1b ++ NL80211_FREQUENCY_ATTR_MAX = 0x1c + NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 + NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 + NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc +@@ -5516,7 +5612,7 @@ const ( + NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 + NL80211_REGDOM_TYPE_INTERSECTION = 0x3 + NL80211_REGDOM_TYPE_WORLD = 0x1 +- NL80211_REG_RULE_ATTR_MAX = 0x7 ++ NL80211_REG_RULE_ATTR_MAX = 0x8 + NL80211_REKEY_DATA_AKM = 0x4 + NL80211_REKEY_DATA_KCK = 0x2 + NL80211_REKEY_DATA_KEK = 0x1 +@@ -5883,3 +5979,15 @@ type SchedAttr struct { + } + + const SizeofSchedAttr = 0x38 ++ ++type Cachestat_t struct { ++ Cache uint64 ++ Dirty uint64 ++ Writeback uint64 ++ Evicted uint64 ++ Recently_evicted uint64 ++} ++type CachestatRange struct { ++ Off uint64 ++ Len uint64 ++} +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +index 6d8acbc..438a30a 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build 386 && linux +-// +build 386,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +index 59293c6..adceca3 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && linux +-// +build amd64,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +index 40cfa38..eeaa00a 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build arm && linux +-// +build arm,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +index 055bc42..6739aa9 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm64 && linux +-// +build arm64,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +index f28affb..9920ef6 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build loong64 && linux +-// +build loong64,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +index 9d71e7c..2923b79 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build mips && linux +-// +build mips,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +index fd5ccd3..ce2750e 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build mips64 && linux +-// +build mips64,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +index 7704de7..3038811 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build mips64le && linux +-// +build mips64le,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +index df00b87..efc6fed 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build mipsle && linux +-// +build mipsle,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +index 0942840..9a654b7 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc && linux +-// +build ppc,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +index 0348743..40d358e 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build ppc64 && linux +-// +build ppc64,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +index bad0670..148c6ce 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc64le && linux +-// +build ppc64le,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +index 1b4c97c..72ba815 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build riscv64 && linux +-// +build riscv64,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +index aa268d0..71e7655 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build s390x && linux +-// +build s390x,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +index 444045b..4abbdb9 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build sparc64 && linux +-// +build sparc64,linux + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +index 9bc4c8f..f22e794 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build 386 && netbsd +-// +build 386,netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +index bb05f65..066a7d8 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && netbsd +-// +build amd64,netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +index db40e3a..439548e 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm && netbsd +-// +build arm,netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +index 1112115..16085d3 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build arm64 && netbsd +-// +build arm64,netbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +index 26eba23..afd13a3 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build 386 && openbsd +-// +build 386,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +index 5a54798..5d97f1f 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build amd64 && openbsd +-// +build amd64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +index be58c4e..34871cd 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm && openbsd +-// +build arm,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +index 5233826..5911bce 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build arm64 && openbsd +-// +build arm64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +index 605cfdb..e4f24f3 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build mips64 && openbsd +-// +build mips64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go +index d6724c0..ca50a79 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build ppc64 && openbsd +-// +build ppc64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go +index ddfd27a..d7d7f79 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. + + //go:build riscv64 && openbsd +-// +build riscv64,openbsd + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +index 0400747..1416057 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +@@ -2,7 +2,6 @@ + // Code generated by the command above; see README.md. DO NOT EDIT. 
+ + //go:build amd64 && solaris +-// +build amd64,solaris + + package unix + +diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +index aec1efc..54f31be 100644 +--- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go ++++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build zos && s390x +-// +build zos,s390x + + // Hand edited based on ztypes_linux_s390x.go + // TODO: auto-generate. +diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go +index a20ebea..ce2d713 100644 +--- a/vendor/golang.org/x/sys/windows/aliases.go ++++ b/vendor/golang.org/x/sys/windows/aliases.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build windows && go1.9 +-// +build windows,go1.9 + + package windows + +diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s +index fdbbbcd..ba64cac 100644 +--- a/vendor/golang.org/x/sys/windows/empty.s ++++ b/vendor/golang.org/x/sys/windows/empty.s +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build !go1.12 +-// +build !go1.12 + + // This file is here to allow bodyless functions with go:linkname for Go 1.11 + // and earlier (see https://golang.org/issue/23311). +diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go +index b8ad192..d4577a4 100644 +--- a/vendor/golang.org/x/sys/windows/env_windows.go ++++ b/vendor/golang.org/x/sys/windows/env_windows.go +@@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { + return nil, err + } + defer DestroyEnvironmentBlock(block) +- blockp := unsafe.Pointer(block) +- for { +- entry := UTF16PtrToString((*uint16)(blockp)) +- if len(entry) == 0 { +- break ++ size := unsafe.Sizeof(*block) ++ for *block != 0 { ++ // find NUL terminator ++ end := unsafe.Pointer(block) ++ for *(*uint16)(end) != 0 { ++ end = unsafe.Add(end, size) + } +- env = append(env, entry) +- blockp = unsafe.Add(blockp, 2*(len(entry)+1)) ++ ++ entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) ++ env = append(env, UTF16ToString(entry)) ++ block = (*uint16)(unsafe.Add(end, size)) + } + return env, nil + } +diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go +index 2cd6064..6c36695 100644 +--- a/vendor/golang.org/x/sys/windows/eventlog.go ++++ b/vendor/golang.org/x/sys/windows/eventlog.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build windows +-// +build windows + + package windows + +diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go +index 8563f79..dbcdb09 100644 +--- a/vendor/golang.org/x/sys/windows/mksyscall.go ++++ b/vendor/golang.org/x/sys/windows/mksyscall.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build generate +-// +build generate + + package windows + +diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go +index 9196b08..0f1bdc3 100644 +--- a/vendor/golang.org/x/sys/windows/race.go ++++ b/vendor/golang.org/x/sys/windows/race.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build windows && race +-// +build windows,race + + package windows + +diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go +index 7bae481..0c78da7 100644 +--- a/vendor/golang.org/x/sys/windows/race0.go ++++ b/vendor/golang.org/x/sys/windows/race0.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build windows && !race +-// +build windows,!race + + package windows + +diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go +index c44a1b9..a9dc630 100644 +--- a/vendor/golang.org/x/sys/windows/service.go ++++ b/vendor/golang.org/x/sys/windows/service.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build windows +-// +build windows + + package windows + +diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go +index 4fc0143..6a4f9ce 100644 +--- a/vendor/golang.org/x/sys/windows/str.go ++++ b/vendor/golang.org/x/sys/windows/str.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build windows +-// +build windows + + package windows + +diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go +index 8732cdb..e85ed6b 100644 +--- a/vendor/golang.org/x/sys/windows/syscall.go ++++ b/vendor/golang.org/x/sys/windows/syscall.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build windows +-// +build windows + + // Package windows contains an interface to the low-level operating system + // primitives. OS details vary depending on the underlying system, and +diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go +index 35cfc57..6395a03 100644 +--- a/vendor/golang.org/x/sys/windows/syscall_windows.go ++++ b/vendor/golang.org/x/sys/windows/syscall_windows.go +@@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string { + for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { + ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) + } +- +- return string(utf16.Decode(unsafe.Slice(p, n))) ++ return UTF16ToString(unsafe.Slice(p, n)) + } + + func Getpagesize() int { return 4096 } +@@ -155,6 +154,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { + //sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW + //sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW + //sys SetDefaultDllDirectories(directoryFlags uint32) (err error) ++//sys AddDllDirectory(path *uint16) (cookie uintptr, err error) = kernel32.AddDllDirectory ++//sys RemoveDllDirectory(cookie uintptr) (err error) = kernel32.RemoveDllDirectory + //sys SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW + //sys GetVersion() (ver uint32, err error) + //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW +@@ -192,6 +193,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { + //sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW + //sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW + //sys SetEndOfFile(handle Handle) (err error) ++//sys SetFileValidData(handle Handle, validDataLength int64) (err error) + //sys GetSystemTimeAsFileTime(time *Filetime) + //sys GetSystemTimePreciseAsFileTime(time 
*Filetime) + //sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] +@@ -233,6 +235,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { + //sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock + //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock + //sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 ++//sys GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) + //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) + //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW + //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW +@@ -969,7 +972,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { + if n > 0 { + sl += int32(n) + 1 + } +- if sa.raw.Path[0] == '@' { ++ if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { ++ // Check sl > 3 so we don't change unnamed socket behavior. + sa.raw.Path[0] = 0 + // Don't count trailing NUL for abstract address. + sl-- +diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go +index b88dc7c..359780f 100644 +--- a/vendor/golang.org/x/sys/windows/types_windows.go ++++ b/vendor/golang.org/x/sys/windows/types_windows.go +@@ -1094,7 +1094,33 @@ const ( + + SOMAXCONN = 0x7fffffff + +- TCP_NODELAY = 1 ++ TCP_NODELAY = 1 ++ TCP_EXPEDITED_1122 = 2 ++ TCP_KEEPALIVE = 3 ++ TCP_MAXSEG = 4 ++ TCP_MAXRT = 5 ++ TCP_STDURG = 6 ++ TCP_NOURG = 7 ++ TCP_ATMARK = 8 ++ TCP_NOSYNRETRIES = 9 ++ TCP_TIMESTAMPS = 10 ++ TCP_OFFLOAD_PREFERENCE = 11 ++ TCP_CONGESTION_ALGORITHM = 12 ++ TCP_DELAY_FIN_ACK = 13 ++ TCP_MAXRTMS = 14 ++ TCP_FASTOPEN = 15 ++ TCP_KEEPCNT = 16 ++ TCP_KEEPIDLE = TCP_KEEPALIVE ++ TCP_KEEPINTVL = 17 ++ TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18 ++ TCP_ICMP_ERROR_INFO = 19 ++ ++ UDP_NOCHECKSUM = 1 ++ UDP_SEND_MSG_SIZE = 2 ++ UDP_RECV_MAX_COALESCED_SIZE = 3 ++ UDP_CHECKSUM_COVERAGE = 20 ++ ++ UDP_COALESCED_INFO = 3 + + SHUT_RD = 0 + SHUT_WR = 1 +diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go +index 8b1688d..e8791c8 100644 +--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go ++++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go +@@ -184,6 +184,7 @@ var ( + procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") + procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") ++ procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") + procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") + procCancelIo = modkernel32.NewProc("CancelIo") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") +@@ -253,6 +254,7 @@ var ( + procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") + procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") ++ procGetFileTime = modkernel32.NewProc("GetFileTime") + procGetFileType = modkernel32.NewProc("GetFileType") + procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") + procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") +@@ -329,6 +331,7 @@ var ( + procReadProcessMemory = 
modkernel32.NewProc("ReadProcessMemory") + procReleaseMutex = modkernel32.NewProc("ReleaseMutex") + procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") ++ procRemoveDllDirectory = modkernel32.NewProc("RemoveDllDirectory") + procResetEvent = modkernel32.NewProc("ResetEvent") + procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") + procResumeThread = modkernel32.NewProc("ResumeThread") +@@ -339,6 +342,7 @@ var ( + procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") + procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") + procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") ++ procSetFileValidData = modkernel32.NewProc("SetFileValidData") + procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") + procSetErrorMode = modkernel32.NewProc("SetErrorMode") + procSetEvent = modkernel32.NewProc("SetEvent") +@@ -1604,6 +1608,15 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { + return + } + ++func AddDllDirectory(path *uint16) (cookie uintptr, err error) { ++ r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) ++ cookie = uintptr(r0) ++ if cookie == 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ + func AssignProcessToJobObject(job Handle, process Handle) (err error) { + r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) + if r1 == 0 { +@@ -2185,6 +2198,14 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, + return + } + ++func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { ++ r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) ++ if r1 == 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ + func GetFileType(filehandle Handle) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + n = uint32(r0) +@@ -2870,6 +2891,14 @@ func RemoveDirectory(path *uint16) (err error) { + return + } + ++func RemoveDllDirectory(cookie uintptr) (err error) { ++ r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) ++ if r1 == 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ + func ResetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { +@@ -2960,6 +2989,14 @@ func SetEndOfFile(handle Handle) (err error) { + return + } + ++func SetFileValidData(handle Handle, validDataLength int64) (err error) { ++ r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) ++ if r1 == 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ + func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + if r1 == 0 { +diff --git a/vendor/golang.org/x/term/term_unix.go b/vendor/golang.org/x/term/term_unix.go +index 62c2b3f..1ad0ddf 100644 +--- a/vendor/golang.org/x/term/term_unix.go ++++ b/vendor/golang.org/x/term/term_unix.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. 
+ + //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + + package term + +diff --git a/vendor/golang.org/x/term/term_unix_bsd.go b/vendor/golang.org/x/term/term_unix_bsd.go +index 853b3d6..9dbf546 100644 +--- a/vendor/golang.org/x/term/term_unix_bsd.go ++++ b/vendor/golang.org/x/term/term_unix_bsd.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build darwin || dragonfly || freebsd || netbsd || openbsd +-// +build darwin dragonfly freebsd netbsd openbsd + + package term + +diff --git a/vendor/golang.org/x/term/term_unix_other.go b/vendor/golang.org/x/term/term_unix_other.go +index 1e8955c..1b36de7 100644 +--- a/vendor/golang.org/x/term/term_unix_other.go ++++ b/vendor/golang.org/x/term/term_unix_other.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build aix || linux || solaris || zos +-// +build aix linux solaris zos + + package term + +diff --git a/vendor/golang.org/x/term/term_unsupported.go b/vendor/golang.org/x/term/term_unsupported.go +index f1df850..3c409e5 100644 +--- a/vendor/golang.org/x/term/term_unsupported.go ++++ b/vendor/golang.org/x/term/term_unsupported.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !zos && !windows && !solaris && !plan9 +-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!zos,!windows,!solaris,!plan9 + + package term + +diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go +index 8a7392c..784bb88 100644 +--- a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go ++++ b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build go1.10 +-// +build go1.10 + + package bidirule + +diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go +index bb0a920..8e1e943 100644 +--- a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go ++++ b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build !go1.10 +-// +build !go1.10 + + package bidirule + +diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +index 42fa8d7..d2bd711 100644 +--- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go ++++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build go1.10 && !go1.13 +-// +build go1.10,!go1.13 + + package bidi + +diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +index 56a0e1e..f76bdca 100644 +--- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go ++++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+ + //go:build go1.13 && !go1.14 +-// +build go1.13,!go1.14 + + package bidi + +diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +index baacf32..3aa2c3b 100644 +--- a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go ++++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build go1.14 && !go1.16 +-// +build go1.14,!go1.16 + + package bidi + +diff --git a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go +index ffadb7b..a713757 100644 +--- a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go ++++ b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build go1.16 && !go1.21 +-// +build go1.16,!go1.21 + + package bidi + +diff --git a/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go +index 92cce58..f15746f 100644 +--- a/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go ++++ b/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build go1.21 +-// +build go1.21 + + package bidi + +diff --git a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go +index f517fdb..c164d37 100644 +--- a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go ++++ b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build !go1.10 +-// +build !go1.10 + + package bidi + +diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +index f5a0788..1af161c 100644 +--- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go ++++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build go1.10 && !go1.13 +-// +build go1.10,!go1.13 + + package norm + +diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +index cb7239c..eb73ecc 100644 +--- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go ++++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build go1.13 && !go1.14 +-// +build go1.13,!go1.14 + + package norm + +diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +index 11b2733..276cb8d 100644 +--- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go ++++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+ + //go:build go1.14 && !go1.16 +-// +build go1.14,!go1.16 + + package norm + +diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +index f65785e..0cceffd 100644 +--- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go ++++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build go1.16 && !go1.21 +-// +build go1.16,!go1.21 + + package norm + +diff --git a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go +index e1858b8..b0819e4 100644 +--- a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go ++++ b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build go1.21 +-// +build go1.21 + + package norm + +diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +index 0175eae..bf65457 100644 +--- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go ++++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +@@ -1,7 +1,6 @@ + // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + + //go:build !go1.10 +-// +build !go1.10 + + package norm + +diff --git a/vendor/modules.txt b/vendor/modules.txt +index 7ef0ab8..c959a8e 100644 +--- a/vendor/modules.txt ++++ b/vendor/modules.txt +@@ -150,8 +150,8 @@ github.com/vishvananda/netlink/nl + # github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f + ## explicit; go 1.12 + github.com/vishvananda/netns +-# golang.org/x/net v0.17.0 +-## explicit; go 1.17 ++# golang.org/x/net v0.23.0 ++## explicit; go 1.18 + golang.org/x/net/context + golang.org/x/net/html + golang.org/x/net/html/atom +@@ -166,16 +166,16 @@ golang.org/x/net/trace + ## explicit; go 1.17 + golang.org/x/oauth2 + golang.org/x/oauth2/internal +-# golang.org/x/sys v0.13.0 +-## explicit; go 1.17 ++# golang.org/x/sys v0.18.0 ++## explicit; go 1.18 + golang.org/x/sys/plan9 + golang.org/x/sys/unix + golang.org/x/sys/windows +-# golang.org/x/term v0.13.0 +-## explicit; go 1.17 ++# golang.org/x/term v0.18.0 ++## explicit; go 1.18 + golang.org/x/term +-# golang.org/x/text v0.13.0 +-## explicit; go 1.17 ++# golang.org/x/text v0.14.0 ++## explicit; go 1.18 + golang.org/x/text/encoding + golang.org/x/text/encoding/charmap + golang.org/x/text/encoding/htmlindex +-- +2.34.1 + diff --git a/SPECS/multus/multus.spec b/SPECS/multus/multus.spec index 0c2b495d1c5..ef01cbc9ef7 100644 --- a/SPECS/multus/multus.spec +++ b/SPECS/multus/multus.spec @@ -19,7 +19,7 @@ Summary: CNI plugin providing multiple interfaces in containers Name: multus Version: 4.0.2 -Release: 2%{?dist} +Release: 3%{?dist} License: ASL 2.0 Vendor: Microsoft Corporation Distribution: Azure Linux @@ -28,6 +28,8 @@ URL: https://github.com/intel/multus-cni Source0: https://github.com/k8snetworkplumbingwg/multus-cni/archive/refs/tags/v%{version}.tar.gz#/%{name}-%{version}.tar.gz %define commit efdc0a5c7d1ea4bb236d638403420448b48782b3 Patch0: CVE-2023-3978.patch +Patch1: CVE-2023-44487.patch +Patch2: CVE-2023-45288.patch BuildRequires: golang BuildRequires: golang-packaging @@ -70,6 +72,10 @@ install -D -m0644 deployments/multus-daemonset-crio.yml %{buildroot}%{_datadir}/ %{_datarootdir}/k8s-yaml/multus/multus.yaml %changelog +* Fri Nov 22 2024 Xiaohong Deng - 4.0.2-3 +- Add patches to resolve 
CVE-2023-39325, CVE-2023-44487 and CVE-2023-45288. +- CVE-2023-39325 is a subset of CVE-2023-44487 and the patches are combined. + * Wed Aug 21 2024 Sumedh Sharma - 4.0.2-2 - Add patch to resolve CVE-2023-3978