diff --git a/cmd/collector/otelcollector.go b/cmd/collector/otelcollector.go
index 1a7eaf597..629102c5c 100644
--- a/cmd/collector/otelcollector.go
+++ b/cmd/collector/otelcollector.go
@@ -46,7 +46,6 @@ func makeMapProvidersMap(providers ...confmap.Provider) map[string]confmap.Provi
 }
 
 func GenerateCollectorSettings(URIs []string) *collector.CollectorSettings {
-	providerSet := confmap.ProviderSettings{}
 	buildInfo := component.BuildInfo{
 		Command:     "observe-agent",
 		Description: "Observe Distribution of Opentelemetry Collector",
@@ -58,13 +57,13 @@ func GenerateCollectorSettings(URIs []string) *collector.CollectorSettings {
 		ConfigProviderSettings: collector.ConfigProviderSettings{
 			ResolverSettings: confmap.ResolverSettings{
 				URIs: URIs,
-				Providers: makeMapProvidersMap(
-					fileprovider.NewWithSettings(providerSet),
-					envprovider.NewWithSettings(providerSet),
-					yamlprovider.NewWithSettings(providerSet),
-					httpprovider.NewWithSettings(providerSet),
-					httpsprovider.NewWithSettings(providerSet),
-				),
+				ProviderFactories: []confmap.ProviderFactory{
+					fileprovider.NewFactory(),
+					envprovider.NewFactory(),
+					yamlprovider.NewFactory(),
+					httpprovider.NewFactory(),
+					httpsprovider.NewFactory(),
+				},
 			},
 		},
 	}
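For context on the API change above, here is a minimal, hypothetical sketch (not part of the patch) of the v0.101.0 confmap wiring this change targets: the resolver now receives provider factories and constructs each provider itself, replacing the pre-built provider map made via NewWithSettings. The "file:config.yaml" URI and the printed summary are illustrative placeholders only.

// sketch.go — illustrative only; assumes the collector v0.101.0 modules
// pinned in the go.mod below.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/confmap"
	"go.opentelemetry.io/collector/confmap/provider/envprovider"
	"go.opentelemetry.io/collector/confmap/provider/fileprovider"
	"go.opentelemetry.io/collector/confmap/provider/yamlprovider"
)

func main() {
	// "file:config.yaml" is a placeholder URI; the agent passes its own URIs.
	settings := confmap.ResolverSettings{
		URIs: []string{"file:config.yaml"},
		// The resolver instantiates each provider from its factory, so the
		// caller no longer constructs providers (or ProviderSettings) up front.
		ProviderFactories: []confmap.ProviderFactory{
			fileprovider.NewFactory(),
			envprovider.NewFactory(),
			yamlprovider.NewFactory(),
		},
	}
	fmt.Printf("resolver configured with %d provider factories\n", len(settings.ProviderFactories))
}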
diff --git a/go.mod b/go.mod
index 01af40a56..436cdd3e4 100644
--- a/go.mod
+++ b/go.mod
@@ -1,95 +1,99 @@
 module observe/agent
 
-go 1.21.7
+go 1.22.3
 
 require (
-	github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector v0.97.0
-	github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.97.0
-	github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.97.0
-	github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.97.0
-	github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.97.0
-	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.97.0
-	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filestatsreceiver v0.97.0
-	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.97.0
-	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/journaldreceiver v0.97.0
-	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.97.0
-	github.com/prometheus/client_model v0.6.0
-	github.com/prometheus/common v0.51.1
-	github.com/shirou/gopsutil/v3 v3.24.3
+	github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector v0.101.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.101.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.101.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.101.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.101.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.101.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filestatsreceiver v0.101.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.101.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/journaldreceiver v0.101.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.101.0
+	github.com/prometheus/client_model v0.6.1
+	github.com/prometheus/common v0.53.0
+	github.com/shirou/gopsutil/v3 v3.24.4
 	github.com/spf13/cobra v1.8.0
 	github.com/spf13/viper v1.18.2
-	go.opentelemetry.io/collector/component v0.97.0
-	go.opentelemetry.io/collector/confmap v0.97.0
-	go.opentelemetry.io/collector/confmap/provider/envprovider v0.97.0
-	go.opentelemetry.io/collector/confmap/provider/fileprovider v0.97.0
-	go.opentelemetry.io/collector/confmap/provider/httpprovider v0.97.0
-	go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.97.0
-	go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.97.0
-	go.opentelemetry.io/collector/connector v0.97.0
-	go.opentelemetry.io/collector/exporter v0.97.0
-	go.opentelemetry.io/collector/exporter/debugexporter v0.97.0
-	go.opentelemetry.io/collector/exporter/loggingexporter v0.97.0
-	go.opentelemetry.io/collector/exporter/otlphttpexporter v0.97.0
-	go.opentelemetry.io/collector/extension v0.97.0
-	go.opentelemetry.io/collector/otelcol v0.97.0
-	go.opentelemetry.io/collector/processor v0.97.0
-	go.opentelemetry.io/collector/processor/batchprocessor v0.97.0
-	go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.97.0
-	go.opentelemetry.io/collector/receiver v0.97.0
-	go.opentelemetry.io/collector/receiver/otlpreceiver v0.97.0
-	golang.org/x/sys v0.18.0
+	go.opentelemetry.io/collector/component v0.101.0
+	go.opentelemetry.io/collector/confmap v0.101.0
+	go.opentelemetry.io/collector/confmap/provider/envprovider v0.101.0
+	go.opentelemetry.io/collector/confmap/provider/fileprovider v0.101.0
+	go.opentelemetry.io/collector/confmap/provider/httpprovider v0.101.0
+	go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.101.0
+	go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.101.0
+	go.opentelemetry.io/collector/connector v0.101.0
+	go.opentelemetry.io/collector/exporter v0.101.0
+	go.opentelemetry.io/collector/exporter/debugexporter v0.101.0
+	go.opentelemetry.io/collector/exporter/loggingexporter v0.101.0
+	go.opentelemetry.io/collector/exporter/otlphttpexporter v0.101.0
+	go.opentelemetry.io/collector/extension v0.101.0
+	go.opentelemetry.io/collector/otelcol v0.101.0
+	go.opentelemetry.io/collector/processor v0.101.0
+	go.opentelemetry.io/collector/processor/batchprocessor v0.101.0
+	go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.101.0
+	go.opentelemetry.io/collector/receiver v0.101.0
+	go.opentelemetry.io/collector/receiver/otlpreceiver v0.101.0
+	golang.org/x/sys v0.20.0
 )
 
 require (
-	cloud.google.com/go/compute v1.23.3 // indirect
-	cloud.google.com/go/compute/metadata v0.2.3 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0 // indirect
+	cloud.google.com/go/auth v0.4.2 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
+	cloud.google.com/go/compute/metadata v0.3.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
-	github.com/Code-Hex/go-generics-cache v1.3.1 // indirect
-	github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.21.0 // indirect
-	github.com/Microsoft/go-winio v0.6.1 // indirect
+	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
+	github.com/Code-Hex/go-generics-cache v1.5.1 // indirect
+	github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.23.0 // indirect
+	github.com/Microsoft/go-winio v0.6.2 // indirect
+	github.com/Microsoft/hcsshim v0.12.3 // indirect
 	github.com/Showmax/go-fqdn v1.0.0 // indirect
 	github.com/alecthomas/participle/v2 v2.1.1 // indirect
 	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
-	github.com/aws/aws-sdk-go v1.51.7 // indirect
+	github.com/aws/aws-sdk-go v1.53.9 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect
+	github.com/containerd/containerd v1.7.17 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/dennwc/varint v1.0.0 // indirect
-	github.com/digitalocean/godo v1.108.0 // indirect
-	github.com/distribution/reference v0.5.0 // indirect
-	github.com/docker/docker v25.0.5+incompatible // indirect
+	github.com/digitalocean/godo v1.116.0 // indirect
+	github.com/distribution/reference v0.6.0 // indirect
+	github.com/docker/docker v26.1.3+incompatible // indirect
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
-	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.12.0 // indirect
 	github.com/envoyproxy/go-control-plane v0.12.0 // indirect
 	github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect
-	github.com/expr-lang/expr v1.16.2 // indirect
-	github.com/fatih/color v1.15.0 // indirect
+	github.com/expr-lang/expr v1.16.7 // indirect
+	github.com/fatih/color v1.17.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/go-kit/log v0.2.1 // indirect
 	github.com/go-logfmt/logfmt v0.6.0 // indirect
-	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/go-ole/go-ole v1.2.6 // indirect
-	github.com/go-openapi/jsonpointer v0.20.0 // indirect
-	github.com/go-openapi/jsonreference v0.20.2 // indirect
-	github.com/go-openapi/swag v0.22.4 // indirect
-	github.com/go-resty/resty/v2 v2.11.0 // indirect
+	github.com/go-ole/go-ole v1.3.0 // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonreference v0.21.0 // indirect
+	github.com/go-openapi/swag v0.23.0 // indirect
+	github.com/go-resty/resty/v2 v2.13.1 // indirect
 	github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect
 	github.com/go-zookeeper/zk v1.0.3 // indirect
 	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
@@ -100,27 +104,28 @@ require (
 	github.com/google/s2a-go v0.1.7 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
-	github.com/googleapis/gax-go/v2 v2.12.0 // indirect
-	github.com/gophercloud/gophercloud v1.8.0 // indirect
-	github.com/gorilla/websocket v1.5.0 // indirect
-	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
-	github.com/hashicorp/consul/api v1.28.2 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.4 // indirect
+	github.com/gophercloud/gophercloud v1.11.0 // indirect
+	github.com/gorilla/websocket v1.5.1 // indirect
+	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/hashicorp/consul/api v1.28.3 // indirect
+	github.com/hashicorp/consul/proto-public v0.6.1 // indirect
 	github.com/hashicorp/cronexpr v1.1.2 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
-	github.com/hashicorp/go-hclog v1.5.0 // indirect
+	github.com/hashicorp/go-hclog v1.6.3 // indirect
 	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
-	github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
+	github.com/hashicorp/go-retryablehttp v0.7.6 // indirect
 	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
 	github.com/hashicorp/go-version v1.6.0 // indirect
 	github.com/hashicorp/golang-lru v1.0.2 // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
-	github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c // indirect
+	github.com/hashicorp/nomad/api v0.0.0-20240523172747-f0851bc989bd // indirect
 	github.com/hashicorp/serf v0.10.1 // indirect
-	github.com/hetznercloud/hcloud-go/v2 v2.6.0 // indirect
+	github.com/hetznercloud/hcloud-go/v2 v2.8.0 // indirect
 	github.com/iancoleman/strcase v0.3.0 // indirect
 	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -130,59 +135,60 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.7 // indirect
+	github.com/klauspost/compress v1.17.8 // indirect
 	github.com/knadh/koanf/maps v0.1.1 // indirect
 	github.com/knadh/koanf/providers/confmap v0.1.0 // indirect
-	github.com/knadh/koanf/v2 v2.1.0 // indirect
+	github.com/knadh/koanf/v2 v2.1.1 // indirect
 	github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
-	github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect
+	github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b // indirect
 	github.com/leoluk/perflib_exporter v0.2.1 // indirect
-	github.com/linode/linodego v1.27.1 // indirect
-	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+	github.com/linode/linodego v1.34.0 // indirect
+	github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.19 // indirect
-	github.com/miekg/dns v1.1.58 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/miekg/dns v1.1.59 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
 	github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
+	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/mostynb/go-grpc-compression v1.2.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
-	github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.97.0 // indirect
-	github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.97.0 // indirect
-	github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.97.0 // indirect
-	github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0 // indirect
-	github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.97.0 // indirect
-	github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.97.0 // indirect
-	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0 // indirect
-	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.97.0 // indirect
-	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.97.0 // indirect
-	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.97.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.101.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.101.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.101.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.101.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.101.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.101.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.101.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.101.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.101.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.101.0 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.1.0 // indirect
 	github.com/openshift/api v3.9.0+incompatible // indirect
-	github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect
-	github.com/ovh/go-ovh v1.4.3 // indirect
+	github.com/openshift/client-go v0.0.0-20240523113335-452272e0496d // indirect
+	github.com/ovh/go-ovh v1.5.1 // indirect
 	github.com/pelletier/go-toml/v2 v2.1.0 // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
-	github.com/prometheus/client_golang v1.19.0 // indirect
+	github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
+	github.com/prometheus/client_golang v1.19.1 // indirect
 	github.com/prometheus/common/sigv4 v0.1.0 // indirect
-	github.com/prometheus/procfs v0.13.0 // indirect
-	github.com/prometheus/prometheus v0.50.1 // indirect
-	github.com/rs/cors v1.10.1 // indirect
+	github.com/prometheus/procfs v0.15.0 // indirect
+	github.com/prometheus/prometheus v0.52.0 // indirect
+	github.com/rs/cors v1.11.0 // indirect
 	github.com/sagikazarmark/locafero v0.4.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
-	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.22 // indirect
+	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26 // indirect
 	github.com/shoenig/go-m1cpu v0.1.6 // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
 	github.com/spf13/afero v1.11.0 // indirect
@@ -191,80 +197,86 @@ require (
 	github.com/stretchr/objx v0.5.2 // indirect
 	github.com/stretchr/testify v1.9.0 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
-	github.com/tklauser/go-sysconf v0.3.12 // indirect
-	github.com/tklauser/numcpus v0.6.1 // indirect
+	github.com/tidwall/gjson v1.17.1 // indirect
+	github.com/tidwall/pretty v1.2.1 // indirect
+	github.com/tidwall/tinylru v1.2.1 // indirect
+	github.com/tklauser/go-sysconf v0.3.14 // indirect
+	github.com/tklauser/numcpus v0.8.0 // indirect
 	github.com/valyala/fastjson v1.6.4 // indirect
 	github.com/vultr/govultr/v2 v2.17.2 // indirect
 	github.com/yusufpapurcu/wmi v1.2.4 // indirect
-	go.etcd.io/bbolt v1.3.9 // indirect
+	go.etcd.io/bbolt v1.3.10 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/collector v0.97.0 // indirect
-	go.opentelemetry.io/collector/config/configauth v0.97.0 // indirect
-	go.opentelemetry.io/collector/config/configcompression v1.4.0 // indirect
-	go.opentelemetry.io/collector/config/configgrpc v0.97.0 // indirect
-	go.opentelemetry.io/collector/config/confighttp v0.97.0 // indirect
-	go.opentelemetry.io/collector/config/confignet v0.97.0 // indirect
-	go.opentelemetry.io/collector/config/configopaque v1.4.0 // indirect
-	go.opentelemetry.io/collector/config/configretry v0.97.0 // indirect
-	go.opentelemetry.io/collector/config/configtelemetry v0.97.0 // indirect
-	go.opentelemetry.io/collector/config/configtls v0.97.0 // indirect
-	go.opentelemetry.io/collector/config/internal v0.97.0 // indirect
-	go.opentelemetry.io/collector/confmap/converter/expandconverter v0.97.0 // indirect
-	go.opentelemetry.io/collector/consumer v0.97.0 // indirect
-	go.opentelemetry.io/collector/extension/auth v0.97.0 // indirect
-	go.opentelemetry.io/collector/featuregate v1.4.0 // indirect
-	go.opentelemetry.io/collector/pdata v1.4.0 // indirect
-	go.opentelemetry.io/collector/semconv v0.97.0 // indirect
-	go.opentelemetry.io/collector/service v0.97.0 // indirect
-	go.opentelemetry.io/contrib/config v0.4.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
-	go.opentelemetry.io/contrib/propagators/b3 v1.24.0 // indirect
-	go.opentelemetry.io/otel v1.24.0 // indirect
-	go.opentelemetry.io/otel/bridge/opencensus v1.24.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 // indirect
-	go.opentelemetry.io/otel/exporters/prometheus v0.46.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.24.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0 // indirect
-	go.opentelemetry.io/otel/metric v1.24.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.24.0 // indirect
-	go.opentelemetry.io/otel/sdk/metric v1.24.0 // indirect
-	go.opentelemetry.io/otel/trace v1.24.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.1.0 // indirect
+	go.opentelemetry.io/collector v0.101.0 // indirect
+	go.opentelemetry.io/collector/config/configauth v0.101.0 // indirect
+	go.opentelemetry.io/collector/config/configcompression v1.8.0 // indirect
+	go.opentelemetry.io/collector/config/configgrpc v0.101.0 // indirect
+	go.opentelemetry.io/collector/config/confighttp v0.101.0 // indirect
+	go.opentelemetry.io/collector/config/confignet v0.101.0 // indirect
+	go.opentelemetry.io/collector/config/configopaque v1.8.0 // indirect
+	go.opentelemetry.io/collector/config/configretry v0.101.0 // indirect
+	go.opentelemetry.io/collector/config/configtelemetry v0.101.0 // indirect
+	go.opentelemetry.io/collector/config/configtls v0.101.0 // indirect
+	go.opentelemetry.io/collector/config/internal v0.101.0 // indirect
+	go.opentelemetry.io/collector/confmap/converter/expandconverter v0.101.0 // indirect
+	go.opentelemetry.io/collector/consumer v0.101.0 // indirect
+	go.opentelemetry.io/collector/extension/auth v0.101.0 // indirect
+	go.opentelemetry.io/collector/featuregate v1.8.0 // indirect
+	go.opentelemetry.io/collector/filter v0.101.0 // indirect
+	go.opentelemetry.io/collector/pdata v1.8.0 // indirect
+	go.opentelemetry.io/collector/semconv v0.101.0 // indirect
+	go.opentelemetry.io/collector/service v0.101.0 // indirect
+	go.opentelemetry.io/contrib/config v0.7.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
+	go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect
+	go.opentelemetry.io/contrib/zpages v0.52.0 // indirect
+	go.opentelemetry.io/otel v1.27.0 // indirect
+	go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect
+	go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect
+	go.opentelemetry.io/otel/metric v1.27.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.27.0 // indirect
+	go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect
+	go.opentelemetry.io/otel/trace v1.27.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.2.0 // indirect
 	go.uber.org/atomic v1.11.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
-	golang.org/x/crypto v0.21.0 // indirect
-	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
-	golang.org/x/mod v0.16.0 // indirect
-	golang.org/x/net v0.22.0 // indirect
-	golang.org/x/oauth2 v0.18.0 // indirect
-	golang.org/x/term v0.18.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/crypto v0.23.0 // indirect
+	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
+	golang.org/x/mod v0.17.0 // indirect
+	golang.org/x/net v0.25.0 // indirect
+	golang.org/x/oauth2 v0.20.0 // indirect
+	golang.org/x/sync v0.7.0 // indirect
+	golang.org/x/term v0.20.0 // indirect
+	golang.org/x/text v0.15.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	golang.org/x/tools v0.17.0 // indirect
+	golang.org/x/tools v0.21.0 // indirect
 	gonum.org/v1/gonum v0.15.0 // indirect
-	google.golang.org/api v0.157.0 // indirect
-	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
-	google.golang.org/grpc v1.62.1 // indirect
-	google.golang.org/protobuf v1.33.0 // indirect
+	google.golang.org/api v0.181.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect
+	google.golang.org/grpc v1.64.0 // indirect
+	google.golang.org/protobuf v1.34.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/api v0.29.3 // indirect
-	k8s.io/apimachinery v0.29.3 // indirect
-	k8s.io/client-go v0.29.3 // indirect
+	gotest.tools/v3 v3.5.0 // indirect
+	k8s.io/api v0.30.1 // indirect
+	k8s.io/apimachinery v0.30.1 // indirect
+	k8s.io/client-go v0.30.1 // indirect
 	k8s.io/klog/v2 v2.120.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
-	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
+	k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a // indirect
+	k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
-	sigs.k8s.io/yaml v1.3.0 // indirect
+	sigs.k8s.io/yaml v1.4.0 // indirect
 )
diff --git a/go.sum b/go.sum
index 230246a4f..9cc2befdc 100644
--- a/go.sum
+++ b/go.sum
@@ -13,16 +13,18 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
 cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
 cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg=
+cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc=
+cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
+cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
-cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -37,45 +39,35 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
 dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
 dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0 h1:QfV5XZt6iNa2aWMAt96CZEbfJ7kgG/qYIpq465Shr5E=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
-github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g=
-github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
+github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
+github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.21.0 h1:aNyyrkRcLMWFum5qgYbXl6Ut+MMOmfH/kLjZJ5YJP/I=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.21.0/go.mod h1:BEOBnuYVyPt9wxVRQqqpKUK9FXVcL2+LOjZ8apLa9ao=
-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
-github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8=
-github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.23.0 h1:yRhWveg9NbJcJYoJL4FoSauT2dxnt4N9MIAJ7tvU/mQ=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.23.0/go.mod h1:p2puVVSKjQ84Qb1gzw2XHLs34WQyHTYFZLaVxypAFYs=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/Microsoft/hcsshim v0.12.3 h1:LS9NXqXhMoqNCplK1ApmVSfB4UnVLRDWRapB6EIlxE0=
+github.com/Microsoft/hcsshim v0.12.3/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ=
 github.com/Showmax/go-fqdn v1.0.0 h1:0rG5IbmVliNT5O19Mfuvna9LL7zlHyRfsSvBPZmF9tM=
 github.com/Showmax/go-fqdn v1.0.0/go.mod h1:SfrFBzmDCtCGrnHhoDjuvFnKsWjEQX/Q9ARZvOrJAko=
 github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0=
@@ -97,10 +89,9 @@ github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJ
 github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.51.7 h1:RRjxHhx9RCjw5AhgpmmShq3F4JDlleSkyhYMQ2xUAe8=
-github.com/aws/aws-sdk-go v1.51.7/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.53.9 h1:6oipls9+L+l2Me5rklqlX3xGWNWGcMinY3F69q9Q+Cg=
+github.com/aws/aws-sdk-go v1.53.9/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -114,8 +105,8 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3
 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -123,40 +114,35 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
-github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0=
-github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/containerd/containerd v1.7.17 h1:KjNnn0+tAVQHAoaWRjmdak9WlvnFR/8rU1CHHy8Rm2A=
+github.com/containerd/containerd v1.7.17/go.mod h1:vK+hhT4TIv2uejlcDlbVIc8+h/BqtKLIyNrtCZol8lI=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
 github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E=
 github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/digitalocean/godo v1.108.0 h1:fWyMENvtxpCpva1UbKzOFnyAS04N1FNuBWWfPeTGquQ=
-github.com/digitalocean/godo v1.108.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs=
-github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
-github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/digitalocean/godo v1.116.0 h1:SuF/Imd1/dE/nYrUFVkJ2itesQNnJQE1a/vmtHknxeE=
+github.com/digitalocean/godo v1.116.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE=
-github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
+github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
 github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
-github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk=
+github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -165,22 +151,19 @@ github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/Ir
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
 github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
 github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/expr-lang/expr v1.16.2 h1:JvMnzUs3LeVHBvGFcXYmXo+Q6DPDmzrlcSBO6Wy3w4s=
-github.com/expr-lang/expr v1.16.2/go.mod h1:uCkhfG+x7fcZ5A5sXHKuQ07jGZRl6J0FCAaf2k4PtVQ=
+github.com/expr-lang/expr v1.16.7 h1:gCIiHt5ODA0xIaDbD0DPKyZpM9Drph3b3lolYAYq2Kw=
+github.com/expr-lang/expr v1.16.7/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
-github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
+github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
+github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -196,37 +179,26 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
 github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
 github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
-github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
-github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8=
-github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A=
+github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
+github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
+github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= @@ -236,8 +208,8 @@ github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJA github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= -github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -265,8 +237,6 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= @@ -295,7 +265,6 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -307,12 +276,11 @@ 
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20240117000934-35fc243c5815 h1:WzfWbQz/Ze8v6l++GGbGNFZnUShVpP/0xffCPLL+ax8= -github.com/google/pprof v0.0.0-20240117000934-35fc243c5815/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -320,23 +288,22 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk= -github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= -github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= 
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= -github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8= -github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE= -github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8= -github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/gophercloud/gophercloud v1.11.0 h1:ls0O747DIq1D8SUHc7r2vI8BFbMLeLFuENaAIfEx7OM= +github.com/gophercloud/gophercloud v1.11.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/consul/api v1.28.3 h1:IE06LST/knnCQ+cxcvzyXRF/DetkgGhJoaOFd4l9xkk= +github.com/hashicorp/consul/api v1.28.3/go.mod h1:7AGcUFu28HkgOKD/GmsIGIFzRTmN0L02AE9Thsr2OhU= +github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg= +github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= +github.com/hashicorp/consul/sdk v0.15.0 h1:2qK9nDrr4tiJKRoxPGhm6B7xJjLVIQqkjiab2M4aKjU= +github.com/hashicorp/consul/sdk v0.15.0/go.mod h1:r/OmRRPbHOe0yxNahLw7G9x5WG17E1BIECMtCjcPSNo= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -345,9 +312,8 @@ github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v1.5.0 
h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -359,8 +325,8 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= -github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.6 h1:TwRYfx2z2C4cLbXmT8I5PgP/xmuqASDyiVuGYfs9GZM= +github.com/hashicorp/go-retryablehttp v0.7.6/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -385,19 +351,17 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c h1:Nc3Mt2BAnq0/VoLEntF/nipX+K1S7pG+RgwiitSv6v0= -github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c/go.mod h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE= +github.com/hashicorp/nomad/api v0.0.0-20240523172747-f0851bc989bd h1:+5QaGXCHCpDxSUZTq1V4akYei0doZGG0vEO4vZWzG1M= +github.com/hashicorp/nomad/api v0.0.0-20240523172747-f0851bc989bd/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928= -github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA= +github.com/hetznercloud/hcloud-go/v2 v2.8.0 
h1:vfbfL/JfV8dIZUX7ANHWEbKNqgFWsETqvt/EctvoFJ0= +github.com/hetznercloud/hcloud-go/v2 v2.8.0/go.mod h1:jvpP3qAWMIZ3WQwQLYa97ia6t98iPCgsJNwRts+Jnrk= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -406,8 +370,8 @@ github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4 h1:2r2Wi github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4/go.mod h1:1yEQhaLb/cETXCqQmdh7lDjupNAReO7c83AHyK2dJ48= github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= -github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= -github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= +github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= +github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -428,44 +392,40 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= -github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= 
github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= -github.com/knadh/koanf/v2 v2.1.0 h1:eh4QmHHBuU8BybfIJ8mB8K8gsGCD/AUQTdwGq/GzId8= -github.com/knadh/koanf/v2 v2.1.0/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= +github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= +github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b h1:11UHH39z1RhZ5dc4y4r/4koJo6IYFgTRMe/LlwRTEw0= +github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= github.com/leoluk/perflib_exporter v0.2.1 h1:/3/ut1k/jFt5p4ypjLZKDHDqlXAK6ERZPVWtwdI389I= github.com/leoluk/perflib_exporter v0.2.1/go.mod h1:MinSWm88jguXFFrGsP56PtleUb4Qtm4tNRH/wXNXRTI= -github.com/linode/linodego v1.27.1 h1:KoQm5g2fppw8qIClJqUEL0yKH0+f+7te3Mewagb5QKE= 
-github.com/linode/linodego v1.27.1/go.mod h1:5oAsx+uinHtVo6U77nXXXtox7MWzUW6aEkTOKXxA9uo= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/linode/linodego v1.34.0 h1:tBCwZzJTNh6Sr5xImkq/KQ/1rvUbH3aXGve5VuHEspQ= +github.com/linode/linodego v1.34.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI= +github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -481,15 +441,15 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= 
+github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -500,14 +460,14 @@ github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= @@ -525,96 +485,84 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mostynb/go-grpc-compression v1.2.2 h1:XaDbnRvt2+1vgr0b/l0qh4mJAfIxE0bKXtz2Znl3GGI= github.com/mostynb/go-grpc-compression v1.2.2/go.mod h1:GOCr2KBxXcblCuczg3YdLQlcin1/NfyDA348ckuCH6w= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f 
h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector v0.97.0 h1:UhZYjLMy4NvdBeQHmW2YGKKHEPMrukHT4uP/7Nwq8+g= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector v0.97.0/go.mod h1:xNjcyNZx2MnaCbxsdQWJc2ivkUGcAb7IF4N9wJJ1KNw= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.97.0 h1:/qa5x2Ap0hhwZ9aUrg9JdBgbT+jRgMs7gCwxqIVGwdQ= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.97.0/go.mod h1:ORwXW1waz6JkmhK1ayiY6NIWgJuipZqmxv+GTnj3eEI= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.97.0 h1:scoJpU7Skut3Fam8Dfu6+A80Te4S6zqHbmderYJ9GhU= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.97.0/go.mod h1:/ZWb14bjsRhtlAwWeCGYgBFbPlzonVq06VYdvdCmlxc= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.97.0 h1:p7ANWW52nAKp9+f7RgGZvvXqU80rZu6yXVdSvcrgta0= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.97.0/go.mod h1:eLeuqRgW59hKeKdSdD2mNOs64yBKuxyp6udwgToUxjE= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.97.0 h1:d3+p+jvp+7+N0pEG5dFy5Px6llEdfaGhZNRHvT6FNpQ= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.97.0/go.mod h1:ZsCJoB4eTUyfECMLs4nvJidelnYt0dWbPFQxzOtcTUA= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.97.0 
h1:x8iUxQv7lNqYLu38miE3YcdK8+a4g3frMJTsMGQkK1c= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.97.0/go.mod h1:xlJ4VRZZKASyfqUeFO4rNtZuwnHlwJGi+JF6muigJNE= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.97.0 h1:jPhm8t6pJcxgng2DaEhPZ3sY44KBoM0W32j6aI+hMgU= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.97.0/go.mod h1:PAJCWmb9gEZh1OhazzHrvUGQeu31Rzqm8OJQVfxlLwU= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.97.0 h1:vX/BkTfd7/cvydXJ7FmUy5iSviQeNGAgTCoXcLu7/Ww= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.97.0/go.mod h1:yrlbvRlLeny1kFmj4Ac9BSqv/pOr2h7sOIvDE6OMCKk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0 h1:f3HVDcjUVUbOpKWiebD9v8+9YdDdNvzPyKh3IVb0ORY= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0/go.mod h1:110wLws4lB2Jpv58rK7YoaMIhIEmLlzw5/viC0XJhbM= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.97.0 h1:NIFnii1Z6SLzzL3hN3wFoi41GMCQS3KHNxE/g3GSAQs= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.97.0/go.mod h1:SBG/HCN7CFTEdHyswA5Pyq+p66yX3/PWE1PXJiffUKQ= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.97.0 h1:O9mJRU+2kHO1CJWBoXzenlifjOdVE9sWiHcPrYP4npk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.97.0/go.mod h1:DiygZ9kaN/PPjlEKR+Qo5eN1BYi1L+kul630euxG1mw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.97.0 h1:N5b1f66i7KKaUWcDR6gvl5ZucLns2sMidbfXvfQZFTw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.97.0/go.mod h1:sShrcWdTiwWfuZ8oQhulxYk4eF+qB2vrAxg/MAd690c= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0 h1:bVeo7BahYY4rWdaEuzJX2Tn20MbvYcEHXbFjV2IwnPQ= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0/go.mod h1:lj29zRdEZdvbQvZ6g7Pb+tSPw4dQE/jahe39fRgO+08= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.97.0 h1:zVXzbHR2aXRuypZXve4+AxcEYeeYeqzl76WtkZ/pGpw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.97.0/go.mod h1:K6kz22H/i+t2/BGyCsaaNxFKjoXr1VJPhJV4GtjnJK0= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.97.0 h1:MWki5BBYideAF82i3ladRxmed3nkkGHDUA1YC2p2mz8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.97.0/go.mod h1:Zsl3xSYy9QFqZ5lTPXqPHSuxz03kUaMw75kvvScE+94= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.97.0 h1:82EcF0nmspeVCZmUpwrMuVAXnMYDOF4gai9eK6fHAwk= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.97.0/go.mod h1:7rCj4V2dKJVsaMNBzY81m4fek1AD2yWCFwzrM3PjSNo= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.97.0 h1:j0YEfadJv26PQWhaMynfWcDtSnsAGvBmGW9wwp+q2mU= 
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.97.0/go.mod h1:BleH1EFM1PjhKy9geg/FVIsvc/v7hl+FqZlxAnzUHSk= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.97.0 h1:A+ffQzNIuJ4xVZ6ZNtrT8RlkoXlDGCTQOFj5C8KokcE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.97.0/go.mod h1:fb9Yjx+I1zUG7/mYHl0tXBBFLuIitLDT1HYsjAYAqx4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.97.0 h1:j+98nNl3RHHan+9ptoZHYcj9d51l5jZwpE3AxJR2OMI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.97.0/go.mod h1:CXYG7Toolx/SZQTL9qiW0xnGvYoC7XGGASRTr1sNCmA= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.97.0 h1:nxz/dz7CxjJwK7wAEkFDdrHVzQEHS5Yv1ta1i06c2x4= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.97.0/go.mod h1:67cMXmkvbije8dwLy3EiJPiat4Cf/3Rr4HDiPxYCttM= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.97.0 h1:8UfxoSP0ZKkib+RWZVdCixHEjkz1VxspRAmHqmexO9M= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.97.0/go.mod h1:LNP6YdtDyTLyKgLFmg0qo7U4LgIlsYlRH9kC47j+vT4= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.97.0 h1:LDeauKxJqswkxA2XVWXHAa9zpQuIbz1JpqrWtVscNl0= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.97.0/go.mod h1:kl9u418k+2spS75V0kDglwysLzpDGu50eC9MK4r6Yy0= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filestatsreceiver v0.97.0 h1:1uOOmvOjxpiE2je+1PkvID82pf9x4QQjcCn4eNGPHfI= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filestatsreceiver v0.97.0/go.mod h1:AWzgiEEUh0Md8J24qrErZI2vyVC5pSJxHsgFrk6Ut/M= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.97.0 h1:FXK0ADSKQYuqJpJ13oYCdgg9e+f3V23GlJ6sM8BSwx8= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.97.0/go.mod h1:2YlFa0RAWKShfGB5ME8NDaBTCeF+kYExM9B9LOrvH3U= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/journaldreceiver v0.97.0 h1:0Loi+HlfT0ht6bXKOfd2qaiWt9mAZTBrIELQ6Zfc8pc= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/journaldreceiver v0.97.0/go.mod h1:VLu0LhPhMv1OuXrh9ylt7IYNDS7GEb4zeLWeruOQ5U0= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.97.0 h1:7JuGYrWcdwMy4/gnlXcmCD6C9FovH2J8hpEsXf2JcnY= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.97.0/go.mod h1:S5j7OXl0p/irRvJGUdTzI3S/so96K4JmJexBgbrbwVs= +github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= +github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod 
h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector v0.101.0 h1:NyAhTbZ3wFiZ/CRzWB02ZvyNK3ozhwin6Le5xAweQjs= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector v0.101.0/go.mod h1:iiHph7g38llooFHquXobp119Nq5WThsaeXJDMMW62zA= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.101.0 h1:+wwQn1vl122nREufLWRjZ4Nes/qXyINuo5Mj3KLIFJ8= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.101.0/go.mod h1:KZGnojtoI4/oefkAZNu4ErTrB5PwOrpeD+zVJAYmL0o= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.101.0 h1:wENxI7kq2dTOKJEsVWJ3qChEI45BvR8Ww/Purt0uRX0= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.101.0/go.mod h1:6+7E3+Pdlf8ZolIEju7JsryNSSKFp1SspjC0sgqCMU4= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.101.0 h1:oYMkHmAFSXO1UThrb6fK/1ixGN3dROpcgKsWIz5PtXs= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.101.0/go.mod h1:QDF6x3uIeJRkPGgw4Ss3i4NbxzB/QFCFnXInsXf4C08= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.101.0 h1:NOlyJCQ6RmcydAXO4UhM2vyuWJmrnbEwK8TKpUqnsM8= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.101.0/go.mod h1:RznIPZ6W9sOsS1hMIEKvk763Mnq9DWHMwoYlSkhIHVI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.101.0 h1:Murav41YSXxIcoWLHXO/owB42+1br5OJ+vc0LUIGfzc= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.101.0/go.mod h1:TL8un+dk9fjFSFpyU5b9/3gh/hRvwKZtl3Wn5aU01ks= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.101.0 h1:NjRf0D9Of6WlqEIXKhOIbBs4YudrPAtDn6+Rez1MqzA= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.101.0/go.mod h1:ofU9nc88bSz7EbFY70TBgrcpWFPpeP+vMpVycda+ZrI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.101.0 h1:X+FXRfxLK2mH813tMyZmX93Mt/3l6F8X5aFi7QPBQDI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.101.0/go.mod h1:j/pizzitn+kpiTNTxsgpaGqAW3qh3pRSbSTUsIeQcLE= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.101.0 h1:W3liZnf7jmszDVc7c192+qCxBdTboofZIQsNfXrbJJU= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.101.0/go.mod h1:1Qq2NwBiKbdiyX9FhXr8qOoUe2tft1ukytf5AXgZFOU= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.101.0 h1:EjomfpSCHVy7HRvYwzXI2nKK5LidIvyY3Ah2AIEpwKY= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.101.0/go.mod h1:VS66oUydCMwiWl1BFmLs7iNy4lGsfVYsriXr/d1fpAk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.101.0 h1:0iWWzDsqAa0IGCQeypTFfs4i+Wp3Cy53i5x4HOJLZAs= 
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.101.0/go.mod h1:d+QYxYz/c2RfdzdcDoLyr2O3dj0+4nRr2+uYuWQvRP0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.101.0 h1:Ohhry/Fcxh7/ysAxFhW2IJR/4hWEPaizDNtg02upYLA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.101.0/go.mod h1:H2vPArfULuCAm4Y6GHNxuLrjFGSgO16NJgdGACxBhSM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.101.0 h1:WIQrGeC1vApemZnYBdPFZkpB03HCZ5JFmpIImsi1+pY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.101.0/go.mod h1:M3n3w6i1429HqtxfxoENpQAqTb8lKPwlFiJDhSQ81IY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.101.0 h1:TCQYvGS2MKTotOTQDnHUSd4ljEzXRzHXopdv71giKWU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.101.0/go.mod h1:Nl2d4DSK/IbaWnnBxYyhMNUW6C9sb5/4idVZrSW/5Ps= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.101.0 h1:dVINhi/nne11lG+Xnwuy9t/N4xyaH2Om2EU+5lphCA4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.101.0/go.mod h1:kjyfpKOuBfkx3UsJQsbQ5eTJM3yQWiRYaYxs47PpxvI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.101.0 h1:FZSF4MO8Hu6GO8uyHbiw9cdAbYLVYc35CCBEFHwS3bA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.101.0/go.mod h1:fWQRv/se7kKk/rL2ggK4M23z047FwRc6VMHgA0ATN+A= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.101.0 h1:r7ue2vHBAH5v1AiNsC3TWDSysSdG/nhZ8HFnhOE+dbw= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.101.0/go.mod h1:l+8+GK6bzSjK4bLTfbkU0hj+9y8wbpaDr42tmqOEDr0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.101.0 h1:+xK9ysBZrLhOp9AyQWjwB6nQZk1MB8dC/uBhBoy+nc4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.101.0/go.mod h1:RTndgkM5YuEFqcxIbDFxsREB6+wIjubv8v0MD6z6EzY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.101.0 h1:u9I8EKebbS86sFel1t/gBgPUHNG7yipR+LJrvXs8I6s= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.101.0/go.mod h1:heRSIvv9xZIlqJ/mgeaA+r6cPzPr4y7DRsltBD1TcHU= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.101.0 h1:MYWBarIgi8/XVP3XDsykFVm5ig3LvNGWAsIIX43S6P0= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.101.0/go.mod h1:iPG3smyP8ivSSoy0JRse0rQPPfI4zjHHzAXmRVgQUV8= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.101.0 h1:UkUD1IRlH7dEQee2+m2QbQMLE3kSeS5T9SuDRT0UFms= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.101.0/go.mod h1:nzq6OZw0+u5Prwh11VKXKMjfNKMJ+8sg/JUoPeCKyHQ= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.101.0 h1:nQ57XjuZvoBL9O0/LcIJW/dqKEDQQ35LFdG77BrgFy0= 
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.101.0/go.mod h1:jkKmmLXtaO9NRAFGyHmmytFF5379YuC060cfGeoScbc= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filestatsreceiver v0.101.0 h1:Vqi/YycjanF8PmuS7LeLd71tW46NGAYQs2p9F0glIUI= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filestatsreceiver v0.101.0/go.mod h1:05AvmGjfW3cr5VPhDrxDED0mINTzaBWqQPI+vL/BYfE= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.101.0 h1:GfNsLaielncmxlJxT1PG3oCjFtNCyH+8J/MHPTjvqeE= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.101.0/go.mod h1:PPripeI6YW22fi7P2ZAJ2BOKaBXYwBb1bDiwo2lGmJY= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/journaldreceiver v0.101.0 h1:99dMFyJLyGun2lBKgm70w4111xyWiMWP/ZMrDYxfzqA= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/journaldreceiver v0.101.0/go.mod h1:g3vUkW12It9mJjB5PCKdQCB3q4ywpROh24KraPK9ddI= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.101.0 h1:fiTNKHxLwAktTRjbb11HH8D2V+t9XWobqnJgfNajd74= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.101.0/go.mod h1:gmhlcPT07BUg2qZDkf6XrlKfO7dsfyu03TEaFwLwCSU= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/openshift/api v0.0.0-20210521075222-e273a339932a/go.mod h1:izBmoXbUu3z5kUa4FjZhvekTsyzIWiOoaIgJiZBBMQs= github.com/openshift/api v3.9.0+incompatible h1:fJ/KsefYuZAjmrr3+5U9yZIZbTOpVkDDLDLFresAeYs= github.com/openshift/api v3.9.0+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= -github.com/openshift/build-machinery-go v0.0.0-20210423112049-9415d7ebd33e/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= -github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 h1:ZHRIMCFIJN1p9LsJt4HQ+akDrys4PrYnXzOWI5LK03I= -github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142/go.mod h1:fjS8r9mqDVsPb5td3NehsNOAWa4uiFkYEfVZioQ2gH0= -github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0= -github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY= +github.com/openshift/client-go v0.0.0-20240523113335-452272e0496d h1:Dq21KMlDTVy2fCIyp0gsW+6ir6FwD3RjnCuza2/bIyM= +github.com/openshift/client-go v0.0.0-20240523113335-452272e0496d/go.mod h1:jt2Q+6Iheyh6omSPkRMOC6Doad5My/FfBfJpATPD4g0= +github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI= +github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -626,29 +574,30 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= -github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw= -github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -656,32 +605,32 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= -github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= -github.com/prometheus/prometheus v0.50.1 h1:N2L+DYrxqPh4WZStU+o1p/gQlBaqFbcLBTjlp3vpdXw= -github.com/prometheus/prometheus v0.50.1/go.mod h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU= +github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= +github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= +github.com/prometheus/prometheus v0.52.0 h1:f7kHJgr7+zShpWdTCeKqbCWR7nKTScgLYQwRux9h1V0= +github.com/prometheus/prometheus v0.52.0/go.mod h1:3z74cVsmVH0iXOR5QBjB7Pa6A0KJeEAK5A6UsmAFb1g= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= -github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= +github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.22 h1:wJrcTdddKOI8TFxs8cemnhKP2EmKy3yfUKHj3ZdfzYo= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.22/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26 h1:F+GIVtGqCFxPxO46ujf8cEOP574MBoRm3gNbPXECbxs= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shirou/gopsutil/v3 v3.24.3 h1:eoUGJSmdfLzJ3mxIhmOAhgKEKgQkeOwKpz1NbhVnuPE= -github.com/shirou/gopsutil/v3 v3.24.3/go.mod h1:JpND7O217xa72ewWz9zN2eIIkPWsDN/3pl0H8Qt0uwg= +github.com/shirou/gopsutil/v3 v3.24.4 h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRBtXeU= +github.com/shirou/gopsutil/v3 v3.24.4/go.mod h1:lTd2mdiOspcqLgAnr9/nGi71NkeMpWKdmhuxm9GusH8= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/shoenig/test v0.6.6 h1:Oe8TPH9wAbv++YPNDKJWUnI8Q4PPWCx3UbOfH+FxiMU= -github.com/shoenig/test v0.6.6/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= +github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/sirupsen/logrus v1.2.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -689,21 +638,18 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= @@ -711,7 +657,6 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= @@ -722,22 +667,25 @@ 
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/testcontainers/testcontainers-go v0.29.1 h1:z8kxdFlovA2y97RWx98v/TQ+tR+SXZm6p35M+xB92zk= -github.com/testcontainers/testcontainers-go v0.29.1/go.mod h1:SnKnKQav8UcgtKqjp/AD8bE1MqZm+3TDb/B8crE3XnI= -github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= -github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/testcontainers/testcontainers-go v0.31.0 h1:W0VwIhcEVhRflwL9as3dhY6jXjVCA27AkmbnZ+UTh3U= +github.com/testcontainers/testcontainers-go v0.31.0/go.mod h1:D2lAoA0zUFiSY+eAflqK5mcUx/A5hrrORaEQrd0SefI= +github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= +github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I= -github.com/tidwall/tinylru v1.1.0/go.mod h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/tinylru v1.2.1 h1:VgBr72c2IEr+V+pCdkPZUwiQ0KJknnWIYbhxAVkYfQk= +github.com/tidwall/tinylru v1.2.1/go.mod h1:9bQnEduwB6inr2Y7AkBP7JPgCkyrhTV/ZpX0oOOpBI4= github.com/tidwall/wal v1.1.7 h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4= github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/tv42/httpunix 
v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= @@ -750,8 +698,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI= -go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= +go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= +go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -759,122 +707,126 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.97.0 h1:qyOju13byHIKEK/JehmTiGMj4pFLa4kDyrOCtTmjHU0= -go.opentelemetry.io/collector v0.97.0/go.mod h1:V6xquYAaO2VHVu4DBK28JYuikRdZajh7DH5Vl/Y8NiA= -go.opentelemetry.io/collector/component v0.97.0 h1:vanKhXl5nptN8igRH4PqVYHOILif653vaPIKv6LCZCI= -go.opentelemetry.io/collector/component v0.97.0/go.mod h1:F/m3HMlkb16RKI7wJjgbECK1IZkAcmB8bu7yD8XOkwM= -go.opentelemetry.io/collector/config/configauth v0.97.0 h1:38M2uUsBzgD7sdJPPXUsOq1BFr6X6P4A5VFg+MOcRNY= -go.opentelemetry.io/collector/config/configauth v0.97.0/go.mod h1:BkCDatBU7CXXStrRPE1b4woj2VLxaYEMg2WTkb50BlI= -go.opentelemetry.io/collector/config/configcompression v1.4.0 h1:qWRKdl49lBvPUr6UWmyf1pR4EOBHN+66pDeGtfQ1Mbk= -go.opentelemetry.io/collector/config/configcompression v1.4.0/go.mod h1:O0fOPCADyGwGLLIf5lf7N3960NsnIfxsm6dr/mIpL+M= -go.opentelemetry.io/collector/config/configgrpc v0.97.0 h1:Ukl1GPtzSko4Pu8KV5jedD8OjySL/C+QgrfRdaakfHk= -go.opentelemetry.io/collector/config/configgrpc v0.97.0/go.mod h1:i8OrrxynYldlcZ6wPOUKNoZmmbUCDp3CzryRT+2mN7c= -go.opentelemetry.io/collector/config/confighttp v0.97.0 h1:Tfw4DtK5x66uSoRdbZc9tQTNGWEo/urR8RAedBdYtNU= -go.opentelemetry.io/collector/config/confighttp v0.97.0/go.mod h1:wyg4yXvCsk1CsfPBWQ3+rZDThz44Q0d35/1lJBHj5VI= -go.opentelemetry.io/collector/config/confignet v0.97.0 h1:KJjv10/YVMslSSLVWW/IIjpLM3JiO3rWvw5dK/t1H7g= -go.opentelemetry.io/collector/config/confignet v0.97.0/go.mod h1:3naWoPss70RhDHhYjGACi7xh4NcVRvs9itzIRVWyu1k= -go.opentelemetry.io/collector/config/configopaque v1.4.0 h1:5KgD9oLN+N07HqDsLzUrU0mE2pC8cMhrCSC1Nf8CEO4= -go.opentelemetry.io/collector/config/configopaque v1.4.0/go.mod h1:7Qzo69x7i+FaNELeA9jmZtVvfnR5lE6JYa5YEOCJPFQ= -go.opentelemetry.io/collector/config/configretry v0.97.0 h1:k7VwQ5H0oBLm6Fgm0ltfDDbmQVsiqSIY9ojijF0hiR0= -go.opentelemetry.io/collector/config/configretry v0.97.0/go.mod h1:s7A6ZGxK8bxqidFzwbr2pITzbsB2qf+aeHEDQDcanV8= 
-go.opentelemetry.io/collector/config/configtelemetry v0.97.0 h1:JS/WxK09A9m39D5OqsAWaoRe4tG7ESMnzDNIbZ5bD6c= -go.opentelemetry.io/collector/config/configtelemetry v0.97.0/go.mod h1:YV5PaOdtnU1xRomPcYqoHmyCr48tnaAREeGO96EZw8o= -go.opentelemetry.io/collector/config/configtls v0.97.0 h1:wmXj/rKQUGMZzbHVCTyB+xUWImsGxnLqhivwjBE0FdI= -go.opentelemetry.io/collector/config/configtls v0.97.0/go.mod h1:ev/fMI6hm1WTSHHEAEoVjF3RZj0qf38E/XO5itFku7k= -go.opentelemetry.io/collector/config/internal v0.97.0 h1:vhTzCm2u6MUAxdWPprkOePR/Kd57v2uF11twpza1E7o= -go.opentelemetry.io/collector/config/internal v0.97.0/go.mod h1:RVGDn9OH/KHT878cclG497/n2qxe54+zW+u/SVsRLNw= -go.opentelemetry.io/collector/confmap v0.97.0 h1:0CGSk7YW9rPc6jCwJteJzHzN96HRoHTfuqI7J/EmZsg= -go.opentelemetry.io/collector/confmap v0.97.0/go.mod h1:AnJmZcZoOLuykSXGiAf3shi11ZZk5ei4tZd9dDTTpWE= -go.opentelemetry.io/collector/confmap/converter/expandconverter v0.97.0 h1:Tw0+JlvA1Z5xpvHYqzYXsPdsCaq6+oGoqw7fCymh+lc= -go.opentelemetry.io/collector/confmap/converter/expandconverter v0.97.0/go.mod h1:gp3XWfC1OpmwHZsODRIpy4XZtrNy1RryJhvK7sdNgmk= -go.opentelemetry.io/collector/confmap/provider/envprovider v0.97.0 h1:2F3yl+Vr6nJ0sN9HoYeebY5+lJ8OJ4VqxCY16SsVcXs= -go.opentelemetry.io/collector/confmap/provider/envprovider v0.97.0/go.mod h1:GrHP/mOgzx8+fcTRmgB/IgH3lG80nv2bFW1v6oPggRM= -go.opentelemetry.io/collector/confmap/provider/fileprovider v0.97.0 h1:5SXsBAA/6Hv76+ndBY0wZRYGNES/55SKu6VhP4kriqM= -go.opentelemetry.io/collector/confmap/provider/fileprovider v0.97.0/go.mod h1:YAj2CNxE1go08GiAxYO2HSeNkWv8g7C7DHFpEECttI8= -go.opentelemetry.io/collector/confmap/provider/httpprovider v0.97.0 h1:Wd4XR3cOznED8sYM0Qy0NlAToxvpEG8OH9O89RKp+pg= -go.opentelemetry.io/collector/confmap/provider/httpprovider v0.97.0/go.mod h1:2LIGxKR6dJPP5kxkRSTIeWuJ7Mto1Mv456+MlG86RH8= -go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.97.0 h1:suZwewHxcwA3z0kE6p6tjYcPKlGOYWoIjy/58gBK/c0= -go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.97.0/go.mod h1:R+cJ8wWzaJll+WCTUOM769zIk1vOb7NQARG9xWNbgUA= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.97.0 h1:ntcR7AMHwFRROTMW1ifx0xVu+ltbPafS/1r/ssxe+hM= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.97.0/go.mod h1:0THo600LMD0RGl7loqyaHgd/47Icgb64QOmqaj0j5dU= -go.opentelemetry.io/collector/connector v0.97.0 h1:X3AI3rCRiNhKCF5OJc1XzNoIKYbClYm5BiLINtxHts8= -go.opentelemetry.io/collector/connector v0.97.0/go.mod h1:KolkR5/kkPzy2jW7Q7zs+FiO1xiDrBeAvDYrZe/ygtA= -go.opentelemetry.io/collector/consumer v0.97.0 h1:S0BZQtJQxSHT156S8a5rLt3TeWYP8Rq+jn8QEyWQUYk= -go.opentelemetry.io/collector/consumer v0.97.0/go.mod h1:1D06LURiZ/1KA2OnuKNeSn9bvFmJ5ZWe6L8kLu0osSY= -go.opentelemetry.io/collector/exporter v0.97.0 h1:kw/fQrpkhTz0/3I/Z0maRj0S8Mi0NK50/WwFuWrRYPc= -go.opentelemetry.io/collector/exporter v0.97.0/go.mod h1:EJYc4biKWxq3kD4Xh4SUSFbZ2lMsxjzwiCozikEDMjk= -go.opentelemetry.io/collector/exporter/debugexporter v0.97.0 h1:BZ2QjBmPBsYKFKaZvOJU0o0Xfth8kxOB6izrCB19qTM= -go.opentelemetry.io/collector/exporter/debugexporter v0.97.0/go.mod h1:CKrzODDvR5bLQZxSkNXUoaFcCY1fe6dPZV6FkdyH8/M= -go.opentelemetry.io/collector/exporter/loggingexporter v0.97.0 h1:avSUMkwU3b7OlrKR5qFjbBYmRgwdIoDltR11h9MmxjQ= -go.opentelemetry.io/collector/exporter/loggingexporter v0.97.0/go.mod h1:wT/uTcbmw01Uxgsb370KVffXIGypXikZxk7KQDynLnk= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.97.0 h1:TjqChvxmmGMvY8/GV2/FQygwzTRUsj77HjJBc9kraEw= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.97.0/go.mod 
h1:AqVCYi9O5hWzdko9zvRm/DGA4pOxBJZ5OqJ+XinLK8Y= -go.opentelemetry.io/collector/extension v0.97.0 h1:LpjZ4KQgnhLG/u3l69QgWkX8qMqeS8IFKWMoDtbPIeE= -go.opentelemetry.io/collector/extension v0.97.0/go.mod h1:jWNG0Npi7AxiqwCclToskDfCQuNKHYHlBPJNnIKHp84= -go.opentelemetry.io/collector/extension/auth v0.97.0 h1:2AYGxSbsi1KC2DOOFbAe7valrERb86m7TfRY85X8hSE= -go.opentelemetry.io/collector/extension/auth v0.97.0/go.mod h1:uElLYtzMPA48mu9baxGIH6lHpOn76NLe4mVHnmV+hEY= -go.opentelemetry.io/collector/extension/zpagesextension v0.97.0 h1:IaZm011+6WKE03TFmDQ7ztp65eu8ngGc026m9KhuhB4= -go.opentelemetry.io/collector/extension/zpagesextension v0.97.0/go.mod h1:PxR6gK4FEYHc6iGPH7svM9jjFIZo4o/MGGO0l+see0o= -go.opentelemetry.io/collector/featuregate v1.4.0 h1:RWE9M659C9iuUQc4GzBsndkGHG1jIzIY+nZJWvcKy1M= -go.opentelemetry.io/collector/featuregate v1.4.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w= -go.opentelemetry.io/collector/otelcol v0.97.0 h1:xtZOYmdzeu1dSgco0I4yqhSyQ6GrGa7E7TRUlg7KIwI= -go.opentelemetry.io/collector/otelcol v0.97.0/go.mod h1:mHzwRh3ATXfStF9xsttnJilxpaP+03hhozMn39dP+Jo= -go.opentelemetry.io/collector/pdata v1.4.0 h1:cA6Pr7Z2V7mE+i7FmYpavX7nefzd6H4CICgW0T9aJX0= -go.opentelemetry.io/collector/pdata v1.4.0/go.mod h1:0Ttp4wQinhV5oJTd9MjyvUegmZBO9O0nrlh/+EDLw+Q= -go.opentelemetry.io/collector/processor v0.97.0 h1:L3R5R7x56LH2inF3sv0ZOsFfulVo8yuIFhO/OgpkCU0= -go.opentelemetry.io/collector/processor v0.97.0/go.mod h1:OsxBAPQ2fDytAn+yWLdEQ1yjYfl/OIak1AfKGfI8ALs= -go.opentelemetry.io/collector/processor/batchprocessor v0.97.0 h1:DpC+d8ZQB/Qj5+sODYPpu8a8l0aMtMZEq6pN/3e+5X4= -go.opentelemetry.io/collector/processor/batchprocessor v0.97.0/go.mod h1:2sSDgffCKAUocMEapmcMBvgfBIotLtDFg/YJrOia8GE= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.97.0 h1:B0ji9gIYZc+NohgcQ8arPtPLvzk+O1gdphGTk69hAmg= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.97.0/go.mod h1:/Euc792OGPS4EVgzRVrRex1v4e5Oq2MJjCff3t1vOgw= -go.opentelemetry.io/collector/receiver v0.97.0 h1:ozzE5MhIPtfnYA/UKB/NCcgxSmeLqdwErboi6B/IpLQ= -go.opentelemetry.io/collector/receiver v0.97.0/go.mod h1:1TCN9DRuB45+xKqlwv4BMQR6qXgaJeSSNezFTJhmDUo= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.97.0 h1:LY6kbWURk2g+jcfJlQo51Xu6WyYKHW3W+UOhAr3g3qc= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.97.0/go.mod h1:JL1oxtysJT2TN80p/nC9vrWkENh/If7kmMRio7yF9WE= -go.opentelemetry.io/collector/semconv v0.97.0 h1:iF3nTfThbiOwz7o5Pocn0dDnDoffd18ijDuf6Mwzi1s= -go.opentelemetry.io/collector/semconv v0.97.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= -go.opentelemetry.io/collector/service v0.97.0 h1:Nv/A41Ezot0lqMQwqJSRGV2PeQNcItaiDaQPudFXBOM= -go.opentelemetry.io/collector/service v0.97.0/go.mod h1:h9SwBe1yThExLAhgIErMtGsyBdEmSKGK8p/f+QcWyT0= -go.opentelemetry.io/contrib/config v0.4.0 h1:Xb+ncYOqseLroMuBesGNRgVQolXcXOhMj7EhGwJCdHs= -go.opentelemetry.io/contrib/config v0.4.0/go.mod h1:drNk2xRqLWW4/amk6Uh1S+sDAJTc7bcEEN1GfJzj418= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/contrib/propagators/b3 v1.24.0 
h1:n4xwCdTx3pZqZs2CjS/CUZAs03y3dZcGhC/FepKtEUY= -go.opentelemetry.io/contrib/propagators/b3 v1.24.0/go.mod h1:k5wRxKRU2uXx2F8uNJ4TaonuEO/V7/5xoz7kdsDACT8= -go.opentelemetry.io/contrib/zpages v0.49.0 h1:Wk217PkNBxcKWnIQpwtbZZE286K4ZY9uajnM5woSeLU= -go.opentelemetry.io/contrib/zpages v0.49.0/go.mod h1:6alLi5mmkZWbAtZMRPd1ffIgkTcsU9OTHQF2NbSOhrQ= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/bridge/opencensus v1.24.0 h1:Vlhy5ee5k5R0zASpH+9AgHiJH7xnKACI3XopO1tUZfY= -go.opentelemetry.io/otel/bridge/opencensus v1.24.0/go.mod h1:jRjVXV/X38jyrnHtvMGN8+9cejZB21JvXAAvooF2s+Q= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0 h1:f2jriWfOdldanBwS9jNBdeOKAQN7b4ugAMaNu1/1k9g= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0/go.mod h1:B+bcQI1yTY+N0vqMpoZbEN7+XU4tNM0DmUiOwebFJWI= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 h1:mM8nKi6/iFQ0iqst80wDHU2ge198Ye/TfN0WBS5U24Y= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0/go.mod h1:0PrIIzDteLSmNyxqcGYRL4mDIo8OTuBAOI/Bn1URxac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 h1:Mw5xcxMwlqoJd97vwPxA8isEaIoxsta9/Q51+TTJLGE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0/go.mod h1:CQNu9bj7o7mC6U7+CA/schKEYakYXWr79ucDHTMGhCM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= -go.opentelemetry.io/otel/exporters/prometheus v0.46.0 h1:I8WIFXR351FoLJYuloU4EgXbtNX2URfU/85pUPheIEQ= -go.opentelemetry.io/otel/exporters/prometheus v0.46.0/go.mod h1:ztwVUHe5DTR/1v7PeuGRnU5Bbd4QKYwApWmuutKsJSs= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.24.0 h1:JYE2HM7pZbOt5Jhk8ndWZTUWYOVift2cHjXVMkPdmdc= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.24.0/go.mod h1:yMb/8c6hVsnma0RpsBMNo0fEiQKeclawtgaIaOp2MLY= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0 h1:s0PHtIkN+3xrbDOpt2M8OTG92cWqUESvzh2MxiR5xY8= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0/go.mod h1:hZlFbDbRt++MMPCCfSJfmhkGIWnX1h3XjkfxZUjLrIA= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/sdk/metric v1.24.0 h1:yyMQrPzF+k88/DbH7o4FMAs80puqd+9osbiBrJrz/w8= -go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= -go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= -go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= 
+go.opentelemetry.io/collector v0.101.0 h1:jnCI/JZgpEYONWy4LCvif4CjMM7cPS4XvGHp3OrZpYo= +go.opentelemetry.io/collector v0.101.0/go.mod h1:N0xja/N3NUDIC55SjjNzyyIoxE6YoCEZC3aXQ39yIVs= +go.opentelemetry.io/collector/component v0.101.0 h1:2sILYgE8cZJj0Vseh6LUjS9iXPyqDPTx/R8yf8IPu+4= +go.opentelemetry.io/collector/component v0.101.0/go.mod h1:OB1uBpQZ2Ba6wVui/sthh6j+CPxVQIy2ou5rzZPINQQ= +go.opentelemetry.io/collector/config/configauth v0.101.0 h1:rUH9aHETDmqaQFq53zaRIEy4N0jllzK6Bl1OoBlUA4s= +go.opentelemetry.io/collector/config/configauth v0.101.0/go.mod h1:wF/luWiQ7rpIWjFs0ds3PVrZ2bKhhVAmANKp3Fv5fjU= +go.opentelemetry.io/collector/config/configcompression v1.8.0 h1:qcgde9yOFkdRYSjHujxxVnciAPYBSI5hv1EZ/+7GQuA= +go.opentelemetry.io/collector/config/configcompression v1.8.0/go.mod h1:O0fOPCADyGwGLLIf5lf7N3960NsnIfxsm6dr/mIpL+M= +go.opentelemetry.io/collector/config/configgrpc v0.101.0 h1:IuP8a+cnhxLKUoBLEBXDxOYvIgYS5nTlWL2JLZ9lV3c= +go.opentelemetry.io/collector/config/configgrpc v0.101.0/go.mod h1:XFTQv7Wf9atXblYURL1uzcJbI0l8tLPo5Z4AXlLK/Rs= +go.opentelemetry.io/collector/config/confighttp v0.101.0 h1:/LIrKzD+rzE+uLXECIXHhlO6pu9CnRmdrKV/VKbYT9A= +go.opentelemetry.io/collector/config/confighttp v0.101.0/go.mod h1:KspNrdrtpaPg27qtxZ+e3jmJoOHLyj0oNmMpJd0b3wg= +go.opentelemetry.io/collector/config/confignet v0.101.0 h1:Mdb9e/EpCSac4Ccg7w4UchS/o4yY1WoIc9X5o7fTu9E= +go.opentelemetry.io/collector/config/confignet v0.101.0/go.mod h1:3naWoPss70RhDHhYjGACi7xh4NcVRvs9itzIRVWyu1k= +go.opentelemetry.io/collector/config/configopaque v1.8.0 h1:MXNJDG/yNmEX/tkf4EJ+aSucM92l4KfqtCAhBjMVMg8= +go.opentelemetry.io/collector/config/configopaque v1.8.0/go.mod h1:VUBsRa6pi8z1GaR9CCELMOnIZQRdZQ1GGi0W3UTk7x0= +go.opentelemetry.io/collector/config/configretry v0.101.0 h1:5QggLq/lZiZXry1Ut52IOTbrdz1RbGoL29Io/wWdE4g= +go.opentelemetry.io/collector/config/configretry v0.101.0/go.mod h1:uRdmPeCkrW9Zsadh2WEbQ1AGXGYJ02vCfmmT+0g69nY= +go.opentelemetry.io/collector/config/configtelemetry v0.101.0 h1:G9RerNdBUm6rYW6wrJoKzleBiDsCGaCjtQx5UYr0hzw= +go.opentelemetry.io/collector/config/configtelemetry v0.101.0/go.mod h1:YV5PaOdtnU1xRomPcYqoHmyCr48tnaAREeGO96EZw8o= +go.opentelemetry.io/collector/config/configtls v0.101.0 h1:kUBqqEPuO7Awsqq8dOlP+NRQ/wSxyosM24m1lF6JIdA= +go.opentelemetry.io/collector/config/configtls v0.101.0/go.mod h1:cyNmN5a/SaXKeup3vbISmjwbXTt9Z0fl1wt7k30Ta3Q= +go.opentelemetry.io/collector/config/internal v0.101.0 h1:GnnVmX/v/MVf4oK4TOcG0+AnCsTDC02CsmTvcSq+08g= +go.opentelemetry.io/collector/config/internal v0.101.0/go.mod h1:GYu44KDiZy9Rs4wIq5kfWDihqfpbktgupUGjW4BBNpY= +go.opentelemetry.io/collector/confmap v0.101.0 h1:pGXZRBKnZqys1HgNECGSi8Pec5RBGa9vVCfrpcvW+kA= +go.opentelemetry.io/collector/confmap v0.101.0/go.mod h1:BWKPIpYeUzSG6ZgCJMjF7xsLvyrvJCfYURl57E5vhiQ= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.101.0 h1:/uUZlzzxO8QknVCslpYVlQGSq5EG3Dzr6l4w2xW4u2A= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.101.0/go.mod h1:GKgiSuL5+ATJE1lrAQVpfSBFX/3XqGyc2qrfQBKpVd8= +go.opentelemetry.io/collector/confmap/provider/envprovider v0.101.0 h1:I7oSi6hTTaDMbh9k6nxF4YhLqB/t0xXTgrBXGCT53vw= +go.opentelemetry.io/collector/confmap/provider/envprovider v0.101.0/go.mod h1:DxCK400//MGnFyLSnIjme+R7qZwfDQtHYERIQtEt7Cg= +go.opentelemetry.io/collector/confmap/provider/fileprovider v0.101.0 h1:uMjJyxN3q+DaKR+GOJcERuVD8rKEe1PvTOUaMs66gCM= +go.opentelemetry.io/collector/confmap/provider/fileprovider v0.101.0/go.mod h1:3WLJtHisH4/7K/IU//OMoCTJYZ8o96/YfjP6J+Q/kBo= 
+go.opentelemetry.io/collector/confmap/provider/httpprovider v0.101.0 h1:U+/Mmd1+DHl94R/i+LxTjReEtFh3qV4QJ8XxqhCdezk= +go.opentelemetry.io/collector/confmap/provider/httpprovider v0.101.0/go.mod h1:EJ5t47HWbDGxUjVax+BBJ3ySpHBHQz/Ys689+R6OXis= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.101.0 h1:W0Xw+OgRCbdKWJy3VSZKPCcf4fFZlFF6L+mMWleq1LY= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.101.0/go.mod h1:0I8UtPWeXbuwe/UMQ+LmoFWoNo6NCxxNocHPrLox0X8= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.101.0 h1:N5yNF24hxUOO5Ps5mWwwQaYWyBPcqqSh4h10kULDT3c= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.101.0/go.mod h1:sPzwdCKCXYXUR8U7eAHshDZPnfbF7B7I/BFyUWTvvKQ= +go.opentelemetry.io/collector/connector v0.101.0 h1:OedmwrzrxC3wrYkp8Mpfcf30bJmlxC9TuwNLnpj4V8M= +go.opentelemetry.io/collector/connector v0.101.0/go.mod h1:skAILMO4ye4Y3s2DUo7k/8uZFwG22fpwjIYXO/pv/JQ= +go.opentelemetry.io/collector/consumer v0.101.0 h1:9tDxaeHe1+Uovf3fhdx7T4pV5mo/Dc0hniH7O5H3RBA= +go.opentelemetry.io/collector/consumer v0.101.0/go.mod h1:ud5k64on9m7hHTrhjEeLhWbLkd8+Gp06rDt3p86TKNs= +go.opentelemetry.io/collector/exporter v0.101.0 h1:zAxQBfaWO+PEHL3nDglgMGaWsqLsj1lJHPaBnO8PeDo= +go.opentelemetry.io/collector/exporter v0.101.0/go.mod h1:ZFwUWCmnM2ZbEty71Q13qME9QhvIKMgyYrS3s8vJPM8= +go.opentelemetry.io/collector/exporter/debugexporter v0.101.0 h1:D5d6xPAFu/TOhUBSBuyHwI7ZvUQLMXmdiSw70TZNetA= +go.opentelemetry.io/collector/exporter/debugexporter v0.101.0/go.mod h1:0E6mKsuQLkTLj2k/a8kJPPlkvpHqdyu9y1wzzAC+934= +go.opentelemetry.io/collector/exporter/loggingexporter v0.101.0 h1:LLNMLafpuIlTN6Zw5uF/VVKsLyUkVTlJZYP5VrtIyVI= +go.opentelemetry.io/collector/exporter/loggingexporter v0.101.0/go.mod h1:I+dWF7xN+oZnv6WPTc8UFUcTCYkroWF0G293KSxKKoo= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.101.0 h1:t3HKouN61e0TWcrKklJYDdq5317qtxqwDL5d6HlUUo0= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.101.0/go.mod h1:H/ZVNwR8JzUTy1go86a4gw8WwBqF/+Nc0zpXtTkMJYs= +go.opentelemetry.io/collector/extension v0.101.0 h1:A4hq/aci9+/Pxi8sJfyYgbeHjSIL7JFZR81IlSOTla4= +go.opentelemetry.io/collector/extension v0.101.0/go.mod h1:14gQMuybTcppfTTM9AwqeoFrNCLv/ds/c0A4Z0hWuLI= +go.opentelemetry.io/collector/extension/auth v0.101.0 h1:Y3sO0qQb2tkm1LBdrH8UIUNpDcorWxwq/9nhcQqlxqU= +go.opentelemetry.io/collector/extension/auth v0.101.0/go.mod h1:5PEBkpr5fF/47BAZ2dvc9M3+QfkabxIOB4YCjjW5DNc= +go.opentelemetry.io/collector/extension/zpagesextension v0.101.0 h1:hZIkGTgKeVvVlqoPw72G/RkIhp0QSqY5PNRjf38mf2k= +go.opentelemetry.io/collector/extension/zpagesextension v0.101.0/go.mod h1:sllOMdEbNg2UnMxTO8jSx2OEfAcYX3ud6smuXhN6pbA= +go.opentelemetry.io/collector/featuregate v1.8.0 h1:p/bAuk5LiSfdYS88yFl/Jzao9bHEYqCh7YvZJ+L+IZg= +go.opentelemetry.io/collector/featuregate v1.8.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w= +go.opentelemetry.io/collector/filter v0.101.0 h1:tNs6+liajg4hxSmtX5tcuGBefSPB+TEyyK3KTPp+dYY= +go.opentelemetry.io/collector/filter v0.101.0/go.mod h1:Kp9rCRB60SDm+pjrsaK95fkwfEXGh4j1yewvATTNkfI= +go.opentelemetry.io/collector/otelcol v0.101.0 h1:6kF2dcXpu5NjxK2j0ksCRzZhqigxCGrP/u7n57FSMOg= +go.opentelemetry.io/collector/otelcol v0.101.0/go.mod h1:qGrb+hlZXId/hJj0y28vq0YkMd6Xsoz2w7mZkXJOw68= +go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764= +go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY= +go.opentelemetry.io/collector/pdata/testdata v0.101.0 
h1:JzeUtg5RN1iIFgY8DakGlqBkGxOTJlkaYlLausnEGKY= +go.opentelemetry.io/collector/pdata/testdata v0.101.0/go.mod h1:ZGobfCus4fWo5RduZ7ENI0+HD9BewgKuO6qU2rBVnUg= +go.opentelemetry.io/collector/processor v0.101.0 h1:VU77ImECho43O/7p3w6KUnNvzg/TQ4/WxjZzvI/TNm0= +go.opentelemetry.io/collector/processor v0.101.0/go.mod h1:uLvPw3SNAJbqkdWAzDs4F7S3FobHNG6fJQaO9q3aGVQ= +go.opentelemetry.io/collector/processor/batchprocessor v0.101.0 h1:/hSyISiVWgt6mH38w+v/HPbNZW99GWDEJruwNRgF38Q= +go.opentelemetry.io/collector/processor/batchprocessor v0.101.0/go.mod h1:etSi9dCLw0tkiu94msC0kADBs7LpBiZqXo3koMORMpI= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.101.0 h1:RzBP3upqjCff3rgLQj4ySNkXIR+ecvdMn3Ak+yTQikI= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.101.0/go.mod h1:B0Qa/FL31DBsLTdUj1Y8vpzSyDTlDM+eda1j1rRe8Z4= +go.opentelemetry.io/collector/receiver v0.101.0 h1:+YJQvcAw5Es15Ub8hYqqZumKbe7D0SMU8XCgGRxc25M= +go.opentelemetry.io/collector/receiver v0.101.0/go.mod h1:JFVHAkIIz9uOk85u9pHsYRcyFj1ZAUpw59ahNZ28+ko= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.101.0 h1:CEu8qgxIyy7C66f+InNrV2gvgO+Z7Ryk8aPkzhUQeT8= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.101.0/go.mod h1:gektz6Q5R2ooWgQDa4ufdpJdD+M1/062hHQerea3VKQ= +go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q= +go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= +go.opentelemetry.io/collector/service v0.101.0 h1:My2NrH25WYmJ6vMWwT3csglyiTkf0XP3nPgj0mX1yFw= +go.opentelemetry.io/collector/service v0.101.0/go.mod h1:XowYC9FyNGmWClh0aObztKdTfQNLAr6mubpvh27ee+Q= +go.opentelemetry.io/contrib/config v0.7.0 h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs= +go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= +go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= +go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= +go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= +go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= +go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0/go.mod h1:xJntEd2KL6Qdg5lwp97HMLQDVeAhrYxmzFseAMDPQ8I= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 h1:CIHWikMsN3wO+wq1Tp5VGdVRTcON+DmOJSfDjXypKOc= 
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0/go.mod h1:TNupZ6cxqyFEpLXAZW7On+mLFL0/g0TE3unIYL91xWc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= +go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ= +go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= +go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -887,17 +839,14 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -908,8 +857,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -930,14 +879,12 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -950,7 +897,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -968,7 +914,6 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -976,17 +921,17 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 
v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1000,12 +945,11 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1015,7 +959,6 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1045,14 +988,13 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1065,37 +1007,33 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1108,7 +1046,6 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1134,7 +1071,6 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1143,11 +1079,10 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= +golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1170,16 +1105,14 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.157.0 h1:ORAeqmbrrozeyw5NjnMxh7peHO0UzV4wWYSwZeCUb20= -google.golang.org/api v0.157.0/go.mod h1:+z4v4ufbZ1WEpld6yMGHyggs+PmAHiaLNj5ytP3N01g= +google.golang.org/api v0.181.0 h1:rPdjwnWgiPPOJx3IcSAQ2III5aX5tCer6wMpa/xmZi4= +google.golang.org/api v0.181.0/go.mod h1:MnQ+M0CFsfUwA5beZ+g/vCBCPXvtmZwRz2qzZk8ih1k= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1209,10 +1142,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= -google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e h1:SkdGTrROJl2jRGT/Fxv5QUf9jtdKCQh4KQJXbXVLAi0= +google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e/go.mod h1:LweJcLbyVij6rCex8YunD8DYR5VDonap/jYl3ZRxcIU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e h1:Elxv5MwEkCI9f5SkoL6afed6NTdxaGoAo39eANBwHL8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1226,8 +1159,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1239,23 +1172,19 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1267,8 +1196,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.2.0 h1:I0DwBVMGAx26dttAj1BtJLAkVGncrkkUXfJLC4Flt/I= -gotest.tools/v3 v3.2.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1276,38 +1205,24 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= -k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= -k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= -k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= -k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod 
h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= +k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= +k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= +k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= +k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a h1:zD1uj3Jf+mD4zmA7W+goE5TxDkI7OGJjBNBzq5fJtLA= +k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md new file mode 100644 index 000000000..1e7aa4798 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -0,0 +1,135 @@ +# Changelog + +## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.1...auth/v0.4.2) (2024-05-16) + + +### Bug Fixes + +* 
**auth:** Enable client certificates by default only for GDU ([#10151](https://github.com/googleapis/google-cloud-go/issues/10151)) ([7c52978](https://github.com/googleapis/google-cloud-go/commit/7c529786275a39b7e00525f7d5e7be0d963e9e15)) +* **auth:** Handle non-Transport DefaultTransport ([#10162](https://github.com/googleapis/google-cloud-go/issues/10162)) ([fa3bfdb](https://github.com/googleapis/google-cloud-go/commit/fa3bfdb23aaa45b34394a8b61e753b3587506782)), refs [#10159](https://github.com/googleapis/google-cloud-go/issues/10159) +* **auth:** Have refresh time match docs ([#10147](https://github.com/googleapis/google-cloud-go/issues/10147)) ([bcb5568](https://github.com/googleapis/google-cloud-go/commit/bcb5568c07a54dd3d2e869d15f502b0741a609e8)) +* **auth:** Update compute token fetching error with named prefix ([#10180](https://github.com/googleapis/google-cloud-go/issues/10180)) ([4573504](https://github.com/googleapis/google-cloud-go/commit/4573504828d2928bebedc875d87650ba227829ea)) + +## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.0...auth/v0.4.1) (2024-05-09) + + +### Bug Fixes + +* **auth:** Don't try to detect default creds if opts configured ([#10143](https://github.com/googleapis/google-cloud-go/issues/10143)) ([804632e](https://github.com/googleapis/google-cloud-go/commit/804632e7c5b0b85ff522f7951114485e256eb5bc)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.3.0...auth/v0.4.0) (2024-05-07) + + +### Features + +* **auth:** Enable client certificates by default ([#10102](https://github.com/googleapis/google-cloud-go/issues/10102)) ([9013e52](https://github.com/googleapis/google-cloud-go/commit/9013e5200a6ec0f178ed91acb255481ffb073a2c)) + + +### Bug Fixes + +* **auth:** Get s2a logic up to date ([#10093](https://github.com/googleapis/google-cloud-go/issues/10093)) ([4fe9ae4](https://github.com/googleapis/google-cloud-go/commit/4fe9ae4b7101af2a5221d6d6b2e77b479305bb06)) + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.2...auth/v0.3.0) (2024-04-23) + + +### Features + +* **auth/httptransport:** Add ability to customize transport ([#10023](https://github.com/googleapis/google-cloud-go/issues/10023)) ([72c7f6b](https://github.com/googleapis/google-cloud-go/commit/72c7f6bbec3136cc7a62788fc7186bc33ef6c3b3)), refs [#9812](https://github.com/googleapis/google-cloud-go/issues/9812) [#9814](https://github.com/googleapis/google-cloud-go/issues/9814) + + +### Bug Fixes + +* **auth/credentials:** Error on bad file name if explicitly set ([#10018](https://github.com/googleapis/google-cloud-go/issues/10018)) ([55beaa9](https://github.com/googleapis/google-cloud-go/commit/55beaa993aaf052d8be39766afc6777c3c2a0bdd)), refs [#9809](https://github.com/googleapis/google-cloud-go/issues/9809) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.1...auth/v0.2.2) (2024-04-19) + + +### Bug Fixes + +* **auth:** Add internal opt to skip validation on transports
([#9999](https://github.com/googleapis/google-cloud-go/issues/9999)) ([9e20ef8](https://github.com/googleapis/google-cloud-go/commit/9e20ef89f6287d6bd03b8697d5898dc43b4a77cf)), refs [#9823](https://github.com/googleapis/google-cloud-go/issues/9823) +* **auth:** Set secure flag for gRPC conn pools ([#10002](https://github.com/googleapis/google-cloud-go/issues/10002)) ([14e3956](https://github.com/googleapis/google-cloud-go/commit/14e3956dfd736399731b5ee8d9b178ae085cf7ba)), refs [#9833](https://github.com/googleapis/google-cloud-go/issues/9833) + +## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.0...auth/v0.2.1) (2024-04-18) + + +### Bug Fixes + +* **auth:** Default gRPC token type to Bearer if not set ([#9800](https://github.com/googleapis/google-cloud-go/issues/9800)) ([5284066](https://github.com/googleapis/google-cloud-go/commit/5284066670b6fe65d79089cfe0199c9660f87fc7)) + +## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.1...auth/v0.2.0) (2024-04-15) + +### Breaking Changes + +The commits mentioned below introduced a few large breaking changes since the +last release of the module. + +1. The `Credentials` type has been moved to the root of the module as it is + becoming the core abstraction for the whole module. +2. Because of the above-mentioned change, many functions that previously + returned a `TokenProvider` now return `Credentials`. Similarly, these + functions have been renamed to be more specific. +3. Most places that used to take an optional `TokenProvider` now accept + `Credentials`. You can make a `Credentials` from a `TokenProvider` using the + constructor found in the `auth` package. +4. The `detect` package has been renamed to `credentials`. With this change, some + function signatures were also updated for better readability. +5. Derivative auth flows like `impersonate` and `downscope` have been moved to + be under the new `credentials` package. + +Although these changes are disruptive, we think they are best for the +long-term health of the module. We do not expect any more large breaking changes +like these in future revisions, even before 1.0.0. This version will be the +first version of the auth library that our client libraries start to use and +depend on.
+ +### Features + +* **auth/credentials/externalaccount:** Add default TokenURL ([#9700](https://github.com/googleapis/google-cloud-go/issues/9700)) ([81830e6](https://github.com/googleapis/google-cloud-go/commit/81830e6848ceefd055aa4d08f933d1154455a0f6)) +* **auth:** Add downscope.Options.UniverseDomain ([#9634](https://github.com/googleapis/google-cloud-go/issues/9634)) ([52cf7d7](https://github.com/googleapis/google-cloud-go/commit/52cf7d780853594291c4e34302d618299d1f5a1d)) +* **auth:** Add universe domain to grpctransport and httptransport ([#9663](https://github.com/googleapis/google-cloud-go/issues/9663)) ([67d353b](https://github.com/googleapis/google-cloud-go/commit/67d353beefe3b607c08c891876fbd95ab89e5fe3)), refs [#9670](https://github.com/googleapis/google-cloud-go/issues/9670) +* **auth:** Add UniverseDomain to DetectOptions ([#9536](https://github.com/googleapis/google-cloud-go/issues/9536)) ([3618d3f](https://github.com/googleapis/google-cloud-go/commit/3618d3f7061615c0e189f376c75abc201203b501)) +* **auth:** Make package externalaccount public ([#9633](https://github.com/googleapis/google-cloud-go/issues/9633)) ([a0978d8](https://github.com/googleapis/google-cloud-go/commit/a0978d8e96968399940ebd7d092539772bf9caac)) +* **auth:** Move credentials to base auth package ([#9590](https://github.com/googleapis/google-cloud-go/issues/9590)) ([1a04baf](https://github.com/googleapis/google-cloud-go/commit/1a04bafa83c27342b9308d785645e1e5423ea10d)) +* **auth:** Refactor public sigs to use Credentials ([#9603](https://github.com/googleapis/google-cloud-go/issues/9603)) ([69cb240](https://github.com/googleapis/google-cloud-go/commit/69cb240c530b1f7173a9af2555c19e9a1beb56c5)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) +* **auth:** Fix uint32 conversion ([9221c7f](https://github.com/googleapis/google-cloud-go/commit/9221c7fa12cef9d5fb7ddc92f41f1d6204971c7b)) +* **auth:** Port sts expires fix ([#9618](https://github.com/googleapis/google-cloud-go/issues/9618)) ([7bec97b](https://github.com/googleapis/google-cloud-go/commit/7bec97b2f51ed3ac4f9b88bf100d301da3f5d1bd)) +* **auth:** Read universe_domain from all credentials files ([#9632](https://github.com/googleapis/google-cloud-go/issues/9632)) ([16efbb5](https://github.com/googleapis/google-cloud-go/commit/16efbb52e39ea4a319e5ee1e95c0e0305b6d9824)) +* **auth:** Remove content-type header from idms get requests ([#9508](https://github.com/googleapis/google-cloud-go/issues/9508)) ([8589f41](https://github.com/googleapis/google-cloud-go/commit/8589f41599d265d7c3d46a3d86c9fab2329cbdd9)) +* **auth:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + +## [0.1.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.0...auth/v0.1.1) (2024-03-10) + + +### Bug Fixes + +* **auth/impersonate:** Properly send default detect 
params ([#9529](https://github.com/googleapis/google-cloud-go/issues/9529)) ([5b6b8be](https://github.com/googleapis/google-cloud-go/commit/5b6b8bef577f82707e51f5cc5d258d5bdf90218f)), refs [#9136](https://github.com/googleapis/google-cloud-go/issues/9136) +* **auth:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c)) +* **auth:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) + +## 0.1.0 (2023-10-18) + + +### Features + +* **auth:** Add base auth package ([#8465](https://github.com/googleapis/google-cloud-go/issues/8465)) ([6a45f26](https://github.com/googleapis/google-cloud-go/commit/6a45f26b809b64edae21f312c18d4205f96b180e)) +* **auth:** Add cert support to httptransport ([#8569](https://github.com/googleapis/google-cloud-go/issues/8569)) ([37e3435](https://github.com/googleapis/google-cloud-go/commit/37e3435f8e98595eafab481bdfcb31a4c56fa993)) +* **auth:** Add Credentials.UniverseDomain() ([#8654](https://github.com/googleapis/google-cloud-go/issues/8654)) ([af0aa1e](https://github.com/googleapis/google-cloud-go/commit/af0aa1ed8015bc8fe0dd87a7549ae029107cbdb8)) +* **auth:** Add detect package ([#8491](https://github.com/googleapis/google-cloud-go/issues/8491)) ([d977419](https://github.com/googleapis/google-cloud-go/commit/d977419a3269f6acc193df77a2136a6eb4b4add7)) +* **auth:** Add downscope package ([#8532](https://github.com/googleapis/google-cloud-go/issues/8532)) ([dda9bff](https://github.com/googleapis/google-cloud-go/commit/dda9bff8ec70e6d104901b4105d13dcaa4e2404c)) +* **auth:** Add grpctransport package ([#8625](https://github.com/googleapis/google-cloud-go/issues/8625)) ([69a8347](https://github.com/googleapis/google-cloud-go/commit/69a83470bdcc7ed10c6c36d1abc3b7cfdb8a0ee5)) +* **auth:** Add httptransport package ([#8567](https://github.com/googleapis/google-cloud-go/issues/8567)) ([6898597](https://github.com/googleapis/google-cloud-go/commit/6898597d2ea95d630fcd00fd15c58c75ea843bff)) +* **auth:** Add idtoken package ([#8580](https://github.com/googleapis/google-cloud-go/issues/8580)) ([a79e693](https://github.com/googleapis/google-cloud-go/commit/a79e693e97e4e3e1c6742099af3dbc58866d88fe)) +* **auth:** Add impersonate package ([#8578](https://github.com/googleapis/google-cloud-go/issues/8578)) ([e29ba0c](https://github.com/googleapis/google-cloud-go/commit/e29ba0cb7bd3888ab9e808087027dc5a32474c04)) +* **auth:** Add support for external accounts in detect ([#8508](https://github.com/googleapis/google-cloud-go/issues/8508)) ([62210d5](https://github.com/googleapis/google-cloud-go/commit/62210d5d3e56e8e9f35db8e6ac0defec19582507)) +* **auth:** Port external account changes ([#8697](https://github.com/googleapis/google-cloud-go/issues/8697)) ([5823db5](https://github.com/googleapis/google-cloud-go/commit/5823db5d633069999b58b9131a7f9cd77e82c899)) + + +### Bug Fixes + +* 
**auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **auth:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) diff --git a/vendor/cloud.google.com/go/compute/LICENSE b/vendor/cloud.google.com/go/auth/LICENSE similarity index 100% rename from vendor/cloud.google.com/go/compute/LICENSE rename to vendor/cloud.google.com/go/auth/LICENSE diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md new file mode 100644 index 000000000..36de276a0 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/README.md @@ -0,0 +1,4 @@ +# auth + +This module is currently EXPERIMENTAL and under active development. It is not +yet intended to be used. diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go new file mode 100644 index 000000000..d579e482e --- /dev/null +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -0,0 +1,476 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/jwt" +) + +const ( + // Parameter keys for AuthCodeURL method to support PKCE. + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + + // Parameter key for Exchange method to support PKCE. + codeVerifierKey = "code_verifier" + + // 3 minutes and 45 seconds before expiration. The shortest MDS cache is 4 minutes, + // so we give it 15 seconds to refresh its cache before attempting to refresh a token. + defaultExpiryDelta = 225 * time.Second + + universeDomainDefault = "googleapis.com" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jwt.Header{Algorithm: jwt.HeaderAlgRSA256, Type: jwt.HeaderType} + + // for testing + timeNow = time.Now +) + +// TokenProvider specifies an interface for anything that can return a token. +type TokenProvider interface { + // Token returns a Token or an error. + // The Token returned must be safe to use + // concurrently. + // The returned Token must not be modified. + // The context provided must be sent along to any requests that are made in + // the implementing code. + Token(context.Context) (*Token, error) +} + +// Token holds the credential token used to authorize requests. All fields are +// considered read-only. +type Token struct { + // Value is the token used to authorize requests. It is usually an access + // token but may be other types of tokens such as ID tokens in some flows. + Value string + // Type is the type of token Value is. If uninitialized, it should be + // assumed to be a "Bearer" token.
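As a reading aid for the vendored TokenProvider interface above, here is a minimal sketch of a conforming implementation, a provider that always returns one fixed token. It is illustrative only and not part of this change; the package name and token value are placeholders.

```go
package example

import (
	"context"

	"cloud.google.com/go/auth"
)

// staticTokenProvider always returns the same pre-built token. Because the
// token is never modified after construction, Token is safe for concurrent
// use, as the TokenProvider contract requires.
type staticTokenProvider struct{ tok *auth.Token }

func (s staticTokenProvider) Token(context.Context) (*auth.Token, error) {
	return s.tok, nil
}

// Compile-time check that the sketch satisfies the interface.
var _ auth.TokenProvider = staticTokenProvider{}
```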
+ Type string + // Expiry is the time the token is set to expire. + Expiry time.Time + // Metadata may include, but is not limited to, the body of the token + // response returned by the server. + Metadata map[string]interface{} // TODO(codyoss): maybe make a method to flatten metadata to avoid []string for url.Values +} + +// IsValid reports whether a [Token] is non-nil, has a [Token.Value], and has not +// expired. A token is considered expired if [Token.Expiry] has passed or will +// pass in the next 3 minutes and 45 seconds (defaultExpiryDelta). +func (t *Token) IsValid() bool { + return t.isValidWithEarlyExpiry(defaultExpiryDelta) +} + +func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool { + if t == nil || t.Value == "" { + return false + } + if t.Expiry.IsZero() { + return true + } + return !t.Expiry.Round(0).Add(-earlyExpiry).Before(timeNow()) +} + +// Credentials holds Google credentials, including +// [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials). +type Credentials struct { + json []byte + projectID CredentialsPropertyProvider + quotaProjectID CredentialsPropertyProvider + // universeDomain is the default service domain for a given Cloud universe. + universeDomain CredentialsPropertyProvider + + TokenProvider +} + +// JSON returns the bytes associated with the file used to source +// credentials if one was used. +func (c *Credentials) JSON() []byte { + return c.json +} + +// ProjectID returns the associated project ID from the underlying file or +// environment. +func (c *Credentials) ProjectID(ctx context.Context) (string, error) { + if c.projectID == nil { + return internal.GetProjectID(c.json, ""), nil + } + v, err := c.projectID.GetProperty(ctx) + if err != nil { + return "", err + } + return internal.GetProjectID(c.json, v), nil +} + +// QuotaProjectID returns the associated quota project ID from the underlying +// file or environment. +func (c *Credentials) QuotaProjectID(ctx context.Context) (string, error) { + if c.quotaProjectID == nil { + return internal.GetQuotaProject(c.json, ""), nil + } + v, err := c.quotaProjectID.GetProperty(ctx) + if err != nil { + return "", err + } + return internal.GetQuotaProject(c.json, v), nil +} + +// UniverseDomain returns the default service domain for a given Cloud universe. +// The default value is "googleapis.com". +func (c *Credentials) UniverseDomain(ctx context.Context) (string, error) { + if c.universeDomain == nil { + return universeDomainDefault, nil + } + v, err := c.universeDomain.GetProperty(ctx) + if err != nil { + return "", err + } + if v == "" { + return universeDomainDefault, nil + } + return v, err +} + +// CredentialsPropertyProvider provides an implementation to fetch a property +// value for [Credentials]. +type CredentialsPropertyProvider interface { + GetProperty(context.Context) (string, error) +} + +// CredentialsPropertyFunc is a type adapter to allow the use of ordinary +// functions as a [CredentialsPropertyProvider]. +type CredentialsPropertyFunc func(context.Context) (string, error) + +// GetProperty returns the property value for the given context. +func (p CredentialsPropertyFunc) GetProperty(ctx context.Context) (string, error) { + return p(ctx) +} + +// CredentialsOptions are used to configure [Credentials]. +type CredentialsOptions struct { + // TokenProvider is a means of sourcing a token for the credentials. Required. + TokenProvider TokenProvider + // JSON is the raw contents of the credentials file if sourced from a file.
+ JSON []byte + // ProjectIDProvider resolves the project ID associated with the + // credentials. + ProjectIDProvider CredentialsPropertyProvider + // QuotaProjectIDProvider resolves the quota project ID associated with the + // credentials. + QuotaProjectIDProvider CredentialsPropertyProvider + // UniverseDomainProvider resolves the universe domain associated with the credentials. + UniverseDomainProvider CredentialsPropertyProvider +} + +// NewCredentials returns new [Credentials] from the provided options. Most users +// will want to build this object with a function from the +// [cloud.google.com/go/auth/credentials] package. +func NewCredentials(opts *CredentialsOptions) *Credentials { + creds := &Credentials{ + TokenProvider: opts.TokenProvider, + json: opts.JSON, + projectID: opts.ProjectIDProvider, + quotaProjectID: opts.QuotaProjectIDProvider, + universeDomain: opts.UniverseDomainProvider, + } + + return creds +} + +// CachedTokenProviderOptions provides options for configuring a +// CachedTokenProvider. +type CachedTokenProviderOptions struct { + // DisableAutoRefresh makes the TokenProvider always return the same token, + // even if it is expired. + DisableAutoRefresh bool + // ExpireEarly configures the amount of time before a token expires that it + // should be refreshed. If unset, the default value is 3 minutes and 45 + // seconds (defaultExpiryDelta). + ExpireEarly time.Duration +} + +func (ctpo *CachedTokenProviderOptions) autoRefresh() bool { + if ctpo == nil { + return true + } + return !ctpo.DisableAutoRefresh +} + +func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration { + if ctpo == nil { + return defaultExpiryDelta + } + return ctpo.ExpireEarly +} + +// NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned +// by the underlying provider. By default it will refresh tokens 3 minutes and 45 +// seconds before they expire, but this time can be configured with the optional +// options. +func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider { + if ctp, ok := tp.(*cachedTokenProvider); ok { + return ctp + } + return &cachedTokenProvider{ + tp: tp, + autoRefresh: opts.autoRefresh(), + expireEarly: opts.expireEarly(), + } +} + +type cachedTokenProvider struct { + tp TokenProvider + autoRefresh bool + expireEarly time.Duration + + mu sync.Mutex + cachedToken *Token +} + +func (c *cachedTokenProvider) Token(ctx context.Context) (*Token, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.cachedToken.IsValid() || !c.autoRefresh { + return c.cachedToken, nil + } + t, err := c.tp.Token(ctx) + if err != nil { + return nil, err + } + c.cachedToken = t + return t, nil +} + +// Error is an error associated with retrieving a [Token]. It can hold useful +// additional details for debugging. +type Error struct { + // Response is the HTTP response associated with the error. The body will always + // be already closed and consumed. + Response *http.Response + // Body is the HTTP response body. + Body []byte + // Err is the underlying wrapped error.
+ Err error + + // code returned in the token response + code string + // description returned in the token response + description string + // uri returned in the token response + uri string +} + +func (e *Error) Error() string { + if e.code != "" { + s := fmt.Sprintf("auth: %q", e.code) + if e.description != "" { + s += fmt.Sprintf(" %q", e.description) + } + if e.uri != "" { + s += fmt.Sprintf(" %q", e.uri) + } + return s + } + return fmt.Sprintf("auth: cannot fetch token: %v\nResponse: %s", e.Response.StatusCode, e.Body) +} + +// Temporary returns true if the error is considered temporary and may be able +// to be retried. +func (e *Error) Temporary() bool { + if e.Response == nil { + return false + } + sc := e.Response.StatusCode + return sc == http.StatusInternalServerError || sc == http.StatusServiceUnavailable || sc == http.StatusRequestTimeout || sc == http.StatusTooManyRequests +} + +func (e *Error) Unwrap() error { + return e.Err +} + +// Style describes how the token endpoint wants to receive the ClientID and +// ClientSecret. +type Style int + +const ( + // StyleUnknown means the value has not been initialized. Sending this in + // a request will cause the token exchange to fail. + StyleUnknown Style = iota + // StyleInParams sends client info in the body of a POST request. + StyleInParams + // StyleInHeader sends client info using Basic Authorization header. + StyleInHeader +) + +// Options2LO holds the configuration settings for doing a 2-legged JWT OAuth2 flow. +type Options2LO struct { + // Email is the OAuth2 client ID. This value is set as the "iss" in the + // JWT. + Email string + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. It is used to sign + // the JWT created. + PrivateKey []byte + // TokenURL is the URL the JWT is sent to. Required. + TokenURL string + // PrivateKeyID is the ID of the key used to sign the JWT. It is used as the + // "kid" in the JWT header. Optional. + PrivateKeyID string + // Subject is the user to impersonate. It is used as the "sub" in + // the JWT. Optional. + Subject string + // Scopes specifies requested permissions for the token. Optional. + Scopes []string + // Expires specifies the lifetime of the token. Optional. + Expires time.Duration + // Audience specifies the "aud" in the JWT. Optional. + Audience string + // PrivateClaims allows specifying any custom claims for the JWT. Optional. + PrivateClaims map[string]interface{} + + // Client is the client to be used to make the underlying token requests. + // Optional. + Client *http.Client + // UseIDToken requests that the token returned be an ID token if one is + // returned from the server. Optional. + UseIDToken bool +} + +func (o *Options2LO) client() *http.Client { + if o.Client != nil { + return o.Client + } + return internal.CloneDefaultClient() +} + +func (o *Options2LO) validate() error { + if o == nil { + return errors.New("auth: options must be provided") + } + if o.Email == "" { + return errors.New("auth: email must be provided") + } + if len(o.PrivateKey) == 0 { + return errors.New("auth: private key must be provided") + } + if o.TokenURL == "" { + return errors.New("auth: token URL must be provided") + } + return nil +} + +// New2LOTokenProvider returns a [TokenProvider] from the provided options.
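Before the constructor's implementation, a hedged sketch of calling it together with NewCachedTokenProvider from earlier in this file; the email, key path, scope, and early-expiry value are placeholders, not anything this diff configures:

```go
package example

import (
	"context"
	"os"
	"time"

	"cloud.google.com/go/auth"
)

// twoLeggedToken builds a JWT-bearer (2LO) provider, wraps it in the caching
// layer, and fetches a token. Placeholder inputs throughout.
func twoLeggedToken(ctx context.Context) (*auth.Token, error) {
	pem, err := os.ReadFile("service-account-key.pem") // placeholder path
	if err != nil {
		return nil, err
	}
	tp, err := auth.New2LOTokenProvider(&auth.Options2LO{
		Email:      "sa@example.iam.gserviceaccount.com", // placeholder
		PrivateKey: pem,
		TokenURL:   "https://oauth2.googleapis.com/token",
		Scopes:     []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		return nil, err
	}
	// The wrapper caches the token and refreshes it ExpireEarly before expiry.
	cached := auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{
		ExpireEarly: 30 * time.Second,
	})
	return cached.Token(ctx)
}
```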
+func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + return tokenProvider2LO{opts: opts, Client: opts.client()}, nil +} + +type tokenProvider2LO struct { + opts *Options2LO + Client *http.Client +} + +func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { + pk, err := internal.ParseKey(tp.opts.PrivateKey) + if err != nil { + return nil, err + } + claimSet := &jwt.Claims{ + Iss: tp.opts.Email, + Scope: strings.Join(tp.opts.Scopes, " "), + Aud: tp.opts.TokenURL, + AdditionalClaims: tp.opts.PrivateClaims, + Sub: tp.opts.Subject, + } + if t := tp.opts.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + if aud := tp.opts.Audience; aud != "" { + claimSet.Aud = aud + } + h := *defaultHeader + h.KeyID = tp.opts.PrivateKeyID + payload, err := jwt.EncodeJWS(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := tp.Client.PostForm(tp.opts.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("auth: cannot fetch token: %w", err) + } + defer resp.Body.Close() + body, err := internal.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("auth: cannot fetch token: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { + return nil, &Error{ + Response: resp, + Body: body, + } + } + // tokenRes is the JSON response body. + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("auth: cannot fetch token: %w", err) + } + token := &Token{ + Value: tokenRes.AccessToken, + Type: tokenRes.TokenType, + } + token.Metadata = make(map[string]interface{}) + json.Unmarshal(body, &token.Metadata) // no error checks for optional fields + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jwt.DecodeJWS(v) + if err != nil { + return nil, fmt.Errorf("auth: error decoding JWT token: %w", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + if tp.opts.UseIDToken { + if tokenRes.IDToken == "" { + return nil, fmt.Errorf("auth: response doesn't have JWT token") + } + token.Value = tokenRes.IDToken + } + return token, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go new file mode 100644 index 000000000..f3ec88824 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -0,0 +1,85 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
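One more note on auth.go before compute.go's body: a hedged sketch of consuming the *Error/Temporary contract the file defines. The helper name and the fixed one-second backoff are illustrative, not part of the vendored API.

```go
package example

import (
	"context"
	"errors"
	"time"

	"cloud.google.com/go/auth"
)

// fetchWithRetry retries a token fetch once when the failure is marked
// temporary (408, 429, 500, or 503) by (*auth.Error).Temporary.
func fetchWithRetry(ctx context.Context, tp auth.TokenProvider) (*auth.Token, error) {
	tok, err := tp.Token(ctx)
	var aErr *auth.Error
	if err != nil && errors.As(err, &aErr) && aErr.Temporary() {
		time.Sleep(time.Second) // naive fixed backoff for illustration
		return tp.Token(ctx)
	}
	return tok, err
}
```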
+ +package credentials + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/compute/metadata" +) + +var ( + computeTokenMetadata = map[string]interface{}{ + "auth.google.tokenSource": "compute-metadata", + "auth.google.serviceAccount": "default", + } + computeTokenURI = "instance/service-accounts/default/token" +) + +// computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that +// uses the metadata service to retrieve tokens. +func computeTokenProvider(earlyExpiry time.Duration, scope ...string) auth.TokenProvider { + return auth.NewCachedTokenProvider(computeProvider{scopes: scope}, &auth.CachedTokenProviderOptions{ + ExpireEarly: earlyExpiry, + }) +} + +// computeProvider fetches tokens from the google cloud metadata service. +type computeProvider struct { + scopes []string +} + +type metadataTokenResp struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` +} + +func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { + tokenURI, err := url.Parse(computeTokenURI) + if err != nil { + return nil, err + } + if len(cs.scopes) > 0 { + v := url.Values{} + v.Set("scopes", strings.Join(cs.scopes, ",")) + tokenURI.RawQuery = v.Encode() + } + tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String()) + if err != nil { + return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + } + var res metadataTokenResp + if err := json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res); err != nil { + return nil, fmt.Errorf("credentials: invalid token JSON from metadata: %w", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, errors.New("credentials: incomplete token received from metadata") + } + return &auth.Token{ + Value: res.AccessToken, + Type: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + Metadata: computeTokenMetadata, + }, nil + +} diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go new file mode 100644 index 000000000..cb3f44f58 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -0,0 +1,252 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/compute/metadata" +) + +const ( + // jwtTokenURL is Google's OAuth 2.0 token URL to use with the JWT(2LO) flow. + jwtTokenURL = "https://oauth2.googleapis.com/token" + + // Google's OAuth 2.0 default endpoints. 
+ googleAuthURL = "https://accounts.google.com/o/oauth2/auth" + googleTokenURL = "https://oauth2.googleapis.com/token" + + // Help on default credentials + adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" +) + +var ( + // for testing + allowOnGCECheck = true +) + +// OnGCE reports whether this process is running in Google Cloud. +func OnGCE() bool { + // TODO(codyoss): once all libs use this auth lib move metadata check here + return allowOnGCECheck && metadata.OnGCE() +} + +// DetectDefault searches for "Application Default Credentials" and returns +// a credential based on the [DetectOptions] provided. +// +// It looks for credentials in the following places, preferring the first +// location found: +// +// - A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS +// environment variable. For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation +// on how to generate the JSON configuration file for on-prem/non-Google +// cloud platforms. +// - A JSON file in a location known to the gcloud command-line tool. On +// Windows, this is %APPDATA%/gcloud/application_default_credentials.json. On +// other systems, $HOME/.config/gcloud/application_default_credentials.json. +// - On Google Compute Engine, Google App Engine standard second generation +// runtimes, and Google App Engine flexible environment, it fetches +// credentials from the metadata server. +func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { + if err := opts.validate(); err != nil { + return nil, err + } + if opts.CredentialsJSON != nil { + return readCredentialsFileJSON(opts.CredentialsJSON, opts) + } + if opts.CredentialsFile != "" { + return readCredentialsFile(opts.CredentialsFile, opts) + } + if filename := os.Getenv(credsfile.GoogleAppCredsEnvVar); filename != "" { + if creds, err := readCredentialsFile(filename, opts); err == nil { + return creds, err + } + } + + fileName := credsfile.GetWellKnownFileName() + if b, err := os.ReadFile(fileName); err == nil { + return readCredentialsFileJSON(b, opts) + } + + if OnGCE() { + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: computeTokenProvider(opts.EarlyTokenRefresh, opts.Scopes...), + ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { + return metadata.ProjectID() + }), + UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, + }), nil + } + + return nil, fmt.Errorf("credentials: could not find default credentials. See %v for more information", adcSetupURL) +} + +// DetectOptions provides configuration for [DetectDefault]. +type DetectOptions struct { + // Scopes that credentials tokens should have. Example: + // https://www.googleapis.com/auth/cloud-platform. Required if Audience is + // not provided. + Scopes []string + // Audience that credentials tokens should have. Only applicable for 2LO + // flows with service accounts. If specified, scopes should not be provided. + Audience string + // Subject is the user email used for [domain wide delegation](https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority). + // Optional. + Subject string + // EarlyTokenRefresh configures how early before a token expires that it + // should be refreshed. + EarlyTokenRefresh time.Duration + // AuthHandlerOptions configures an authorization handler and other options + // for 3LO flows. It is required, and only used, for client credential + // flows. 
AuthHandlerOptions *auth.AuthorizationHandlerOptions + // TokenURL allows setting the token endpoint for user credential flows. If + // unset, the default value is https://oauth2.googleapis.com/token. + // Optional. + TokenURL string + // STSAudience is the audience sent when retrieving an STS token. + // Currently this is only used for the GDCH auth flow, for which it is required. + STSAudience string + // CredentialsFile overrides detection logic and sources a credential file + // from the provided filepath. If provided, CredentialsJSON must not be. + // Optional. + CredentialsFile string + // CredentialsJSON overrides detection logic and uses the JSON bytes as the + // source for the credential. If provided, CredentialsFile must not be. + // Optional. + CredentialsJSON []byte + // UseSelfSignedJWT directs service account based credentials to create a + // self-signed JWT with the private key found in the file, skipping any + // network requests that would normally be made. Optional. + UseSelfSignedJWT bool + // Client configures the underlying client used to make network requests + // when fetching tokens. Optional. + Client *http.Client + // UniverseDomain is the default service domain for a given Cloud universe. + // The default value is "googleapis.com". This option is ignored for + // authentication flows that do not support universe domain. Optional. + UniverseDomain string +} + +func (o *DetectOptions) validate() error { + if o == nil { + return errors.New("credentials: options must be provided") + } + if len(o.Scopes) > 0 && o.Audience != "" { + return errors.New("credentials: both scopes and audience were provided") + } + if len(o.CredentialsJSON) > 0 && o.CredentialsFile != "" { + return errors.New("credentials: both credentials file and JSON were provided") + } + return nil +} + +func (o *DetectOptions) tokenURL() string { + if o.TokenURL != "" { + return o.TokenURL + } + return googleTokenURL +} + +func (o *DetectOptions) scopes() []string { + scopes := make([]string, len(o.Scopes)) + copy(scopes, o.Scopes) + return scopes +} + +func (o *DetectOptions) client() *http.Client { + if o.Client != nil { + return o.Client + } + return internal.CloneDefaultClient() +} + +func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) { + b, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + return readCredentialsFileJSON(b, opts) +} + +func readCredentialsFileJSON(b []byte, opts *DetectOptions) (*auth.Credentials, error) { + // attempt to parse the JSON as a Google Developers Console client_credentials.json.
+ config := clientCredConfigFromJSON(b, opts) + if config != nil { + if config.AuthHandlerOpts == nil { + return nil, errors.New("credentials: auth handler must be specified for this credential filetype") + } + tp, err := auth.New3LOTokenProvider(config) + if err != nil { + return nil, err + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: tp, + JSON: b, + }), nil + } + return fileCredentials(b, opts) +} + +func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO { + var creds credsfile.ClientCredentialsFile + var c *credsfile.Config3LO + if err := json.Unmarshal(b, &creds); err != nil { + return nil + } + switch { + case creds.Web != nil: + c = creds.Web + case creds.Installed != nil: + c = creds.Installed + default: + return nil + } + if len(c.RedirectURIs) < 1 { + return nil + } + var handleOpts *auth.AuthorizationHandlerOptions + if opts.AuthHandlerOptions != nil { + handleOpts = &auth.AuthorizationHandlerOptions{ + Handler: opts.AuthHandlerOptions.Handler, + State: opts.AuthHandlerOptions.State, + PKCEOpts: opts.AuthHandlerOptions.PKCEOpts, + } + } + return &auth.Options3LO{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: opts.scopes(), + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + Client: opts.client(), + EarlyTokenExpiry: opts.EarlyTokenRefresh, + AuthHandlerOpts: handleOpts, + // TODO(codyoss): refactor this out. We need to add in auto-detection + // for this use case. + AuthStyle: auth.StyleInParams, + } +} diff --git a/vendor/cloud.google.com/go/auth/credentials/doc.go b/vendor/cloud.google.com/go/auth/credentials/doc.go new file mode 100644 index 000000000..1dbb2866b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/doc.go @@ -0,0 +1,45 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package credentials provides support for making OAuth2 authorized and +// authenticated HTTP requests to Google APIs. It supports the Web server flow, +// client-side credentials, service accounts, Google Compute Engine service +// accounts, Google App Engine service accounts and workload identity federation +// from non-Google cloud platforms. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// For more information on using workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. +// +// # Credentials +// +// The [cloud.google.com/go/auth.Credentials] type represents Google +// credentials, including Application Default Credentials. +// +// Use [DetectDefault] to obtain Application Default Credentials. 
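As a hedged sketch of the call pattern that sentence describes, DetectDefault is typically used like this; the scope value is a placeholder rather than anything this diff prescribes:

```go
package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/auth/credentials"
)

// adcToken runs Application Default Credentials detection and fetches a
// token through the embedded auth.TokenProvider. The scope is a placeholder.
func adcToken(ctx context.Context) error {
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		return err
	}
	tok, err := creds.Token(ctx) // Credentials embeds auth.TokenProvider
	if err != nil {
		return err
	}
	project, _ := creds.ProjectID(ctx) // may be empty for some credential types
	fmt.Println("project:", project, "token valid:", tok.IsValid())
	return nil
}
```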
+// +// Application Default Credentials support workload identity federation to +// access Google Cloud resources from non-Google Cloud platforms including Amazon +// Web Services (AWS), Microsoft Azure or any identity provider that supports +// OpenID Connect (OIDC). Workload identity federation is recommended for +// non-Google Cloud environments as it avoids the need to download, manage, and +// store service account private keys locally. +// +// # Workforce Identity Federation +// +// For more information on this feature see [cloud.google.com/go/auth/credentials/externalaccount]. +package credentials diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go new file mode 100644 index 000000000..a66e56d70 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -0,0 +1,219 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "errors" + "fmt" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials/internal/externalaccount" + "cloud.google.com/go/auth/credentials/internal/externalaccountuser" + "cloud.google.com/go/auth/credentials/internal/gdch" + "cloud.google.com/go/auth/credentials/internal/impersonate" + internalauth "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { + fileType, err := credsfile.ParseFileType(b) + if err != nil { + return nil, err + } + + var projectID, quotaProjectID, universeDomain string + var tp auth.TokenProvider + switch fileType { + case credsfile.ServiceAccountKey: + f, err := credsfile.ParseServiceAccount(b) + if err != nil { + return nil, err + } + tp, err = handleServiceAccount(f, opts) + if err != nil { + return nil, err + } + projectID = f.ProjectID + universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.UserCredentialsKey: + f, err := credsfile.ParseUserCredentials(b) + if err != nil { + return nil, err + } + tp, err = handleUserCredential(f, opts) + if err != nil { + return nil, err + } + quotaProjectID = f.QuotaProjectID + universeDomain = f.UniverseDomain + case credsfile.ExternalAccountKey: + f, err := credsfile.ParseExternalAccount(b) + if err != nil { + return nil, err + } + tp, err = handleExternalAccount(f, opts) + if err != nil { + return nil, err + } + quotaProjectID = f.QuotaProjectID + universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.ExternalAccountAuthorizedUserKey: + f, err := credsfile.ParseExternalAccountAuthorizedUser(b) + if err != nil { + return nil, err + } + tp, err = handleExternalAccountAuthorizedUser(f, opts) + if err != nil { + return nil, err + } + quotaProjectID = f.QuotaProjectID + universeDomain = f.UniverseDomain + case credsfile.ImpersonatedServiceAccountKey: + f, err := credsfile.ParseImpersonatedServiceAccount(b) + if err != nil { + return 
nil, err + } + tp, err = handleImpersonatedServiceAccount(f, opts) + if err != nil { + return nil, err + } + universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.GDCHServiceAccountKey: + f, err := credsfile.ParseGDCHServiceAccount(b) + if err != nil { + return nil, err + } + tp, err = handleGDCHServiceAccount(f, opts) + if err != nil { + return nil, err + } + projectID = f.Project + universeDomain = f.UniverseDomain + default: + return nil, fmt.Errorf("credentials: unsupported filetype %q", fileType) + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenRefresh, + }), + JSON: b, + ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), + QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID), + UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain), + }), nil +} + +// resolveUniverseDomain returns optsUniverseDomain if non-empty, in order to +// support configuring universe-specific credentials in code. Auth flows +// unsupported for universe domain should not use this func, but should instead +// simply set the file universe domain on the credentials. +func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string { + if optsUniverseDomain != "" { + return optsUniverseDomain + } + return fileUniverseDomain +} + +func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if opts.UseSelfSignedJWT { + return configureSelfSignedJWT(f, opts) + } + opts2LO := &auth.Options2LO{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: opts.scopes(), + TokenURL: f.TokenURL, + Subject: opts.Subject, + } + if opts2LO.TokenURL == "" { + opts2LO.TokenURL = jwtTokenURL + } + return auth.New2LOTokenProvider(opts2LO) +} + +func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) (auth.TokenProvider, error) { + opts3LO := &auth.Options3LO{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: opts.scopes(), + AuthURL: googleAuthURL, + TokenURL: opts.tokenURL(), + AuthStyle: auth.StyleInParams, + EarlyTokenExpiry: opts.EarlyTokenRefresh, + RefreshToken: f.RefreshToken, + } + return auth.New3LOTokenProvider(opts3LO) +} + +func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + externalOpts := &externalaccount.Options{ + Audience: f.Audience, + SubjectTokenType: f.SubjectTokenType, + TokenURL: f.TokenURL, + TokenInfoURL: f.TokenInfoURL, + ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL, + ClientSecret: f.ClientSecret, + ClientID: f.ClientID, + CredentialSource: f.CredentialSource, + QuotaProjectID: f.QuotaProjectID, + Scopes: opts.scopes(), + WorkforcePoolUserProject: f.WorkforcePoolUserProject, + Client: opts.client(), + } + if f.ServiceAccountImpersonation != nil { + externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds + } + return externalaccount.NewTokenProvider(externalOpts) +} + +func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedUserFile, opts *DetectOptions) (auth.TokenProvider, error) { + externalOpts := &externalaccountuser.Options{ + Audience: f.Audience, + RefreshToken: f.RefreshToken, + TokenURL: f.TokenURL, + TokenInfoURL: f.TokenInfoURL, + ClientID: f.ClientID, + 
ClientSecret: f.ClientSecret, + Scopes: opts.scopes(), + Client: opts.client(), + } + return externalaccountuser.NewTokenProvider(externalOpts) +} + +func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if f.ServiceAccountImpersonationURL == "" || f.CredSource == nil { + return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") + } + + tp, err := fileCredentials(f.CredSource, opts) + if err != nil { + return nil, err + } + return impersonate.NewTokenProvider(&impersonate.Options{ + URL: f.ServiceAccountImpersonationURL, + Scopes: opts.scopes(), + Tp: tp, + Delegates: f.Delegates, + Client: opts.client(), + }) +} + +func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + return gdch.NewTokenProvider(f, &gdch.Options{ + STSAudience: opts.STSAudience, + Client: opts.client(), + }) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go new file mode 100644 index 000000000..d9e1dcddf --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -0,0 +1,547 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path" + "sort" + "strings" + "time" + + "cloud.google.com/go/auth/internal" +) + +var ( + // getenv aliases os.Getenv for testing + getenv = os.Getenv +) + +const ( + // AWS Signature Version 4 signing algorithm identifier. + awsAlgorithm = "AWS4-HMAC-SHA256" + + // The termination string for the AWS credential scope value as defined in + // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html + awsRequestType = "aws4_request" + + // The AWS authorization header name for the security session token if available. + awsSecurityTokenHeader = "x-amz-security-token" + + // The name of the header containing the session token for metadata endpoint calls + awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token" + + awsIMDSv2SessionTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds" + + awsIMDSv2SessionTTL = "300" + + // The AWS authorization header name for the auto-generated date. + awsDateHeader = "x-amz-date" + + defaultRegionalCredentialVerificationURL = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" + + // Supported AWS configuration environment variables. 
+ awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID" + awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION" + awsRegionEnvVar = "AWS_REGION" + awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY" + awsSessionTokenEnvVar = "AWS_SESSION_TOKEN" + + awsTimeFormatLong = "20060102T150405Z" + awsTimeFormatShort = "20060102" + awsProviderType = "aws" +) + +type awsSubjectProvider struct { + EnvironmentID string + RegionURL string + RegionalCredVerificationURL string + CredVerificationURL string + IMDSv2SessionTokenURL string + TargetResource string + requestSigner *awsRequestSigner + region string + securityCredentialsProvider AwsSecurityCredentialsProvider + reqOpts *RequestOptions + + Client *http.Client +} + +func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) { + // Set Defaults + if sp.RegionalCredVerificationURL == "" { + sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL + } + if sp.requestSigner == nil { + headers := make(map[string]string) + if sp.shouldUseMetadataServer() { + awsSessionToken, err := sp.getAWSSessionToken(ctx) + if err != nil { + return "", err + } + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } + } + + awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + if err != nil { + return "", err + } + if sp.region, err = sp.getRegion(ctx, headers); err != nil { + return "", err + } + sp.requestSigner = &awsRequestSigner{ + RegionName: sp.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + } + + // Generate the signed request to AWS STS GetCallerIdentity API. + // Use the required regional endpoint. Otherwise, the request will fail. + req, err := http.NewRequest("POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) + if err != nil { + return "", err + } + // The full, canonical resource name of the workload identity pool + // provider, with or without the HTTPS prefix. + // Including this header as part of the signature is recommended to + // ensure data integrity. + if sp.TargetResource != "" { + req.Header.Set("x-goog-cloud-target-resource", sp.TargetResource) + } + sp.requestSigner.signRequest(req) + + /* + The GCP STS endpoint expects the headers to be formatted as: + # [ + # {key: 'x-amz-date', value: '...'}, + # {key: 'Authorization', value: '...'}, + # ... + # ] + # And then serialized as: + # quote(json.dumps({ + # url: '...', + # method: 'POST', + # headers: [{key: 'x-amz-date', value: '...'}, ...] 
+ # })) + */ + + awsSignedReq := awsRequest{ + URL: req.URL.String(), + Method: "POST", + } + for headerKey, headerList := range req.Header { + for _, headerValue := range headerList { + awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{ + Key: headerKey, + Value: headerValue, + }) + } + } + sort.Slice(awsSignedReq.Headers, func(i, j int) bool { + headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key) + if headerCompare == 0 { + return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0 + } + return headerCompare < 0 + }) + + result, err := json.Marshal(awsSignedReq) + if err != nil { + return "", err + } + return url.QueryEscape(string(result)), nil +} + +func (sp *awsSubjectProvider) providerType() string { + if sp.securityCredentialsProvider != nil { + return programmaticProviderType + } + return awsProviderType +} + +func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, error) { + if sp.IMDSv2SessionTokenURL == "" { + return "", nil + } + req, err := http.NewRequestWithContext(ctx, "PUT", sp.IMDSv2SessionTokenURL, nil) + if err != nil { + return "", err + } + req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL) + + resp, err := sp.Client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := internal.ReadAll(resp.Body) + if err != nil { + return "", err + } + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", respBody) + } + return string(respBody), nil +} + +func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]string) (string, error) { + if sp.securityCredentialsProvider != nil { + return sp.securityCredentialsProvider.AwsRegion(ctx, sp.reqOpts) + } + if canRetrieveRegionFromEnvironment() { + if envAwsRegion := getenv(awsRegionEnvVar); envAwsRegion != "" { + return envAwsRegion, nil + } + return getenv(awsDefaultRegionEnvVar), nil + } + + if sp.RegionURL == "" { + return "", errors.New("credentials: unable to determine AWS region") + } + + req, err := http.NewRequestWithContext(ctx, "GET", sp.RegionURL, nil) + if err != nil { + return "", err + } + + for name, value := range headers { + req.Header.Add(name, value) + } + + resp, err := sp.Client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := internal.ReadAll(resp.Body) + if err != nil { + return "", err + } + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", respBody) + } + + // This endpoint will return the region in format: us-east-2b. + // Only the us-east-2 part should be used. 
+	bodyLen := len(respBody)
+	if bodyLen == 0 {
+		return "", nil
+	}
+	return string(respBody[:bodyLen-1]), nil
+}
+
+func (sp *awsSubjectProvider) getSecurityCredentials(ctx context.Context, headers map[string]string) (result *AwsSecurityCredentials, err error) {
+	if sp.securityCredentialsProvider != nil {
+		return sp.securityCredentialsProvider.AwsSecurityCredentials(ctx, sp.reqOpts)
+	}
+	if canRetrieveSecurityCredentialFromEnvironment() {
+		return &AwsSecurityCredentials{
+			AccessKeyID:     getenv(awsAccessKeyIDEnvVar),
+			SecretAccessKey: getenv(awsSecretAccessKeyEnvVar),
+			SessionToken:    getenv(awsSessionTokenEnvVar),
+		}, nil
+	}
+
+	roleName, err := sp.getMetadataRoleName(ctx, headers)
+	if err != nil {
+		return
+	}
+	credentials, err := sp.getMetadataSecurityCredentials(ctx, roleName, headers)
+	if err != nil {
+		return
+	}
+
+	if credentials.AccessKeyID == "" {
+		return result, errors.New("credentials: missing AccessKeyId credential")
+	}
+	if credentials.SecretAccessKey == "" {
+		return result, errors.New("credentials: missing SecretAccessKey credential")
+	}
+
+	return credentials, nil
+}
+
+func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context, roleName string, headers map[string]string) (*AwsSecurityCredentials, error) {
+	var result *AwsSecurityCredentials
+
+	req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/%s", sp.CredVerificationURL, roleName), nil)
+	if err != nil {
+		return result, err
+	}
+	for name, value := range headers {
+		req.Header.Add(name, value)
+	}
+
+	resp, err := sp.Client.Do(req)
+	if err != nil {
+		return result, err
+	}
+	defer resp.Body.Close()
+
+	respBody, err := internal.ReadAll(resp.Body)
+	if err != nil {
+		return result, err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", respBody)
+	}
+	err = json.Unmarshal(respBody, &result)
+	return result, err
+}
+
+func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers map[string]string) (string, error) {
+	if sp.CredVerificationURL == "" {
+		return "", errors.New("credentials: unable to determine the AWS metadata server security credentials endpoint")
+	}
+	req, err := http.NewRequestWithContext(ctx, "GET", sp.CredVerificationURL, nil)
+	if err != nil {
+		return "", err
+	}
+	for name, value := range headers {
+		req.Header.Add(name, value)
+	}
+
+	resp, err := sp.Client.Do(req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	respBody, err := internal.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", respBody)
+	}
+	return string(respBody), nil
+}
+
+// awsRequestSigner is a utility type used to sign http requests using an AWS V4 signature.
+type awsRequestSigner struct {
+	RegionName             string
+	AwsSecurityCredentials *AwsSecurityCredentials
+}
+
+// signRequest adds the appropriate headers to an http.Request
+// or returns an error if something prevented this.
+func (rs *awsRequestSigner) signRequest(req *http.Request) error { + // req is assumed non-nil + signedRequest := cloneRequest(req) + timestamp := Now() + signedRequest.Header.Set("host", requestHost(req)) + if rs.AwsSecurityCredentials.SessionToken != "" { + signedRequest.Header.Set(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken) + } + if signedRequest.Header.Get("date") == "" { + signedRequest.Header.Set(awsDateHeader, timestamp.Format(awsTimeFormatLong)) + } + authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp) + if err != nil { + return err + } + signedRequest.Header.Set("Authorization", authorizationCode) + req.Header = signedRequest.Header + return nil +} + +func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) { + canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req) + dateStamp := timestamp.Format(awsTimeFormatShort) + serviceName := "" + + if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 { + serviceName = splitHost[0] + } + credentialScope := strings.Join([]string{dateStamp, rs.RegionName, serviceName, awsRequestType}, "/") + requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData) + if err != nil { + return "", err + } + requestHash, err := getSha256([]byte(requestString)) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash}, "\n") + signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey) + for _, signingInput := range []string{ + dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign, + } { + signingKey, err = getHmacSha256(signingKey, []byte(signingInput)) + if err != nil { + return "", err + } + } + + return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil +} + +func getSha256(input []byte) (string, error) { + hash := sha256.New() + if _, err := hash.Write(input); err != nil { + return "", err + } + return hex.EncodeToString(hash.Sum(nil)), nil +} + +func getHmacSha256(key, input []byte) ([]byte, error) { + hash := hmac.New(sha256.New, key) + if _, err := hash.Write(input); err != nil { + return nil, err + } + return hash.Sum(nil), nil +} + +func cloneRequest(r *http.Request) *http.Request { + r2 := new(http.Request) + *r2 = *r + if r.Header != nil { + r2.Header = make(http.Header, len(r.Header)) + + // Find total number of values. + headerCount := 0 + for _, headerValues := range r.Header { + headerCount += len(headerValues) + } + copiedHeaders := make([]string, headerCount) // shared backing array for headers' values + + for headerKey, headerValues := range r.Header { + headerCount = copy(copiedHeaders, headerValues) + r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount] + copiedHeaders = copiedHeaders[headerCount:] + } + } + return r2 +} + +func canonicalPath(req *http.Request) string { + result := req.URL.EscapedPath() + if result == "" { + return "/" + } + return path.Clean(result) +} + +func canonicalQuery(req *http.Request) string { + queryValues := req.URL.Query() + for queryKey := range queryValues { + sort.Strings(queryValues[queryKey]) + } + return queryValues.Encode() +} + +func canonicalHeaders(req *http.Request) (string, string) { + // Header keys need to be sorted alphabetically. 
+ var headers []string + lowerCaseHeaders := make(http.Header) + for k, v := range req.Header { + k := strings.ToLower(k) + if _, ok := lowerCaseHeaders[k]; ok { + // include additional values + lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...) + } else { + headers = append(headers, k) + lowerCaseHeaders[k] = v + } + } + sort.Strings(headers) + + var fullHeaders bytes.Buffer + for _, header := range headers { + headerValue := strings.Join(lowerCaseHeaders[header], ",") + fullHeaders.WriteString(header) + fullHeaders.WriteRune(':') + fullHeaders.WriteString(headerValue) + fullHeaders.WriteRune('\n') + } + + return strings.Join(headers, ";"), fullHeaders.String() +} + +func requestDataHash(req *http.Request) (string, error) { + var requestData []byte + if req.Body != nil { + requestBody, err := req.GetBody() + if err != nil { + return "", err + } + defer requestBody.Close() + + requestData, err = internal.ReadAll(requestBody) + if err != nil { + return "", err + } + } + + return getSha256(requestData) +} + +func requestHost(req *http.Request) string { + if req.Host != "" { + return req.Host + } + return req.URL.Host +} + +func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) { + dataHash, err := requestDataHash(req) + if err != nil { + return "", err + } + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil +} + +type awsRequestHeader struct { + Key string `json:"key"` + Value string `json:"value"` +} + +type awsRequest struct { + URL string `json:"url"` + Method string `json:"method"` + Headers []awsRequestHeader `json:"headers"` +} + +// The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is +// required. +func canRetrieveRegionFromEnvironment() bool { + return getenv(awsRegionEnvVar) != "" || getenv(awsDefaultRegionEnvVar) != "" +} + +// Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. +func canRetrieveSecurityCredentialFromEnvironment() bool { + return getenv(awsAccessKeyIDEnvVar) != "" && getenv(awsSecretAccessKeyEnvVar) != "" +} + +func (sp *awsSubjectProvider) shouldUseMetadataServer() bool { + return sp.securityCredentialsProvider == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment()) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go new file mode 100644 index 000000000..d5765c474 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go @@ -0,0 +1,284 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
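The signature computed by generateAuthentication in aws_provider.go above follows the standard AWS Signature Version 4 key-derivation chain. A minimal, self-contained sketch of that chain; the secret, date, region, and string-to-sign values below are illustrative and not part of this diff:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hmacSHA256 mirrors getHmacSha256 in aws_provider.go.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// Illustrative inputs; generateAuthentication derives these from the
	// request timestamp, the region, and the host (e.g. "sts" for STS).
	secret, date, region, service := "EXAMPLE_SECRET", "20240101", "us-east-2", "sts"
	stringToSign := "AWS4-HMAC-SHA256\n20240101T000000Z\n20240101/us-east-2/sts/aws4_request\n<request-hash>"

	// The signing key is an HMAC chain seeded with "AWS4" + secret key and
	// folded over date, region, service, the literal "aws4_request", and
	// finally the string to sign - the same loop generateAuthentication runs.
	key := []byte("AWS4" + secret)
	for _, input := range []string{date, region, service, "aws4_request", stringToSign} {
		key = hmacSHA256(key, []byte(input))
	}
	fmt.Println("signature:", hex.EncodeToString(key))
}
```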
+ +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/exec" + "regexp" + "strings" + "time" + + "cloud.google.com/go/auth/internal" +) + +const ( + executableSupportedMaxVersion = 1 + executableDefaultTimeout = 30 * time.Second + executableSource = "response" + executableProviderType = "executable" + outputFileSource = "output file" + + allowExecutablesEnvVar = "GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES" + + jwtTokenType = "urn:ietf:params:oauth:token-type:jwt" + idTokenType = "urn:ietf:params:oauth:token-type:id_token" + saml2TokenType = "urn:ietf:params:oauth:token-type:saml2" +) + +var ( + serviceAccountImpersonationRE = regexp.MustCompile(`https://iamcredentials..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken`) +) + +type nonCacheableError struct { + message string +} + +func (nce nonCacheableError) Error() string { + return nce.message +} + +// environment is a contract for testing +type environment interface { + existingEnv() []string + getenv(string) string + run(ctx context.Context, command string, env []string) ([]byte, error) + now() time.Time +} + +type runtimeEnvironment struct{} + +func (r runtimeEnvironment) existingEnv() []string { + return os.Environ() +} +func (r runtimeEnvironment) getenv(key string) string { + return os.Getenv(key) +} +func (r runtimeEnvironment) now() time.Time { + return time.Now().UTC() +} + +func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) { + splitCommand := strings.Fields(command) + cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...) + cmd.Env = env + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + if ctx.Err() == context.DeadlineExceeded { + return nil, context.DeadlineExceeded + } + if exitError, ok := err.(*exec.ExitError); ok { + return nil, exitCodeError(exitError) + } + return nil, executableError(err) + } + + bytesStdout := bytes.TrimSpace(stdout.Bytes()) + if len(bytesStdout) > 0 { + return bytesStdout, nil + } + return bytes.TrimSpace(stderr.Bytes()), nil +} + +type executableSubjectProvider struct { + Command string + Timeout time.Duration + OutputFile string + client *http.Client + opts *Options + env environment +} + +type executableResponse struct { + Version int `json:"version,omitempty"` + Success *bool `json:"success,omitempty"` + TokenType string `json:"token_type,omitempty"` + ExpirationTime int64 `json:"expiration_time,omitempty"` + IDToken string `json:"id_token,omitempty"` + SamlResponse string `json:"saml_response,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (sp *executableSubjectProvider) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) { + var result executableResponse + if err := json.Unmarshal(response, &result); err != nil { + return "", jsonParsingError(source, string(response)) + } + // Validate + if result.Version == 0 { + return "", missingFieldError(source, "version") + } + if result.Success == nil { + return "", missingFieldError(source, "success") + } + if !*result.Success { + if result.Code == "" || result.Message == "" { + return "", malformedFailureError() + } + return "", userDefinedError(result.Code, result.Message) + } + if result.Version > executableSupportedMaxVersion || result.Version < 0 { + return "", unsupportedVersionError(source, result.Version) + } + if result.ExpirationTime == 0 && 
sp.OutputFile != "" { + return "", missingFieldError(source, "expiration_time") + } + if result.TokenType == "" { + return "", missingFieldError(source, "token_type") + } + if result.ExpirationTime != 0 && result.ExpirationTime < now { + return "", tokenExpiredError() + } + + switch result.TokenType { + case jwtTokenType, idTokenType: + if result.IDToken == "" { + return "", missingFieldError(source, "id_token") + } + return result.IDToken, nil + case saml2TokenType: + if result.SamlResponse == "" { + return "", missingFieldError(source, "saml_response") + } + return result.SamlResponse, nil + default: + return "", tokenTypeError(source) + } +} + +func (sp *executableSubjectProvider) subjectToken(ctx context.Context) (string, error) { + if token, err := sp.getTokenFromOutputFile(); token != "" || err != nil { + return token, err + } + return sp.getTokenFromExecutableCommand(ctx) +} + +func (sp *executableSubjectProvider) providerType() string { + return executableProviderType +} + +func (sp *executableSubjectProvider) getTokenFromOutputFile() (token string, err error) { + if sp.OutputFile == "" { + // This ExecutableCredentialSource doesn't use an OutputFile. + return "", nil + } + + file, err := os.Open(sp.OutputFile) + if err != nil { + // No OutputFile found. Hasn't been created yet, so skip it. + return "", nil + } + defer file.Close() + + data, err := internal.ReadAll(file) + if err != nil || len(data) == 0 { + // Cachefile exists, but no data found. Get new credential. + return "", nil + } + + token, err = sp.parseSubjectTokenFromSource(data, outputFileSource, sp.env.now().Unix()) + if err != nil { + if _, ok := err.(nonCacheableError); ok { + // If the cached token is expired we need a new token, + // and if the cache contains a failure, we need to try again. + return "", nil + } + + // There was an error in the cached token, and the developer should be aware of it. + return "", err + } + // Token parsing succeeded. Use found token. + return token, nil +} + +func (sp *executableSubjectProvider) executableEnvironment() []string { + result := sp.env.existingEnv() + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", sp.opts.Audience)) + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", sp.opts.SubjectTokenType)) + result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0") + if sp.opts.ServiceAccountImpersonationURL != "" { + matches := serviceAccountImpersonationRE.FindStringSubmatch(sp.opts.ServiceAccountImpersonationURL) + if matches != nil { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1])) + } + } + if sp.OutputFile != "" { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", sp.OutputFile)) + } + return result +} + +func (sp *executableSubjectProvider) getTokenFromExecutableCommand(ctx context.Context) (string, error) { + // For security reasons, we need our consumers to set this environment variable to allow executables to be run. 
+	if sp.env.getenv(allowExecutablesEnvVar) != "1" {
+		return "", errors.New("credentials: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run")
+	}
+
+	ctx, cancel := context.WithDeadline(ctx, sp.env.now().Add(sp.Timeout))
+	defer cancel()
+
+	output, err := sp.env.run(ctx, sp.Command, sp.executableEnvironment())
+	if err != nil {
+		return "", err
+	}
+	return sp.parseSubjectTokenFromSource(output, executableSource, sp.env.now().Unix())
+}
+
+func missingFieldError(source, field string) error {
+	return fmt.Errorf("credentials: %q missing %q field", source, field)
+}
+
+func jsonParsingError(source, data string) error {
+	return fmt.Errorf("credentials: unable to parse %q: %v", source, data)
+}
+
+func malformedFailureError() error {
+	return nonCacheableError{"credentials: response must include `code` and `message` fields when unsuccessful"}
+}
+
+func userDefinedError(code, message string) error {
+	return nonCacheableError{fmt.Sprintf("credentials: response contains unsuccessful response: (%v) %v", code, message)}
+}
+
+func unsupportedVersionError(source string, version int) error {
+	return fmt.Errorf("credentials: %v contains unsupported version: %v", source, version)
+}
+
+func tokenExpiredError() error {
+	return nonCacheableError{"credentials: the token returned by the executable is expired"}
+}
+
+func tokenTypeError(source string) error {
+	return fmt.Errorf("credentials: %v contains unsupported token type", source)
+}
+
+func exitCodeError(err *exec.ExitError) error {
+	return fmt.Errorf("credentials: executable command failed with exit code %v: %w", err.ExitCode(), err)
+}
+
+func executableError(err error) error {
+	return fmt.Errorf("credentials: executable command failed: %w", err)
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
new file mode 100644
index 000000000..b19c6edea
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
@@ -0,0 +1,367 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
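parseSubjectTokenFromSource in executable_provider.go above expects the helper executable to print a JSON document on stdout (and the provider refuses to run at all unless GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES=1 is set). A sketch of a well-formed version-1 OIDC success response; the token value is illustrative, and expiration_time is only mandatory when an output file is configured:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"time"
)

func main() {
	// Field names come from the executableResponse struct above; version,
	// success, and token_type are always required by the parser.
	resp := map[string]interface{}{
		"version":         1,
		"success":         true,
		"token_type":      "urn:ietf:params:oauth:token-type:jwt",
		"id_token":        "eyJhbGciOi...example", // placeholder token
		"expiration_time": time.Now().Add(time.Hour).Unix(),
	}
	if err := json.NewEncoder(os.Stdout).Encode(resp); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```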
+
+package externalaccount
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/auth"
+	"cloud.google.com/go/auth/credentials/internal/impersonate"
+	"cloud.google.com/go/auth/credentials/internal/stsexchange"
+	"cloud.google.com/go/auth/internal/credsfile"
+)
+
+const (
+	timeoutMinimum = 5 * time.Second
+	timeoutMaximum = 120 * time.Second
+
+	universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+	defaultTokenURL           = "https://sts.UNIVERSE_DOMAIN/v1/token"
+	defaultUniverseDomain     = "googleapis.com"
+)
+
+var (
+	// Now aliases time.Now for testing
+	Now = func() time.Time {
+		return time.Now().UTC()
+	}
+	validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`)
+)
+
+// Options stores the configuration for fetching tokens with external credentials.
+type Options struct {
+	// Audience is the Secure Token Service (STS) audience, which contains the resource name for the workload
+	// identity pool or the workforce pool and the provider identifier in that pool.
+	Audience string
+	// SubjectTokenType is the STS token type based on the OAuth 2.0 token exchange spec,
+	// e.g. `urn:ietf:params:oauth:token-type:jwt`.
+	SubjectTokenType string
+	// TokenURL is the STS token exchange endpoint.
+	TokenURL string
+	// TokenInfoURL is the token_info endpoint used to retrieve account-related information
+	// (user attributes like account identifier, e.g. email, username, uid, etc.). This is
+	// needed for gcloud session account identification.
+	TokenInfoURL string
+	// ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only
+	// required for workload identity pools when APIs to be accessed have not integrated with UberMint.
+	ServiceAccountImpersonationURL string
+	// ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation
+	// token will be valid for.
+	ServiceAccountImpersonationLifetimeSeconds int
+	// ClientSecret is currently only required if the token_info endpoint also
+	// needs to be called with the generated GCP access token. When provided, STS will be
+	// called with additional basic authentication using client_id as username and client_secret as password.
+	ClientSecret string
+	// ClientID is only required in conjunction with ClientSecret, as described above.
+	ClientID string
+	// CredentialSource contains the necessary information to retrieve the token itself, as well
+	// as some environmental information.
+	CredentialSource *credsfile.CredentialSource
+	// QuotaProjectID is injected by gcloud. If the value is non-empty, the Auth libraries
+	// will set the x-goog-user-project header, which overrides the project associated with the credentials.
+	QuotaProjectID string
+	// Scopes contains the desired scopes for the returned access token.
+	Scopes []string
+	// WorkforcePoolUserProject should be set when it is a workforce pool and
+	// not a workload identity pool. The underlying principal must still have
+	// serviceusage.services.use IAM permission to use the project for
+	// billing/quota. Optional.
+	WorkforcePoolUserProject string
+	// UniverseDomain is the default service domain for a given Cloud universe.
+	// This value will be used in the default STS token URL. The default value
+	// is "googleapis.com". It will not be used if TokenURL is set. Optional.
+	UniverseDomain string
+	// SubjectTokenProvider is an optional token provider for OIDC/SAML
+	// credentials. One of SubjectTokenProvider, AwsSecurityCredentialsProvider,
+	// or CredentialSource must be provided. Optional.
+	SubjectTokenProvider SubjectTokenProvider
+	// AwsSecurityCredentialsProvider is an AWS security credential provider
+	// for AWS credentials. One of SubjectTokenProvider,
+	// AwsSecurityCredentialsProvider, or CredentialSource must be provided. Optional.
+	AwsSecurityCredentialsProvider AwsSecurityCredentialsProvider
+	// Client for token request.
+	Client *http.Client
+}
+
+// SubjectTokenProvider can be used to supply a subject token to exchange for a
+// GCP access token.
+type SubjectTokenProvider interface {
+	// SubjectToken should return a valid subject token or an error.
+	// The external account token provider does not cache the returned subject
+	// token, so caching logic should be implemented in the provider to prevent
+	// multiple requests for the same subject token.
+	SubjectToken(ctx context.Context, opts *RequestOptions) (string, error)
+}
+
+// RequestOptions contains information about the requested subject token or AWS
+// security credentials from the Google external account credential.
+type RequestOptions struct {
+	// Audience is the requested audience for the external account credential.
+	Audience string
+	// SubjectTokenType is the requested subject token type for the external
+	// account credential. Expected values include:
+	// “urn:ietf:params:oauth:token-type:jwt”
+	// “urn:ietf:params:oauth:token-type:id_token”
+	// “urn:ietf:params:oauth:token-type:saml2”
+	// “urn:ietf:params:aws:token-type:aws4_request”
+	SubjectTokenType string
+}
+
+// AwsSecurityCredentialsProvider can be used to supply AwsSecurityCredentials
+// and an AWS Region to exchange for a GCP access token.
+type AwsSecurityCredentialsProvider interface {
+	// AwsRegion should return the AWS region or an error.
+	AwsRegion(ctx context.Context, opts *RequestOptions) (string, error)
+	// AwsSecurityCredentials should return a valid set of
+	// AwsSecurityCredentials or an error. The external account token provider
+	// does not cache the returned security credentials, so caching logic should
+	// be implemented in the provider to prevent multiple requests for the
+	// same security credentials.
+	AwsSecurityCredentials(ctx context.Context, opts *RequestOptions) (*AwsSecurityCredentials, error)
+}
+
+// AwsSecurityCredentials models AWS security credentials.
+type AwsSecurityCredentials struct {
+	// AccessKeyID is the AWS Access Key ID - Required.
+	AccessKeyID string `json:"AccessKeyID"`
+	// SecretAccessKey is the AWS Secret Access Key - Required.
+	SecretAccessKey string `json:"SecretAccessKey"`
+	// SessionToken is the AWS Session token. This should be provided for
+	// temporary AWS security credentials - Optional.
+ SessionToken string `json:"Token"` +} + +func (o *Options) validate() error { + if o.Audience == "" { + return fmt.Errorf("externalaccount: Audience must be set") + } + if o.SubjectTokenType == "" { + return fmt.Errorf("externalaccount: Subject token type must be set") + } + if o.WorkforcePoolUserProject != "" { + if valid := validWorkforceAudiencePattern.MatchString(o.Audience); !valid { + return fmt.Errorf("externalaccount: workforce_pool_user_project should not be set for non-workforce pool credentials") + } + } + count := 0 + if o.CredentialSource != nil { + count++ + } + if o.SubjectTokenProvider != nil { + count++ + } + if o.AwsSecurityCredentialsProvider != nil { + count++ + } + if count == 0 { + return fmt.Errorf("externalaccount: one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set") + } + if count > 1 { + return fmt.Errorf("externalaccount: only one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set") + } + return nil +} + +// resolveTokenURL sets the default STS token endpoint with the configured +// universe domain. +func (o *Options) resolveTokenURL() { + if o.TokenURL != "" { + return + } else if o.UniverseDomain != "" { + o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, o.UniverseDomain, 1) + } else { + o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1) + } +} + +// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] +// configured with the provided options. +func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + opts.resolveTokenURL() + stp, err := newSubjectTokenProvider(opts) + if err != nil { + return nil, err + } + tp := &tokenProvider{ + client: opts.Client, + opts: opts, + stp: stp, + } + if opts.ServiceAccountImpersonationURL == "" { + return auth.NewCachedTokenProvider(tp, nil), nil + } + + scopes := make([]string, len(opts.Scopes)) + copy(scopes, opts.Scopes) + // needed for impersonation + tp.opts.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} + imp, err := impersonate.NewTokenProvider(&impersonate.Options{ + Client: opts.Client, + URL: opts.ServiceAccountImpersonationURL, + Scopes: scopes, + Tp: auth.NewCachedTokenProvider(tp, nil), + TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds, + }) + if err != nil { + return nil, err + } + return auth.NewCachedTokenProvider(imp, nil), nil +} + +type subjectTokenProvider interface { + subjectToken(ctx context.Context) (string, error) + providerType() string +} + +// tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens. 
+type tokenProvider struct {
+	client *http.Client
+	opts   *Options
+	stp    subjectTokenProvider
+}
+
+func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
+	subjectToken, err := tp.stp.subjectToken(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	stsRequest := &stsexchange.TokenRequest{
+		GrantType:          stsexchange.GrantType,
+		Audience:           tp.opts.Audience,
+		Scope:              tp.opts.Scopes,
+		RequestedTokenType: stsexchange.TokenType,
+		SubjectToken:       subjectToken,
+		SubjectTokenType:   tp.opts.SubjectTokenType,
+	}
+	header := make(http.Header)
+	header.Set("Content-Type", "application/x-www-form-urlencoded")
+	header.Add("x-goog-api-client", getGoogHeaderValue(tp.opts, tp.stp))
+	clientAuth := stsexchange.ClientAuthentication{
+		AuthStyle:    auth.StyleInHeader,
+		ClientID:     tp.opts.ClientID,
+		ClientSecret: tp.opts.ClientSecret,
+	}
+	var options map[string]interface{}
+	// Do not pass workforce_pool_user_project when client authentication is used.
+	// The client ID is sufficient for determining the user project.
+	if tp.opts.WorkforcePoolUserProject != "" && tp.opts.ClientID == "" {
+		options = map[string]interface{}{
+			"userProject": tp.opts.WorkforcePoolUserProject,
+		}
+	}
+	stsResp, err := stsexchange.ExchangeToken(ctx, &stsexchange.Options{
+		Client:         tp.client,
+		Endpoint:       tp.opts.TokenURL,
+		Request:        stsRequest,
+		Authentication: clientAuth,
+		Headers:        header,
+		ExtraOpts:      options,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	tok := &auth.Token{
+		Value: stsResp.AccessToken,
+		Type:  stsResp.TokenType,
+	}
+	// RFC 8693 does not define the behavior of an explicit 0 in the "expires_in" field.
+	if stsResp.ExpiresIn <= 0 {
+		return nil, fmt.Errorf("credentials: got invalid expiry from security token service")
+	}
+	tok.Expiry = Now().Add(time.Duration(stsResp.ExpiresIn) * time.Second)
+	return tok, nil
+}
+
+// newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a
+// subjectTokenProvider
+func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
+	reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType}
+	if o.AwsSecurityCredentialsProvider != nil {
+		return &awsSubjectProvider{
+			securityCredentialsProvider: o.AwsSecurityCredentialsProvider,
+			TargetResource:              o.Audience,
+			reqOpts:                     reqOpts,
+		}, nil
+	} else if o.SubjectTokenProvider != nil {
+		return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil
+	} else if len(o.CredentialSource.EnvironmentID) > 3 && o.CredentialSource.EnvironmentID[:3] == "aws" {
+		if awsVersion, err := strconv.Atoi(o.CredentialSource.EnvironmentID[3:]); err == nil {
+			if awsVersion != 1 {
+				return nil, fmt.Errorf("credentials: aws version '%d' is not supported in the current build", awsVersion)
+			}
+
+			awsProvider := &awsSubjectProvider{
+				EnvironmentID:               o.CredentialSource.EnvironmentID,
+				RegionURL:                   o.CredentialSource.RegionURL,
+				RegionalCredVerificationURL: o.CredentialSource.RegionalCredVerificationURL,
+				CredVerificationURL:         o.CredentialSource.URL,
+				TargetResource:              o.Audience,
+				Client:                      o.Client,
+			}
+			if o.CredentialSource.IMDSv2SessionTokenURL != "" {
+				awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL
+			}
+
+			return awsProvider, nil
+		}
+	} else if o.CredentialSource.File != "" {
+		return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil
+	} else if o.CredentialSource.URL != "" {
+		return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format:
o.CredentialSource.Format, Client: o.Client}, nil + } else if o.CredentialSource.Executable != nil { + ec := o.CredentialSource.Executable + if ec.Command == "" { + return nil, errors.New("credentials: missing `command` field — executable command must be provided") + } + + execProvider := &executableSubjectProvider{} + execProvider.Command = ec.Command + if ec.TimeoutMillis == 0 { + execProvider.Timeout = executableDefaultTimeout + } else { + execProvider.Timeout = time.Duration(ec.TimeoutMillis) * time.Millisecond + if execProvider.Timeout < timeoutMinimum || execProvider.Timeout > timeoutMaximum { + return nil, fmt.Errorf("credentials: invalid `timeout_millis` field — executable timeout must be between %v and %v seconds", timeoutMinimum.Seconds(), timeoutMaximum.Seconds()) + } + } + execProvider.OutputFile = ec.OutputFile + execProvider.client = o.Client + execProvider.opts = o + execProvider.env = runtimeEnvironment{} + return execProvider, nil + } + return nil, errors.New("credentials: unable to parse credential source") +} + +func getGoogHeaderValue(conf *Options, p subjectTokenProvider) string { + return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", + goVersion(), + "unknown", + p.providerType(), + conf.ServiceAccountImpersonationURL != "", + conf.ServiceAccountImpersonationLifetimeSeconds != 0) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go new file mode 100644 index 000000000..8186939fe --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go @@ -0,0 +1,78 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
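The chain of `else if` branches in newSubjectTokenProvider above maps directly onto the credential_source block of an external_account credentials file. As a rough illustration, a hypothetical file-sourced configuration that would select the fileSubjectProvider branch (all identifiers and paths below are placeholders, not values from this diff):

```go
package main

// ParseExternalAccount in filetypes.go produces the fields consumed by
// newSubjectTokenProvider; here credential_source.file is set, so the
// fileSubjectProvider branch is taken.
const exampleExternalAccount = `{
  "type": "external_account",
  "audience": "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/pool/providers/provider",
  "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
  "token_url": "https://sts.googleapis.com/v1/token",
  "credential_source": {
    "file": "/var/run/secrets/oidc/token",
    "format": {"type": "text"}
  }
}`

func main() { println(exampleExternalAccount) }
```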
+ +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +const ( + fileProviderType = "file" +) + +type fileSubjectProvider struct { + File string + Format *credsfile.Format +} + +func (sp *fileSubjectProvider) subjectToken(context.Context) (string, error) { + tokenFile, err := os.Open(sp.File) + if err != nil { + return "", fmt.Errorf("credentials: failed to open credential file %q: %w", sp.File, err) + } + defer tokenFile.Close() + tokenBytes, err := internal.ReadAll(tokenFile) + if err != nil { + return "", fmt.Errorf("credentials: failed to read credential file: %w", err) + } + tokenBytes = bytes.TrimSpace(tokenBytes) + + if sp.Format == nil { + return string(tokenBytes), nil + } + switch sp.Format.Type { + case fileTypeJSON: + jsonData := make(map[string]interface{}) + err = json.Unmarshal(tokenBytes, &jsonData) + if err != nil { + return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err) + } + val, ok := jsonData[sp.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("credentials: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("credentials: improperly formatted subject token") + } + return token, nil + case fileTypeText: + return string(tokenBytes), nil + default: + return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type) + } +} + +func (sp *fileSubjectProvider) providerType() string { + return fileProviderType +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go new file mode 100644 index 000000000..8e4b4379b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go @@ -0,0 +1,74 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "runtime" + "strings" + "unicode" +) + +var ( + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. 
+func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. + if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return versionUnknown +} diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go similarity index 59% rename from vendor/cloud.google.com/go/compute/internal/version.go rename to vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go index 540ad16ac..be3c87351 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -package internal +package externalaccount -// Version is the current tagged release of the library. -const Version = "1.23.3" +import "context" + +type programmaticProvider struct { + opts *RequestOptions + stp SubjectTokenProvider +} + +func (pp *programmaticProvider) providerType() string { + return programmaticProviderType +} + +func (pp *programmaticProvider) subjectToken(ctx context.Context) (string, error) { + return pp.stp.SubjectToken(ctx, pp.opts) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go new file mode 100644 index 000000000..22b8af1c1 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go @@ -0,0 +1,93 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
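To make goVersion's normalization concrete, here are a few inputs traced through its branches (outputs inferred from the code in info.go above; the function itself is unexported, so this snippet only tabulates the expected results):

```go
package main

import "fmt"

func main() {
	// Input -> output pairs per the branches of goVersion above.
	examples := [][2]string{
		{"go1.22.3", "1.22.3"},                  // already a full semver tail
		{"go1.22", "1.22.0"},                    // padded to three components
		{"go1.22rc1", "1.22.0-rc1"},             // prerelease gains a dash prefix
		{"devel +abc123 linux/amd64", "abc123"}, // devel builds keep the commit
	}
	for _, e := range examples {
		fmt.Printf("%-28s -> %s\n", e[0], e[1])
	}
}
```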
+ +package externalaccount + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +const ( + fileTypeText = "text" + fileTypeJSON = "json" + urlProviderType = "url" + programmaticProviderType = "programmatic" +) + +type urlSubjectProvider struct { + URL string + Headers map[string]string + Format *credsfile.Format + Client *http.Client +} + +func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) { + req, err := http.NewRequestWithContext(ctx, "GET", sp.URL, nil) + if err != nil { + return "", fmt.Errorf("credentials: HTTP request for URL-sourced credential failed: %w", err) + } + + for key, val := range sp.Headers { + req.Header.Add(key, val) + } + resp, err := sp.Client.Do(req) + if err != nil { + return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err) + } + defer resp.Body.Close() + + respBody, err := internal.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("credentials: invalid body in subject token URL query: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { + return "", fmt.Errorf("credentials: status code %d: %s", c, respBody) + } + + if sp.Format == nil { + return string(respBody), nil + } + switch sp.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(respBody, &jsonData) + if err != nil { + return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err) + } + val, ok := jsonData[sp.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("credentials: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("credentials: improperly formatted subject token") + } + return token, nil + case fileTypeText: + return string(respBody), nil + default: + return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type) + } +} + +func (sp *urlSubjectProvider) providerType() string { + return urlProviderType +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go new file mode 100644 index 000000000..0d7885479 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go @@ -0,0 +1,110 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccountuser + +import ( + "context" + "errors" + "net/http" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials/internal/stsexchange" + "cloud.google.com/go/auth/internal" +) + +// Options stores the configuration for fetching tokens with external authorized +// user credentials. 
+type Options struct {
+	// Audience is the Secure Token Service (STS) audience which contains the
+	// resource name for the workforce pool and the provider identifier in that
+	// pool.
+	Audience string
+	// RefreshToken is the OAuth 2.0 refresh token.
+	RefreshToken string
+	// TokenURL is the STS token exchange endpoint for refresh.
+	TokenURL string
+	// TokenInfoURL is the STS endpoint URL for token introspection. Optional.
+	TokenInfoURL string
+	// ClientID is only required in conjunction with ClientSecret, as described
+	// below.
+	ClientID string
+	// ClientSecret is currently only required if the token_info endpoint also
+	// needs to be called with the generated cloud access token. When provided,
+	// STS will be called with additional basic authentication using client_id
+	// as username and client_secret as password.
+	ClientSecret string
+	// Scopes contains the desired scopes for the returned access token.
+	Scopes []string
+
+	// Client for token request.
+	Client *http.Client
+}
+
+func (c *Options) validate() bool {
+	return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != ""
+}
+
+// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider]
+// configured with the provided options.
+func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
+	if !opts.validate() {
+		return nil, errors.New("credentials: invalid external_account_authorized_user configuration")
+	}
+
+	tp := &tokenProvider{
+		o: opts,
+	}
+	return auth.NewCachedTokenProvider(tp, nil), nil
+}
+
+type tokenProvider struct {
+	o *Options
+}
+
+func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
+	opts := tp.o
+
+	clientAuth := stsexchange.ClientAuthentication{
+		AuthStyle:    auth.StyleInHeader,
+		ClientID:     opts.ClientID,
+		ClientSecret: opts.ClientSecret,
+	}
+	headers := make(http.Header)
+	headers.Set("Content-Type", "application/x-www-form-urlencoded")
+	stsResponse, err := stsexchange.RefreshAccessToken(ctx, &stsexchange.Options{
+		Client:         opts.Client,
+		Endpoint:       opts.TokenURL,
+		RefreshToken:   opts.RefreshToken,
+		Authentication: clientAuth,
+		Headers:        headers,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if stsResponse.ExpiresIn < 0 {
+		return nil, errors.New("credentials: invalid expiry from security token service")
+	}
+
+	// This refresh-token rotation is guarded by the wrapping CachedTokenProvider.
+	if stsResponse.RefreshToken != "" {
+		opts.RefreshToken = stsResponse.RefreshToken
+	}
+	return &auth.Token{
+		Value:  stsResponse.AccessToken,
+		Expiry: time.Now().UTC().Add(time.Duration(stsResponse.ExpiresIn) * time.Second),
+		Type:   internal.TokenTypeBearer,
+	}, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
new file mode 100644
index 000000000..467edb908
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
@@ -0,0 +1,182 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
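On the wire, the Token method in externalaccountuser above performs a plain OAuth 2.0 refresh_token grant against the STS endpoint, authenticating the client via basic auth (auth.StyleInHeader). A rough sketch of the equivalent raw request; the endpoint URL and client credentials are placeholders, and the real code goes through the internal stsexchange package rather than building the request directly:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// Equivalent form body to what stsexchange.RefreshAccessToken sends.
	form := url.Values{}
	form.Set("grant_type", "refresh_token")
	form.Set("refresh_token", "REDACTED_REFRESH_TOKEN")

	// Placeholder endpoint standing in for Options.TokenURL.
	req, err := http.NewRequest("POST", "https://sts.example.googleapis.com/v1/oauthtoken", strings.NewReader(form.Encode()))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	// auth.StyleInHeader corresponds to HTTP basic auth with the client
	// ID and secret; both values here are placeholders.
	req.SetBasicAuth("example-client-id", "example-client-secret")
	fmt.Println(req.Method, req.URL, "body:", form.Encode())
}
```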
+ +package gdch + +import ( + "context" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/auth/internal/jwt" +) + +const ( + // GrantType is the grant type for the token request. + GrantType = "urn:ietf:params:oauth:token-type:token-exchange" + requestTokenType = "urn:ietf:params:oauth:token-type:access_token" + subjectTokenType = "urn:k8s:params:oauth:token-type:serviceaccount" +) + +var ( + gdchSupportFormatVersions map[string]bool = map[string]bool{ + "1": true, + } +) + +// Options for [NewTokenProvider]. +type Options struct { + STSAudience string + Client *http.Client +} + +// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a +// GDCH cred file. +func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.TokenProvider, error) { + if !gdchSupportFormatVersions[f.FormatVersion] { + return nil, fmt.Errorf("credentials: unsupported gdch_service_account format %q", f.FormatVersion) + } + if o.STSAudience == "" { + return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows") + } + pk, err := internal.ParseKey([]byte(f.PrivateKey)) + if err != nil { + return nil, err + } + certPool, err := loadCertPool(f.CertPath) + if err != nil { + return nil, err + } + + tp := gdchProvider{ + serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name), + tokenURL: f.TokenURL, + aud: o.STSAudience, + pk: pk, + pkID: f.PrivateKeyID, + certPool: certPool, + client: o.Client, + } + return tp, nil +} + +func loadCertPool(path string) (*x509.CertPool, error) { + pool := x509.NewCertPool() + pem, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read certificate: %w", err) + } + pool.AppendCertsFromPEM(pem) + return pool, nil +} + +type gdchProvider struct { + serviceIdentity string + tokenURL string + aud string + pk *rsa.PrivateKey + pkID string + certPool *x509.CertPool + + client *http.Client +} + +func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { + addCertToTransport(g.client, g.certPool) + iat := time.Now() + exp := iat.Add(time.Hour) + claims := jwt.Claims{ + Iss: g.serviceIdentity, + Sub: g.serviceIdentity, + Aud: g.tokenURL, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + h := jwt.Header{ + Algorithm: jwt.HeaderAlgRSA256, + Type: jwt.HeaderType, + KeyID: string(g.pkID), + } + payload, err := jwt.EncodeJWS(&h, &claims, g.pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", GrantType) + v.Set("audience", g.aud) + v.Set("requested_token_type", requestTokenType) + v.Set("subject_token", payload) + v.Set("subject_token_type", subjectTokenType) + resp, err := g.client.PostForm(g.tokenURL, v) + if err != nil { + return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + } + defer resp.Body.Close() + body, err := internal.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { + return nil, &auth.Error{ + Response: resp, + Body: body, + } + } + + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + 
return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
+	}
+	token := &auth.Token{
+		Value: tokenRes.AccessToken,
+		Type:  tokenRes.TokenType,
+	}
+	raw := make(map[string]interface{})
+	json.Unmarshal(body, &raw) // no error checks for optional fields
+	token.Metadata = raw
+
+	if secs := tokenRes.ExpiresIn; secs > 0 {
+		token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+	}
+	return token, nil
+}
+
+// addCertToTransport makes a best-effort attempt at adding the cert info to
+// the client. It keeps all configured transport settings if the underlying
+// transport is an http.Transport; otherwise it replaces the transport with a
+// clone of the default transport that carries the certs.
+func addCertToTransport(hc *http.Client, certPool *x509.CertPool) {
+	trans, ok := hc.Transport.(*http.Transport)
+	if !ok {
+		trans = http.DefaultTransport.(*http.Transport).Clone()
+	}
+	trans.TLSClientConfig = &tls.Config{
+		RootCAs: certPool,
+	}
+	// Reattach the transport so that a cloned default transport (with the
+	// cert pool) is actually used by the client.
+	hc.Transport = trans
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
new file mode 100644
index 000000000..3ceab873b
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
@@ -0,0 +1,151 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package impersonate
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"cloud.google.com/go/auth"
+	"cloud.google.com/go/auth/internal"
+)
+
+const (
+	defaultTokenLifetime = "3600s"
+	authHeaderKey        = "Authorization"
+)
+
+// generateAccessTokenReq is used for service account impersonation.
+type generateAccessTokenReq struct {
+	Delegates []string `json:"delegates,omitempty"`
+	Lifetime  string   `json:"lifetime,omitempty"`
+	Scope     []string `json:"scope,omitempty"`
+}
+
+type impersonateTokenResponse struct {
+	AccessToken string `json:"accessToken"`
+	ExpireTime  string `json:"expireTime"`
+}
+
+// NewTokenProvider uses a source credential, stored in Tp, to request an access token to the provided URL.
+// Scopes can be defined when the access token is requested.
+func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
+	if err := opts.validate(); err != nil {
+		return nil, err
+	}
+	return opts, nil
+}
+
+// Options for [NewTokenProvider].
+type Options struct {
+	// Tp is the source credential used to generate a token on the
+	// impersonated service account. Required.
+	Tp auth.TokenProvider
+
+	// URL is the endpoint to call to generate a token
+	// on behalf of the service account. Required.
+	URL string
+	// Scopes that the impersonated credential should have. Required.
+	Scopes []string
+	// Delegates are the service account email addresses in a delegation chain.
+	// Each service account must be granted roles/iam.serviceAccountTokenCreator
+	// on the next service account in the chain. Optional.
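+	// For example (illustrative, hypothetical accounts):
+	//
+	//	[]string{"sa-middle@my-proj.iam.gserviceaccount.com"}
+	//
+	// routes the impersonation through sa-middle before reaching the target
+	// service account.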
+ Delegates []string + // TokenLifetimeSeconds is the number of seconds the impersonation token will + // be valid for. Defaults to 1 hour if unset. Optional. + TokenLifetimeSeconds int + // Client configures the underlying client used to make network requests + // when fetching tokens. Required. + Client *http.Client +} + +func (o *Options) validate() error { + if o.Tp == nil { + return errors.New("credentials: missing required 'source_credentials' field in impersonated credentials") + } + if o.URL == "" { + return errors.New("credentials: missing required 'service_account_impersonation_url' field in impersonated credentials") + } + return nil +} + +// Token performs the exchange to get a temporary service account token to allow access to GCP. +func (o *Options) Token(ctx context.Context) (*auth.Token, error) { + lifetime := defaultTokenLifetime + if o.TokenLifetimeSeconds != 0 { + lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds) + } + reqBody := generateAccessTokenReq{ + Lifetime: lifetime, + Scope: o.Scopes, + Delegates: o.Delegates, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("credentials: unable to marshal request: %w", err) + } + req, err := http.NewRequestWithContext(ctx, "POST", o.URL, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("credentials: unable to create impersonation request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + if err := setAuthHeader(ctx, o.Tp, req); err != nil { + return nil, err + } + resp, err := o.Client.Do(req) + if err != nil { + return nil, fmt.Errorf("credentials: unable to generate access token: %w", err) + } + defer resp.Body.Close() + body, err := internal.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("credentials: unable to read body: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { + return nil, fmt.Errorf("credentials: status code %d: %s", c, body) + } + + var accessTokenResp impersonateTokenResponse + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("credentials: unable to parse response: %w", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("credentials: unable to parse expiry: %w", err) + } + return &auth.Token{ + Value: accessTokenResp.AccessToken, + Expiry: expiry, + Type: internal.TokenTypeBearer, + }, nil +} + +func setAuthHeader(ctx context.Context, tp auth.TokenProvider, r *http.Request) error { + t, err := tp.Token(ctx) + if err != nil { + return err + } + typ := t.Type + if typ == "" { + typ = internal.TokenTypeBearer + } + r.Header.Set(authHeaderKey, typ+" "+t.Value) + return nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go new file mode 100644 index 000000000..f70e0aef4 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go @@ -0,0 +1,167 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stsexchange + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" +) + +const ( + // GrantType for a sts exchange. + GrantType = "urn:ietf:params:oauth:grant-type:token-exchange" + // TokenType for a sts exchange. + TokenType = "urn:ietf:params:oauth:token-type:access_token" + + jwtTokenType = "urn:ietf:params:oauth:token-type:jwt" +) + +// Options stores the configuration for making an sts exchange request. +type Options struct { + Client *http.Client + Endpoint string + Request *TokenRequest + Authentication ClientAuthentication + Headers http.Header + // ExtraOpts are optional fields marshalled into the `options` field of the + // request body. + ExtraOpts map[string]interface{} + RefreshToken string +} + +// RefreshAccessToken performs the token exchange using a refresh token flow. +func RefreshAccessToken(ctx context.Context, opts *Options) (*TokenResponse, error) { + data := url.Values{} + data.Set("grant_type", "refresh_token") + data.Set("refresh_token", opts.RefreshToken) + return doRequest(ctx, opts, data) +} + +// ExchangeToken performs an oauth2 token exchange with the provided endpoint. +func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) { + data := url.Values{} + data.Set("audience", opts.Request.Audience) + data.Set("grant_type", GrantType) + data.Set("requested_token_type", TokenType) + data.Set("subject_token_type", opts.Request.SubjectTokenType) + data.Set("subject_token", opts.Request.SubjectToken) + data.Set("scope", strings.Join(opts.Request.Scope, " ")) + if opts.ExtraOpts != nil { + opts, err := json.Marshal(opts.ExtraOpts) + if err != nil { + return nil, fmt.Errorf("credentials: failed to marshal additional options: %w", err) + } + data.Set("options", string(opts)) + } + return doRequest(ctx, opts, data) +} + +func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) { + opts.Authentication.InjectAuthentication(data, opts.Headers) + encodedData := data.Encode() + + req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData)) + if err != nil { + return nil, fmt.Errorf("credentials: failed to properly build http request: %w", err) + + } + for key, list := range opts.Headers { + for _, val := range list { + req.Header.Add(key, val) + } + } + req.Header.Set("Content-Length", strconv.Itoa(len(encodedData))) + + resp, err := opts.Client.Do(req) + if err != nil { + return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err) + } + defer resp.Body.Close() + + body, err := internal.ReadAll(resp.Body) + if err != nil { + return nil, err + } + if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { + return nil, fmt.Errorf("credentials: status code %d: %s", c, body) + } + var stsResp TokenResponse + if err := json.Unmarshal(body, &stsResp); err != nil { + return nil, fmt.Errorf("credentials: failed to unmarshal response body from Secure Token Server: %w", err) + } + + 
return &stsResp, nil +} + +// TokenRequest contains fields necessary to make an oauth2 token +// exchange. +type TokenRequest struct { + ActingParty struct { + ActorToken string + ActorTokenType string + } + GrantType string + Resource string + Audience string + Scope []string + RequestedTokenType string + SubjectToken string + SubjectTokenType string +} + +// TokenResponse is used to decode the remote server response during +// an oauth2 token exchange. +type TokenResponse struct { + AccessToken string `json:"access_token"` + IssuedTokenType string `json:"issued_token_type"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + Scope string `json:"scope"` + RefreshToken string `json:"refresh_token"` +} + +// ClientAuthentication represents an OAuth client ID and secret and the +// mechanism for passing these credentials as stated in rfc6749#2.3.1. +type ClientAuthentication struct { + AuthStyle auth.Style + ClientID string + ClientSecret string +} + +// InjectAuthentication is used to add authentication to a Secure Token Service +// exchange request. It modifies either the passed url.Values or http.Header +// depending on the desired authentication format. +func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { + if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { + return + } + switch c.AuthStyle { + case auth.StyleInHeader: + plainHeader := c.ClientID + ":" + c.ClientSecret + headers.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader))) + default: + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + } +} diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go new file mode 100644 index 000000000..b62a8ae4d --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -0,0 +1,81 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "context" + "crypto/rsa" + "fmt" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/auth/internal/jwt" +) + +var ( + // for testing + now func() time.Time = time.Now +) + +// configureSelfSignedJWT uses the private key in the service account to create +// a JWT without making a network call. 
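+// The signed JWT itself is then presented as the Bearer token (see the Token
+// method below), so no STS or OAuth token-endpoint round trip is required.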
+func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + pk, err := internal.ParseKey([]byte(f.PrivateKey)) + if err != nil { + return nil, fmt.Errorf("credentials: could not parse key: %w", err) + } + return &selfSignedTokenProvider{ + email: f.ClientEmail, + audience: opts.Audience, + scopes: opts.scopes(), + pk: pk, + pkID: f.PrivateKeyID, + }, nil +} + +type selfSignedTokenProvider struct { + email string + audience string + scopes []string + pk *rsa.PrivateKey + pkID string +} + +func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { + iat := now() + exp := iat.Add(time.Hour) + scope := strings.Join(tp.scopes, " ") + c := &jwt.Claims{ + Iss: tp.email, + Sub: tp.email, + Aud: tp.audience, + Scope: scope, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + h := &jwt.Header{ + Algorithm: jwt.HeaderAlgRSA256, + Type: jwt.HeaderType, + KeyID: string(tp.pkID), + } + msg, err := jwt.EncodeJWS(h, c, tp.pk) + if err != nil { + return nil, fmt.Errorf("credentials: could not encode JWT: %w", err) + } + return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil +} diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go new file mode 100644 index 000000000..7fea9d87e --- /dev/null +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -0,0 +1,215 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package httptransport + +import ( + "crypto/tls" + "errors" + "fmt" + "net/http" + + "cloud.google.com/go/auth" + detect "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport" +) + +// ClientCertProvider is a function that returns a TLS client certificate to be +// used when opening TLS connections. It follows the same semantics as +// [crypto/tls.Config.GetClientCertificate]. +type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// Options used to configure a [net/http.Client] from [NewClient]. +type Options struct { + // DisableTelemetry disables default telemetry (OpenCensus). An example + // reason to do so would be to bind custom telemetry that overrides the + // defaults. + DisableTelemetry bool + // DisableAuthentication specifies that no authentication should be used. It + // is suitable only for testing and for accessing public resources, like + // public Google Cloud Storage buckets. + DisableAuthentication bool + // Headers are extra HTTP headers that will be appended to every outgoing + // request. + Headers http.Header + // BaseRoundTripper overrides the base transport used for serving requests. + // If specified ClientCertProvider is ignored. + BaseRoundTripper http.RoundTripper + // Endpoint overrides the default endpoint to be used for a service. + Endpoint string + // APIKey specifies an API key to be used as the basis for authentication. 
+	// If set, DetectOpts are ignored.
+	APIKey string
+	// Credentials used to add Authorization header to all requests. If set,
+	// DetectOpts are ignored.
+	Credentials *auth.Credentials
+	// ClientCertProvider is a function that returns a TLS client certificate to
+	// be used when opening TLS connections. It follows the same semantics as
+	// crypto/tls.Config.GetClientCertificate.
+	ClientCertProvider ClientCertProvider
+	// DetectOpts configures settings for detecting Application Default
+	// Credentials.
+	DetectOpts *detect.DetectOptions
+	// UniverseDomain is the default service domain for a given Cloud universe.
+	// The default value is "googleapis.com". This is the universe domain
+	// configured for the client, which will be compared to the universe domain
+	// that is separately configured for the credentials.
+	UniverseDomain string
+
+	// InternalOptions are NOT meant to be set directly by consumers of this
+	// package; they should only be set by generated client code.
+	InternalOptions *InternalOptions
+}
+
+func (o *Options) validate() error {
+	if o == nil {
+		return errors.New("httptransport: opts required to be non-nil")
+	}
+	if o.InternalOptions != nil && o.InternalOptions.SkipValidation {
+		return nil
+	}
+	hasCreds := o.APIKey != "" ||
+		o.Credentials != nil ||
+		(o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) ||
+		(o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "")
+	if o.DisableAuthentication && hasCreds {
+		return errors.New("httptransport: DisableAuthentication is incompatible with options that set or detect credentials")
+	}
+	return nil
+}
+
+// client returns the client a user set for the detect options, or nil if one
+// was not set.
+func (o *Options) client() *http.Client {
+	if o.DetectOpts != nil && o.DetectOpts.Client != nil {
+		return o.DetectOpts.Client
+	}
+	return nil
+}
+
+func (o *Options) resolveDetectOptions() *detect.DetectOptions {
+	io := o.InternalOptions
+	// Soft-clone these so we are not updating a reference the user holds and may reuse.
+	do := transport.CloneDetectOptions(o.DetectOpts)
+
+	// If scoped JWTs are enabled or the user provided an aud, allow self-signed JWT.
+	if (io != nil && io.EnableJWTWithScope) || do.Audience != "" {
+		do.UseSelfSignedJWT = true
+	}
+	// Only default scopes if the user did not also set an audience.
+	if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 {
+		do.Scopes = make([]string, len(io.DefaultScopes))
+		copy(do.Scopes, io.DefaultScopes)
+	}
+	if len(do.Scopes) == 0 && do.Audience == "" && io != nil {
+		do.Audience = o.InternalOptions.DefaultAudience
+	}
+	return do
+}
+
+// InternalOptions are only meant to be set by generated client code. These are
+// not meant to be set directly by consumers of this package. Configuration in
+// this type is considered EXPERIMENTAL and may be removed at any time in the
+// future without warning.
+type InternalOptions struct {
+	// EnableJWTWithScope specifies if scope can be used with self-signed JWT.
+	EnableJWTWithScope bool
+	// DefaultAudience specifies a default audience to be used as the audience
+	// field ("aud") for the JWT token authentication.
+	DefaultAudience string
+	// DefaultEndpointTemplate combined with UniverseDomain specifies the
+	// default endpoint.
+	DefaultEndpointTemplate string
+	// DefaultMTLSEndpoint specifies the default mTLS endpoint.
+	DefaultMTLSEndpoint string
+	// DefaultScopes specifies the default OAuth2 scopes to be used for a
+	// service.
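+	// For example (illustrative): "https://www.googleapis.com/auth/cloud-platform".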
+	DefaultScopes []string
+	// SkipValidation bypasses validation on Options. It should only be used
+	// internally for clients that need more control over their transport.
+	SkipValidation bool
+}
+
+// AddAuthorizationMiddleware adds a middleware to the provided client's
+// transport that sets the Authorization header with the value produced by the
+// provided [cloud.google.com/go/auth.Credentials]. An error is returned only
+// if client or creds is nil.
+func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error {
+	if client == nil || creds == nil {
+		return fmt.Errorf("httptransport: client and creds must not be nil")
+	}
+	base := client.Transport
+	if base == nil {
+		if dt, ok := http.DefaultTransport.(*http.Transport); ok {
+			base = dt.Clone()
+		} else {
+			// Directly reuse the DefaultTransport if the application has
+			// replaced it with an implementation of RoundTripper other than
+			// http.Transport.
+			base = http.DefaultTransport
+		}
+	}
+	client.Transport = &authTransport{
+		creds: creds,
+		base:  base,
+		// TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls.
+	}
+	return nil
+}
+
+// NewClient returns a [net/http.Client] that can be used to communicate with a
+// Google cloud service, configured with the provided [Options]. It
+// automatically appends Authorization headers to all outgoing requests.
+func NewClient(opts *Options) (*http.Client, error) {
+	if err := opts.validate(); err != nil {
+		return nil, err
+	}
+
+	tOpts := &transport.Options{
+		Endpoint:           opts.Endpoint,
+		ClientCertProvider: opts.ClientCertProvider,
+		Client:             opts.client(),
+		UniverseDomain:     opts.UniverseDomain,
+	}
+	if io := opts.InternalOptions; io != nil {
+		tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
+		tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint
+	}
+	clientCertProvider, dialTLSContext, err := transport.GetHTTPTransportConfig(tOpts)
+	if err != nil {
+		return nil, err
+	}
+	baseRoundTripper := opts.BaseRoundTripper
+	if baseRoundTripper == nil {
+		baseRoundTripper = defaultBaseTransport(clientCertProvider, dialTLSContext)
+	}
+	trans, err := newTransport(baseRoundTripper, opts)
+	if err != nil {
+		return nil, err
+	}
+	return &http.Client{
+		Transport: trans,
+	}, nil
+}
+
+// SetAuthHeader uses the provided token to set the Authorization header on a
+// request. If the token.Type is empty, the type is assumed to be Bearer.
+func SetAuthHeader(token *auth.Token, req *http.Request) {
+	typ := token.Type
+	if typ == "" {
+		typ = internal.TokenTypeBearer
+	}
+	req.Header.Set("Authorization", typ+" "+token.Value)
+}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/trace.go b/vendor/cloud.google.com/go/auth/httptransport/trace.go
new file mode 100644
index 000000000..467c477c0
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/httptransport/trace.go
@@ -0,0 +1,93 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httptransport
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"go.opencensus.io/trace"
+	"go.opencensus.io/trace/propagation"
+)
+
+const (
+	httpHeaderMaxSize = 200
+	cloudTraceHeader  = `X-Cloud-Trace-Context`
+)
+
+// Assert at compile time that httpFormat implements the foreign interface
+// propagation.HTTPFormat.
+var _ propagation.HTTPFormat = (*httpFormat)(nil)
+
+// httpFormat implements propagation.HTTPFormat to propagate
+// traces in HTTP headers for Google Cloud Platform and Cloud Trace.
+type httpFormat struct{}
+
+// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests.
+func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
+	h := req.Header.Get(cloudTraceHeader)
+	// See https://cloud.google.com/trace/docs/faq for the header format.
+	// Return if the header is empty or missing, or if the header is unreasonably
+	// large, to avoid making unnecessary copies of a large string.
+	if h == "" || len(h) > httpHeaderMaxSize {
+		return trace.SpanContext{}, false
+	}
+
+	// Parse the trace id field.
+	slash := strings.Index(h, `/`)
+	if slash == -1 {
+		return trace.SpanContext{}, false
+	}
+	tid, h := h[:slash], h[slash+1:]
+
+	buf, err := hex.DecodeString(tid)
+	if err != nil {
+		return trace.SpanContext{}, false
+	}
+	copy(sc.TraceID[:], buf)
+
+	// Parse the span id field.
+	spanstr := h
+	semicolon := strings.Index(h, `;`)
+	if semicolon != -1 {
+		spanstr, h = h[:semicolon], h[semicolon+1:]
+	}
+	sid, err := strconv.ParseUint(spanstr, 10, 64)
+	if err != nil {
+		return trace.SpanContext{}, false
+	}
+	binary.BigEndian.PutUint64(sc.SpanID[:], sid)
+
+	// Parse the options field; it is optional.
+	if !strings.HasPrefix(h, "o=") {
+		return sc, true
+	}
+	o, err := strconv.ParseUint(h[2:], 10, 32)
+	if err != nil {
+		return trace.SpanContext{}, false
+	}
+	sc.TraceOptions = trace.TraceOptions(o)
+	return sc, true
+}
+
+// SpanContextToRequest modifies the given request to include a Cloud Trace header.
+func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
+	sid := binary.BigEndian.Uint64(sc.SpanID[:])
+	header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
+	req.Header.Set(cloudTraceHeader, header)
+}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go
new file mode 100644
index 000000000..94caeb00f
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go
@@ -0,0 +1,211 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httptransport
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+	"net/http"
+	"time"
+
+	"cloud.google.com/go/auth"
+	"cloud.google.com/go/auth/credentials"
+	"cloud.google.com/go/auth/internal"
+	"cloud.google.com/go/auth/internal/transport"
+	"cloud.google.com/go/auth/internal/transport/cert"
+	"go.opencensus.io/plugin/ochttp"
+	"golang.org/x/net/http2"
+)
+
+const (
+	quotaProjectHeaderKey = "X-Goog-User-Project"
+)
+
+func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) {
+	var headers = opts.Headers
+	ht := &headerTransport{
+		base:    base,
+		headers: headers,
+	}
+	var trans http.RoundTripper = ht
+	trans = addOCTransport(trans, opts)
+	switch {
+	case opts.DisableAuthentication:
+		// Do nothing.
+	case opts.APIKey != "":
+		qp := internal.GetQuotaProject(nil, opts.Headers.Get(quotaProjectHeaderKey))
+		if qp != "" {
+			if headers == nil {
+				headers = make(map[string][]string, 1)
+			}
+			headers.Set(quotaProjectHeaderKey, qp)
+		}
+		trans = &apiKeyTransport{
+			Transport: trans,
+			Key:       opts.APIKey,
+		}
+	default:
+		var creds *auth.Credentials
+		if opts.Credentials != nil {
+			creds = opts.Credentials
+		} else {
+			var err error
+			creds, err = credentials.DetectDefault(opts.resolveDetectOptions())
+			if err != nil {
+				return nil, err
+			}
+		}
+		qp, err := creds.QuotaProjectID(context.Background())
+		if err != nil {
+			return nil, err
+		}
+		if qp != "" {
+			if headers == nil {
+				headers = make(map[string][]string, 1)
+			}
+			headers.Set(quotaProjectHeaderKey, qp)
+		}
+		creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil)
+		trans = &authTransport{
+			base:                 trans,
+			creds:                creds,
+			clientUniverseDomain: opts.UniverseDomain,
+		}
+	}
+	return trans, nil
+}
+
+// defaultBaseTransport returns the base HTTP transport, taking most defaults
+// from http.DefaultTransport. If a client certificate source is available,
+// TLSClientConfig is set as well.
+func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper {
+	trans := http.DefaultTransport.(*http.Transport).Clone()
+	trans.MaxIdleConnsPerHost = 100
+
+	if clientCertSource != nil {
+		trans.TLSClientConfig = &tls.Config{
+			GetClientCertificate: clientCertSource,
+		}
+	}
+	if dialTLSContext != nil {
+		// If DialTLSContext is set, TLSClientConfig will be ignored.
+		trans.DialTLSContext = dialTLSContext
+	}
+
+	// Configures the ReadIdleTimeout HTTP/2 option for the
+	// transport. This allows broken idle connections to be pruned more quickly,
+	// preventing the client from attempting to re-use connections that will no
+	// longer work.
+	http2Trans, err := http2.ConfigureTransports(trans)
+	if err == nil {
+		http2Trans.ReadIdleTimeout = time.Second * 31
+	}
+
+	return trans
+}
+
+type apiKeyTransport struct {
+	// Key is the API Key to set on requests.
+	Key string
+	// Transport is the underlying HTTP transport.
+	// If nil, http.DefaultTransport is used.
+	Transport http.RoundTripper
+}
+
+func (t *apiKeyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	newReq := *req
+	args := newReq.URL.Query()
+	args.Set("key", t.Key)
+	newReq.URL.RawQuery = args.Encode()
+	return t.Transport.RoundTrip(&newReq)
+}
+
+type headerTransport struct {
+	headers http.Header
+	base    http.RoundTripper
+}
+
+func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	rt := t.base
+	newReq := *req
+	newReq.Header = make(http.Header)
+	for k, vv := range req.Header {
+		newReq.Header[k] = vv
+	}
+
+	for k, v := range t.headers {
+		newReq.Header[k] = v
+	}
+
+	return rt.RoundTrip(&newReq)
+}
+
+func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
+	if opts.DisableTelemetry {
+		return trans
+	}
+	return &ochttp.Transport{
+		Base:        trans,
+		Propagation: &httpFormat{},
+	}
+}
+
+type authTransport struct {
+	creds                *auth.Credentials
+	base                 http.RoundTripper
+	clientUniverseDomain string
+}
+
+// getClientUniverseDomain returns the universe domain configured for the client.
+// The default value is "googleapis.com".
+func (t *authTransport) getClientUniverseDomain() string {
+	if t.clientUniverseDomain == "" {
+		return internal.DefaultUniverseDomain
+	}
+	return t.clientUniverseDomain
+}
+
+// RoundTrip authorizes and authenticates the request with an access token from
+// the transport's credentials. Per the RoundTripper contract we must not
+// modify the initial request, so we clone it, and we must close the body on
+// any errors that happen during our token logic.
+func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	reqBodyClosed := false
+	if req.Body != nil {
+		defer func() {
+			if !reqBodyClosed {
+				req.Body.Close()
+			}
+		}()
+	}
+	credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context())
+	if err != nil {
+		return nil, err
+	}
+	if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
+		return nil, err
+	}
+	token, err := t.creds.Token(req.Context())
+	if err != nil {
+		return nil, err
+	}
+	req2 := req.Clone(req.Context())
+	SetAuthHeader(token, req2)
+	// The base transport is now responsible for closing the body.
+	reqBodyClosed = true
+	return t.base.RoundTrip(req2)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
new file mode 100644
index 000000000..9cd4bed61
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
@@ -0,0 +1,107 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package credsfile is meant to hide implementation details from the public
+// surface of the detect package. It should not import any other packages in
+// this module. It is located under the main internal package so other
+// sub-packages can use these parsed types as well.
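+//
+// Each parsed type below mirrors one JSON credential file layout; the "type"
+// field in the JSON (for example "service_account") selects the matching
+// parser, as implemented by parseCredentialType.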
+package credsfile
+
+import (
+	"os"
+	"os/user"
+	"path/filepath"
+	"runtime"
+)
+
+const (
+	// GoogleAppCredsEnvVar is the environment variable for setting the
+	// application default credentials.
+	GoogleAppCredsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS"
+	userCredsFilename    = "application_default_credentials.json"
+)
+
+// CredentialType represents different credential filetypes Google credentials
+// can be.
+type CredentialType int
+
+const (
+	// UnknownCredType is an unidentified file type.
+	UnknownCredType CredentialType = iota
+	// UserCredentialsKey represents a user creds file type.
+	UserCredentialsKey
+	// ServiceAccountKey represents a service account file type.
+	ServiceAccountKey
+	// ImpersonatedServiceAccountKey represents an impersonated service account
+	// file type.
+	ImpersonatedServiceAccountKey
+	// ExternalAccountKey represents an external account file type.
+	ExternalAccountKey
+	// GDCHServiceAccountKey represents a GDCH file type.
+	GDCHServiceAccountKey
+	// ExternalAccountAuthorizedUserKey represents an external account
+	// authorized user file type.
+	ExternalAccountAuthorizedUserKey
+)
+
+// parseCredentialType returns the associated filetype based on the parsed
+// typeString provided.
+func parseCredentialType(typeString string) CredentialType {
+	switch typeString {
+	case "service_account":
+		return ServiceAccountKey
+	case "authorized_user":
+		return UserCredentialsKey
+	case "impersonated_service_account":
+		return ImpersonatedServiceAccountKey
+	case "external_account":
+		return ExternalAccountKey
+	case "external_account_authorized_user":
+		return ExternalAccountAuthorizedUserKey
+	case "gdch_service_account":
+		return GDCHServiceAccountKey
+	default:
+		return UnknownCredType
+	}
+}
+
+// GetFileNameFromEnv returns the override if provided or detects a filename
+// from the environment.
+func GetFileNameFromEnv(override string) string {
+	if override != "" {
+		return override
+	}
+	return os.Getenv(GoogleAppCredsEnvVar)
+}
+
+// GetWellKnownFileName tries to locate the filepath for the user credential
+// file based on the environment.
+func GetWellKnownFileName() string {
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud", userCredsFilename)
+	}
+	return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", userCredsFilename)
+}
+
+// guessUnixHomeDir defaults to checking HOME, but not all unix systems have
+// this set, so it falls back to the OS user database and finally to an empty
+// string.
+func guessUnixHomeDir() string {
+	if v := os.Getenv("HOME"); v != "" {
+		return v
+	}
+	if u, err := user.Current(); err == nil {
+		return u.HomeDir
+	}
+	return ""
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
new file mode 100644
index 000000000..69e30779f
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
@@ -0,0 +1,149 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package credsfile + +import ( + "encoding/json" +) + +// Config3LO is the internals of a client creds file. +type Config3LO struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` +} + +// ClientCredentialsFile representation. +type ClientCredentialsFile struct { + Web *Config3LO `json:"web"` + Installed *Config3LO `json:"installed"` + UniverseDomain string `json:"universe_domain"` +} + +// ServiceAccountFile representation. +type ServiceAccountFile struct { + Type string `json:"type"` + ProjectID string `json:"project_id"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + ClientEmail string `json:"client_email"` + ClientID string `json:"client_id"` + AuthURL string `json:"auth_uri"` + TokenURL string `json:"token_uri"` + UniverseDomain string `json:"universe_domain"` +} + +// UserCredentialsFile representation. +type UserCredentialsFile struct { + Type string `json:"type"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + QuotaProjectID string `json:"quota_project_id"` + RefreshToken string `json:"refresh_token"` + UniverseDomain string `json:"universe_domain"` +} + +// ExternalAccountFile representation. +type ExternalAccountFile struct { + Type string `json:"type"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + Audience string `json:"audience"` + SubjectTokenType string `json:"subject_token_type"` + ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + TokenURL string `json:"token_url"` + CredentialSource *CredentialSource `json:"credential_source,omitempty"` + TokenInfoURL string `json:"token_info_url"` + ServiceAccountImpersonation *ServiceAccountImpersonationInfo `json:"service_account_impersonation,omitempty"` + QuotaProjectID string `json:"quota_project_id"` + WorkforcePoolUserProject string `json:"workforce_pool_user_project"` + UniverseDomain string `json:"universe_domain"` +} + +// ExternalAccountAuthorizedUserFile representation. +type ExternalAccountAuthorizedUserFile struct { + Type string `json:"type"` + Audience string `json:"audience"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RefreshToken string `json:"refresh_token"` + TokenURL string `json:"token_url"` + TokenInfoURL string `json:"token_info_url"` + RevokeURL string `json:"revoke_url"` + QuotaProjectID string `json:"quota_project_id"` + UniverseDomain string `json:"universe_domain"` +} + +// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. +// +// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question. +// The EnvironmentID should start with AWS if being used for an AWS credential. +type CredentialSource struct { + File string `json:"file"` + URL string `json:"url"` + Headers map[string]string `json:"headers"` + Executable *ExecutableConfig `json:"executable,omitempty"` + EnvironmentID string `json:"environment_id"` + RegionURL string `json:"region_url"` + RegionalCredVerificationURL string `json:"regional_cred_verification_url"` + CredVerificationURL string `json:"cred_verification_url"` + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` + Format *Format `json:"format,omitempty"` +} + +// Format describes the format of a [CredentialSource]. 
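+// For a JSON subject token source, the credential file might carry, for
+// example (illustrative):
+//
+//	"format": {"type": "json", "subject_token_field_name": "access_token"}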
+type Format struct {
+	// Type is either "text" or "json". When not provided, "text" type is
+	// assumed.
+	Type string `json:"type"`
+	// SubjectTokenFieldName is only required for JSON format. This would be
+	// "access_token" for Azure.
+	SubjectTokenFieldName string `json:"subject_token_field_name"`
+}
+
+// ExecutableConfig represents the command to run for an executable
+// [CredentialSource].
+type ExecutableConfig struct {
+	Command       string `json:"command"`
+	TimeoutMillis int    `json:"timeout_millis"`
+	OutputFile    string `json:"output_file"`
+}
+
+// ServiceAccountImpersonationInfo has impersonation configuration.
+type ServiceAccountImpersonationInfo struct {
+	TokenLifetimeSeconds int `json:"token_lifetime_seconds"`
+}
+
+// ImpersonatedServiceAccountFile representation.
+type ImpersonatedServiceAccountFile struct {
+	Type                           string          `json:"type"`
+	ServiceAccountImpersonationURL string          `json:"service_account_impersonation_url"`
+	Delegates                      []string        `json:"delegates"`
+	CredSource                     json.RawMessage `json:"source_credentials"`
+	UniverseDomain                 string          `json:"universe_domain"`
+}
+
+// GDCHServiceAccountFile represents the Google Distributed Cloud Hosted (GDCH) service identity file.
+type GDCHServiceAccountFile struct {
+	Type           string `json:"type"`
+	FormatVersion  string `json:"format_version"`
+	Project        string `json:"project"`
+	Name           string `json:"name"`
+	CertPath       string `json:"ca_cert_path"`
+	PrivateKeyID   string `json:"private_key_id"`
+	PrivateKey     string `json:"private_key"`
+	TokenURL       string `json:"token_uri"`
+	UniverseDomain string `json:"universe_domain"`
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go
new file mode 100644
index 000000000..a02b9f5df
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go
@@ -0,0 +1,98 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credsfile
+
+import (
+	"encoding/json"
+)
+
+// ParseServiceAccount parses bytes into a [ServiceAccountFile].
+func ParseServiceAccount(b []byte) (*ServiceAccountFile, error) {
+	var f *ServiceAccountFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// ParseClientCredentials parses bytes into a [ClientCredentialsFile].
+func ParseClientCredentials(b []byte) (*ClientCredentialsFile, error) {
+	var f *ClientCredentialsFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// ParseUserCredentials parses bytes into a [UserCredentialsFile].
+func ParseUserCredentials(b []byte) (*UserCredentialsFile, error) {
+	var f *UserCredentialsFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// ParseExternalAccount parses bytes into an [ExternalAccountFile].
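+// A minimal usage sketch (illustrative):
+//
+//	f, err := ParseExternalAccount(b)
+//	if err != nil {
+//		// handle malformed JSON
+//	}
+//	_ = f.TokenURL // the STS exchange endpoint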
+func ParseExternalAccount(b []byte) (*ExternalAccountFile, error) {
+	var f *ExternalAccountFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// ParseExternalAccountAuthorizedUser parses bytes into an
+// [ExternalAccountAuthorizedUserFile].
+func ParseExternalAccountAuthorizedUser(b []byte) (*ExternalAccountAuthorizedUserFile, error) {
+	var f *ExternalAccountAuthorizedUserFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// ParseImpersonatedServiceAccount parses bytes into an
+// [ImpersonatedServiceAccountFile].
+func ParseImpersonatedServiceAccount(b []byte) (*ImpersonatedServiceAccountFile, error) {
+	var f *ImpersonatedServiceAccountFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// ParseGDCHServiceAccount parses bytes into a [GDCHServiceAccountFile].
+func ParseGDCHServiceAccount(b []byte) (*GDCHServiceAccountFile, error) {
+	var f *GDCHServiceAccountFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+type fileTypeChecker struct {
+	Type string `json:"type"`
+}
+
+// ParseFileType determines the [CredentialType] based on bytes provided.
+func ParseFileType(b []byte) (CredentialType, error) {
+	var f fileTypeChecker
+	if err := json.Unmarshal(b, &f); err != nil {
+		return 0, err
+	}
+	return parseCredentialType(f.Type), nil
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go
new file mode 100644
index 000000000..70534e809
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/internal.go
@@ -0,0 +1,184 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"context"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"sync"
+	"time"
+
+	"cloud.google.com/go/compute/metadata"
+)
+
+const (
+	// TokenTypeBearer is the auth header prefix for bearer tokens.
+	TokenTypeBearer = "Bearer"
+
+	// QuotaProjectEnvVar is the environment variable for setting the quota
+	// project.
+	QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT"
+	projectEnvVar      = "GOOGLE_CLOUD_PROJECT"
+	maxBodySize        = 1 << 20
+
+	// DefaultUniverseDomain is the default value for universe domain.
+	// Universe domain is the default service domain for a given Cloud universe.
+	DefaultUniverseDomain = "googleapis.com"
+)
+
+// CloneDefaultClient returns an [http.Client] with some good defaults.
+func CloneDefaultClient() *http.Client {
+	return &http.Client{
+		Transport: http.DefaultTransport.(*http.Transport).Clone(),
+		Timeout:   30 * time.Second,
+	}
+}
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+	block, _ := pem.Decode(key)
+	if block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err)
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("private key is invalid")
+	}
+	return parsed, nil
+}
+
+// GetQuotaProject retrieves the quota project with precedence being: override,
+// environment variable, creds json file.
+func GetQuotaProject(b []byte, override string) string {
+	if override != "" {
+		return override
+	}
+	if env := os.Getenv(QuotaProjectEnvVar); env != "" {
+		return env
+	}
+	if b == nil {
+		return ""
+	}
+	var v struct {
+		QuotaProject string `json:"quota_project_id"`
+	}
+	if err := json.Unmarshal(b, &v); err != nil {
+		return ""
+	}
+	return v.QuotaProject
+}
+
+// GetProjectID retrieves the project ID with precedence being: override,
+// environment variable, creds json file.
+func GetProjectID(b []byte, override string) string {
+	if override != "" {
+		return override
+	}
+	if env := os.Getenv(projectEnvVar); env != "" {
+		return env
+	}
+	if b == nil {
+		return ""
+	}
+	var v struct {
+		ProjectID string `json:"project_id"` // standard service account key
+		Project   string `json:"project"`    // gdch key
+	}
+	if err := json.Unmarshal(b, &v); err != nil {
+		return ""
+	}
+	if v.ProjectID != "" {
+		return v.ProjectID
+	}
+	return v.Project
+}
+
+// ReadAll consumes the whole reader and safely reads the content of its body
+// with some overflow protection.
+func ReadAll(r io.Reader) ([]byte, error) {
+	return io.ReadAll(io.LimitReader(r, maxBodySize))
+}
+
+// StaticCredentialsProperty is a helper for creating static credentials
+// properties.
+func StaticCredentialsProperty(s string) StaticProperty {
+	return StaticProperty(s)
+}
+
+// StaticProperty always returns the value of the underlying string.
+type StaticProperty string
+
+// GetProperty returns the property value for the given context.
+func (p StaticProperty) GetProperty(context.Context) (string, error) {
+	return string(p), nil
+}
+
+// ComputeUniverseDomainProvider fetches the credentials universe domain from
+// the google cloud metadata service.
+type ComputeUniverseDomainProvider struct {
+	universeDomainOnce sync.Once
+	universeDomain     string
+	universeDomainErr  error
+}
+
+// GetProperty fetches the credentials universe domain from the google cloud
+// metadata service.
+func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) {
+	c.universeDomainOnce.Do(func() {
+		c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx)
+	})
+	if c.universeDomainErr != nil {
+		return "", c.universeDomainErr
+	}
+	return c.universeDomain, nil
+}
+
+// httpGetMetadataUniverseDomain is a package var for unit test substitution.
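+// Tests can stub it, for example (illustrative):
+//
+//	httpGetMetadataUniverseDomain = func(context.Context) (string, error) {
+//		return "example.com", nil
+//	}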
+var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) {
+	client := metadata.NewClient(&http.Client{Timeout: time.Second})
+	// TODO(quartzmo): set ctx on request
+	return client.Get("universe/universe_domain")
+}
+
+func getMetadataUniverseDomain(ctx context.Context) (string, error) {
+	universeDomain, err := httpGetMetadataUniverseDomain(ctx)
+	if err == nil {
+		return universeDomain, nil
+	}
+	if _, ok := err.(metadata.NotDefinedError); ok {
+		// http.StatusNotFound (404)
+		return DefaultUniverseDomain, nil
+	}
+	return "", err
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
new file mode 100644
index 000000000..dc28b3c3b
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
@@ -0,0 +1,171 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jwt
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+const (
+	// HeaderAlgRSA256 is the RS256 [Header.Algorithm].
+	HeaderAlgRSA256 = "RS256"
+	// HeaderAlgES256 is the ES256 [Header.Algorithm].
+	HeaderAlgES256 = "ES256"
+	// HeaderType is the standard [Header.Type].
+	HeaderType = "JWT"
+)
+
+// Header represents a JWT header.
+type Header struct {
+	Algorithm string `json:"alg"`
+	Type      string `json:"typ"`
+	KeyID     string `json:"kid"`
+}
+
+func (h *Header) encode() (string, error) {
+	b, err := json.Marshal(h)
+	if err != nil {
+		return "", err
+	}
+	return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// Claims represents the claims set of a JWT.
+type Claims struct {
+	// Iss is the issuer JWT claim.
+	Iss string `json:"iss"`
+	// Scope is the scope JWT claim.
+	Scope string `json:"scope,omitempty"`
+	// Exp is the expiry JWT claim. If unset, default is one hour from now.
+	Exp int64 `json:"exp"`
+	// Iat is the issued-at JWT claim. If unset, default is now.
+	Iat int64 `json:"iat"`
+	// Aud is the audience JWT claim. Optional.
+	Aud string `json:"aud"`
+	// Sub is the subject JWT claim. Optional.
+	Sub string `json:"sub,omitempty"`
+	// AdditionalClaims contains any additional non-standard JWT claims. Optional.
+	AdditionalClaims map[string]interface{} `json:"-"`
+}
+
+func (c *Claims) encode() (string, error) {
+	// Compensate for skew.
+	now := time.Now().Add(-10 * time.Second)
+	if c.Iat == 0 {
+		c.Iat = now.Unix()
+	}
+	if c.Exp == 0 {
+		c.Exp = now.Add(time.Hour).Unix()
+	}
+	if c.Exp < c.Iat {
+		return "", fmt.Errorf("jwt: invalid Exp = %d; must be later than Iat = %d", c.Exp, c.Iat)
+	}
+
+	b, err := json.Marshal(c)
+	if err != nil {
+		return "", err
+	}
+
+	if len(c.AdditionalClaims) == 0 {
+		return base64.RawURLEncoding.EncodeToString(b), nil
+	}
+
+	// Marshal private claim set and then append it to b.
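+	// Illustrative example: {"iss":"x"} plus {"extra":1} yields
+	// {"iss":"x","extra":1}.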
+ prv, err := json.Marshal(c.AdditionalClaims) + if err != nil { + return "", fmt.Errorf("invalid map of additional claims %v: %w", c.AdditionalClaims, err) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// EncodeJWS encodes the data using the provided key as a JSON web signature. +func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + claims, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, claims) + h := sha256.New() + h.Write([]byte(ss)) + sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// DecodeJWS decodes a claim set from a JWS payload. +func DecodeJWS(payload string) (*Claims, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + return nil, errors.New("invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &Claims{} + if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c); err != nil { + return nil, err + } + if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(&c.AdditionalClaims); err != nil { + return nil, err + } + return c, err +} + +// VerifyJWS tests whether the provided JWT token's signature was produced by +// the private key associated with the provided public key. +func VerifyJWS(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jwt: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go new file mode 100644 index 000000000..6ef88311a --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -0,0 +1,298 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "context" + "crypto/tls" + "errors" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport/cert" + "github.com/google/s2a-go" + "github.com/google/s2a-go/fallback" + "google.golang.org/grpc/credentials" +) + +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" + + // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. + googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" + googleAPIUseCertSource = "GOOGLE_API_USE_CLIENT_CERTIFICATE" + googleAPIUseMTLS = "GOOGLE_API_USE_MTLS_ENDPOINT" + googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS" + + universeDomainPlaceholder = "UNIVERSE_DOMAIN" +) + +var ( + mdsMTLSAutoConfigSource mtlsConfigSource + errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") +) + +// Options is a struct that is duplicated information from the individual +// transport packages in order to avoid cyclic deps. It correlates 1:1 with +// fields on httptransport.Options and grpctransport.Options. +type Options struct { + Endpoint string + DefaultMTLSEndpoint string + DefaultEndpointTemplate string + ClientCertProvider cert.Provider + Client *http.Client + UniverseDomain string + EnableDirectPath bool + EnableDirectPathXds bool +} + +// getUniverseDomain returns the default service domain for a given Cloud +// universe. +func (o *Options) getUniverseDomain() string { + if o.UniverseDomain == "" { + return internal.DefaultUniverseDomain + } + return o.UniverseDomain +} + +// isUniverseDomainGDU returns true if the universe domain is the default Google +// universe. +func (o *Options) isUniverseDomainGDU() bool { + return o.getUniverseDomain() == internal.DefaultUniverseDomain +} + +// defaultEndpoint returns the DefaultEndpointTemplate merged with the +// universe domain if the DefaultEndpointTemplate is set, otherwise returns an +// empty string. +func (o *Options) defaultEndpoint() string { + if o.DefaultEndpointTemplate == "" { + return "" + } + return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1) +} + +// mergedEndpoint merges a user-provided Endpoint of format host[:port] with the +// default endpoint. +func (o *Options) mergedEndpoint() (string, error) { + defaultEndpoint := o.defaultEndpoint() + u, err := url.Parse(fixScheme(defaultEndpoint)) + if err != nil { + return "", err + } + return strings.Replace(defaultEndpoint, u.Host, o.Endpoint, 1), nil +} + +func fixScheme(baseURL string) string { + if !strings.Contains(baseURL, "://") { + baseURL = "https://" + baseURL + } + return baseURL +} + +// GetGRPCTransportCredsAndEndpoint returns an instance of +// [google.golang.org/grpc/credentials.TransportCredentials], and the +// corresponding endpoint to use for GRPC client. +func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCredentials, string, error) { + config, err := getTransportConfig(opts) + if err != nil { + return nil, "", err + } + + defaultTransportCreds := credentials.NewTLS(&tls.Config{ + GetClientCertificate: config.clientCertSource, + }) + if config.s2aAddress == "" { + return defaultTransportCreds, config.endpoint, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. 
+ if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackClientHandshakeFunc: fallbackHandshake, + } + } + + s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ + S2AAddress: config.s2aAddress, + FallbackOpts: fallbackOpts, + }) + if err != nil { + // Use default if we cannot initialize S2A client transport credentials. + return defaultTransportCreds, config.endpoint, nil + } + return s2aTransportCreds, config.s2aMTLSEndpoint, nil +} + +// GetHTTPTransportConfig returns a client certificate source and a function for +// dialing MTLS with S2A. +func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, string, string) (net.Conn, error), error) { + config, err := getTransportConfig(opts) + if err != nil { + return nil, nil, err + } + + if config.s2aAddress == "" { + return config.clientCertSource, nil, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. + if fallbackURL, err := url.Parse(config.endpoint); err == nil { + if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackDialer: &s2a.FallbackDialer{ + Dialer: fallbackDialer, + ServerAddr: fallbackServerAddr, + }, + } + } + } + + dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ + S2AAddress: config.s2aAddress, + FallbackOpts: fallbackOpts, + }) + return nil, dialTLSContextFunc, nil +} + +func getTransportConfig(opts *Options) (*transportConfig, error) { + clientCertSource, err := getClientCertificateSource(opts) + if err != nil { + return nil, err + } + endpoint, err := getEndpoint(opts, clientCertSource) + if err != nil { + return nil, err + } + defaultTransportConfig := transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + } + + if !shouldUseS2A(clientCertSource, opts) { + return &defaultTransportConfig, nil + } + if !opts.isUniverseDomainGDU() { + return nil, errUniverseNotSupportedMTLS + } + + s2aMTLSEndpoint := opts.DefaultMTLSEndpoint + + s2aAddress := GetS2AAddress() + if s2aAddress == "" { + return &defaultTransportConfig, nil + } + return &transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: s2aAddress, + s2aMTLSEndpoint: s2aMTLSEndpoint, + }, nil +} + +// getClientCertificateSource returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. Any exceptions +// encountered while initializing the default source will be reported as client +// error (ex. corrupt metadata file). +func getClientCertificateSource(opts *Options) (cert.Provider, error) { + if !isClientCertificateEnabled(opts) { + return nil, nil + } else if opts.ClientCertProvider != nil { + return opts.ClientCertProvider, nil + } + return cert.DefaultProvider() + +} + +// isClientCertificateEnabled returns true by default for all GDU universe domain, unless explicitly overridden by env var +func isClientCertificateEnabled(opts *Options) bool { + if value, ok := os.LookupEnv(googleAPIUseCertSource); ok { + // error as false is OK + b, _ := strconv.ParseBool(value) + return b + } + return opts.isUniverseDomainGDU() +} + +type transportConfig struct { + // The client certificate source. 
+ clientCertSource cert.Provider + // The corresponding endpoint to use based on client certificate source. + endpoint string + // The S2A address if it can be used, otherwise an empty string. + s2aAddress string + // The MTLS endpoint to use with S2A. + s2aMTLSEndpoint string +} + +// getEndpoint returns the endpoint for the service, taking into account the +// user-provided endpoint override "settings.Endpoint". +// +// If no endpoint override is specified, we will either return the default endpoint or +// the default mTLS endpoint if a client certificate is available. +// +// You can override the default endpoint choice (mtls vs. regular) by setting the +// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// +// If the endpoint override is an address (host:port) rather than full base +// URL (ex. https://...), then the user-provided address will be merged into +// the default endpoint. For example, WithEndpoint("myhost:8000") and +// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return "https://myhost:8080/bar/baz" +func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) { + if opts.Endpoint == "" { + mtlsMode := getMTLSMode() + if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { + if !opts.isUniverseDomainGDU() { + return "", errUniverseNotSupportedMTLS + } + return opts.DefaultMTLSEndpoint, nil + } + return opts.defaultEndpoint(), nil + } + if strings.Contains(opts.Endpoint, "://") { + // User passed in a full URL path, use it verbatim. + return opts.Endpoint, nil + } + if opts.defaultEndpoint() == "" { + // If DefaultEndpointTemplate is not configured, + // use the user provided endpoint verbatim. This allows a naked + // "host[:port]" URL to be used with GRPC Direct Path. + return opts.Endpoint, nil + } + + // Assume user-provided endpoint is host[:port], merge it with the default endpoint. + return opts.mergedEndpoint() +} + +func getMTLSMode() string { + mode := os.Getenv(googleAPIUseMTLS) + if mode == "" { + mode = os.Getenv(googleAPIUseMTLSOld) // Deprecated. + } + if mode == "" { + return mTLSModeAuto + } + return strings.ToLower(mode) +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go new file mode 100644 index 000000000..96582ce7b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go @@ -0,0 +1,62 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "errors" + "sync" +) + +// defaultCertData holds all the variables pertaining to +// the default certificate provider created by [DefaultProvider]. +// +// A singleton model is used to allow the provider to be reused +// by the transport layer. 
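Back in `cba.go` above, the endpoint plumbing reduces to two string rewrites: `defaultEndpoint` substitutes the universe domain into `DefaultEndpointTemplate`, and `mergedEndpoint` splices a user-supplied `host[:port]` over the default host. A stdlib-only sketch of both steps (template and host values are made up):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	const tmpl = "https://compute.UNIVERSE_DOMAIN/compute/v1" // illustrative template
	universeDomain := "googleapis.com"

	// defaultEndpoint: replace the UNIVERSE_DOMAIN placeholder once.
	def := strings.Replace(tmpl, "UNIVERSE_DOMAIN", universeDomain, 1)
	fmt.Println(def) // https://compute.googleapis.com/compute/v1

	// mergedEndpoint: swap a user-provided host[:port] over the default host.
	u, err := url.Parse(def)
	if err != nil {
		panic(err)
	}
	merged := strings.Replace(def, u.Host, "myhost:8000", 1)
	fmt.Println(merged) // https://myhost:8000/compute/v1
}
```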
As mentioned in [DefaultProvider] (provider nil, nil) +// may be returned to indicate a default provider could not be found, which +// will skip extra tls config in the transport layer . +type defaultCertData struct { + once sync.Once + provider Provider + err error +} + +var ( + defaultCert defaultCertData +) + +// Provider is a function that can be passed into crypto/tls.Config.GetClientCertificate. +type Provider func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable. +var errSourceUnavailable = errors.New("certificate source is unavailable") + +// DefaultProvider returns a certificate source using the preferred EnterpriseCertificateProxySource. +// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource. +// +// If neither source is available (due to missing configurations), a nil Source and a nil Error are +// returned to indicate that a default certificate source is unavailable. +func DefaultProvider() (Provider, error) { + defaultCert.once.Do(func() { + defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.provider, defaultCert.err = NewSecureConnectProvider("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.provider, defaultCert.err = nil, nil + } + } + }) + return defaultCert.provider, defaultCert.err +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go new file mode 100644 index 000000000..366515916 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "errors" + + "github.com/googleapis/enterprise-certificate-proxy/client" +) + +type ecpSource struct { + key *client.Key +} + +// NewEnterpriseCertificateProxyProvider creates a certificate source +// using the Enterprise Certificate Proxy client, which delegates +// certifcate related operations to an OS-specific "signer binary" +// that communicates with the native keystore (ex. keychain on MacOS). +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate issuer and the location of the signer binary. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. 
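As the `Provider` type above notes, a certificate source is just a `tls.Config.GetClientCertificate` callback. A hedged sketch of how a caller consumes one before the concrete sources below; `fakeProvider` is a stand-in, not the real ECP or Secure Connect source:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

// fakeProvider has the cert.Provider shape: it is invoked during the TLS
// handshake whenever the server requests a client certificate.
func fakeProvider(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
	// A real provider would return a certificate from ECP or Secure Connect;
	// an empty certificate just keeps the sketch compilable.
	return &tls.Certificate{}, nil
}

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				GetClientCertificate: fakeProvider,
			},
		},
	}
	fmt.Printf("%T\n", client.Transport) // *http.Transport with an mTLS client-cert hook
}
```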
+func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) { + key, err := client.Cred(configFilePath) + if err != nil { + if errors.Is(err, client.ErrCredUnavailable) { + return nil, errSourceUnavailable + } + return nil, err + } + + return (&ecpSource{ + key: key, + }).getClientCertificate, nil +} + +func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + var cert tls.Certificate + cert.PrivateKey = s.key + cert.Certificate = s.key.CertificateChain() + return &cert, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go new file mode 100644 index 000000000..3227aba28 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go @@ -0,0 +1,124 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" + "time" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +type secureConnectSource struct { + metadata secureConnectMetadata + + // Cache the cert to avoid executing helper command repeatedly. + cachedCertMutex sync.Mutex + cachedCert *tls.Certificate +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// NewSecureConnectProvider creates a certificate source using +// the Secure Connect Helper and its associated metadata file. +// +// The configFilePath points to the location of the context aware metadata file. +// If configFilePath is empty, use the default context aware metadata location. +func NewSecureConnectProvider(configFilePath string) (Provider, error) { + if configFilePath == "" { + user, err := user.Current() + if err != nil { + // Error locating the default config means Secure Connect is not supported. + return nil, errSourceUnavailable + } + configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) + } + + file, err := os.ReadFile(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Secure Connect is not supported. 
+ return nil, errSourceUnavailable + } + return nil, err + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + s.cachedCertMutex.Lock() + defer s.cachedCertMutex.Unlock() + if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) { + return s.cachedCert, nil + } + // Expand OS environment variables in the cert provider command such as "$HOME". + for i := 0; i < len(s.metadata.Cmd); i++ { + s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) + } + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + s.cachedCert = &cert + return &cert, nil +} + +// isCertificateExpired returns true if the given cert is expired or invalid. +func isCertificateExpired(cert *tls.Certificate) bool { + if len(cert.Certificate) == 0 { + return true + } + parsed, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return true + } + return time.Now().After(parsed.NotAfter) +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go new file mode 100644 index 000000000..2ed532deb --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -0,0 +1,180 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "encoding/json" + "log" + "os" + "strconv" + "sync" + "time" + + "cloud.google.com/go/auth/internal/transport/cert" + "cloud.google.com/go/compute/metadata" +) + +const ( + configEndpointSuffix = "instance/platform-security/auto-mtls-configuration" +) + +var ( + // The period an MTLS config can be reused before needing refresh. + configExpiry = time.Hour + + // mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source. + mtlsOnce sync.Once +) + +// GetS2AAddress returns the S2A address to be reached via plaintext connection. +// Returns empty string if not set or invalid. 
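Both caches in this area follow the same shape: the Secure Connect source above keeps its certificate until `NotAfter`, and `reuseMTLSConfigSource` below keeps the MDS config for `configExpiry` behind a mutex. A minimal generic sketch of that refresh-on-expiry pattern (names are mine, not the package's):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type cached[T any] struct {
	mu      sync.Mutex
	value   T
	expiry  time.Time
	ttl     time.Duration
	refresh func() (T, error)
}

// get returns the cached value, refreshing it only once the TTL has
// elapsed, mirroring reuseMTLSConfigSource.Config below.
func (c *cached[T]) get() (T, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if time.Now().Before(c.expiry) {
		return c.value, nil
	}
	v, err := c.refresh()
	if err != nil {
		var zero T
		return zero, err
	}
	c.value, c.expiry = v, time.Now().Add(c.ttl)
	return v, nil
}

func main() {
	n := 0
	c := &cached[string]{ttl: time.Hour, refresh: func() (string, error) {
		n++
		return fmt.Sprintf("config-%d", n), nil
	}}
	a, _ := c.get()
	b, _ := c.get() // served from cache; refresh ran once
	fmt.Println(a, b, n) // config-1 config-1 1
}
```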
+func GetS2AAddress() string { + c, err := getMetadataMTLSAutoConfig().Config() + if err != nil { + return "" + } + if !c.Valid() { + return "" + } + return c.S2A.PlaintextAddress +} + +type mtlsConfigSource interface { + Config() (*mtlsConfig, error) +} + +// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. +type mtlsConfig struct { + S2A *s2aAddresses `json:"s2a"` + Expiry time.Time +} + +func (c *mtlsConfig) Valid() bool { + return c != nil && c.S2A != nil && !c.expired() +} +func (c *mtlsConfig) expired() bool { + return c.Expiry.Before(time.Now()) +} + +// s2aAddresses contains the plaintext and/or MTLS S2A addresses. +type s2aAddresses struct { + // PlaintextAddress is the plaintext address to reach S2A + PlaintextAddress string `json:"plaintext_address"` + // MTLSAddress is the MTLS address to reach S2A + MTLSAddress string `json:"mtls_address"` +} + +// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh. +func getMetadataMTLSAutoConfig() mtlsConfigSource { + mtlsOnce.Do(func() { + mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{ + src: &metadataMTLSAutoConfig{}, + } + }) + return mdsMTLSAutoConfigSource +} + +// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry. +// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig. +type reuseMTLSConfigSource struct { + src mtlsConfigSource // src.Config() is called when config is expired + mu sync.Mutex // mutex guards config + config *mtlsConfig // cached config +} + +func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) { + cs.mu.Lock() + defer cs.mu.Unlock() + + if cs.config.Valid() { + return cs.config, nil + } + c, err := cs.src.Config() + if err != nil { + return nil, err + } + cs.config = c + return c, nil +} + +// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource +// It has the logic to query MDS and return an mtlsConfig +type metadataMTLSAutoConfig struct{} + +var httpGetMetadataMTLSConfig = func() (string, error) { + return metadata.Get(configEndpointSuffix) +} + +func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) { + resp, err := httpGetMetadataMTLSConfig() + if err != nil { + log.Printf("querying MTLS config from MDS endpoint failed: %v", err) + return defaultMTLSConfig(), nil + } + var config mtlsConfig + err = json.Unmarshal([]byte(resp), &config) + if err != nil { + log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err) + return defaultMTLSConfig(), nil + } + + if config.S2A == nil { + log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config) + return defaultMTLSConfig(), nil + } + + // set new expiry + config.Expiry = time.Now().Add(configExpiry) + return &config, nil +} + +func defaultMTLSConfig() *mtlsConfig { + return &mtlsConfig{ + S2A: &s2aAddresses{ + PlaintextAddress: "", + MTLSAddress: "", + }, + Expiry: time.Now().Add(configExpiry), + } +} + +func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool { + // If client cert is found, use that over S2A. + if clientCertSource != nil { + return false + } + // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A. + if !isGoogleS2AEnabled() { + return false + } + // If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A. + if opts.DefaultMTLSEndpoint == "" || opts.Endpoint != "" { + return false + } + // If custom HTTP client is provided, skip S2A. 
+ if opts.Client != nil { + return false + } + // If directPath is enabled, skip S2A. + return !opts.EnableDirectPath && !opts.EnableDirectPathXds +} + +func isGoogleS2AEnabled() bool { + b, err := strconv.ParseBool(os.Getenv(googleAPIUseS2AEnv)) + if err != nil { + return false + } + return b +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go new file mode 100644 index 000000000..b76386d3c --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -0,0 +1,76 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provided internal helpers for the two transport packages +// (grpctransport and httptransport). +package transport + +import ( + "fmt" + + "cloud.google.com/go/auth/credentials" +) + +// CloneDetectOptions clones a user set detect option into some new memory that +// we can internally manipulate before sending onto the detect package. +func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOptions { + if oldDo == nil { + // it is valid for users not to set this, but we will need to to default + // some options for them in this case so return some initialized memory + // to work with. + return &credentials.DetectOptions{} + } + newDo := &credentials.DetectOptions{ + // Simple types + Audience: oldDo.Audience, + Subject: oldDo.Subject, + EarlyTokenRefresh: oldDo.EarlyTokenRefresh, + TokenURL: oldDo.TokenURL, + STSAudience: oldDo.STSAudience, + CredentialsFile: oldDo.CredentialsFile, + UseSelfSignedJWT: oldDo.UseSelfSignedJWT, + UniverseDomain: oldDo.UniverseDomain, + + // These fields are are pointer types that we just want to use exactly + // as the user set, copy the ref + Client: oldDo.Client, + AuthHandlerOptions: oldDo.AuthHandlerOptions, + } + + // Smartly size this memory and copy below. + if oldDo.CredentialsJSON != nil { + newDo.CredentialsJSON = make([]byte, len(oldDo.CredentialsJSON)) + copy(newDo.CredentialsJSON, oldDo.CredentialsJSON) + } + if oldDo.Scopes != nil { + newDo.Scopes = make([]string, len(oldDo.Scopes)) + copy(newDo.Scopes, oldDo.Scopes) + } + + return newDo +} + +// ValidateUniverseDomain verifies that the universe domain configured for the +// client matches the universe domain configured for the credentials. +func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain string) error { + if clientUniverseDomain != credentialsUniverseDomain { + return fmt.Errorf( + "the configured universe domain (%q) does not match the universe "+ + "domain found in the credentials (%q). 
If you haven't configured "+ + "the universe domain explicitly, \"googleapis.com\" is the default", + clientUniverseDomain, + credentialsUniverseDomain) + } + return nil +} diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md new file mode 100644 index 000000000..ff9747bed --- /dev/null +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -0,0 +1,40 @@ +# Changelog + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4)) + +## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.0...auth/oauth2adapt/v0.2.1) (2024-04-18) + + +### Bug Fixes + +* **auth/oauth2adapt:** Adapt Token Types to be translated ([#9801](https://github.com/googleapis/google-cloud-go/issues/9801)) ([70f4115](https://github.com/googleapis/google-cloud-go/commit/70f411555ebbf2b71e6d425cc8d2030644c6b438)), refs [#9800](https://github.com/googleapis/google-cloud-go/issues/9800) + +## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.1.0...auth/oauth2adapt/v0.2.0) (2024-04-16) + + +### Features + +* **auth/oauth2adapt:** Add helpers for working with credentials types ([#9694](https://github.com/googleapis/google-cloud-go/issues/9694)) ([cf33b55](https://github.com/googleapis/google-cloud-go/commit/cf33b5514423a2ac5c2a323a1cd99aac34fd4233)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + +## 0.1.0 (2023-10-19) + + +### Features + +* **auth/oauth2adapt:** Adds a new module to translate types ([#8595](https://github.com/googleapis/google-cloud-go/issues/8595)) ([6933c5a](https://github.com/googleapis/google-cloud-go/commit/6933c5a0c1fc8e58cbfff8bbca439d671b94672f)) +* **auth/oauth2adapt:** Fixup deps for release ([#8747](https://github.com/googleapis/google-cloud-go/issues/8747)) ([749d243](https://github.com/googleapis/google-cloud-go/commit/749d243862b025a6487a4d2d339219889b4cfe70)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE similarity index 100% rename from vendor/google.golang.org/appengine/LICENSE rename to vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go new file mode 100644 index 000000000..9835ac571 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go @@ -0,0 +1,164 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
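Unlike the `internal/` packages earlier, `oauth2adapt` (whose source follows) is importable, so its adapters can be exercised directly. A small sketch bridging a static `golang.org/x/oauth2` source into an `auth.TokenProvider`; the token value is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/auth/oauth2adapt"
	"golang.org/x/oauth2"
)

func main() {
	// Wrap an x/oauth2 TokenSource so it satisfies auth.TokenProvider.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "placeholder-token"})
	tp := oauth2adapt.TokenProviderFromTokenSource(ts)

	tok, err := tp.Token(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(tok.Value, tok.Type) // placeholder-token Bearer
}
```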
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package oauth2adapt helps converts types used in [cloud.google.com/go/auth] +// and [golang.org/x/oauth2]. +package oauth2adapt + +import ( + "context" + "encoding/json" + "errors" + + "cloud.google.com/go/auth" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" +) + +// TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource] +// into a [cloud.google.com/go/auth.TokenProvider]. +func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider { + return &tokenProviderAdapter{ts: ts} +} + +type tokenProviderAdapter struct { + ts oauth2.TokenSource +} + +// Token fulfills the [cloud.google.com/go/auth.TokenProvider] interface. It +// is a light wrapper around the underlying TokenSource. +func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) { + tok, err := tp.ts.Token() + if err != nil { + var err2 *oauth2.RetrieveError + if ok := errors.As(err, &err2); ok { + return nil, AuthErrorFromRetrieveError(err2) + } + return nil, err + } + return &auth.Token{ + Value: tok.AccessToken, + Type: tok.Type(), + Expiry: tok.Expiry, + }, nil +} + +// TokenSourceFromTokenProvider converts any +// [cloud.google.com/go/auth.TokenProvider] into a +// [golang.org/x/oauth2.TokenSource]. +func TokenSourceFromTokenProvider(tp auth.TokenProvider) oauth2.TokenSource { + return &tokenSourceAdapter{tp: tp} +} + +type tokenSourceAdapter struct { + tp auth.TokenProvider +} + +// Token fulfills the [golang.org/x/oauth2.TokenSource] interface. It +// is a light wrapper around the underlying TokenProvider. +func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) { + tok, err := ts.tp.Token(context.Background()) + if err != nil { + var err2 *auth.Error + if ok := errors.As(err, &err2); ok { + return nil, AddRetrieveErrorToAuthError(err2) + } + return nil, err + } + return &oauth2.Token{ + AccessToken: tok.Value, + TokenType: tok.Type, + Expiry: tok.Expiry, + }, nil +} + +// AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials] +// to a [cloud.google.com/go/auth.Credentials]. +func AuthCredentialsFromOauth2Credentials(creds *google.Credentials) *auth.Credentials { + if creds == nil { + return nil + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: TokenProviderFromTokenSource(creds.TokenSource), + JSON: creds.JSON, + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return creds.ProjectID, nil + }), + UniverseDomainProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return creds.GetUniverseDomain() + }), + }) +} + +// Oauth2CredentialsFromAuthCredentials converts a [cloud.google.com/go/auth.Credentials] +// to a [golang.org/x/oauth2/google.Credentials]. +func Oauth2CredentialsFromAuthCredentials(creds *auth.Credentials) *google.Credentials { + if creds == nil { + return nil + } + // Throw away errors as old credentials are not request aware. Also, no + // network requests are currently happening for this use case. 
+ projectID, _ := creds.ProjectID(context.Background()) + + return &google.Credentials{ + TokenSource: TokenSourceFromTokenProvider(creds.TokenProvider), + ProjectID: projectID, + JSON: creds.JSON(), + UniverseDomainProvider: func() (string, error) { + return creds.UniverseDomain(context.Background()) + }, + } +} + +type oauth2Error struct { + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` +} + +// AddRetrieveErrorToAuthError returns the same error provided and adds a +// [golang.org/x/oauth2.RetrieveError] to the error chain by setting the `Err` field on the +// [cloud.google.com/go/auth.Error]. +func AddRetrieveErrorToAuthError(err *auth.Error) *auth.Error { + if err == nil { + return nil + } + e := &oauth2.RetrieveError{ + Response: err.Response, + Body: err.Body, + } + err.Err = e + if len(err.Body) > 0 { + var oErr oauth2Error + // ignore the error as it only fills in extra details + json.Unmarshal(err.Body, &oErr) + e.ErrorCode = oErr.ErrorCode + e.ErrorDescription = oErr.ErrorDescription + e.ErrorURI = oErr.ErrorURI + } + return err +} + +// AuthErrorFromRetrieveError returns an [cloud.google.com/go/auth.Error] that +// wraps the provided [golang.org/x/oauth2.RetrieveError]. +func AuthErrorFromRetrieveError(err *oauth2.RetrieveError) *auth.Error { + if err == nil { + return nil + } + return &auth.Error{ + Response: err.Response, + Body: err.Body, + Err: err, + } +} diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go new file mode 100644 index 000000000..1b8d83c4b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -0,0 +1,373 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "cloud.google.com/go/auth/internal" +) + +// AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for +// OAuth consent at the specified auth code URL and returns an auth code and +// state upon approval. +type AuthorizationHandler func(authCodeURL string) (code string, state string, err error) + +// Options3LO are the options for doing a 3-legged OAuth2 flow. +type Options3LO struct { + // ClientID is the application's ID. + ClientID string + // ClientSecret is the application's secret. Not required if AuthHandlerOpts + // is set. + ClientSecret string + // AuthURL is the URL for authenticating. + AuthURL string + // TokenURL is the URL for retrieving a token. + TokenURL string + // AuthStyle is used to describe how to client info in the token request. + AuthStyle Style + // RefreshToken is the token used to refresh the credential. Not required + // if AuthHandlerOpts is set. + RefreshToken string + // RedirectURL is the URL to redirect users to. Optional. + RedirectURL string + // Scopes specifies requested permissions for the Token. Optional. 
+ Scopes []string + + // URLParams are the set of values to apply to the token exchange. Optional. + URLParams url.Values + // Client is the client to be used to make the underlying token requests. + // Optional. + Client *http.Client + // EarlyTokenExpiry is the time before the token expires that it should be + // refreshed. If not set the default value is 10 seconds. Optional. + EarlyTokenExpiry time.Duration + + // AuthHandlerOpts provides a set of options for doing a + // 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional. + AuthHandlerOpts *AuthorizationHandlerOptions +} + +func (o *Options3LO) validate() error { + if o == nil { + return errors.New("auth: options must be provided") + } + if o.ClientID == "" { + return errors.New("auth: client ID must be provided") + } + if o.AuthHandlerOpts == nil && o.ClientSecret == "" { + return errors.New("auth: client secret must be provided") + } + if o.AuthURL == "" { + return errors.New("auth: auth URL must be provided") + } + if o.TokenURL == "" { + return errors.New("auth: token URL must be provided") + } + if o.AuthStyle == StyleUnknown { + return errors.New("auth: auth style must be provided") + } + if o.AuthHandlerOpts == nil && o.RefreshToken == "" { + return errors.New("auth: refresh token must be provided") + } + return nil +} + +// PKCEOptions holds parameters to support PKCE. +type PKCEOptions struct { + // Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier. + Challenge string // The un-padded, base64-url-encoded string of the encrypted code verifier. + // ChallengeMethod is the encryption method (ex. S256). + ChallengeMethod string + // Verifier is the original, non-encrypted secret. + Verifier string // The original, non-encrypted secret. +} + +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + // error fields + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +func (o *Options3LO) client() *http.Client { + if o.Client != nil { + return o.Client + } + return internal.CloneDefaultClient() +} + +// authCodeURL returns a URL that points to a OAuth2 consent page. +func (o *Options3LO) authCodeURL(state string, values url.Values) string { + var buf bytes.Buffer + buf.WriteString(o.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {o.ClientID}, + } + if o.RedirectURL != "" { + v.Set("redirect_uri", o.RedirectURL) + } + if len(o.Scopes) > 0 { + v.Set("scope", strings.Join(o.Scopes, " ")) + } + if state != "" { + v.Set("state", state) + } + if o.AuthHandlerOpts != nil { + if o.AuthHandlerOpts.PKCEOpts != nil && + o.AuthHandlerOpts.PKCEOpts.Challenge != "" { + v.Set(codeChallengeKey, o.AuthHandlerOpts.PKCEOpts.Challenge) + } + if o.AuthHandlerOpts.PKCEOpts != nil && + o.AuthHandlerOpts.PKCEOpts.ChallengeMethod != "" { + v.Set(codeChallengeMethodKey, o.AuthHandlerOpts.PKCEOpts.ChallengeMethod) + } + } + for k := range values { + v.Set(k, v.Get(k)) + } + if strings.Contains(o.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// New3LOTokenProvider returns a [TokenProvider] based on the 3-legged OAuth2 +// configuration. 
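A note on the `PKCEOptions` comment above: for the `S256` method the challenge is not an encrypted verifier but its SHA-256 hash, base64url-encoded without padding (RFC 7636). A sketch of deriving one; real code should generate the verifier with `crypto/rand`:

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"

	"cloud.google.com/go/auth"
)

func main() {
	verifier := "example-verifier-0123456789" // use a random 43-128 char string in practice
	sum := sha256.Sum256([]byte(verifier))

	// S256: challenge = base64url(SHA-256(verifier)), no padding.
	pkce := &auth.PKCEOptions{
		Challenge:       base64.RawURLEncoding.EncodeToString(sum[:]),
		ChallengeMethod: "S256",
		Verifier:        verifier,
	}
	fmt.Println(pkce.Challenge)
}
```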
The TokenProvider is caches and auto-refreshes tokens by +// default. +func New3LOTokenProvider(opts *Options3LO) (TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + if opts.AuthHandlerOpts != nil { + return new3LOTokenProviderWithAuthHandler(opts), nil + } + return NewCachedTokenProvider(&tokenProvider3LO{opts: opts, refreshToken: opts.RefreshToken, client: opts.client()}, &CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenExpiry, + }), nil +} + +// AuthorizationHandlerOptions provides a set of options to specify for doing a +// 3-legged OAuth2 flow with a custom [AuthorizationHandler]. +type AuthorizationHandlerOptions struct { + // AuthorizationHandler specifies the handler used to for the authorization + // part of the flow. + Handler AuthorizationHandler + // State is used verify that the "state" is identical in the request and + // response before exchanging the auth code for OAuth2 token. + State string + // PKCEOpts allows setting configurations for PKCE. Optional. + PKCEOpts *PKCEOptions +} + +func new3LOTokenProviderWithAuthHandler(opts *Options3LO) TokenProvider { + return NewCachedTokenProvider(&tokenProviderWithHandler{opts: opts, state: opts.AuthHandlerOpts.State}, &CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenExpiry, + }) +} + +// exchange handles the final exchange portion of the 3lo flow. Returns a Token, +// refreshToken, and error. +func (o *Options3LO) exchange(ctx context.Context, code string) (*Token, string, error) { + // Build request + v := url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + } + if o.RedirectURL != "" { + v.Set("redirect_uri", o.RedirectURL) + } + if o.AuthHandlerOpts != nil && + o.AuthHandlerOpts.PKCEOpts != nil && + o.AuthHandlerOpts.PKCEOpts.Verifier != "" { + v.Set(codeVerifierKey, o.AuthHandlerOpts.PKCEOpts.Verifier) + } + for k := range o.URLParams { + v.Set(k, o.URLParams.Get(k)) + } + return fetchToken(ctx, o, v) +} + +// This struct is not safe for concurrent access alone, but the way it is used +// in this package by wrapping it with a cachedTokenProvider makes it so. +type tokenProvider3LO struct { + opts *Options3LO + client *http.Client + refreshToken string +} + +func (tp *tokenProvider3LO) Token(ctx context.Context) (*Token, error) { + if tp.refreshToken == "" { + return nil, errors.New("auth: token expired and refresh token is not set") + } + v := url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tp.refreshToken}, + } + for k := range tp.opts.URLParams { + v.Set(k, tp.opts.URLParams.Get(k)) + } + + tk, rt, err := fetchToken(ctx, tp.opts, v) + if err != nil { + return nil, err + } + if tp.refreshToken != rt && rt != "" { + tp.refreshToken = rt + } + return tk, err +} + +type tokenProviderWithHandler struct { + opts *Options3LO + state string +} + +func (tp tokenProviderWithHandler) Token(ctx context.Context) (*Token, error) { + url := tp.opts.authCodeURL(tp.state, nil) + code, state, err := tp.opts.AuthHandlerOpts.Handler(url) + if err != nil { + return nil, err + } + if state != tp.state { + return nil, errors.New("auth: state mismatch in 3-legged-OAuth flow") + } + tok, _, err := tp.opts.exchange(ctx, code) + return tok, err +} + +// fetchToken returns a Token, refresh token, and/or an error. 
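`Options3LO` and `New3LOTokenProvider` are exported from `cloud.google.com/go/auth`, so the refresh-token path above can be driven as below. All credential and endpoint values are placeholders; calling `Token` with them will simply surface an error from the token endpoint:

```go
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/auth"
)

func main() {
	tp, err := auth.New3LOTokenProvider(&auth.Options3LO{
		ClientID:     "client-id-placeholder",
		ClientSecret: "client-secret-placeholder",
		AuthURL:      "https://accounts.google.com/o/oauth2/auth",
		TokenURL:     "https://oauth2.googleapis.com/token",
		AuthStyle:    auth.StyleInParams,
		RefreshToken: "refresh-token-placeholder",
	})
	if err != nil {
		panic(err)
	}
	// The returned provider is the cached wrapper shown above: tokens are
	// reused until shortly before expiry, then refreshed via the TokenURL.
	tok, err := tp.Token(context.Background())
	fmt.Println(tok, err)
}
```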
+func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, string, error) { + var refreshToken string + if o.AuthStyle == StyleInParams { + if o.ClientID != "" { + v.Set("client_id", o.ClientID) + } + if o.ClientSecret != "" { + v.Set("client_secret", o.ClientSecret) + } + } + req, err := http.NewRequest("POST", o.TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, refreshToken, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if o.AuthStyle == StyleInHeader { + req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret)) + } + + // Make request + r, err := o.client().Do(req.WithContext(ctx)) + if err != nil { + return nil, refreshToken, err + } + body, err := internal.ReadAll(r.Body) + r.Body.Close() + if err != nil { + return nil, refreshToken, fmt.Errorf("auth: cannot fetch token: %w", err) + } + + failureStatus := r.StatusCode < 200 || r.StatusCode > 299 + tokError := &Error{ + Response: r, + Body: body, + } + + var token *Token + // errors ignored because of default switch on content + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string + vals, err := url.ParseQuery(string(body)) + if err != nil { + if failureStatus { + return nil, refreshToken, tokError + } + return nil, refreshToken, fmt.Errorf("auth: cannot parse response: %w", err) + } + tokError.code = vals.Get("error") + tokError.description = vals.Get("error_description") + tokError.uri = vals.Get("error_uri") + token = &Token{ + Value: vals.Get("access_token"), + Type: vals.Get("token_type"), + Metadata: make(map[string]interface{}, len(vals)), + } + for k, v := range vals { + token.Metadata[k] = v + } + refreshToken = vals.Get("refresh_token") + e := vals.Get("expires_in") + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + if failureStatus { + return nil, refreshToken, tokError + } + return nil, refreshToken, fmt.Errorf("auth: cannot parse json: %w", err) + } + tokError.code = tj.ErrorCode + tokError.description = tj.ErrorDescription + tokError.uri = tj.ErrorURI + token = &Token{ + Value: tj.AccessToken, + Type: tj.TokenType, + Expiry: tj.expiry(), + Metadata: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Metadata) // optional field, skip err check + refreshToken = tj.RefreshToken + } + // according to spec, servers should respond status 400 in error case + // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 + // but some unorthodox servers respond 200 in error case + if failureStatus || tokError.code != "" { + return nil, refreshToken, tokError + } + if token.Value == "" { + return nil, refreshToken, errors.New("auth: server response missing access_token") + } + return token, refreshToken, nil +} diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 06b957349..967e06074 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15) + + +### Features + +* **compute/metadata:** Add context aware functions 
([#9733](https://github.com/googleapis/google-cloud-go/issues/9733)) ([e4eb5b4](https://github.com/googleapis/google-cloud-go/commit/e4eb5b46ee2aec9d2fc18300bfd66015e25a0510)) + ## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index c17faa142..f67e3c7ee 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -23,7 +23,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -95,9 +95,9 @@ func (c *cachedValue) get(cl *Client) (v string, err error) { return c.v, nil } if c.trim { - v, err = cl.getTrimmed(c.k) + v, err = cl.getTrimmed(context.Background(), c.k) } else { - v, err = cl.Get(c.k) + v, err = cl.GetWithContext(context.Background(), c.k) } if err == nil { c.v = v @@ -197,18 +197,32 @@ func systemInfoSuggestsGCE() bool { // We don't have any non-Linux clues available, at least yet. return false } - slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + slurp, _ := os.ReadFile("/sys/class/dmi/id/product_name") name := strings.TrimSpace(string(slurp)) return name == "Google" || name == "Google Compute Engine" } -// Subscribe calls Client.Subscribe on the default client. +// Subscribe calls Client.SubscribeWithContext on the default client. func Subscribe(suffix string, fn func(v string, ok bool) error) error { - return defaultClient.Subscribe(suffix, fn) + return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) } -// Get calls Client.Get on the default client. -func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } +// SubscribeWithContext calls Client.SubscribeWithContext on the default client. +func SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error { + return defaultClient.SubscribeWithContext(ctx, suffix, fn) +} + +// Get calls Client.GetWithContext on the default client. +// +// Deprecated: Please use the context aware variant [GetWithContext]. +func Get(suffix string) (string, error) { + return defaultClient.GetWithContext(context.Background(), suffix) +} + +// GetWithContext calls Client.GetWithContext on the default client. +func GetWithContext(ctx context.Context, suffix string) (string, error) { + return defaultClient.GetWithContext(ctx, suffix) +} // ProjectID returns the current instance's project ID string. func ProjectID() (string, error) { return defaultClient.ProjectID() } @@ -288,8 +302,7 @@ func NewClient(c *http.Client) *Client { // getETag returns a value from the metadata service as well as the associated ETag. // This func is otherwise equivalent to Get. -func (c *Client) getETag(suffix string) (value, etag string, err error) { - ctx := context.TODO() +func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string, err error) { // Using a fixed IP makes it very difficult to spoof the metadata service in // a container, which is an important use-case for local testing of cloud // deployments. 
To enable spoofing of the metadata service, the environment @@ -306,7 +319,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { } suffix = strings.TrimLeft(suffix, "/") u := "http://" + host + "/computeMetadata/v1/" + suffix - req, err := http.NewRequest("GET", u, nil) + req, err := http.NewRequestWithContext(ctx, "GET", u, nil) if err != nil { return "", "", err } @@ -336,7 +349,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { if res.StatusCode == http.StatusNotFound { return "", "", NotDefinedError(suffix) } - all, err := ioutil.ReadAll(res.Body) + all, err := io.ReadAll(res.Body) if err != nil { return "", "", err } @@ -354,19 +367,33 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { // // If the requested metadata is not defined, the returned error will // be of type NotDefinedError. +// +// Deprecated: Please use the context aware variant [Client.GetWithContext]. func (c *Client) Get(suffix string) (string, error) { - val, _, err := c.getETag(suffix) + return c.GetWithContext(context.Background(), suffix) +} + +// GetWithContext returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) { + val, _, err := c.getETag(ctx, suffix) return val, err } -func (c *Client) getTrimmed(suffix string) (s string, err error) { - s, err = c.Get(suffix) +func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err error) { + s, err = c.GetWithContext(ctx, suffix) s = strings.TrimSpace(s) return } func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.Get(suffix) + j, err := c.GetWithContext(context.Background(), suffix) if err != nil { return nil, err } @@ -388,7 +415,7 @@ func (c *Client) InstanceID() (string, error) { return instID.get(c) } // InternalIP returns the instance's primary internal IP address. func (c *Client) InternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/ip") + return c.getTrimmed(context.Background(), "instance/network-interfaces/0/ip") } // Email returns the email address associated with the service account. @@ -398,25 +425,25 @@ func (c *Client) Email(serviceAccount string) (string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email") + return c.getTrimmed(context.Background(), "instance/service-accounts/"+serviceAccount+"/email") } // ExternalIP returns the instance's primary external (public) IP address. func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") + return c.getTrimmed(context.Background(), "instance/network-interfaces/0/access-configs/0/external-ip") } // Hostname returns the instance's hostname. This will be of the form // ".c..internal". func (c *Client) Hostname() (string, error) { - return c.getTrimmed("instance/hostname") + return c.getTrimmed(context.Background(), "instance/hostname") } // InstanceTags returns the list of user-defined instance tags, // assigned when initially creating a GCE instance. 
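The headline change in this `compute/metadata` bump is the context-aware API: `GetWithContext` threads the caller's context through to the metadata HTTP request, and `Subscribe` gains the same treatment via `SubscribeWithContext`. A sketch of the new entry point, including the `NotDefinedError` (404) case that the universe-domain lookup at the top of this vendor update relies on:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// GetWithContext is the context-aware replacement for the deprecated
	// Get; the context now bounds the request to the metadata server.
	zone, err := metadata.GetWithContext(ctx, "instance/zone")
	var nde metadata.NotDefinedError
	switch {
	case errors.As(err, &nde):
		fmt.Println("metadata key not defined") // server returned 404
	case err != nil:
		fmt.Println("lookup failed:", err) // e.g. not running on GCE
	default:
		fmt.Println("zone:", zone)
	}
}
```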
func (c *Client) InstanceTags() ([]string, error) { var s []string - j, err := c.Get("instance/tags") + j, err := c.GetWithContext(context.Background(), "instance/tags") if err != nil { return nil, err } @@ -428,12 +455,12 @@ func (c *Client) InstanceTags() ([]string, error) { // InstanceName returns the current VM's instance ID string. func (c *Client) InstanceName() (string, error) { - return c.getTrimmed("instance/name") + return c.getTrimmed(context.Background(), "instance/name") } // Zone returns the current VM's zone, such as "us-central1-b". func (c *Client) Zone() (string, error) { - zone, err := c.getTrimmed("instance/zone") + zone, err := c.getTrimmed(context.Background(), "instance/zone") // zone is of the form "projects//zones/". if err != nil { return "", err @@ -460,7 +487,7 @@ func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project // InstanceAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.Get("instance/attributes/" + attr) + return c.GetWithContext(context.Background(), "instance/attributes/"+attr) } // ProjectAttributeValue returns the value of the provided @@ -472,7 +499,7 @@ func (c *Client) InstanceAttributeValue(attr string) (string, error) { // ProjectAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.Get("project/attributes/" + attr) + return c.GetWithContext(context.Background(), "project/attributes/"+attr) } // Scopes returns the service account scopes for the given account. @@ -489,21 +516,30 @@ func (c *Client) Scopes(serviceAccount string) ([]string, error) { // The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". // The suffix may contain query parameters. // -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. +// Deprecated: Please use the context aware variant [Client.SubscribeWithContext]. func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + return c.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) +} + +// SubscribeWithContext subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// SubscribeWithContext calls fn with the latest metadata value indicated by the +// provided suffix. If the metadata value is deleted, fn is called with the +// empty string and ok false. Subscribe blocks until fn returns a non-nil error +// or the value is deleted. Subscribe returns the error value returned from the +// last call to fn, which may be nil when ok == false. +func (c *Client) SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error { const failedSubscribeSleep = time.Second * 5 // First check to see if the metadata value exists at all. 
- val, lastETag, err := c.getETag(suffix) + val, lastETag, err := c.getETag(ctx, suffix) if err != nil { return err } - if err := fn(val, true); err != nil { + if err := fn(ctx, val, true); err != nil { return err } @@ -514,7 +550,7 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro suffix += "?wait_for_change=true&last_etag=" } for { - val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + val, etag, err := c.getETag(ctx, suffix+url.QueryEscape(lastETag)) if err != nil { if _, deleted := err.(NotDefinedError); !deleted { time.Sleep(failedSubscribeSleep) @@ -524,7 +560,7 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro } lastETag = etag - if err := fn(val, ok); err != nil || !ok { + if err := fn(ctx, val, ok); err != nil || !ok { return err } } diff --git a/vendor/cloud.google.com/go/compute/metadata/retry.go b/vendor/cloud.google.com/go/compute/metadata/retry.go index 0f18f3cda..3d4bc75dd 100644 --- a/vendor/cloud.google.com/go/compute/metadata/retry.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry.go @@ -27,7 +27,7 @@ const ( ) var ( - syscallRetryable = func(err error) bool { return false } + syscallRetryable = func(error) bool { return false } ) // defaultBackoff is basically equivalent to gax.Backoff without the need for diff --git a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go deleted file mode 100644 index 4cef48500..000000000 --- a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the {{.RootMod}} import, won't actually become part of -// the resultant binary. -//go:build modhack -// +build modhack - -package metadata - -// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "cloud.google.com/go/compute/internal" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index 5c8411cb5..a6675492b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,56 @@ # Release History +## 1.11.1 (2024-04-02) + +### Bugs Fixed + +* Pollers that use the `Location` header won't consider `http.StatusRequestTimeout` a terminal failure. +* `runtime.Poller[T].Result` won't consider non-terminal error responses as terminal. + +## 1.11.0 (2024-04-01) + +### Features Added + +* Added `StatusCodes` to `arm/policy.RegistrationOptions` to allow supporting non-standard HTTP status codes during registration. 
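For reference, the hunks above give the vendored cloud.google.com/go/compute/metadata package context-aware variants (GetWithContext, SubscribeWithContext) and deprecate the old wrappers, which now fall back to context.Background(). A minimal caller-side sketch against this vendored version; the two-second timeout and the hostname key are illustrative:

package main

import (
	"context"
	"fmt"
	"time"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	c := metadata.NewClient(nil) // nil selects the package's default HTTP client

	// Bound the lookup with a deadline instead of inheriting the
	// context.Background() that the deprecated Get wrapper uses.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	hostname, err := c.GetWithContext(ctx, "instance/hostname")
	if err != nil {
		fmt.Println("metadata lookup failed:", err)
		return
	}
	fmt.Println("hostname:", hostname)
}

SubscribeWithContext also changes the callback signature to fn(ctx, v, ok), so long-lived watch loops can observe cancellation.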
+* Added field `InsecureAllowCredentialWithHTTP` to `azcore.ClientOptions` and dependent authentication pipeline policies. +* Added type `MultipartContent` to the `streaming` package to support multipart/form payloads with custom Content-Type and file name. + +### Bugs Fixed + +* `runtime.SetMultipartFormData` won't try to stringify `[]byte` values. +* Pollers that use the `Location` header won't consider `http.StatusTooManyRequests` a terminal failure. + +### Other Changes + +* Update dependencies. + +## 1.10.0 (2024-02-29) + +### Features Added + +* Added logging event `log.EventResponseError` that will contain the contents of `ResponseError.Error()` whenever an `azcore.ResponseError` is created. +* Added `runtime.NewResponseErrorWithErrorCode` for creating an `azcore.ResponseError` with a caller-supplied error code. +* Added type `MatchConditions` for use in conditional requests. + +### Bugs Fixed + +* Fixed a potential race condition between `NullValue` and `IsNullValue`. +* `runtime.EncodeQueryParams` will escape semicolons before calling `url.ParseQuery`. + +### Other Changes + +* Update dependencies. + +## 1.9.2 (2024-02-06) + +### Bugs Fixed + +* `runtime.MarshalAsByteArray` and `runtime.MarshalAsJSON` will preserve the preexisting value of the `Content-Type` header. + +### Other Changes + +* Update to latest version of `internal`. + ## 1.9.1 (2023-12-11) ### Bugs Fixed diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go index 83cf91e3e..f18caf848 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go @@ -20,6 +20,11 @@ type BearerTokenOptions struct { // policy's credential must support multitenant authentication. AuxiliaryTenants []string + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool + // Scopes contains the list of permission scopes required for the token. Scopes []string } @@ -44,6 +49,11 @@ type RegistrationOptions struct { // The default value is 5 minutes. // NOTE: Setting this to a small value might cause the policy to prematurely fail. PollingDuration time.Duration + + // StatusCodes contains the slice of custom HTTP status codes to use instead + // of the default http.StatusConflict. This should only be set if a service + // returns a non-standard HTTP status code when unregistered. + StatusCodes []int } // ClientOptions contains configuration settings for a client's pipeline.
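Both additions are plain fields on existing options structs, so adopting them is a matter of populating the structs. A hedged sketch, assuming a hypothetical service that signals an unregistered resource provider with a non-standard status code (the 490 below is made up for illustration):

package main

import (
	"net/http"

	armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
)

func exampleARMOptions() (armpolicy.BearerTokenOptions, armpolicy.RegistrationOptions) {
	bearer := armpolicy.BearerTokenOptions{
		// WARNING: sends the token in clear text; only for local test endpoints.
		InsecureAllowCredentialWithHTTP: true,
		Scopes:                          []string{"https://management.azure.com/.default"},
	}
	reg := armpolicy.RegistrationOptions{
		// Treat the hypothetical 490 like the default http.StatusConflict when
		// probing whether the resource provider still needs registration.
		StatusCodes: []int{http.StatusConflict, 490},
	}
	return bearer, reg
}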
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go index 302c19cd4..039b758bf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go @@ -30,8 +30,9 @@ func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azr return azruntime.Pipeline{}, err } authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{ - AuxiliaryTenants: options.AuxiliaryTenants, - Scopes: []string{conf.Audience + "/.default"}, + AuxiliaryTenants: options.AuxiliaryTenants, + InsecureAllowCredentialWithHTTP: options.InsecureAllowCredentialWithHTTP, + Scopes: []string{conf.Audience + "/.default"}, }) perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1) copy(perRetry, plOpts.PerRetry) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go index 54b3bb78d..765fbc684 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go @@ -64,6 +64,7 @@ func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTok p.scopes = make([]string, len(opts.Scopes)) copy(p.scopes, opts.Scopes) p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{ + InsecureAllowCredentialWithHTTP: opts.InsecureAllowCredentialWithHTTP, AuthorizationHandler: azpolicy.AuthorizationHandler{ OnChallenge: p.onChallenge, OnRequest: p.onRequest, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go index 83e15949a..810ac9d9f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go @@ -8,7 +8,6 @@ package runtime import ( "context" - "errors" "fmt" "net/http" "net/url" @@ -16,6 +15,7 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource" armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" @@ -45,6 +45,9 @@ func setDefaults(r *armpolicy.RegistrationOptions) { if r.PollingDuration == 0 { r.PollingDuration = 5 * time.Minute } + if len(r.StatusCodes) == 0 { + r.StatusCodes = []int{http.StatusConflict} + } } // NewRPRegistrationPolicy creates a policy object configured using the specified options. 
@@ -88,7 +91,7 @@ func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) // make the original request resp, err = req.Next() // getting a 409 is the first indication that the RP might need to be registered, check error response - if err != nil || resp.StatusCode != http.StatusConflict { + if err != nil || !runtime.HasStatusCode(resp, r.options.StatusCodes...) { return resp, err } var reqErr requestError @@ -105,17 +108,12 @@ func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) // to the caller so its error unmarshalling will kick in return resp, err } - // RP needs to be registered. start by getting the subscription ID from the original request - subID, err := getSubscription(req.Raw().URL.Path) - if err != nil { - return resp, err - } - // now get the RP from the error - rp, err = getProvider(reqErr) + res, err := resource.ParseResourceID(req.Raw().URL.Path) if err != nil { return resp, err } - logRegistrationExit := func(v interface{}) { + rp = res.ResourceType.Namespace + logRegistrationExit := func(v any) { log.Writef(LogRPRegistration, "END registration for %s: %v", rp, v) } log.Writef(LogRPRegistration, "BEGIN registration for %s", rp) @@ -124,7 +122,7 @@ func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) rpOps := &providersOperations{ p: r.pipeline, u: r.endpoint, - subID: subID, + subID: res.SubscriptionID, } if _, err = rpOps.Register(&shared.ContextWithDeniedValues{Context: req.Raw().Context()}, rp); err != nil { logRegistrationExit(err) @@ -189,36 +187,13 @@ func isUnregisteredRPCode(errorCode string) bool { return false } -func getSubscription(path string) (string, error) { - parts := strings.Split(path, "/") - for i, v := range parts { - if v == "subscriptions" && (i+1) < len(parts) { - return parts[i+1], nil - } - } - return "", fmt.Errorf("failed to obtain subscription ID from %s", path) -} - -func getProvider(re requestError) (string, error) { - if len(re.ServiceError.Details) > 0 { - return re.ServiceError.Details[0].Target, nil - } - return "", errors.New("unexpected empty Details") -} - // minimal error definitions to simplify detection type requestError struct { ServiceError *serviceError `json:"error"` } type serviceError struct { - Code string `json:"code"` - Details []serviceErrorDetails `json:"details"` -} - -type serviceErrorDetails struct { - Code string `json:"code"` - Target string `json:"target"` + Code string `json:"code"` } /////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml index aab921853..99348527b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml @@ -23,7 +23,7 @@ pr: - sdk/azcore/ - eng/ -stages: -- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: ServiceDirectory: azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go index 8eef8633a..9d1c2f0c0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -8,6 +8,7 @@ package azcore import ( "reflect" + "sync" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" @@ -41,13 +42,28 @@ func NewSASCredential(sas string) *SASCredential { } // holds sentinel values used to send nulls -var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{} +var nullables map[reflect.Type]any = map[reflect.Type]any{} +var nullablesMu sync.RWMutex // NullValue is used to send an explicit 'null' within a request. // This is typically used in JSON-MERGE-PATCH operations to delete a value. func NullValue[T any]() T { t := shared.TypeOfT[T]() + + nullablesMu.RLock() v, found := nullables[t] + nullablesMu.RUnlock() + + if found { + // return the sentinel object + return v.(T) + } + + // promote to exclusive lock and check again (double-checked locking pattern) + nullablesMu.Lock() + defer nullablesMu.Unlock() + v, found = nullables[t] + if !found { var o reflect.Value if k := t.Kind(); k == reflect.Map { @@ -72,6 +88,9 @@ func NullValue[T any]() T { func IsNullValue[T any](v T) bool { // see if our map has a sentinel object for this *T t := reflect.TypeOf(v) + nullablesMu.RLock() + defer nullablesMu.RUnlock() + if o, found := nullables[t]; found { o1 := reflect.ValueOf(o) v1 := reflect.ValueOf(v) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go index 23ea7e7c8..2b19d01f7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go @@ -46,3 +46,12 @@ func (e ETag) WeakEquals(other ETag) bool { func (e ETag) IsWeak() bool { return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"") } + +// MatchConditions specifies HTTP options for conditional requests. +type MatchConditions struct { + // Optionally limit requests to resources that have a matching ETag. + IfMatch *ETag + + // Optionally limit requests to resources that do not match the ETag. + IfNoneMatch *ETag +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go index 659f2a7d2..3041984d9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -51,15 +51,15 @@ type Request struct { values opValues } -type opValues map[reflect.Type]interface{} +type opValues map[reflect.Type]any // Set adds/changes a value -func (ov opValues) set(value interface{}) { +func (ov opValues) set(value any) { ov[reflect.TypeOf(value)] = value } // Get looks for a value set by SetValue first -func (ov opValues) get(value interface{}) bool { +func (ov opValues) get(value any) bool { v, ok := ov[reflect.ValueOf(value).Elem().Type()] if ok { reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v)) @@ -108,7 +108,7 @@ func (req *Request) Next() (*http.Response, error) { } // SetOperationValue adds/changes a mutable key/value associated with a single operation. 
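The nullables map in core.go is now guarded by a sync.RWMutex with a double-checked lock on the slow path, so NullValue and IsNullValue are safe to call from concurrent goroutines. A sketch of the sentinel pattern they implement, using a hypothetical PATCH model for illustration:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

// WidgetUpdate is a hypothetical JSON-merge-patch payload.
type WidgetUpdate struct {
	Name *string            `json:"name,omitempty"`
	Tags map[string]*string `json:"tags,omitempty"`
}

func main() {
	upd := WidgetUpdate{
		// NullValue returns a per-type sentinel; marshalers that check
		// IsNullValue can emit an explicit JSON null to delete the field.
		Tags: azcore.NullValue[map[string]*string](),
	}
	fmt.Println("tags marked for deletion:", azcore.IsNullValue(upd.Tags))

	var untouched WidgetUpdate
	fmt.Println("zero value is not the sentinel:", azcore.IsNullValue(untouched.Tags)) // false
}

The new MatchConditions type in etag.go is, by contrast, a plain pair of *ETag fields for If-Match/If-None-Match conditional requests and needs no special handling.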
-func (req *Request) SetOperationValue(value interface{}) { +func (req *Request) SetOperationValue(value any) { if req.values == nil { req.values = opValues{} } @@ -116,7 +116,7 @@ func (req *Request) SetOperationValue(value interface{}) { } // OperationValue looks for a value set by SetOperationValue(). -func (req *Request) OperationValue(value interface{}) bool { +func (req *Request) OperationValue(value any) bool { if req.values == nil { return false } @@ -125,46 +125,11 @@ func (req *Request) OperationValue(value interface{}) bool { // SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length // accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "", -// Content-Type won't be set. +// Content-Type won't be set, and if it was set, will be deleted. // Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser. func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error { - var err error - var size int64 - if body != nil { - size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size - if err != nil { - return err - } - } - if size == 0 { - // treat an empty stream the same as a nil one: assign req a nil body - body = nil - // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content - // (Del is a no-op when the header has no value) - req.req.Header.Del(shared.HeaderContentLength) - } else { - _, err = body.Seek(0, io.SeekStart) - if err != nil { - return err - } - req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) - req.Raw().GetBody = func() (io.ReadCloser, error) { - _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream - return body, err - } - } - // keep a copy of the body argument. this is to handle cases - // where req.Body is replaced, e.g. httputil.DumpRequest and friends. - req.body = body - req.req.Body = body - req.req.ContentLength = size - if contentType == "" { - // Del is a no-op when the header has no value - req.req.Header.Del(shared.HeaderContentType) - } else { - req.req.Header.Set(shared.HeaderContentType, contentType) - } - return nil + // clobber the existing Content-Type to preserve behavior + return SetBody(req, body, contentType, true) } // RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation. @@ -211,3 +176,48 @@ type PolicyFunc func(*Request) (*http.Response, error) func (pf PolicyFunc) Do(req *Request) (*http.Response, error) { return pf(req) } + +// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length accordingly. 
+// - req is the request to modify +// - body is the request body; if nil or empty, Content-Length won't be set +// - contentType is the value for the Content-Type header; if empty, Content-Type will be deleted +// - clobberContentType when true, will overwrite the existing value of Content-Type with contentType +func SetBody(req *Request, body io.ReadSeekCloser, contentType string, clobberContentType bool) error { + var err error + var size int64 + if body != nil { + size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size + if err != nil { + return err + } + } + if size == 0 { + // treat an empty stream the same as a nil one: assign req a nil body + body = nil + // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content + // (Del is a no-op when the header has no value) + req.req.Header.Del(shared.HeaderContentLength) + } else { + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return err + } + req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) + req.Raw().GetBody = func() (io.ReadCloser, error) { + _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream + return body, err + } + } + // keep a copy of the body argument. this is to handle cases + // where req.Body is replaced, e.g. httputil.DumpRequest and friends. + req.body = body + req.req.Body = body + req.req.ContentLength = size + if contentType == "" { + // Del is a no-op when the header has no value + req.req.Header.Del(shared.HeaderContentType) + } else if req.req.Header.Get(shared.HeaderContentType) == "" || clobberContentType { + req.req.Header.Set(shared.HeaderContentType, contentType) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go index f24355288..08a954587 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -13,6 +13,7 @@ import ( "net/http" "regexp" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" ) @@ -20,36 +21,45 @@ import ( // NewResponseError creates a new *ResponseError from the provided HTTP response. // Exported as runtime.NewResponseError(). func NewResponseError(resp *http.Response) error { - respErr := &ResponseError{ - StatusCode: resp.StatusCode, - RawResponse: resp, - } - // prefer the error code in the response header if ec := resp.Header.Get(shared.HeaderXMSErrorCode); ec != "" { - respErr.ErrorCode = ec - return respErr + return NewResponseErrorWithErrorCode(resp, ec) } // if we didn't get x-ms-error-code, check in the response body body, err := exported.Payload(resp, nil) if err != nil { + // since we're not returning the ResponseError in this + // case we also don't want to write it to the log. 
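The clobberContentType parameter above is what lets runtime.MarshalAsJSON and MarshalAsByteArray (updated further down in request.go, where they pass false) preserve a preexisting Content-Type instead of overwriting it, per the 1.9.2 changelog entry. A hedged sketch of the caller-visible behavior; the endpoint is hypothetical:

package main

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func buildMergePatch(ctx context.Context) error {
	req, err := runtime.NewRequest(ctx, http.MethodPatch, "https://example.contoso.com/widgets/1")
	if err != nil {
		return err
	}
	// Pre-set a specific media type...
	req.Raw().Header.Set("Content-Type", "application/merge-patch+json")
	// ...which MarshalAsJSON now keeps, rather than clobbering it with application/json.
	return runtime.MarshalAsJSON(req, map[string]any{"name": nil})
}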
return err } + var errorCode string if len(body) > 0 { - if code := extractErrorCodeJSON(body); code != "" { - respErr.ErrorCode = code - } else if code := extractErrorCodeXML(body); code != "" { - respErr.ErrorCode = code + if fromJSON := extractErrorCodeJSON(body); fromJSON != "" { + errorCode = fromJSON + } else if fromXML := extractErrorCodeXML(body); fromXML != "" { + errorCode = fromXML } } + return NewResponseErrorWithErrorCode(resp, errorCode) +} + +// NewResponseErrorWithErrorCode creates an *azcore.ResponseError from the provided HTTP response and errorCode. +// Exported as runtime.NewResponseErrorWithErrorCode(). +func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error { + respErr := &ResponseError{ + ErrorCode: errorCode, + StatusCode: resp.StatusCode, + RawResponse: resp, + } + log.Write(log.EventResponseError, respErr.Error()) return respErr } func extractErrorCodeJSON(body []byte) string { - var rawObj map[string]interface{} + var rawObj map[string]any if err := json.Unmarshal(body, &rawObj); err != nil { // not a JSON object return "" @@ -58,7 +68,7 @@ func extractErrorCodeJSON(body []byte) string { // check if this is a wrapped error, i.e. { "error": { ... } } // if so then unwrap it if wrapped, ok := rawObj["error"]; ok { - unwrapped, ok := wrapped.(map[string]interface{}) + unwrapped, ok := wrapped.(map[string]any) if !ok { return "" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go index 0684cb317..6fc6d1400 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go @@ -15,24 +15,36 @@ import ( type Event = log.Event const ( - EventRequest = azlog.EventRequest - EventResponse = azlog.EventResponse - EventRetryPolicy = azlog.EventRetryPolicy - EventLRO = azlog.EventLRO + EventRequest = azlog.EventRequest + EventResponse = azlog.EventResponse + EventResponseError = azlog.EventResponseError + EventRetryPolicy = azlog.EventRetryPolicy + EventLRO = azlog.EventLRO ) +// Write invokes the underlying listener with the specified event and message. +// If the event shouldn't be logged or there is no listener then Write does nothing. func Write(cls log.Event, msg string) { log.Write(cls, msg) } -func Writef(cls log.Event, format string, a ...interface{}) { +// Writef invokes the underlying listener with the specified event and formatted message. +// If the event shouldn't be logged or there is no listener then Writef does nothing. +func Writef(cls log.Event, format string, a ...any) { log.Writef(cls, format, a...) } +// SetListener will set the Logger to write to the specified listener. func SetListener(lst func(Event, string)) { log.SetListener(lst) } +// Should returns true if the specified log event should be written to the log. +// By default all log events will be logged. Call SetEvents() to limit +// the log events for logging. +// If no listener has been set this will return false. +// Calling this method is useful when the message to log is computationally expensive +// and you want to avoid the overhead if its log event is not enabled. 
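With EventResponseError wired through, consumers opt in via the public azcore/log package (the internal package above only forwards to it). A small sketch, assuming stdout logging is acceptable:

package main

import (
	"fmt"

	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
)

func main() {
	// Route SDK log messages to stdout.
	azlog.SetListener(func(ev azlog.Event, msg string) {
		fmt.Printf("[%s] %s\n", ev, msg)
	})
	// Restrict output to the new response-error event plus retry details.
	azlog.SetEvents(azlog.EventResponseError, azlog.EventRetryPolicy)
}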
func Should(cls log.Event) bool { return log.Should(cls) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go index b05bd8b38..ccd4794e9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -27,7 +27,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { _, ok := token["asyncURL"] return ok } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go index 2bb9e105b..0d781b31d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go @@ -29,7 +29,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { t, ok := token["type"] if !ok { return false diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go index 259834718..51aede8a2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go @@ -26,7 +26,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { _, ok := token["fakeURL"] return ok } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go index d6be89876..7a56c5211 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -28,7 +28,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { t, ok := token["type"] if !ok { return false @@ -103,6 +103,10 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { } else if resp.StatusCode > 199 && resp.StatusCode < 300 { // any 2xx other than a 202 indicates success p.CurState = poller.StatusSucceeded + } else if pollers.IsNonTerminalHTTPStatusCode(resp) { + // the request timed out or is being throttled. + // DO NOT include this as a terminal failure. preserve + // the existing state and return the response. 
} else { p.CurState = poller.StatusFailed } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go index 1bc7ad0ac..ac1c0efb5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -25,7 +25,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { _, ok := token["oplocURL"] return ok } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go index d8d86a46c..eb3cf651d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -74,7 +74,7 @@ func ExtractToken(token string) ([]byte, error) { // IsTokenValid returns an error if the specified token isn't applicable for generic type T. func IsTokenValid[T any](token string) error { - raw := map[string]interface{}{} + raw := map[string]any{} if err := json.Unmarshal([]byte(token), &raw); err != nil { return err } @@ -185,3 +185,16 @@ func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { } return nil } + +// IsNonTerminalHTTPStatusCode returns true if the HTTP status code should be +// considered non-terminal thus eligible for retry. +func IsNonTerminalHTTPStatusCode(resp *http.Response) bool { + return exported.HasStatusCode(resp, + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + ) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index bb93daee6..03691cbf0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.9.1" + Version = "v1.11.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go index 7bde29d0a..f260dac36 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go @@ -23,6 +23,11 @@ const ( // This includes information like the HTTP status code, headers, and request URL. EventResponse Event = "Response" + // EventResponseError entries contain information about HTTP responses that returned + // an *azcore.ResponseError (i.e. responses with a non 2xx HTTP status code). + // This includes the contents of ResponseError.Error(). 
+ EventResponseError Event = "ResponseError" + // EventRetryPolicy entries contain information specific to the retry policy in use. EventRetryPolicy Event = "Retry" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index d934f1dc5..8d9845358 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -39,6 +39,11 @@ type ClientOptions struct { // Cloud specifies a cloud for the client. The default is Azure Public Cloud. Cloud cloud.Configuration + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the credential in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool + // Logging configures the built-in logging policy. Logging LogOptions @@ -147,6 +152,11 @@ type BearerTokenOptions struct { // When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from // its given credential. AuthorizationHandler AuthorizationHandler + + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the bearer token in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool } // AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go index 6d03b291e..c0d56158e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go @@ -14,6 +14,14 @@ import ( // NewResponseError creates an *azcore.ResponseError from the provided HTTP response. // Call this when a service request returns a non-successful status code. +// The error code will be extracted from the *http.Response, either from the x-ms-error-code +// header (preferred) or attempted to be parsed from the response body. func NewResponseError(resp *http.Response) error { return exported.NewResponseError(resp) } + +// NewResponseErrorWithErrorCode creates an *azcore.ResponseError from the provided HTTP response and errorCode. +// Use this variant when the error code is in a non-standard location. 
+func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error { + return exported.NewResponseErrorWithErrorCode(resp, errorCode) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go index f0f280355..cb2a69528 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -24,6 +24,7 @@ type BearerTokenPolicy struct { authzHandler policy.AuthorizationHandler cred exported.TokenCredential scopes []string + allowHTTP bool } type acquiringResourceState struct { @@ -55,6 +56,7 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts * cred: cred, scopes: scopes, mainResource: temporal.NewResource(acquire), + allowHTTP: opts.InsecureAllowCredentialWithHTTP, } } @@ -80,7 +82,7 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { return req.Next() } - if err := checkHTTPSForAuth(req); err != nil { + if err := checkHTTPSForAuth(req, b.allowHTTP); err != nil { return nil, err } @@ -113,8 +115,8 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { return res, err } -func checkHTTPSForAuth(req *policy.Request) error { - if strings.ToLower(req.Raw().URL.Scheme) != "https" { +func checkHTTPSForAuth(req *policy.Request, allowHTTP bool) error { + if strings.ToLower(req.Raw().URL.Scheme) != "https" && !allowHTTP { return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints")) } return nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go index 6f577fa7a..eeb1c09cc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go @@ -12,13 +12,19 @@ import ( // KeyCredentialPolicy authorizes requests with a [azcore.KeyCredential]. type KeyCredentialPolicy struct { - cred *exported.KeyCredential - header string - prefix string + cred *exported.KeyCredential + header string + prefix string + allowHTTP bool } // KeyCredentialPolicyOptions contains the optional values configuring [KeyCredentialPolicy]. type KeyCredentialPolicyOptions struct { + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool + // Prefix is used if the key requires a prefix before it's inserted into the HTTP request. 
Prefix string } @@ -32,9 +38,10 @@ func NewKeyCredentialPolicy(cred *exported.KeyCredential, header string, options options = &KeyCredentialPolicyOptions{} } return &KeyCredentialPolicy{ - cred: cred, - header: header, - prefix: options.Prefix, + cred: cred, + header: header, + prefix: options.Prefix, + allowHTTP: options.InsecureAllowCredentialWithHTTP, } } @@ -44,7 +51,7 @@ func (k *KeyCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { // this prevents a panic that might be hard to diagnose and allows testing // against http endpoints that don't require authentication. if k.cred != nil { - if err := checkHTTPSForAuth(req); err != nil { + if err := checkHTTPSForAuth(req, k.allowHTTP); err != nil { return nil, err } val := exported.KeyCredentialGet(k.cred) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go index ebe2b7772..3964beea8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go @@ -12,13 +12,17 @@ import ( // SASCredentialPolicy authorizes requests with a [azcore.SASCredential]. type SASCredentialPolicy struct { - cred *exported.SASCredential - header string + cred *exported.SASCredential + header string + allowHTTP bool } // SASCredentialPolicyOptions contains the optional values configuring [SASCredentialPolicy]. type SASCredentialPolicyOptions struct { - // placeholder for future optional values + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool } // NewSASCredentialPolicy creates a new instance of [SASCredentialPolicy]. @@ -26,9 +30,13 @@ type SASCredentialPolicyOptions struct { // - header is the name of the HTTP request header in which the shared access signature is placed // - options contains optional configuration, pass nil to accept the default values func NewSASCredentialPolicy(cred *exported.SASCredential, header string, options *SASCredentialPolicyOptions) *SASCredentialPolicy { + if options == nil { + options = &SASCredentialPolicyOptions{} + } return &SASCredentialPolicy{ - cred: cred, - header: header, + cred: cred, + header: header, + allowHTTP: options.InsecureAllowCredentialWithHTTP, } } @@ -38,7 +46,7 @@ func (k *SASCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { // this prevents a panic that might be hard to diagnose and allows testing // against http endpoints that don't require authentication. 
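The same allowHTTP plumbing shown for the bearer token policy applies to the key and SAS credential policies. A hedged sketch for the key variant; the header name and key value are illustrative:

package main

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func newLocalTestKeyPolicy() *runtime.KeyCredentialPolicy {
	cred := azcore.NewKeyCredential("local-dev-key")
	return runtime.NewKeyCredentialPolicy(cred, "x-api-key", &runtime.KeyCredentialPolicyOptions{
		// WARNING: the key travels in clear text; use only against a local
		// emulator or test double reached over http://.
		InsecureAllowCredentialWithHTTP: true,
	})
}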
if k.cred != nil { - if err := checkHTTPSForAuth(req); err != nil { + if err := checkHTTPSForAuth(req, k.allowHTTP); err != nil { return nil, err } req.Raw().Header.Add(k.header, exported.SASCredentialGet(k.cred)) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go index c373f6896..03f76c9aa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -154,7 +154,7 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options if err != nil { return nil, err } - var asJSON map[string]interface{} + var asJSON map[string]any if err := json.Unmarshal(raw, &asJSON); err != nil { return nil, err } @@ -240,7 +240,7 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt } start := time.Now() - logPollUntilDoneExit := func(v interface{}) { + logPollUntilDoneExit := func(v any) { log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start)) } log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op) @@ -334,6 +334,11 @@ func (p *Poller[T]) Result(ctx context.Context) (res T, err error) { err = p.op.Result(ctx, p.result) var respErr *exported.ResponseError if errors.As(err, &respErr) { + if pollers.IsNonTerminalHTTPStatusCode(respErr.RawResponse) { + // the request failed in a non-terminal way. + // don't cache the error or mark the Poller as done + return + } // the LRO failed. record the error p.err = err } else if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go index e97223da2..06ac95b1b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -11,9 +11,11 @@ import ( "context" "encoding/json" "encoding/xml" + "errors" "fmt" "io" "mime/multipart" + "net/textproto" "net/url" "path" "strings" @@ -21,6 +23,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" ) // Base64Encoding is used to specify which base-64 encoder/decoder to use when @@ -42,12 +45,19 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*polic } // EncodeQueryParams will parse and encode any query parameters in the specified URL. +// Any semicolons will automatically be escaped. func EncodeQueryParams(u string) (string, error) { before, after, found := strings.Cut(u, "?") if !found { return u, nil } - qp, err := url.ParseQuery(after) + // starting in Go 1.17, url.ParseQuery will reject semicolons in query params. + // so, we must escape them first. note that this assumes that semicolons aren't + // being used as query param separators which is per the current RFC.
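Poller[T].Result above now returns early, without caching the error or marking the poller done, when the response carries one of the transient codes from pollers.IsNonTerminalHTTPStatusCode. That helper is internal; for code that inspects responses itself, a standalone sketch of the same classification:

package main

import "net/http"

// isNonTerminal mirrors the status codes the vendored azcore treats as
// retryable rather than terminal for long-running operations.
func isNonTerminal(statusCode int) bool {
	switch statusCode {
	case http.StatusRequestTimeout, // 408
		http.StatusTooManyRequests,     // 429
		http.StatusInternalServerError, // 500
		http.StatusBadGateway,          // 502
		http.StatusServiceUnavailable,  // 503
		http.StatusGatewayTimeout:      // 504
		return true
	}
	return false
}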
+ // for more info: + // https://github.com/golang/go/issues/25192 + // https://github.com/golang/go/issues/50034 + qp, err := url.ParseQuery(strings.ReplaceAll(after, ";", "%3B")) if err != nil { return "", err } @@ -97,20 +107,22 @@ func EncodeByteArray(v []byte, format Base64Encoding) string { func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error { // send as a JSON string encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format)) - return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON) + // tsp generated code can set Content-Type so we must prefer that + return exported.SetBody(req, exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON, false) } // MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. -func MarshalAsJSON(req *policy.Request, v interface{}) error { +func MarshalAsJSON(req *policy.Request, v any) error { b, err := json.Marshal(v) if err != nil { return fmt.Errorf("error marshalling type %T: %s", v, err) } - return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON) + // tsp generated code can set Content-Type so we must prefer that + return exported.SetBody(req, exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON, false) } // MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody. -func MarshalAsXML(req *policy.Request, v interface{}) error { +func MarshalAsXML(req *policy.Request, v any) error { b, err := xml.Marshal(v) if err != nil { return fmt.Errorf("error marshalling type %T: %s", v, err) @@ -120,10 +132,10 @@ func MarshalAsXML(req *policy.Request, v interface{}) error { return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML) } -// SetMultipartFormData writes the specified keys/values as multi-part form -// fields with the specified value. File content must be specified as a ReadSeekCloser. -// All other values are treated as string values. -func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error { +// SetMultipartFormData writes the specified keys/values as multi-part form fields with the specified value. +// File content must be specified as an [io.ReadSeekCloser] or [streaming.MultipartContent]. +// Byte slices will be treated as JSON. All other values are treated as string values. 
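The rewritten SetMultipartFormData (implementation continues below) lets callers pin a part's Content-Type and filename via streaming.MultipartContent, and sends []byte values as application/json. A hedged usage sketch; the endpoint, field names, and file are illustrative:

package main

import (
	"context"
	"net/http"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
)

func uploadForm(ctx context.Context) error {
	f, err := os.Open("report.pdf")
	if err != nil {
		return err
	}
	defer f.Close()

	req, err := runtime.NewRequest(ctx, http.MethodPost, "https://example.contoso.com/upload")
	if err != nil {
		return err
	}
	return runtime.SetMultipartFormData(req, map[string]any{
		"metadata": []byte(`{"kind":"report"}`), // []byte parts are sent as application/json
		"file": streaming.MultipartContent{
			Body:        f, // *os.File satisfies io.ReadSeekCloser
			ContentType: "application/pdf",
			Filename:    "report.pdf",
		},
	})
}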
+func SetMultipartFormData(req *policy.Request, formData map[string]any) error { body := bytes.Buffer{} writer := multipart.NewWriter(&body) @@ -139,6 +151,60 @@ func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) return nil } + quoteEscaper := strings.NewReplacer("\\", "\\\\", `"`, "\\\"") + + writeMultipartContent := func(fieldname string, mpc streaming.MultipartContent) error { + if mpc.Body == nil { + return errors.New("streaming.MultipartContent.Body cannot be nil") + } + + // use fieldname for the file name when unspecified + filename := fieldname + + if mpc.ContentType == "" && mpc.Filename == "" { + return writeContent(fieldname, filename, mpc.Body) + } + if mpc.Filename != "" { + filename = mpc.Filename + } + // this is pretty much copied from multipart.Writer.CreateFormFile + // but lets us set the caller provided Content-Type and filename + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"; filename="%s"`, + quoteEscaper.Replace(fieldname), quoteEscaper.Replace(filename))) + contentType := "application/octet-stream" + if mpc.ContentType != "" { + contentType = mpc.ContentType + } + h.Set("Content-Type", contentType) + fd, err := writer.CreatePart(h) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, mpc.Body); err != nil { + return err + } + return nil + } + + // the same as multipart.Writer.WriteField but lets us specify the Content-Type + writeField := func(fieldname, contentType string, value string) error { + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"`, quoteEscaper.Replace(fieldname))) + h.Set("Content-Type", contentType) + fd, err := writer.CreatePart(h) + if err != nil { + return err + } + if _, err = fd.Write([]byte(value)); err != nil { + return err + } + return nil + } + for k, v := range formData { if rsc, ok := v.(io.ReadSeekCloser); ok { if err := writeContent(k, k, rsc); err != nil { @@ -152,13 +218,35 @@ func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) } } continue + } else if mpc, ok := v.(streaming.MultipartContent); ok { + if err := writeMultipartContent(k, mpc); err != nil { + return err + } + continue + } else if mpcs, ok := v.([]streaming.MultipartContent); ok { + for _, mpc := range mpcs { + if err := writeMultipartContent(k, mpc); err != nil { + return err + } + } + continue } - // ensure the value is in string format - s, ok := v.(string) - if !ok { - s = fmt.Sprintf("%v", v) + + var content string + contentType := shared.ContentTypeTextPlain + switch tt := v.(type) { + case []byte: + // JSON, don't quote it + content = string(tt) + contentType = shared.ContentTypeAppJSON + case string: + content = tt + default: + // ensure the value is in string format + content = fmt.Sprintf("%v", v) } - if err := writer.WriteField(k, s); err != nil { + + if err := writeField(k, contentType, content); err != nil { return err } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go index 003c875b1..048566e02 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -40,7 +40,7 @@ func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) } // 
UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v. -func UnmarshalAsJSON(resp *http.Response, v interface{}) error { +func UnmarshalAsJSON(resp *http.Response, v any) error { payload, err := Payload(resp) if err != nil { return err @@ -61,7 +61,7 @@ func UnmarshalAsJSON(resp *http.Response, v interface{}) error { } // UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v. -func UnmarshalAsXML(resp *http.Response, v interface{}) error { +func UnmarshalAsXML(resp *http.Response, v any) error { payload, err := Payload(resp) if err != nil { return err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go index fbcd48311..2468540bd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go @@ -73,3 +73,17 @@ func (p *progress) Seek(offset int64, whence int) (int64, error) { func (p *progress) Close() error { return p.rc.Close() } + +// MultipartContent contains streaming content used in multipart/form payloads. +type MultipartContent struct { + // Body contains the required content body. + Body io.ReadSeekCloser + + // ContentType optionally specifies the HTTP Content-Type for this Body. + // The default value is application/octet-stream. + ContentType string + + // Filename optionally specifies the filename for this Body. + // The default value is the field name for the multipart/form section. + Filename string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index 71dcb5f3e..f6749c030 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,14 @@ # Release History +## 1.5.2 (2024-04-09) + +### Bugs Fixed +* `ManagedIdentityCredential` now specifies resource IDs correctly for Azure Container Instances + +### Other Changes +* Restored v1.4.0 error behavior for empty tenant IDs +* Upgraded dependencies + ## 1.5.1 (2024-01-17) ### Bugs Fixed diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json index 173ce2b3c..1be55a4bd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/azidentity", - "Tag": "go/azidentity_db4a26f583" + "Tag": "go/azidentity_98074050dc" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index c3bcfb56c..b0965036b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -121,6 +121,9 @@ func alphanumeric(r rune) bool { } func 
validTenantID(tenantID string) bool { + if len(tenantID) < 1 { + return false + } for _, r := range tenantID { if !(alphanumeric(r) || r == '.' || r == '-') { return false diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum index 7cd86b001..65bcba7df 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum @@ -3,8 +3,6 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9an github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -14,6 +12,7 @@ github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -23,6 +22,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -33,6 +34,7 @@ golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= 
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index 7c25cb8bd..d129a1e91 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -34,14 +34,14 @@ const ( identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT" headerMetadata = "Metadata" imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + miResID = "mi_res_id" msiEndpoint = "MSI_ENDPOINT" + msiResID = "msi_res_id" msiSecret = "MSI_SECRET" imdsAPIVersion = "2018-02-01" azureArcAPIVersion = "2019-08-15" + qpClientID = "client_id" serviceFabricAPIVersion = "2019-07-01-preview" - - qpClientID = "client_id" - qpResID = "mi_res_id" ) type msiType int @@ -286,7 +286,7 @@ func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id Ma q.Add("resource", strings.Join(scopes, " ")) if id != nil { if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(msiResID, id.String()) } else { q.Add(qpClientID, id.String()) } @@ -306,7 +306,7 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, q.Add("resource", scopes[0]) if id != nil { if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(miResID, id.String()) } else { q.Add(qpClientID, id.String()) } @@ -329,7 +329,7 @@ func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id if id.idKind() == miResourceID { log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID") q.Set("clientid", "") - q.Set(qpResID, id.String()) + q.Set(miResID, id.String()) } else { q.Set("clientid", id.String()) } @@ -351,7 +351,7 @@ func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Conte if id != nil { log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime") if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(miResID, id.String()) } else { q.Add(qpClientID, id.String()) } @@ -411,7 +411,7 @@ func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, i if id != nil { log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities") if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(miResID, id.String()) } else { q.Add(qpClientID, id.String()) } @@ -437,7 +437,7 @@ func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities") q := request.Raw().URL.Query() if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(miResID, id.String()) } else { q.Add(qpClientID, id.String()) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index e8caeea71..9b9d7ae0d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,5 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module. - version = "v1.5.1" + version = "v1.5.2" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go index d4ed6ccc8..9948f604b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go @@ -39,6 +39,11 @@ type PayloadOptions struct { // Subsequent reads will access the cached value. // Exported as runtime.Payload() WITHOUT the opts parameter. func Payload(resp *http.Response, opts *PayloadOptions) ([]byte, error) { + if resp.Body == nil { + // this shouldn't happen in real-world scenarios as a + // response with no body should set it to http.NoBody + return nil, nil + } modifyBytes := func(b []byte) []byte { return b } if opts != nil && opts.BytesModifier != nil { modifyBytes = opts.BytesModifier diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md index b19244d46..b363a53ef 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md @@ -1,5 +1,40 @@ # Release History +## 5.7.0 (2024-04-26) +### Features Added + +- New value `DiffDiskPlacementNvmeDisk` added to enum type `DiffDiskPlacement` +- New value `DiskCreateOptionTypesCopy`, `DiskCreateOptionTypesRestore` added to enum type `DiskCreateOptionTypes` +- New enum type `ResourceIDOptionsForGetCapacityReservationGroups` with values `ResourceIDOptionsForGetCapacityReservationGroupsAll`, `ResourceIDOptionsForGetCapacityReservationGroupsCreatedInSubscription`, `ResourceIDOptionsForGetCapacityReservationGroupsSharedWithSubscription` +- New struct `EventGridAndResourceGraph` +- New struct `ScheduledEventsAdditionalPublishingTargets` +- New struct `ScheduledEventsPolicy` +- New struct `UserInitiatedReboot` +- New struct `UserInitiatedRedeploy` +- New field `ResourceIDsOnly` in struct `CapacityReservationGroupsClientListBySubscriptionOptions` +- New field `SourceResource` in struct `DataDisk` +- New field `Caching`, `DeleteOption`, `DiskEncryptionSet`, `WriteAcceleratorEnabled` in struct `DataDisksToAttach` +- New field `ScheduledEventsPolicy` in struct `VirtualMachineProperties` +- New field `ScheduledEventsPolicy` in struct `VirtualMachineScaleSetProperties` +- New field `ForceUpdateOSDiskForEphemeral` in struct `VirtualMachineScaleSetReimageParameters` +- New field `DiffDiskSettings` in struct `VirtualMachineScaleSetUpdateOSDisk` +- New field `ForceUpdateOSDiskForEphemeral` in struct `VirtualMachineScaleSetVMReimageParameters` + + +## 5.6.0 (2024-03-22) +### Features Added + +- New field 
`VirtualMachineID` in struct `GalleryArtifactVersionFullSource` + + +## 5.5.0 (2024-01-26) +### Features Added + +- New value `DiskSecurityTypesConfidentialVMNonPersistedTPM` added to enum type `DiskSecurityTypes` +- New enum type `ProvisionedBandwidthCopyOption` with values `ProvisionedBandwidthCopyOptionEnhanced`, `ProvisionedBandwidthCopyOptionNone` +- New field `ProvisionedBandwidthCopySpeed` in struct `CreationData` + + ## 5.4.0 (2023-12-22) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md index 7c8d1e3e1..c4bf8ab34 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md @@ -57,7 +57,7 @@ clientFactory, err := armcompute.NewClientFactory(<subscription ID>, cred, &options) A client groups a set of related APIs, providing access to its functionality. Create one or more clients to access the APIs you require using client factory. ```go -client := clientFactory.NewLogAnalyticsClient() +client := clientFactory.NewAvailabilitySetsClient() ``` ## Fakes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json index 1feae4c5c..db9b10d43 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/resourcemanager/compute/armcompute", - "Tag": "go/resourcemanager/compute/armcompute_323718962d" + "Tag": "go/resourcemanager/compute/armcompute_6e7bd6d107" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md index 9d88c8a6d..5f836a658 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md @@ -5,9 +5,9 @@ ``` yaml azure-arm: true require: -- https://github.com/Azure/azure-rest-api-specs/blob/60679ee3db06e93eb73faa0587fed93ed843d6dc/specification/compute/resource-manager/readme.md -- https://github.com/Azure/azure-rest-api-specs/blob/60679ee3db06e93eb73faa0587fed93ed843d6dc/specification/compute/resource-manager/readme.go.md +- https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/compute/resource-manager/readme.md +- https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/compute/resource-manager/readme.go.md license-header: MICROSOFT_MIT_NO_VERSION -module-version: 5.4.0 -tag: package-2023-09-01 +module-version: 5.7.0 +tag: package-2024-03-01 ``` diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go index 3b568f54a..b9b8536f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go @@ -47,7 +47,7 @@ func NewAvailabilitySetsClient(subscriptionID string, credential azcore.TokenCre // CreateOrUpdate - Create or update an availability set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. // - parameters - Parameters supplied to the Create Availability Set operation. @@ -95,7 +95,7 @@ func (client *AvailabilitySetsClient) createOrUpdateCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -116,7 +116,7 @@ func (client *AvailabilitySetsClient) createOrUpdateHandleResponse(resp *http.Re // Delete - Delete an availability set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. // - options - AvailabilitySetsClientDeleteOptions contains the optional parameters for the AvailabilitySetsClient.Delete method. @@ -161,7 +161,7 @@ func (client *AvailabilitySetsClient) deleteCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -170,7 +170,7 @@ func (client *AvailabilitySetsClient) deleteCreateRequest(ctx context.Context, r // Get - Retrieves information about an availability set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. // - options - AvailabilitySetsClientGetOptions contains the optional parameters for the AvailabilitySetsClient.Get method. @@ -216,7 +216,7 @@ func (client *AvailabilitySetsClient) getCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -233,7 +233,7 @@ func (client *AvailabilitySetsClient) getHandleResponse(resp *http.Response) (Av // NewListPager - Lists all availability sets in a resource group. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - AvailabilitySetsClientListOptions contains the optional parameters for the AvailabilitySetsClient.NewListPager // method. @@ -276,7 +276,7 @@ func (client *AvailabilitySetsClient) listCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -294,7 +294,7 @@ func (client *AvailabilitySetsClient) listHandleResponse(resp *http.Response) (A // NewListAvailableSizesPager - Lists all available virtual machine sizes that can be used to create a new virtual machine // in an existing availability set. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. // - options - AvailabilitySetsClientListAvailableSizesOptions contains the optional parameters for the AvailabilitySetsClient.NewListAvailableSizesPager @@ -343,7 +343,7 @@ func (client *AvailabilitySetsClient) listAvailableSizesCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -360,7 +360,7 @@ func (client *AvailabilitySetsClient) listAvailableSizesHandleResponse(resp *htt // NewListBySubscriptionPager - Lists all availability sets in a subscription. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - AvailabilitySetsClientListBySubscriptionOptions contains the optional parameters for the AvailabilitySetsClient.NewListBySubscriptionPager // method. func (client *AvailabilitySetsClient) NewListBySubscriptionPager(options *AvailabilitySetsClientListBySubscriptionOptions) *runtime.Pager[AvailabilitySetsClientListBySubscriptionResponse] { @@ -398,10 +398,10 @@ func (client *AvailabilitySetsClient) listBySubscriptionCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -419,7 +419,7 @@ func (client *AvailabilitySetsClient) listBySubscriptionHandleResponse(resp *htt // Update - Update an availability set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. // - parameters - Parameters supplied to the Update Availability Set operation. 
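The hunks in these generated clients make the same two mechanical edits throughout: the `api-version` query parameter is bumped from `2023-09-01` to `2024-03-01`, and wherever an optional parameter such as `$expand` exists, the generator now emits query parameters in alphabetical order, so `reqQP.Set("api-version", ...)` moves after the `$expand` block instead of before it. A minimal stdlib-only sketch of that request-construction shape; the `setQueryParams` helper, endpoint URL, and `expand` value are illustrative, not SDK API (the real clients go through `runtime.NewRequest` and `req.Raw()`):

```go
package main

import (
	"fmt"
	"net/http"
)

// setQueryParams mirrors the generated pattern in this update: optional
// parameters first (alphabetically), then the service api-version, then the
// query string is re-encoded onto the request URL.
func setQueryParams(req *http.Request, expand *string) {
	q := req.URL.Query()
	if expand != nil {
		q.Set("$expand", *expand) // optional $expand now precedes api-version in source
	}
	q.Set("api-version", "2024-03-01") // bumped from 2023-09-01 throughout these clients
	req.URL.RawQuery = q.Encode()
	req.Header["Accept"] = []string{"application/json"}
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://management.azure.com/subscriptions", nil)
	expand := "instanceView"
	setQueryParams(req, &expand)
	fmt.Println(req.URL.String()) // ...?%24expand=instanceView&api-version=2024-03-01
}
```

Because `url.Values.Encode` sorts keys, moving the `api-version` call after the `$expand` block does not change the wire format; the reordering only keeps the generated source in the same order as the encoded query string.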
@@ -466,7 +466,7 @@ func (client *AvailabilitySetsClient) updateCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go index 96e32e966..51a410658 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go @@ -49,7 +49,7 @@ func NewCapacityReservationGroupsClient(subscriptionID string, credential azcore // https://aka.ms/CapacityReservation for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - parameters - Parameters supplied to the Create capacity reservation Group. @@ -97,7 +97,7 @@ func (client *CapacityReservationGroupsClient) createOrUpdateCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -120,7 +120,7 @@ func (client *CapacityReservationGroupsClient) createOrUpdateHandleResponse(resp // the reservation group have also been deleted. Please refer to https://aka.ms/CapacityReservation for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - options - CapacityReservationGroupsClientDeleteOptions contains the optional parameters for the CapacityReservationGroupsClient.Delete @@ -166,7 +166,7 @@ func (client *CapacityReservationGroupsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -175,7 +175,7 @@ func (client *CapacityReservationGroupsClient) deleteCreateRequest(ctx context.C // Get - The operation that retrieves information about a capacity reservation group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. 
// - options - CapacityReservationGroupsClientGetOptions contains the optional parameters for the CapacityReservationGroupsClient.Get @@ -225,7 +225,7 @@ func (client *CapacityReservationGroupsClient) getCreateRequest(ctx context.Cont if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -243,7 +243,7 @@ func (client *CapacityReservationGroupsClient) getHandleResponse(resp *http.Resp // NewListByResourceGroupPager - Lists all of the capacity reservation groups in the specified resource group. Use the nextLink // property in the response to get the next page of capacity reservation groups. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - CapacityReservationGroupsClientListByResourceGroupOptions contains the optional parameters for the CapacityReservationGroupsClient.NewListByResourceGroupPager // method. @@ -286,10 +286,10 @@ func (client *CapacityReservationGroupsClient) listByResourceGroupCreateRequest( return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -307,7 +307,7 @@ func (client *CapacityReservationGroupsClient) listByResourceGroupHandleResponse // NewListBySubscriptionPager - Lists all of the capacity reservation groups in the subscription. Use the nextLink property // in the response to get the next page of capacity reservation groups. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - CapacityReservationGroupsClientListBySubscriptionOptions contains the optional parameters for the CapacityReservationGroupsClient.NewListBySubscriptionPager // method. func (client *CapacityReservationGroupsClient) NewListBySubscriptionPager(options *CapacityReservationGroupsClientListBySubscriptionOptions) *runtime.Pager[CapacityReservationGroupsClientListBySubscriptionResponse] { @@ -345,10 +345,13 @@ func (client *CapacityReservationGroupsClient) listBySubscriptionCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + reqQP.Set("api-version", "2024-03-01") + if options != nil && options.ResourceIDsOnly != nil { + reqQP.Set("resourceIdsOnly", string(*options.ResourceIDsOnly)) + } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -367,7 +370,7 @@ func (client *CapacityReservationGroupsClient) listBySubscriptionHandleResponse( // sharing profile may be modified. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - parameters - Parameters supplied to the Update capacity reservation Group operation. 
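Beyond the api-version bump, `listBySubscriptionCreateRequest` now wires up a `resourceIdsOnly` query parameter from the `ResourceIDsOnly` field added to `CapacityReservationGroupsClientListBySubscriptionOptions` in 5.7.0, alongside the new `ResourceIDOptionsForGetCapacityReservationGroups` enum. A hedged usage sketch, assuming default-credential setup and a placeholder subscription ID:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	clientFactory, err := armcompute.NewClientFactory("<subscription ID>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Opt into the new resource-IDs-only listing added in this release.
	idsOnly := armcompute.ResourceIDOptionsForGetCapacityReservationGroupsAll
	pager := clientFactory.NewCapacityReservationGroupsClient().NewListBySubscriptionPager(
		&armcompute.CapacityReservationGroupsClientListBySubscriptionOptions{
			ResourceIDsOnly: &idsOnly,
		})
	for pager.More() {
		page, err := pager.NextPage(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, group := range page.Value {
			// With ResourceIDsOnly set, the group IDs are the point of interest.
			log.Println(*group.ID)
		}
	}
}
```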
@@ -415,7 +418,7 @@ func (client *CapacityReservationGroupsClient) updateCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservations_client.go index 951b0ac91..aaf86c3a2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservations_client.go @@ -49,7 +49,7 @@ func NewCapacityReservationsClient(subscriptionID string, credential azcore.Toke // details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - capacityReservationName - The name of the capacity reservation. @@ -78,7 +78,7 @@ func (client *CapacityReservationsClient) BeginCreateOrUpdate(ctx context.Contex // details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *CapacityReservationsClient) createOrUpdate(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservation, options *CapacityReservationsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "CapacityReservationsClient.BeginCreateOrUpdate" @@ -124,7 +124,7 @@ func (client *CapacityReservationsClient) createOrUpdateCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -138,7 +138,7 @@ func (client *CapacityReservationsClient) createOrUpdateCreateRequest(ctx contex // https://aka.ms/CapacityReservation for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - capacityReservationName - The name of the capacity reservation. @@ -166,7 +166,7 @@ func (client *CapacityReservationsClient) BeginDelete(ctx context.Context, resou // https://aka.ms/CapacityReservation for more details. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *CapacityReservationsClient) deleteOperation(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *CapacityReservationsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "CapacityReservationsClient.BeginDelete" @@ -212,7 +212,7 @@ func (client *CapacityReservationsClient) deleteCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -221,7 +221,7 @@ func (client *CapacityReservationsClient) deleteCreateRequest(ctx context.Contex // Get - The operation that retrieves information about the capacity reservation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - capacityReservationName - The name of the capacity reservation. @@ -276,7 +276,7 @@ func (client *CapacityReservationsClient) getCreateRequest(ctx context.Context, if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -294,7 +294,7 @@ func (client *CapacityReservationsClient) getHandleResponse(resp *http.Response) // NewListByCapacityReservationGroupPager - Lists all of the capacity reservations in the specified capacity reservation group. // Use the nextLink property in the response to get the next page of capacity reservations. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - options - CapacityReservationsClientListByCapacityReservationGroupOptions contains the optional parameters for the CapacityReservationsClient.NewListByCapacityReservationGroupPager @@ -342,7 +342,7 @@ func (client *CapacityReservationsClient) listByCapacityReservationGroupCreateRe return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -360,7 +360,7 @@ func (client *CapacityReservationsClient) listByCapacityReservationGroupHandleRe // BeginUpdate - The operation to update a capacity reservation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - capacityReservationName - The name of the capacity reservation. @@ -387,7 +387,7 @@ func (client *CapacityReservationsClient) BeginUpdate(ctx context.Context, resou // Update - The operation to update a capacity reservation. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *CapacityReservationsClient) update(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservationUpdate, options *CapacityReservationsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "CapacityReservationsClient.BeginUpdate" @@ -433,7 +433,7 @@ func (client *CapacityReservationsClient) updateCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/ci.yml index 084ed1b49..c93d36fd4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/ci.yml @@ -21,8 +21,8 @@ pr: include: - sdk/resourcemanager/compute/armcompute/ -stages: -- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: IncludeRelease: true ServiceDirectory: 'resourcemanager/compute/armcompute' diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/client_factory.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/client_factory.go index b8600b614..dab27c87c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/client_factory.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/client_factory.go @@ -17,8 +17,7 @@ import ( // Don't use this type directly, use NewClientFactory instead. type ClientFactory struct { subscriptionID string - credential azcore.TokenCredential - options *arm.ClientOptions + internal *arm.Client } // NewClientFactory creates a new instance of ClientFactory with the specified values. @@ -28,306 +27,403 @@ type ClientFactory struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewClientFactory(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ClientFactory, error) { - _, err := arm.NewClient(moduleName, moduleVersion, credential, options) + internal, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } return &ClientFactory{ - subscriptionID: subscriptionID, credential: credential, - options: options.Clone(), + subscriptionID: subscriptionID, + internal: internal, }, nil } // NewAvailabilitySetsClient creates a new instance of AvailabilitySetsClient. 
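The `ClientFactory` rewrite above changes its internals rather than its surface: instead of storing the credential and options and re-running `arm.NewClient` inside every accessor (discarding the error with `subClient, _ := ...`), the factory now builds one `*arm.Client` in `NewClientFactory` and each accessor that follows returns a struct literal sharing that pipeline. A sketch of the caller-visible effect, assuming default-credential setup and a placeholder subscription ID:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// The ARM pipeline is built exactly once, inside NewClientFactory;
	// this is now the only place client construction can fail.
	clientFactory, err := armcompute.NewClientFactory("<subscription ID>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Accessors are now cheap struct literals sharing the factory's internal
	// *arm.Client; the previous code re-built a pipeline per accessor and
	// dropped its error.
	vmClient := clientFactory.NewVirtualMachinesClient()
	setClient := clientFactory.NewAvailabilitySetsClient()
	_, _ = vmClient, setClient
}
```

This is why every accessor below collapses from `subClient, _ := NewXxxClient(...)` to a plain literal: with a shared pipeline there is no fallible work left for an accessor to do.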
func (c *ClientFactory) NewAvailabilitySetsClient() *AvailabilitySetsClient { - subClient, _ := NewAvailabilitySetsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &AvailabilitySetsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCapacityReservationGroupsClient creates a new instance of CapacityReservationGroupsClient. func (c *ClientFactory) NewCapacityReservationGroupsClient() *CapacityReservationGroupsClient { - subClient, _ := NewCapacityReservationGroupsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CapacityReservationGroupsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCapacityReservationsClient creates a new instance of CapacityReservationsClient. func (c *ClientFactory) NewCapacityReservationsClient() *CapacityReservationsClient { - subClient, _ := NewCapacityReservationsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CapacityReservationsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServiceOperatingSystemsClient creates a new instance of CloudServiceOperatingSystemsClient. func (c *ClientFactory) NewCloudServiceOperatingSystemsClient() *CloudServiceOperatingSystemsClient { - subClient, _ := NewCloudServiceOperatingSystemsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServiceOperatingSystemsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServiceRoleInstancesClient creates a new instance of CloudServiceRoleInstancesClient. func (c *ClientFactory) NewCloudServiceRoleInstancesClient() *CloudServiceRoleInstancesClient { - subClient, _ := NewCloudServiceRoleInstancesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServiceRoleInstancesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServiceRolesClient creates a new instance of CloudServiceRolesClient. func (c *ClientFactory) NewCloudServiceRolesClient() *CloudServiceRolesClient { - subClient, _ := NewCloudServiceRolesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServiceRolesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServicesClient creates a new instance of CloudServicesClient. func (c *ClientFactory) NewCloudServicesClient() *CloudServicesClient { - subClient, _ := NewCloudServicesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServicesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServicesUpdateDomainClient creates a new instance of CloudServicesUpdateDomainClient. func (c *ClientFactory) NewCloudServicesUpdateDomainClient() *CloudServicesUpdateDomainClient { - subClient, _ := NewCloudServicesUpdateDomainClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServicesUpdateDomainClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCommunityGalleriesClient creates a new instance of CommunityGalleriesClient. func (c *ClientFactory) NewCommunityGalleriesClient() *CommunityGalleriesClient { - subClient, _ := NewCommunityGalleriesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CommunityGalleriesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCommunityGalleryImageVersionsClient creates a new instance of CommunityGalleryImageVersionsClient. 
func (c *ClientFactory) NewCommunityGalleryImageVersionsClient() *CommunityGalleryImageVersionsClient { - subClient, _ := NewCommunityGalleryImageVersionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CommunityGalleryImageVersionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCommunityGalleryImagesClient creates a new instance of CommunityGalleryImagesClient. func (c *ClientFactory) NewCommunityGalleryImagesClient() *CommunityGalleryImagesClient { - subClient, _ := NewCommunityGalleryImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CommunityGalleryImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDedicatedHostGroupsClient creates a new instance of DedicatedHostGroupsClient. func (c *ClientFactory) NewDedicatedHostGroupsClient() *DedicatedHostGroupsClient { - subClient, _ := NewDedicatedHostGroupsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DedicatedHostGroupsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDedicatedHostsClient creates a new instance of DedicatedHostsClient. func (c *ClientFactory) NewDedicatedHostsClient() *DedicatedHostsClient { - subClient, _ := NewDedicatedHostsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DedicatedHostsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDiskAccessesClient creates a new instance of DiskAccessesClient. func (c *ClientFactory) NewDiskAccessesClient() *DiskAccessesClient { - subClient, _ := NewDiskAccessesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DiskAccessesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDiskEncryptionSetsClient creates a new instance of DiskEncryptionSetsClient. func (c *ClientFactory) NewDiskEncryptionSetsClient() *DiskEncryptionSetsClient { - subClient, _ := NewDiskEncryptionSetsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DiskEncryptionSetsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDiskRestorePointClient creates a new instance of DiskRestorePointClient. func (c *ClientFactory) NewDiskRestorePointClient() *DiskRestorePointClient { - subClient, _ := NewDiskRestorePointClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DiskRestorePointClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDisksClient creates a new instance of DisksClient. func (c *ClientFactory) NewDisksClient() *DisksClient { - subClient, _ := NewDisksClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DisksClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleriesClient creates a new instance of GalleriesClient. func (c *ClientFactory) NewGalleriesClient() *GalleriesClient { - subClient, _ := NewGalleriesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleriesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleryApplicationVersionsClient creates a new instance of GalleryApplicationVersionsClient. 
func (c *ClientFactory) NewGalleryApplicationVersionsClient() *GalleryApplicationVersionsClient { - subClient, _ := NewGalleryApplicationVersionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleryApplicationVersionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleryApplicationsClient creates a new instance of GalleryApplicationsClient. func (c *ClientFactory) NewGalleryApplicationsClient() *GalleryApplicationsClient { - subClient, _ := NewGalleryApplicationsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleryApplicationsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleryImageVersionsClient creates a new instance of GalleryImageVersionsClient. func (c *ClientFactory) NewGalleryImageVersionsClient() *GalleryImageVersionsClient { - subClient, _ := NewGalleryImageVersionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleryImageVersionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleryImagesClient creates a new instance of GalleryImagesClient. func (c *ClientFactory) NewGalleryImagesClient() *GalleryImagesClient { - subClient, _ := NewGalleryImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleryImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGallerySharingProfileClient creates a new instance of GallerySharingProfileClient. func (c *ClientFactory) NewGallerySharingProfileClient() *GallerySharingProfileClient { - subClient, _ := NewGallerySharingProfileClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GallerySharingProfileClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewImagesClient creates a new instance of ImagesClient. func (c *ClientFactory) NewImagesClient() *ImagesClient { - subClient, _ := NewImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &ImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewLogAnalyticsClient creates a new instance of LogAnalyticsClient. func (c *ClientFactory) NewLogAnalyticsClient() *LogAnalyticsClient { - subClient, _ := NewLogAnalyticsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &LogAnalyticsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewOperationsClient creates a new instance of OperationsClient. func (c *ClientFactory) NewOperationsClient() *OperationsClient { - subClient, _ := NewOperationsClient(c.credential, c.options) - return subClient + return &OperationsClient{ + internal: c.internal, + } } // NewProximityPlacementGroupsClient creates a new instance of ProximityPlacementGroupsClient. func (c *ClientFactory) NewProximityPlacementGroupsClient() *ProximityPlacementGroupsClient { - subClient, _ := NewProximityPlacementGroupsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &ProximityPlacementGroupsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewResourceSKUsClient creates a new instance of ResourceSKUsClient. 
func (c *ClientFactory) NewResourceSKUsClient() *ResourceSKUsClient { - subClient, _ := NewResourceSKUsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &ResourceSKUsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewRestorePointCollectionsClient creates a new instance of RestorePointCollectionsClient. func (c *ClientFactory) NewRestorePointCollectionsClient() *RestorePointCollectionsClient { - subClient, _ := NewRestorePointCollectionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &RestorePointCollectionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewRestorePointsClient creates a new instance of RestorePointsClient. func (c *ClientFactory) NewRestorePointsClient() *RestorePointsClient { - subClient, _ := NewRestorePointsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &RestorePointsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSSHPublicKeysClient creates a new instance of SSHPublicKeysClient. func (c *ClientFactory) NewSSHPublicKeysClient() *SSHPublicKeysClient { - subClient, _ := NewSSHPublicKeysClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SSHPublicKeysClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSharedGalleriesClient creates a new instance of SharedGalleriesClient. func (c *ClientFactory) NewSharedGalleriesClient() *SharedGalleriesClient { - subClient, _ := NewSharedGalleriesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SharedGalleriesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSharedGalleryImageVersionsClient creates a new instance of SharedGalleryImageVersionsClient. func (c *ClientFactory) NewSharedGalleryImageVersionsClient() *SharedGalleryImageVersionsClient { - subClient, _ := NewSharedGalleryImageVersionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SharedGalleryImageVersionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSharedGalleryImagesClient creates a new instance of SharedGalleryImagesClient. func (c *ClientFactory) NewSharedGalleryImagesClient() *SharedGalleryImagesClient { - subClient, _ := NewSharedGalleryImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SharedGalleryImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSnapshotsClient creates a new instance of SnapshotsClient. func (c *ClientFactory) NewSnapshotsClient() *SnapshotsClient { - subClient, _ := NewSnapshotsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SnapshotsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewUsageClient creates a new instance of UsageClient. func (c *ClientFactory) NewUsageClient() *UsageClient { - subClient, _ := NewUsageClient(c.subscriptionID, c.credential, c.options) - return subClient + return &UsageClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineExtensionImagesClient creates a new instance of VirtualMachineExtensionImagesClient. 
func (c *ClientFactory) NewVirtualMachineExtensionImagesClient() *VirtualMachineExtensionImagesClient { - subClient, _ := NewVirtualMachineExtensionImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineExtensionImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineExtensionsClient creates a new instance of VirtualMachineExtensionsClient. func (c *ClientFactory) NewVirtualMachineExtensionsClient() *VirtualMachineExtensionsClient { - subClient, _ := NewVirtualMachineExtensionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineExtensionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineImagesClient creates a new instance of VirtualMachineImagesClient. func (c *ClientFactory) NewVirtualMachineImagesClient() *VirtualMachineImagesClient { - subClient, _ := NewVirtualMachineImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineImagesEdgeZoneClient creates a new instance of VirtualMachineImagesEdgeZoneClient. func (c *ClientFactory) NewVirtualMachineImagesEdgeZoneClient() *VirtualMachineImagesEdgeZoneClient { - subClient, _ := NewVirtualMachineImagesEdgeZoneClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineImagesEdgeZoneClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineRunCommandsClient creates a new instance of VirtualMachineRunCommandsClient. func (c *ClientFactory) NewVirtualMachineRunCommandsClient() *VirtualMachineRunCommandsClient { - subClient, _ := NewVirtualMachineRunCommandsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineRunCommandsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetExtensionsClient creates a new instance of VirtualMachineScaleSetExtensionsClient. func (c *ClientFactory) NewVirtualMachineScaleSetExtensionsClient() *VirtualMachineScaleSetExtensionsClient { - subClient, _ := NewVirtualMachineScaleSetExtensionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetExtensionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetRollingUpgradesClient creates a new instance of VirtualMachineScaleSetRollingUpgradesClient. func (c *ClientFactory) NewVirtualMachineScaleSetRollingUpgradesClient() *VirtualMachineScaleSetRollingUpgradesClient { - subClient, _ := NewVirtualMachineScaleSetRollingUpgradesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetRollingUpgradesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetVMExtensionsClient creates a new instance of VirtualMachineScaleSetVMExtensionsClient. func (c *ClientFactory) NewVirtualMachineScaleSetVMExtensionsClient() *VirtualMachineScaleSetVMExtensionsClient { - subClient, _ := NewVirtualMachineScaleSetVMExtensionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetVMExtensionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetVMRunCommandsClient creates a new instance of VirtualMachineScaleSetVMRunCommandsClient. 
func (c *ClientFactory) NewVirtualMachineScaleSetVMRunCommandsClient() *VirtualMachineScaleSetVMRunCommandsClient { - subClient, _ := NewVirtualMachineScaleSetVMRunCommandsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetVMRunCommandsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetVMsClient creates a new instance of VirtualMachineScaleSetVMsClient. func (c *ClientFactory) NewVirtualMachineScaleSetVMsClient() *VirtualMachineScaleSetVMsClient { - subClient, _ := NewVirtualMachineScaleSetVMsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetVMsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetsClient creates a new instance of VirtualMachineScaleSetsClient. func (c *ClientFactory) NewVirtualMachineScaleSetsClient() *VirtualMachineScaleSetsClient { - subClient, _ := NewVirtualMachineScaleSetsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineSizesClient creates a new instance of VirtualMachineSizesClient. func (c *ClientFactory) NewVirtualMachineSizesClient() *VirtualMachineSizesClient { - subClient, _ := NewVirtualMachineSizesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineSizesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachinesClient creates a new instance of VirtualMachinesClient. func (c *ClientFactory) NewVirtualMachinesClient() *VirtualMachinesClient { - subClient, _ := NewVirtualMachinesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachinesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceroleinstances_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceroleinstances_client.go index b84530166..1f7cdb7c3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceroleinstances_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceroleinstances_client.go @@ -180,10 +180,10 @@ func (client *CloudServiceRoleInstancesClient) getCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-09-04") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + reqQP.Set("api-version", "2022-09-04") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -380,10 +380,10 @@ func (client *CloudServiceRoleInstancesClient) listCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-09-04") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + reqQP.Set("api-version", "2022-09-04") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go index 2bfc798f3..431a26cde 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go @@ -47,7 +47,7 @@ func NewCommunityGalleriesClient(subscriptionID string, credential azcore.TokenC // Get - Get a community gallery by gallery public name. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - options - CommunityGalleriesClientGetOptions contains the optional parameters for the CommunityGalleriesClient.Get method. @@ -93,7 +93,7 @@ func (client *CommunityGalleriesClient) getCreateRequest(ctx context.Context, lo return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go index 25b118f07..4ecc005ca 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go @@ -47,7 +47,7 @@ func NewCommunityGalleryImagesClient(subscriptionID string, credential azcore.To // Get - Get a community gallery image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - galleryImageName - The name of the community gallery image definition. @@ -99,7 +99,7 @@ func (client *CommunityGalleryImagesClient) getCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -116,7 +116,7 @@ func (client *CommunityGalleryImagesClient) getHandleResponse(resp *http.Respons // NewListPager - List community gallery images inside a gallery. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. 
// - options - CommunityGalleryImagesClientListOptions contains the optional parameters for the CommunityGalleryImagesClient.NewListPager @@ -164,7 +164,7 @@ func (client *CommunityGalleryImagesClient) listCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go index 5e382b36d..e98cfa506 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go @@ -47,7 +47,7 @@ func NewCommunityGalleryImageVersionsClient(subscriptionID string, credential az // Get - Get a community gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - galleryImageName - The name of the community gallery image definition. @@ -106,7 +106,7 @@ func (client *CommunityGalleryImageVersionsClient) getCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -123,7 +123,7 @@ func (client *CommunityGalleryImageVersionsClient) getHandleResponse(resp *http. // NewListPager - List community gallery image versions inside an image. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - galleryImageName - The name of the community gallery image definition. 
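Reviewer note: the hunks above move the community gallery clients from api-version 2022-08-03 to 2023-07-03, and the earlier ClientFactory hunks switch every factory method from re-running a constructor (and discarding its error) to building the sub-client directly on the factory's shared internal pipeline. The query-parameter reordering in the cloudserviceroleinstances hunks is cosmetic: url.Values.Encode() sorts keys, so the encoded URL is unchanged. A minimal usage sketch of the new factory path, assuming azidentity for auth; names in angle brackets are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// One factory, one pipeline: each New*Client call below now reuses the
	// factory's internal client instead of constructing a fresh one.
	factory, err := armcompute.NewClientFactory("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// The generated client pins api-version=2023-07-03 on every request.
	pager := factory.NewCommunityGalleryImagesClient().NewListPager("westus2", "<public-gallery-name>", nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, img := range page.Value {
			fmt.Println(*img.Name)
		}
	}
}
```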
@@ -176,7 +176,7 @@ func (client *CommunityGalleryImageVersionsClient) listCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go index d635d4f51..ab719dfdf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go @@ -10,7 +10,7 @@ package armcompute const ( moduleName = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute" - moduleVersion = "v5.4.0" + moduleVersion = "v5.7.0" ) type AccessLevel string @@ -333,15 +333,17 @@ func PossibleDiffDiskOptionsValues() []DiffDiskOptions { } // DiffDiskPlacement - Specifies the ephemeral disk placement for operating system disk. This property can be used by user -// in the request to choose the location i.e, cache disk or resource disk space for Ephemeral OS disk -// provisioning. For more information on Ephemeral OS disk size requirements, please refer Ephemeral OS disk size requirements -// for Windows VM at +// in the request to choose the location i.e, cache disk, resource disk or nvme disk space for +// Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer Ephemeral OS +// disk size requirements for Windows VM at // https://docs.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VM at -// https://docs.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements +// https://docs.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. Minimum api-version for NvmeDisk: +// 2024-03-01. type DiffDiskPlacement string const ( DiffDiskPlacementCacheDisk DiffDiskPlacement = "CacheDisk" + DiffDiskPlacementNvmeDisk DiffDiskPlacement = "NvmeDisk" DiffDiskPlacementResourceDisk DiffDiskPlacement = "ResourceDisk" ) @@ -349,6 +351,7 @@ const ( func PossibleDiffDiskPlacementValues() []DiffDiskPlacement { return []DiffDiskPlacement{ DiffDiskPlacementCacheDisk, + DiffDiskPlacementNvmeDisk, DiffDiskPlacementResourceDisk, } } @@ -425,25 +428,31 @@ func PossibleDiskCreateOptionValues() []DiskCreateOption { } } -// DiskCreateOptionTypes - Specifies how the virtual machine should be created. Possible values are: Attach. This value is -// used when you are using a specialized disk to create the virtual machine. FromImage. This value is used -// when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference -// element described above. If you are using a marketplace image, you also -// use the plan element previously described. +// DiskCreateOptionTypes - Specifies how the virtual machine disk should be created. Possible values are Attach: This value +// is used when you are using a specialized disk to create the virtual machine. FromImage: This value is +// used when you are using an image to create the virtual machine. 
If you are using a platform image, you should also use +// the imageReference element described above. If you are using a marketplace image, +// you should also use the plan element previously described. Empty: This value is used when creating an empty data disk. +// Copy: This value is used to create a data disk from a snapshot or another disk. +// Restore: This value is used to create a data disk from a disk restore point. type DiskCreateOptionTypes string const ( DiskCreateOptionTypesAttach DiskCreateOptionTypes = "Attach" + DiskCreateOptionTypesCopy DiskCreateOptionTypes = "Copy" DiskCreateOptionTypesEmpty DiskCreateOptionTypes = "Empty" DiskCreateOptionTypesFromImage DiskCreateOptionTypes = "FromImage" + DiskCreateOptionTypesRestore DiskCreateOptionTypes = "Restore" ) // PossibleDiskCreateOptionTypesValues returns the possible values for the DiskCreateOptionTypes const type. func PossibleDiskCreateOptionTypesValues() []DiskCreateOptionTypes { return []DiskCreateOptionTypes{ DiskCreateOptionTypesAttach, + DiskCreateOptionTypesCopy, DiskCreateOptionTypesEmpty, DiskCreateOptionTypesFromImage, + DiskCreateOptionTypesRestore, } } @@ -543,6 +552,9 @@ const ( // DiskSecurityTypesConfidentialVMDiskEncryptedWithPlatformKey - Indicates Confidential VM disk with both OS disk and VM guest // state encrypted with a platform managed key DiskSecurityTypesConfidentialVMDiskEncryptedWithPlatformKey DiskSecurityTypes = "ConfidentialVM_DiskEncryptedWithPlatformKey" + // DiskSecurityTypesConfidentialVMNonPersistedTPM - Indicates Confidential VM disk with a ephemeral vTPM. vTPM state is not + // persisted across VM reboots. + DiskSecurityTypesConfidentialVMNonPersistedTPM DiskSecurityTypes = "ConfidentialVM_NonPersistedTPM" // DiskSecurityTypesConfidentialVMVmguestStateOnlyEncryptedWithPlatformKey - Indicates Confidential VM disk with only VM guest // state encrypted DiskSecurityTypesConfidentialVMVmguestStateOnlyEncryptedWithPlatformKey DiskSecurityTypes = "ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey" @@ -556,6 +568,7 @@ func PossibleDiskSecurityTypesValues() []DiskSecurityTypes { return []DiskSecurityTypes{ DiskSecurityTypesConfidentialVMDiskEncryptedWithCustomerKey, DiskSecurityTypesConfidentialVMDiskEncryptedWithPlatformKey, + DiskSecurityTypesConfidentialVMNonPersistedTPM, DiskSecurityTypesConfidentialVMVmguestStateOnlyEncryptedWithPlatformKey, DiskSecurityTypesTrustedLaunch, } @@ -1500,6 +1513,23 @@ func PossibleProtocolTypesValues() []ProtocolTypes { } } +// ProvisionedBandwidthCopyOption - If this field is set on a snapshot and createOption is CopyStart, the snapshot will be +// copied at a quicker speed. +type ProvisionedBandwidthCopyOption string + +const ( + ProvisionedBandwidthCopyOptionEnhanced ProvisionedBandwidthCopyOption = "Enhanced" + ProvisionedBandwidthCopyOptionNone ProvisionedBandwidthCopyOption = "None" +) + +// PossibleProvisionedBandwidthCopyOptionValues returns the possible values for the ProvisionedBandwidthCopyOption const type. +func PossibleProvisionedBandwidthCopyOptionValues() []ProvisionedBandwidthCopyOption { + return []ProvisionedBandwidthCopyOption{ + ProvisionedBandwidthCopyOptionEnhanced, + ProvisionedBandwidthCopyOptionNone, + } +} + // ProximityPlacementGroupType - Specifies the type of the proximity placement group. Possible values are: Standard : Co-locate // resources within an Azure region or Availability Zone. Ultra : For future use. 
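Reviewer note: constants.go picks up several new enum members in this bump (the NvmeDisk ephemeral placement, the Copy and Restore disk create options, the non-persisted vTPM confidential-disk type, and ProvisionedBandwidthCopyOption). A minimal sketch showing the new values through the public surface; nothing here is applied to a real VM:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	// Ephemeral OS disk placed on the VM's local NVMe disk; per the doc
	// comment above, NvmeDisk needs api-version 2024-03-01 or later.
	ephemeral := armcompute.DiffDiskSettings{
		Option:    to.Ptr(armcompute.DiffDiskOptionsLocal),
		Placement: to.Ptr(armcompute.DiffDiskPlacementNvmeDisk),
	}
	fmt.Println(*ephemeral.Placement)

	// Copy and Restore now round-trip through the enum helpers as well.
	for _, v := range armcompute.PossibleDiskCreateOptionTypesValues() {
		fmt.Println(v)
	}
}
```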
type ProximityPlacementGroupType string @@ -1657,6 +1687,23 @@ func PossibleReplicationStatusTypesValues() []ReplicationStatusTypes { } } +type ResourceIDOptionsForGetCapacityReservationGroups string + +const ( + ResourceIDOptionsForGetCapacityReservationGroupsAll ResourceIDOptionsForGetCapacityReservationGroups = "All" + ResourceIDOptionsForGetCapacityReservationGroupsCreatedInSubscription ResourceIDOptionsForGetCapacityReservationGroups = "CreatedInSubscription" + ResourceIDOptionsForGetCapacityReservationGroupsSharedWithSubscription ResourceIDOptionsForGetCapacityReservationGroups = "SharedWithSubscription" +) + +// PossibleResourceIDOptionsForGetCapacityReservationGroupsValues returns the possible values for the ResourceIDOptionsForGetCapacityReservationGroups const type. +func PossibleResourceIDOptionsForGetCapacityReservationGroupsValues() []ResourceIDOptionsForGetCapacityReservationGroups { + return []ResourceIDOptionsForGetCapacityReservationGroups{ + ResourceIDOptionsForGetCapacityReservationGroupsAll, + ResourceIDOptionsForGetCapacityReservationGroupsCreatedInSubscription, + ResourceIDOptionsForGetCapacityReservationGroupsSharedWithSubscription, + } +} + // ResourceIdentityType - The type of identity used for the virtual machine scale set. The type 'SystemAssigned, UserAssigned' // includes both an implicitly created identity and a set of user assigned identities. The type 'None' // will remove any identities from the virtual machine scale set. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhostgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhostgroups_client.go index 99434bfab..ae2c4f366 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhostgroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhostgroups_client.go @@ -48,7 +48,7 @@ func NewDedicatedHostGroupsClient(subscriptionID string, credential azcore.Token // see Dedicated Host Documentation [https://go.microsoft.com/fwlink/?linkid=2082596] // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - parameters - Parameters supplied to the Create Dedicated Host Group. @@ -96,7 +96,7 @@ func (client *DedicatedHostGroupsClient) createOrUpdateCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -117,7 +117,7 @@ func (client *DedicatedHostGroupsClient) createOrUpdateHandleResponse(resp *http // Delete - Delete a dedicated host group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. 
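Reviewer note: ResourceIDOptionsForGetCapacityReservationGroups above is a new filter for capacity reservation group listings (owned, created-in-subscription, or shared-with-subscription). A sketch of how it would plug into the list options; the ResourceIDsOnly field name is my assumption about the generated options struct, only the enum itself comes from this diff:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewCapacityReservationGroupsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Include groups shared with this subscription, not just owned ones.
	pager := client.NewListBySubscriptionPager(&armcompute.CapacityReservationGroupsClientListBySubscriptionOptions{
		ResourceIDsOnly: to.Ptr(armcompute.ResourceIDOptionsForGetCapacityReservationGroupsSharedWithSubscription),
	})
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, g := range page.Value {
			fmt.Println(*g.ID)
		}
	}
}
```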
// - options - DedicatedHostGroupsClientDeleteOptions contains the optional parameters for the DedicatedHostGroupsClient.Delete @@ -163,7 +163,7 @@ func (client *DedicatedHostGroupsClient) deleteCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -172,7 +172,7 @@ func (client *DedicatedHostGroupsClient) deleteCreateRequest(ctx context.Context // Get - Retrieves information about a dedicated host group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - options - DedicatedHostGroupsClientGetOptions contains the optional parameters for the DedicatedHostGroupsClient.Get method. @@ -221,7 +221,7 @@ func (client *DedicatedHostGroupsClient) getCreateRequest(ctx context.Context, r if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -239,7 +239,7 @@ func (client *DedicatedHostGroupsClient) getHandleResponse(resp *http.Response) // NewListByResourceGroupPager - Lists all of the dedicated host groups in the specified resource group. Use the nextLink // property in the response to get the next page of dedicated host groups. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - DedicatedHostGroupsClientListByResourceGroupOptions contains the optional parameters for the DedicatedHostGroupsClient.NewListByResourceGroupPager // method. @@ -282,7 +282,7 @@ func (client *DedicatedHostGroupsClient) listByResourceGroupCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -300,7 +300,7 @@ func (client *DedicatedHostGroupsClient) listByResourceGroupHandleResponse(resp // NewListBySubscriptionPager - Lists all of the dedicated host groups in the subscription. Use the nextLink property in the // response to get the next page of dedicated host groups. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - DedicatedHostGroupsClientListBySubscriptionOptions contains the optional parameters for the DedicatedHostGroupsClient.NewListBySubscriptionPager // method. 
func (client *DedicatedHostGroupsClient) NewListBySubscriptionPager(options *DedicatedHostGroupsClientListBySubscriptionOptions) *runtime.Pager[DedicatedHostGroupsClientListBySubscriptionResponse] { @@ -338,7 +338,7 @@ func (client *DedicatedHostGroupsClient) listBySubscriptionCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -356,7 +356,7 @@ func (client *DedicatedHostGroupsClient) listBySubscriptionHandleResponse(resp * // Update - Update an dedicated host group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - parameters - Parameters supplied to the Update Dedicated Host Group operation. @@ -404,7 +404,7 @@ func (client *DedicatedHostGroupsClient) updateCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhosts_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhosts_client.go index b50a3ec65..66ae1f99b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhosts_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhosts_client.go @@ -47,7 +47,7 @@ func NewDedicatedHostsClient(subscriptionID string, credential azcore.TokenCrede // BeginCreateOrUpdate - Create or update a dedicated host . // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host . @@ -74,7 +74,7 @@ func (client *DedicatedHostsClient) BeginCreateOrUpdate(ctx context.Context, res // CreateOrUpdate - Create or update a dedicated host . // If the operation fails it returns an *azcore.ResponseError type. 
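Reviewer note: all DedicatedHostGroups operations move to api-version 2024-03-01 in these hunks. A minimal sketch of the subscription-wide pager shown above, with placeholder names:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewDedicatedHostGroupsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Walks every page; each underlying GET now carries api-version=2024-03-01.
	pager := client.NewListBySubscriptionPager(nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, hg := range page.Value {
			fmt.Printf("%s (zones: %d)\n", *hg.Name, len(hg.Zones))
		}
	}
}
```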
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *DedicatedHostsClient) createOrUpdate(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHost, options *DedicatedHostsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "DedicatedHostsClient.BeginCreateOrUpdate" @@ -120,7 +120,7 @@ func (client *DedicatedHostsClient) createOrUpdateCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -132,7 +132,7 @@ func (client *DedicatedHostsClient) createOrUpdateCreateRequest(ctx context.Cont // BeginDelete - Delete a dedicated host. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host. @@ -158,7 +158,7 @@ func (client *DedicatedHostsClient) BeginDelete(ctx context.Context, resourceGro // Delete - Delete a dedicated host. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *DedicatedHostsClient) deleteOperation(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "DedicatedHostsClient.BeginDelete" @@ -204,7 +204,7 @@ func (client *DedicatedHostsClient) deleteCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -213,7 +213,7 @@ func (client *DedicatedHostsClient) deleteCreateRequest(ctx context.Context, res // Get - Retrieves information about a dedicated host. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host. @@ -267,7 +267,7 @@ func (client *DedicatedHostsClient) getCreateRequest(ctx context.Context, resour if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -285,7 +285,7 @@ func (client *DedicatedHostsClient) getHandleResponse(resp *http.Response) (Dedi // NewListAvailableSizesPager - Lists all available dedicated host sizes to which the specified dedicated host can be resized. // NOTE: The dedicated host sizes provided can be used to only scale up the existing dedicated host. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. 
// - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host. @@ -339,7 +339,7 @@ func (client *DedicatedHostsClient) listAvailableSizesCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -357,7 +357,7 @@ func (client *DedicatedHostsClient) listAvailableSizesHandleResponse(resp *http. // NewListByHostGroupPager - Lists all of the dedicated hosts in the specified dedicated host group. Use the nextLink property // in the response to get the next page of dedicated hosts. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - options - DedicatedHostsClientListByHostGroupOptions contains the optional parameters for the DedicatedHostsClient.NewListByHostGroupPager @@ -405,7 +405,7 @@ func (client *DedicatedHostsClient) listByHostGroupCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -426,7 +426,7 @@ func (client *DedicatedHostsClient) listByHostGroupHandleResponse(resp *http.Res // for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host. @@ -455,7 +455,7 @@ func (client *DedicatedHostsClient) BeginRedeploy(ctx context.Context, resourceG // for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *DedicatedHostsClient) redeploy(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginRedeployOptions) (*http.Response, error) { var err error const operationName = "DedicatedHostsClient.BeginRedeploy" @@ -501,7 +501,7 @@ func (client *DedicatedHostsClient) redeployCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -513,7 +513,7 @@ func (client *DedicatedHostsClient) redeployCreateRequest(ctx context.Context, r // for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host. @@ -542,7 +542,7 @@ func (client *DedicatedHostsClient) BeginRestart(ctx context.Context, resourceGr // for more details. // If the operation fails it returns an *azcore.ResponseError type. 
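Reviewer note: BeginRedeploy and BeginRestart above are long-running operations; the Begin* call only submits the request and returns a poller. A minimal sketch of the standard poller pattern, using BeginRestart with placeholder names:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewDedicatedHostsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	// BeginRestart submits the operation; PollUntilDone blocks until the
	// service reports a terminal state (or the context is cancelled).
	poller, err := client.BeginRestart(ctx, "<resource-group>", "<host-group>", "<host>", nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := poller.PollUntilDone(ctx, nil); err != nil {
		log.Fatal(err)
	}
	log.Println("dedicated host restarted")
}
```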
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *DedicatedHostsClient) restart(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginRestartOptions) (*http.Response, error) { var err error const operationName = "DedicatedHostsClient.BeginRestart" @@ -588,7 +588,7 @@ func (client *DedicatedHostsClient) restartCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -597,7 +597,7 @@ func (client *DedicatedHostsClient) restartCreateRequest(ctx context.Context, re // BeginUpdate - Update a dedicated host . // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host . @@ -624,7 +624,7 @@ func (client *DedicatedHostsClient) BeginUpdate(ctx context.Context, resourceGro // Update - Update a dedicated host . // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *DedicatedHostsClient) update(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHostUpdate, options *DedicatedHostsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "DedicatedHostsClient.BeginUpdate" @@ -670,7 +670,7 @@ func (client *DedicatedHostsClient) updateCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskaccesses_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskaccesses_client.go index b189a0340..0bea05f3e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskaccesses_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskaccesses_client.go @@ -47,7 +47,7 @@ func NewDiskAccessesClient(subscriptionID string, credential azcore.TokenCredent // BeginCreateOrUpdate - Creates or updates a disk access resource // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. 
The @@ -75,7 +75,7 @@ func (client *DiskAccessesClient) BeginCreateOrUpdate(ctx context.Context, resou // CreateOrUpdate - Creates or updates a disk access resource // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) createOrUpdate(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccess, options *DiskAccessesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginCreateOrUpdate" @@ -117,7 +117,7 @@ func (client *DiskAccessesClient) createOrUpdateCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, diskAccess); err != nil { @@ -129,7 +129,7 @@ func (client *DiskAccessesClient) createOrUpdateCreateRequest(ctx context.Contex // BeginDelete - Deletes a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -156,7 +156,7 @@ func (client *DiskAccessesClient) BeginDelete(ctx context.Context, resourceGroup // Delete - Deletes a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) deleteOperation(ctx context.Context, resourceGroupName string, diskAccessName string, options *DiskAccessesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginDelete" @@ -198,7 +198,7 @@ func (client *DiskAccessesClient) deleteCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -207,7 +207,7 @@ func (client *DiskAccessesClient) deleteCreateRequest(ctx context.Context, resou // BeginDeleteAPrivateEndpointConnection - Deletes a private endpoint connection under a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -235,7 +235,7 @@ func (client *DiskAccessesClient) BeginDeleteAPrivateEndpointConnection(ctx cont // DeleteAPrivateEndpointConnection - Deletes a private endpoint connection under a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. 
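Reviewer note: unlike the Begin* methods around it, GetPrivateLinkResources (bumped to 2023-10-02 below) is a plain synchronous GET. A minimal sketch, with placeholder names; the Properties shape follows the armcompute private-link models:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewDiskAccessesClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// No poller here: the response directly lists the private link
	// resources (group ID, required members) for the disk access.
	resp, err := client.GetPrivateLinkResources(context.Background(), "<resource-group>", "<disk-access>", nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, plr := range resp.Value {
		fmt.Println(*plr.Properties.GroupID)
	}
}
```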
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) deleteAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, options *DiskAccessesClientBeginDeleteAPrivateEndpointConnectionOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginDeleteAPrivateEndpointConnection" @@ -281,7 +281,7 @@ func (client *DiskAccessesClient) deleteAPrivateEndpointConnectionCreateRequest( return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -290,7 +290,7 @@ func (client *DiskAccessesClient) deleteAPrivateEndpointConnectionCreateRequest( // Get - Gets information about a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -338,7 +338,7 @@ func (client *DiskAccessesClient) getCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -356,7 +356,7 @@ func (client *DiskAccessesClient) getHandleResponse(resp *http.Response) (DiskAc // GetAPrivateEndpointConnection - Gets information about a private endpoint connection under a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -410,7 +410,7 @@ func (client *DiskAccessesClient) getAPrivateEndpointConnectionCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -428,7 +428,7 @@ func (client *DiskAccessesClient) getAPrivateEndpointConnectionHandleResponse(re // GetPrivateLinkResources - Gets the private link resources possible under disk access resource // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. 
The @@ -477,7 +477,7 @@ func (client *DiskAccessesClient) getPrivateLinkResourcesCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -494,7 +494,7 @@ func (client *DiskAccessesClient) getPrivateLinkResourcesHandleResponse(resp *ht // NewListPager - Lists all the disk access resources under a subscription. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - options - DiskAccessesClientListOptions contains the optional parameters for the DiskAccessesClient.NewListPager method. func (client *DiskAccessesClient) NewListPager(options *DiskAccessesClientListOptions) *runtime.Pager[DiskAccessesClientListResponse] { return runtime.NewPager(runtime.PagingHandler[DiskAccessesClientListResponse]{ @@ -531,7 +531,7 @@ func (client *DiskAccessesClient) listCreateRequest(ctx context.Context, options return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -548,7 +548,7 @@ func (client *DiskAccessesClient) listHandleResponse(resp *http.Response) (DiskA // NewListByResourceGroupPager - Lists all the disk access resources under a resource group. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - options - DiskAccessesClientListByResourceGroupOptions contains the optional parameters for the DiskAccessesClient.NewListByResourceGroupPager // method. @@ -591,7 +591,7 @@ func (client *DiskAccessesClient) listByResourceGroupCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -608,7 +608,7 @@ func (client *DiskAccessesClient) listByResourceGroupHandleResponse(resp *http.R // NewListPrivateEndpointConnectionsPager - List information about private endpoint connections under a disk access resource // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -658,7 +658,7 @@ func (client *DiskAccessesClient) listPrivateEndpointConnectionsCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -676,7 +676,7 @@ func (client *DiskAccessesClient) listPrivateEndpointConnectionsHandleResponse(r // BeginUpdate - Updates (patches) a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. 
The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -704,7 +704,7 @@ func (client *DiskAccessesClient) BeginUpdate(ctx context.Context, resourceGroup // Update - Updates (patches) a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) update(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccessUpdate, options *DiskAccessesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginUpdate" @@ -746,7 +746,7 @@ func (client *DiskAccessesClient) updateCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, diskAccess); err != nil { @@ -759,7 +759,7 @@ func (client *DiskAccessesClient) updateCreateRequest(ctx context.Context, resou // can't be used to create a new private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -790,7 +790,7 @@ func (client *DiskAccessesClient) BeginUpdateAPrivateEndpointConnection(ctx cont // be used to create a new private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. 
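Reviewer note: as the doc comment above says, UpdateAPrivateEndpointConnection can only approve or reject an existing connection, never create one. A minimal sketch of approving a pending connection, with placeholder names and description text:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewDiskAccessesClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	conn := armcompute.PrivateEndpointConnection{
		Properties: &armcompute.PrivateEndpointConnectionProperties{
			// Approve an existing, pending connection; this API cannot
			// create a new one.
			PrivateLinkServiceConnectionState: &armcompute.PrivateLinkServiceConnectionState{
				Status:      to.Ptr(armcompute.PrivateEndpointServiceConnectionStatusApproved),
				Description: to.Ptr("approved by ops"),
			},
		},
	}
	poller, err := client.BeginUpdateAPrivateEndpointConnection(ctx, "<resource-group>", "<disk-access>", "<connection-name>", conn, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := poller.PollUntilDone(ctx, nil); err != nil {
		log.Fatal(err)
	}
}
```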
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) updateAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, privateEndpointConnection PrivateEndpointConnection, options *DiskAccessesClientBeginUpdateAPrivateEndpointConnectionOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginUpdateAPrivateEndpointConnection" @@ -836,7 +836,7 @@ func (client *DiskAccessesClient) updateAPrivateEndpointConnectionCreateRequest( return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, privateEndpointConnection); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskencryptionsets_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskencryptionsets_client.go index 9306f9c0d..59d34d1e2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskencryptionsets_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskencryptionsets_client.go @@ -47,7 +47,7 @@ func NewDiskEncryptionSetsClient(subscriptionID string, credential azcore.TokenC // BeginCreateOrUpdate - Creates or updates a disk encryption set // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the // disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum @@ -75,7 +75,7 @@ func (client *DiskEncryptionSetsClient) BeginCreateOrUpdate(ctx context.Context, // CreateOrUpdate - Creates or updates a disk encryption set // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskEncryptionSetsClient) createOrUpdate(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSet, options *DiskEncryptionSetsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "DiskEncryptionSetsClient.BeginCreateOrUpdate" @@ -117,7 +117,7 @@ func (client *DiskEncryptionSetsClient) createOrUpdateCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, diskEncryptionSet); err != nil { @@ -129,7 +129,7 @@ func (client *DiskEncryptionSetsClient) createOrUpdateCreateRequest(ctx context. // BeginDelete - Deletes a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. 
// - diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the // disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum @@ -156,7 +156,7 @@ func (client *DiskEncryptionSetsClient) BeginDelete(ctx context.Context, resourc // Delete - Deletes a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskEncryptionSetsClient) deleteOperation(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, options *DiskEncryptionSetsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "DiskEncryptionSetsClient.BeginDelete" @@ -198,7 +198,7 @@ func (client *DiskEncryptionSetsClient) deleteCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -207,7 +207,7 @@ func (client *DiskEncryptionSetsClient) deleteCreateRequest(ctx context.Context, // Get - Gets information about a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the // disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum @@ -255,7 +255,7 @@ func (client *DiskEncryptionSetsClient) getCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -272,7 +272,7 @@ func (client *DiskEncryptionSetsClient) getHandleResponse(resp *http.Response) ( // NewListPager - Lists all the disk encryption sets under a subscription. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - options - DiskEncryptionSetsClientListOptions contains the optional parameters for the DiskEncryptionSetsClient.NewListPager // method. func (client *DiskEncryptionSetsClient) NewListPager(options *DiskEncryptionSetsClientListOptions) *runtime.Pager[DiskEncryptionSetsClientListResponse] { @@ -310,7 +310,7 @@ func (client *DiskEncryptionSetsClient) listCreateRequest(ctx context.Context, o return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -327,7 +327,7 @@ func (client *DiskEncryptionSetsClient) listHandleResponse(resp *http.Response) // NewListAssociatedResourcesPager - Lists all resources that are encrypted with this disk encryption set. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the // disk encryption set is created. 
Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum @@ -377,7 +377,7 @@ func (client *DiskEncryptionSetsClient) listAssociatedResourcesCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -394,7 +394,7 @@ func (client *DiskEncryptionSetsClient) listAssociatedResourcesHandleResponse(re // NewListByResourceGroupPager - Lists all the disk encryption sets under a resource group. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - options - DiskEncryptionSetsClientListByResourceGroupOptions contains the optional parameters for the DiskEncryptionSetsClient.NewListByResourceGroupPager // method. @@ -437,7 +437,7 @@ func (client *DiskEncryptionSetsClient) listByResourceGroupCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -455,7 +455,7 @@ func (client *DiskEncryptionSetsClient) listByResourceGroupHandleResponse(resp * // BeginUpdate - Updates (patches) a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the // disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum @@ -483,7 +483,7 @@ func (client *DiskEncryptionSetsClient) BeginUpdate(ctx context.Context, resourc // Update - Updates (patches) a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. 
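Reviewer note: the patch-style BeginUpdate above is the usual way to rotate a disk encryption set to a new key version. A minimal sketch under that assumption; the vault and key URL are hypothetical:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewDiskEncryptionSetsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	// Point the set at a new key version; the service re-wraps the disks'
	// data encryption keys in the background.
	update := armcompute.DiskEncryptionSetUpdate{
		Properties: &armcompute.DiskEncryptionSetUpdateProperties{
			ActiveKey: &armcompute.KeyForDiskEncryptionSet{
				KeyURL: to.Ptr("https://<vault>.vault.azure.net/keys/<key>/<version>"),
			},
		},
	}
	poller, err := client.BeginUpdate(ctx, "<resource-group>", "<encryption-set>", update, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := poller.PollUntilDone(ctx, nil); err != nil {
		log.Fatal(err)
	}
}
```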
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskEncryptionSetsClient) update(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate, options *DiskEncryptionSetsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "DiskEncryptionSetsClient.BeginUpdate" @@ -525,7 +525,7 @@ func (client *DiskEncryptionSetsClient) updateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, diskEncryptionSet); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskrestorepoint_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskrestorepoint_client.go index 54b576957..43307ca04 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskrestorepoint_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskrestorepoint_client.go @@ -47,7 +47,7 @@ func NewDiskRestorePointClient(subscriptionID string, credential azcore.TokenCre // Get - Get disk restorePoint resource // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection that the disk restore point belongs. // - vmRestorePointName - The name of the vm restore point that the disk disk restore point belongs. @@ -103,7 +103,7 @@ func (client *DiskRestorePointClient) getCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -121,7 +121,7 @@ func (client *DiskRestorePointClient) getHandleResponse(resp *http.Response) (Di // BeginGrantAccess - Grants access to a diskRestorePoint. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection that the disk restore point belongs. // - vmRestorePointName - The name of the vm restore point that the disk disk restore point belongs. @@ -150,7 +150,7 @@ func (client *DiskRestorePointClient) BeginGrantAccess(ctx context.Context, reso // GrantAccess - Grants access to a diskRestorePoint. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskRestorePointClient) grantAccess(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, grantAccessData GrantAccessData, options *DiskRestorePointClientBeginGrantAccessOptions) (*http.Response, error) { var err error const operationName = "DiskRestorePointClient.BeginGrantAccess" @@ -200,7 +200,7 @@ func (client *DiskRestorePointClient) grantAccessCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, grantAccessData); err != nil { @@ -211,7 +211,7 @@ func (client *DiskRestorePointClient) grantAccessCreateRequest(ctx context.Conte // NewListByRestorePointPager - Lists diskRestorePoints under a vmRestorePoint. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection that the disk restore point belongs. // - vmRestorePointName - The name of the vm restore point that the disk disk restore point belongs. @@ -264,7 +264,7 @@ func (client *DiskRestorePointClient) listByRestorePointCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -282,7 +282,7 @@ func (client *DiskRestorePointClient) listByRestorePointHandleResponse(resp *htt // BeginRevokeAccess - Revokes access to a diskRestorePoint. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection that the disk restore point belongs. // - vmRestorePointName - The name of the vm restore point that the disk disk restore point belongs. @@ -310,7 +310,7 @@ func (client *DiskRestorePointClient) BeginRevokeAccess(ctx context.Context, res // RevokeAccess - Revokes access to a diskRestorePoint. // If the operation fails it returns an *azcore.ResponseError type. 
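Reviewer note: GrantAccess and RevokeAccess above form a pair, issue a time-limited read SAS for the disk restore point, use it, then revoke it. A minimal sketch of that flow with placeholder names; both calls are long-running:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewDiskRestorePointClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	grant := armcompute.GrantAccessData{
		Access:            to.Ptr(armcompute.AccessLevelRead),
		DurationInSeconds: to.Ptr[int32](3600), // SAS valid for one hour
	}
	poller, err := client.BeginGrantAccess(ctx, "<resource-group>", "<rp-collection>", "<vm-restore-point>", "<disk-restore-point>", grant, nil)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("read-only SAS:", *resp.AccessSAS)

	// Revoke as soon as the export finishes; revocation is itself an LRO.
	revoke, err := client.BeginRevokeAccess(ctx, "<resource-group>", "<rp-collection>", "<vm-restore-point>", "<disk-restore-point>", nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := revoke.PollUntilDone(ctx, nil); err != nil {
		log.Fatal(err)
	}
}
```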
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskRestorePointClient) revokeAccess(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, options *DiskRestorePointClientBeginRevokeAccessOptions) (*http.Response, error) { var err error const operationName = "DiskRestorePointClient.BeginRevokeAccess" @@ -360,7 +360,7 @@ func (client *DiskRestorePointClient) revokeAccessCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/disks_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/disks_client.go index 854a8ffd6..a0ffa752b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/disks_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/disks_client.go @@ -47,7 +47,7 @@ func NewDisksClient(subscriptionID string, credential azcore.TokenCredential, op // BeginCreateOrUpdate - Creates or updates a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -75,7 +75,7 @@ func (client *DisksClient) BeginCreateOrUpdate(ctx context.Context, resourceGrou // CreateOrUpdate - Creates or updates a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) createOrUpdate(ctx context.Context, resourceGroupName string, diskName string, disk Disk, options *DisksClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginCreateOrUpdate" @@ -117,7 +117,7 @@ func (client *DisksClient) createOrUpdateCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, disk); err != nil { @@ -129,7 +129,7 @@ func (client *DisksClient) createOrUpdateCreateRequest(ctx context.Context, reso // BeginDelete - Deletes a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -155,7 +155,7 @@ func (client *DisksClient) BeginDelete(ctx context.Context, resourceGroupName st // Delete - Deletes a disk. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) deleteOperation(ctx context.Context, resourceGroupName string, diskName string, options *DisksClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginDelete" @@ -197,7 +197,7 @@ func (client *DisksClient) deleteCreateRequest(ctx context.Context, resourceGrou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() return req, nil } @@ -205,7 +205,7 @@ func (client *DisksClient) deleteCreateRequest(ctx context.Context, resourceGrou // Get - Gets information about a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -253,7 +253,7 @@ func (client *DisksClient) getCreateRequest(ctx context.Context, resourceGroupNa return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -271,7 +271,7 @@ func (client *DisksClient) getHandleResponse(resp *http.Response) (DisksClientGe // BeginGrantAccess - Grants access to a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -299,7 +299,7 @@ func (client *DisksClient) BeginGrantAccess(ctx context.Context, resourceGroupNa // GrantAccess - Grants access to a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) grantAccess(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData, options *DisksClientBeginGrantAccessOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginGrantAccess" @@ -341,7 +341,7 @@ func (client *DisksClient) grantAccessCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, grantAccessData); err != nil { @@ -352,7 +352,7 @@ func (client *DisksClient) grantAccessCreateRequest(ctx context.Context, resourc // NewListPager - Lists all the disks under a subscription. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - options - DisksClientListOptions contains the optional parameters for the DisksClient.NewListPager method. 
func (client *DisksClient) NewListPager(options *DisksClientListOptions) *runtime.Pager[DisksClientListResponse] { return runtime.NewPager(runtime.PagingHandler[DisksClientListResponse]{ @@ -389,7 +389,7 @@ func (client *DisksClient) listCreateRequest(ctx context.Context, options *Disks return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -406,7 +406,7 @@ func (client *DisksClient) listHandleResponse(resp *http.Response) (DisksClientL // NewListByResourceGroupPager - Lists all the disks under a resource group. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - options - DisksClientListByResourceGroupOptions contains the optional parameters for the DisksClient.NewListByResourceGroupPager // method. @@ -449,7 +449,7 @@ func (client *DisksClient) listByResourceGroupCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -467,7 +467,7 @@ func (client *DisksClient) listByResourceGroupHandleResponse(resp *http.Response // BeginRevokeAccess - Revokes access to a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -494,7 +494,7 @@ func (client *DisksClient) BeginRevokeAccess(ctx context.Context, resourceGroupN // RevokeAccess - Revokes access to a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) revokeAccess(ctx context.Context, resourceGroupName string, diskName string, options *DisksClientBeginRevokeAccessOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginRevokeAccess" @@ -536,7 +536,7 @@ func (client *DisksClient) revokeAccessCreateRequest(ctx context.Context, resour return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() return req, nil } @@ -544,7 +544,7 @@ func (client *DisksClient) revokeAccessCreateRequest(ctx context.Context, resour // BeginUpdate - Updates (patches) a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -571,7 +571,7 @@ func (client *DisksClient) BeginUpdate(ctx context.Context, resourceGroupName st // Update - Updates (patches) a disk. // If the operation fails it returns an *azcore.ResponseError type. 
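NewListPager and NewListByResourceGroupPager keep the usual runtime.Pager shape: More reports whether another page (nextLink) remains, and NextPage fetches it. A sketch with a placeholder subscription ID:

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewDisksClient("my-sub", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	pager := client.NewListPager(nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background()) // follows nextLink until exhausted
		if err != nil {
			log.Fatal(err)
		}
		for _, d := range page.Value {
			log.Println(*d.Name)
		}
	}
}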
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) update(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate, options *DisksClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginUpdate" @@ -613,7 +613,7 @@ func (client *DisksClient) updateCreateRequest(ctx context.Context, resourceGrou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, disk); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go index 8fac494c7..f24e730a6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go @@ -47,7 +47,7 @@ func NewGalleriesClient(subscriptionID string, credential azcore.TokenCredential // BeginCreateOrUpdate - Create or update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. The allowed characters are alphabets and numbers with dots and periods // allowed in the middle. The maximum length is 80 characters. @@ -74,7 +74,7 @@ func (client *GalleriesClient) BeginCreateOrUpdate(ctx context.Context, resource // CreateOrUpdate - Create or update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleriesClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, gallery Gallery, options *GalleriesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleriesClient.BeginCreateOrUpdate" @@ -116,7 +116,7 @@ func (client *GalleriesClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, gallery); err != nil { @@ -128,7 +128,7 @@ func (client *GalleriesClient) createOrUpdateCreateRequest(ctx context.Context, // BeginDelete - Delete a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery to be deleted. // - options - GalleriesClientBeginDeleteOptions contains the optional parameters for the GalleriesClient.BeginDelete method. @@ -152,7 +152,7 @@ func (client *GalleriesClient) BeginDelete(ctx context.Context, resourceGroupNam // Delete - Delete a Shared Image Gallery. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleriesClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, options *GalleriesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleriesClient.BeginDelete" @@ -194,7 +194,7 @@ func (client *GalleriesClient) deleteCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -203,7 +203,7 @@ func (client *GalleriesClient) deleteCreateRequest(ctx context.Context, resource // Get - Retrieves information about a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. // - options - GalleriesClientGetOptions contains the optional parameters for the GalleriesClient.Get method. @@ -249,13 +249,13 @@ func (client *GalleriesClient) getCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") - if options != nil && options.Select != nil { - reqQP.Set("$select", string(*options.Select)) - } if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + if options != nil && options.Select != nil { + reqQP.Set("$select", string(*options.Select)) + } + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -272,7 +272,7 @@ func (client *GalleriesClient) getHandleResponse(resp *http.Response) (Galleries // NewListPager - List galleries under a subscription. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - options - GalleriesClientListOptions contains the optional parameters for the GalleriesClient.NewListPager method. func (client *GalleriesClient) NewListPager(options *GalleriesClientListOptions) *runtime.Pager[GalleriesClientListResponse] { return runtime.NewPager(runtime.PagingHandler[GalleriesClientListResponse]{ @@ -309,7 +309,7 @@ func (client *GalleriesClient) listCreateRequest(ctx context.Context, options *G return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -326,7 +326,7 @@ func (client *GalleriesClient) listHandleResponse(resp *http.Response) (Gallerie // NewListByResourceGroupPager - List galleries under a resource group. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - options - GalleriesClientListByResourceGroupOptions contains the optional parameters for the GalleriesClient.NewListByResourceGroupPager // method. 
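Besides the version pin, the only change in this file is the reordering inside getCreateRequest, where api-version is now set after the optional $expand and $select parameters. The order of the Set calls is cosmetic, since url.Values.Encode serializes keys in sorted order:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	a := url.Values{}
	a.Set("api-version", "2023-07-03")
	a.Set("$select", "Permissions")

	b := url.Values{}
	b.Set("$select", "Permissions")
	b.Set("api-version", "2023-07-03")

	// Encode sorts keys, so both insertion orders serialize identically.
	fmt.Println(a.Encode() == b.Encode()) // true
}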
@@ -369,7 +369,7 @@ func (client *GalleriesClient) listByResourceGroupCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -387,7 +387,7 @@ func (client *GalleriesClient) listByResourceGroupHandleResponse(resp *http.Resp // BeginUpdate - Update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. The allowed characters are alphabets and numbers with dots and periods // allowed in the middle. The maximum length is 80 characters. @@ -413,7 +413,7 @@ func (client *GalleriesClient) BeginUpdate(ctx context.Context, resourceGroupNam // Update - Update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleriesClient) update(ctx context.Context, resourceGroupName string, galleryName string, gallery GalleryUpdate, options *GalleriesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleriesClient.BeginUpdate" @@ -455,7 +455,7 @@ func (client *GalleriesClient) updateCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, gallery); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go index 1ebec623b..cd5d6f3ec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go @@ -47,7 +47,7 @@ func NewGalleryApplicationsClient(subscriptionID string, credential azcore.Token // BeginCreateOrUpdate - Create or update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition is to be created. // - galleryApplicationName - The name of the gallery Application Definition to be created or updated. The allowed characters @@ -76,7 +76,7 @@ func (client *GalleryApplicationsClient) BeginCreateOrUpdate(ctx context.Context // CreateOrUpdate - Create or update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplication, options *GalleryApplicationsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationsClient.BeginCreateOrUpdate" @@ -122,7 +122,7 @@ func (client *GalleryApplicationsClient) createOrUpdateCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplication); err != nil { @@ -134,7 +134,7 @@ func (client *GalleryApplicationsClient) createOrUpdateCreateRequest(ctx context // BeginDelete - Delete a gallery Application. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition is to be deleted. // - galleryApplicationName - The name of the gallery Application Definition to be deleted. @@ -160,7 +160,7 @@ func (client *GalleryApplicationsClient) BeginDelete(ctx context.Context, resour // Delete - Delete a gallery Application. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, options *GalleryApplicationsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationsClient.BeginDelete" @@ -206,7 +206,7 @@ func (client *GalleryApplicationsClient) deleteCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -215,7 +215,7 @@ func (client *GalleryApplicationsClient) deleteCreateRequest(ctx context.Context // Get - Retrieves information about a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery from which the Application Definitions are to be retrieved. // - galleryApplicationName - The name of the gallery Application Definition to be retrieved. @@ -266,7 +266,7 @@ func (client *GalleryApplicationsClient) getCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -283,7 +283,7 @@ func (client *GalleryApplicationsClient) getHandleResponse(resp *http.Response) // NewListByGalleryPager - List gallery Application Definitions in a gallery. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery from which Application Definitions are to be listed. // - options - GalleryApplicationsClientListByGalleryOptions contains the optional parameters for the GalleryApplicationsClient.NewListByGalleryPager @@ -331,7 +331,7 @@ func (client *GalleryApplicationsClient) listByGalleryCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -349,7 +349,7 @@ func (client *GalleryApplicationsClient) listByGalleryHandleResponse(resp *http. // BeginUpdate - Update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition is to be updated. // - galleryApplicationName - The name of the gallery Application Definition to be updated. The allowed characters are alphabets @@ -378,7 +378,7 @@ func (client *GalleryApplicationsClient) BeginUpdate(ctx context.Context, resour // Update - Update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplicationUpdate, options *GalleryApplicationsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationsClient.BeginUpdate" @@ -424,7 +424,7 @@ func (client *GalleryApplicationsClient) updateCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplication); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go index 3d885cb93..06571af3d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go @@ -47,7 +47,7 @@ func NewGalleryApplicationVersionsClient(subscriptionID string, credential azcor // BeginCreateOrUpdate - Create or update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. 
// - galleryApplicationName - The name of the gallery Application Definition in which the Application Version is to be created. @@ -77,7 +77,7 @@ func (client *GalleryApplicationVersionsClient) BeginCreateOrUpdate(ctx context. // CreateOrUpdate - Create or update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersion, options *GalleryApplicationVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationVersionsClient.BeginCreateOrUpdate" @@ -127,7 +127,7 @@ func (client *GalleryApplicationVersionsClient) createOrUpdateCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplicationVersion); err != nil { @@ -139,7 +139,7 @@ func (client *GalleryApplicationVersionsClient) createOrUpdateCreateRequest(ctx // BeginDelete - Delete a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the gallery Application Definition in which the Application Version resides. @@ -166,7 +166,7 @@ func (client *GalleryApplicationVersionsClient) BeginDelete(ctx context.Context, // Delete - Delete a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, options *GalleryApplicationVersionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationVersionsClient.BeginDelete" @@ -216,7 +216,7 @@ func (client *GalleryApplicationVersionsClient) deleteCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -225,7 +225,7 @@ func (client *GalleryApplicationVersionsClient) deleteCreateRequest(ctx context. // Get - Retrieves information about a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the gallery Application Definition in which the Application Version resides. 
@@ -285,7 +285,7 @@ func (client *GalleryApplicationVersionsClient) getCreateRequest(ctx context.Con if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -302,7 +302,7 @@ func (client *GalleryApplicationVersionsClient) getHandleResponse(resp *http.Res // NewListByGalleryApplicationPager - List gallery Application Versions in a gallery Application Definition. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the Shared Application Gallery Application Definition from which the Application Versions @@ -356,7 +356,7 @@ func (client *GalleryApplicationVersionsClient) listByGalleryApplicationCreateRe return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -374,7 +374,7 @@ func (client *GalleryApplicationVersionsClient) listByGalleryApplicationHandleRe // BeginUpdate - Update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the gallery Application Definition in which the Application Version is to be updated. @@ -404,7 +404,7 @@ func (client *GalleryApplicationVersionsClient) BeginUpdate(ctx context.Context, // Update - Update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationVersionsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersionUpdate, options *GalleryApplicationVersionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationVersionsClient.BeginUpdate" @@ -454,7 +454,7 @@ func (client *GalleryApplicationVersionsClient) updateCreateRequest(ctx context. 
return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplicationVersion); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go index 6a684b3ed..8be1a5784 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go @@ -47,7 +47,7 @@ func NewGalleryImagesClient(subscriptionID string, credential azcore.TokenCreden // BeginCreateOrUpdate - Create or update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition is to be created. // - galleryImageName - The name of the gallery image definition to be created or updated. The allowed characters are alphabets @@ -76,7 +76,7 @@ func (client *GalleryImagesClient) BeginCreateOrUpdate(ctx context.Context, reso // CreateOrUpdate - Create or update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImagesClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage, options *GalleryImagesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImagesClient.BeginCreateOrUpdate" @@ -122,7 +122,7 @@ func (client *GalleryImagesClient) createOrUpdateCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImage); err != nil { @@ -134,7 +134,7 @@ func (client *GalleryImagesClient) createOrUpdateCreateRequest(ctx context.Conte // BeginDelete - Delete a gallery image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition is to be deleted. // - galleryImageName - The name of the gallery image definition to be deleted. @@ -160,7 +160,7 @@ func (client *GalleryImagesClient) BeginDelete(ctx context.Context, resourceGrou // Delete - Delete a gallery image. // If the operation fails it returns an *azcore.ResponseError type. 
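Creating a gallery image definition is unchanged apart from the 2023-07-03 pin. A sketch assuming the v5 model shapes, where OSType, OSState, and Identifier are the required properties; all resource names are placeholders:

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	ctx := context.Background()
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewGalleryImagesClient("my-sub", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	poller, err := client.BeginCreateOrUpdate(ctx, "my-rg", "my-gallery", "my-image-def",
		armcompute.GalleryImage{
			Location: to.Ptr("eastus"),
			Properties: &armcompute.GalleryImageProperties{
				OSType:  to.Ptr(armcompute.OperatingSystemTypesLinux),
				OSState: to.Ptr(armcompute.OperatingSystemStateTypesGeneralized),
				Identifier: &armcompute.GalleryImageIdentifier{
					Publisher: to.Ptr("my-publisher"),
					Offer:     to.Ptr("my-offer"),
					SKU:       to.Ptr("my-sku"),
				},
			},
		}, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := poller.PollUntilDone(ctx, nil); err != nil {
		log.Fatal(err)
	}
}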
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImagesClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, options *GalleryImagesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryImagesClient.BeginDelete" @@ -206,7 +206,7 @@ func (client *GalleryImagesClient) deleteCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -215,7 +215,7 @@ func (client *GalleryImagesClient) deleteCreateRequest(ctx context.Context, reso // Get - Retrieves information about a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery from which the Image Definitions are to be retrieved. // - galleryImageName - The name of the gallery image definition to be retrieved. @@ -266,7 +266,7 @@ func (client *GalleryImagesClient) getCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -283,7 +283,7 @@ func (client *GalleryImagesClient) getHandleResponse(resp *http.Response) (Galle // NewListByGalleryPager - List gallery image definitions in a gallery. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery from which Image Definitions are to be listed. // - options - GalleryImagesClientListByGalleryOptions contains the optional parameters for the GalleryImagesClient.NewListByGalleryPager @@ -331,7 +331,7 @@ func (client *GalleryImagesClient) listByGalleryCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -349,7 +349,7 @@ func (client *GalleryImagesClient) listByGalleryHandleResponse(resp *http.Respon // BeginUpdate - Update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition is to be updated. // - galleryImageName - The name of the gallery image definition to be updated. The allowed characters are alphabets and numbers @@ -377,7 +377,7 @@ func (client *GalleryImagesClient) BeginUpdate(ctx context.Context, resourceGrou // Update - Update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImagesClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImageUpdate, options *GalleryImagesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImagesClient.BeginUpdate" @@ -423,7 +423,7 @@ func (client *GalleryImagesClient) updateCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImage); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go index 05f3cf352..b16e42ed7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go @@ -47,7 +47,7 @@ func NewGalleryImageVersionsClient(subscriptionID string, credential azcore.Toke // BeginCreateOrUpdate - Create or update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the gallery image definition in which the Image Version is to be created. @@ -77,7 +77,7 @@ func (client *GalleryImageVersionsClient) BeginCreateOrUpdate(ctx context.Contex // CreateOrUpdate - Create or update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImageVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion, options *GalleryImageVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImageVersionsClient.BeginCreateOrUpdate" @@ -127,7 +127,7 @@ func (client *GalleryImageVersionsClient) createOrUpdateCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImageVersion); err != nil { @@ -139,7 +139,7 @@ func (client *GalleryImageVersionsClient) createOrUpdateCreateRequest(ctx contex // BeginDelete - Delete a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. 
// - galleryImageName - The name of the gallery image definition in which the Image Version resides. @@ -166,7 +166,7 @@ func (client *GalleryImageVersionsClient) BeginDelete(ctx context.Context, resou // Delete - Delete a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImageVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, options *GalleryImageVersionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryImageVersionsClient.BeginDelete" @@ -216,7 +216,7 @@ func (client *GalleryImageVersionsClient) deleteCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -225,7 +225,7 @@ func (client *GalleryImageVersionsClient) deleteCreateRequest(ctx context.Contex // Get - Retrieves information about a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the gallery image definition in which the Image Version resides. @@ -285,7 +285,7 @@ func (client *GalleryImageVersionsClient) getCreateRequest(ctx context.Context, if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -302,7 +302,7 @@ func (client *GalleryImageVersionsClient) getHandleResponse(resp *http.Response) // NewListByGalleryImagePager - List gallery image versions in a gallery image definition. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the Shared Image Gallery Image Definition from which the Image Versions are to be listed. @@ -355,7 +355,7 @@ func (client *GalleryImageVersionsClient) listByGalleryImageCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -373,7 +373,7 @@ func (client *GalleryImageVersionsClient) listByGalleryImageHandleResponse(resp // BeginUpdate - Update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the gallery image definition in which the Image Version is to be updated. 
@@ -403,7 +403,7 @@ func (client *GalleryImageVersionsClient) BeginUpdate(ctx context.Context, resou // Update - Update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImageVersionsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersionUpdate, options *GalleryImageVersionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImageVersionsClient.BeginUpdate" @@ -453,7 +453,7 @@ func (client *GalleryImageVersionsClient) updateCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImageVersion); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go index 28d024e97..0ccc5ce9f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go @@ -47,7 +47,7 @@ func NewGallerySharingProfileClient(subscriptionID string, credential azcore.Tok // BeginUpdate - Update sharing profile of a gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. // - sharingUpdate - Parameters supplied to the update gallery sharing profile. @@ -73,7 +73,7 @@ func (client *GallerySharingProfileClient) BeginUpdate(ctx context.Context, reso // Update - Update sharing profile of a gallery. // If the operation fails it returns an *azcore.ResponseError type. 
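GallerySharingProfileClient keeps the same begin-and-poll shape. A sketch of adding a subscription to a gallery's sharing profile; the SharingUpdate field names and constants are assumptions from the v5 models, since they are not shown in this diff:

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	ctx := context.Background()
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewGallerySharingProfileClient("my-sub", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	poller, err := client.BeginUpdate(ctx, "my-rg", "my-gallery", armcompute.SharingUpdate{
		// Field names and constants below are assumptions from the v5 models.
		OperationType: to.Ptr(armcompute.SharingUpdateOperationTypesAdd),
		Groups: []*armcompute.SharingProfileGroup{{
			Type: to.Ptr(armcompute.SharingProfileGroupTypesSubscriptions),
			IDs:  []*string{to.Ptr("target-subscription-id")},
		}},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := poller.PollUntilDone(ctx, nil); err != nil {
		log.Fatal(err)
	}
}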
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GallerySharingProfileClient) update(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate SharingUpdate, options *GallerySharingProfileClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GallerySharingProfileClient.BeginUpdate" @@ -115,7 +115,7 @@ func (client *GallerySharingProfileClient) updateCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, sharingUpdate); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/images_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/images_client.go index 5c5ab8a93..5fecdbf1d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/images_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/images_client.go @@ -47,7 +47,7 @@ func NewImagesClient(subscriptionID string, credential azcore.TokenCredential, o // BeginCreateOrUpdate - Create or update an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - imageName - The name of the image. // - parameters - Parameters supplied to the Create Image operation. @@ -73,7 +73,7 @@ func (client *ImagesClient) BeginCreateOrUpdate(ctx context.Context, resourceGro // CreateOrUpdate - Create or update an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *ImagesClient) createOrUpdate(ctx context.Context, resourceGroupName string, imageName string, parameters Image, options *ImagesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "ImagesClient.BeginCreateOrUpdate" @@ -115,7 +115,7 @@ func (client *ImagesClient) createOrUpdateCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -127,7 +127,7 @@ func (client *ImagesClient) createOrUpdateCreateRequest(ctx context.Context, res // BeginDelete - Deletes an Image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - imageName - The name of the image. // - options - ImagesClientBeginDeleteOptions contains the optional parameters for the ImagesClient.BeginDelete method. @@ -151,7 +151,7 @@ func (client *ImagesClient) BeginDelete(ctx context.Context, resourceGroupName s // Delete - Deletes an Image. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *ImagesClient) deleteOperation(ctx context.Context, resourceGroupName string, imageName string, options *ImagesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "ImagesClient.BeginDelete" @@ -193,7 +193,7 @@ func (client *ImagesClient) deleteCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -202,7 +202,7 @@ func (client *ImagesClient) deleteCreateRequest(ctx context.Context, resourceGro // Get - Gets an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - imageName - The name of the image. // - options - ImagesClientGetOptions contains the optional parameters for the ImagesClient.Get method. @@ -251,7 +251,7 @@ func (client *ImagesClient) getCreateRequest(ctx context.Context, resourceGroupN if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -269,7 +269,7 @@ func (client *ImagesClient) getHandleResponse(resp *http.Response) (ImagesClient // NewListPager - Gets the list of Images in the subscription. Use nextLink property in the response to get the next page // of Images. Do this till nextLink is null to fetch all the Images. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - ImagesClientListOptions contains the optional parameters for the ImagesClient.NewListPager method. func (client *ImagesClient) NewListPager(options *ImagesClientListOptions) *runtime.Pager[ImagesClientListResponse] { return runtime.NewPager(runtime.PagingHandler[ImagesClientListResponse]{ @@ -306,7 +306,7 @@ func (client *ImagesClient) listCreateRequest(ctx context.Context, options *Imag return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -324,7 +324,7 @@ func (client *ImagesClient) listHandleResponse(resp *http.Response) (ImagesClien // NewListByResourceGroupPager - Gets the list of images under a resource group. Use nextLink property in the response to // get the next page of Images. Do this till nextLink is null to fetch all the Images. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - ImagesClientListByResourceGroupOptions contains the optional parameters for the ImagesClient.NewListByResourceGroupPager // method. 
@@ -367,7 +367,7 @@ func (client *ImagesClient) listByResourceGroupCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -385,7 +385,7 @@ func (client *ImagesClient) listByResourceGroupHandleResponse(resp *http.Respons // BeginUpdate - Update an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - imageName - The name of the image. // - parameters - Parameters supplied to the Update Image operation. @@ -410,7 +410,7 @@ func (client *ImagesClient) BeginUpdate(ctx context.Context, resourceGroupName s // Update - Update an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *ImagesClient) update(ctx context.Context, resourceGroupName string, imageName string, parameters ImageUpdate, options *ImagesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "ImagesClient.BeginUpdate" @@ -452,7 +452,7 @@ func (client *ImagesClient) updateCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/loganalytics_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/loganalytics_client.go index a48766ffd..a3b77f828 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/loganalytics_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/loganalytics_client.go @@ -48,7 +48,7 @@ func NewLogAnalyticsClient(subscriptionID string, credential azcore.TokenCredent // to show throttling activities. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location upon which virtual-machine-sizes is queried. // - parameters - Parameters supplied to the LogAnalytics getRequestRateByInterval Api. // - options - LogAnalyticsClientBeginExportRequestRateByIntervalOptions contains the optional parameters for the LogAnalyticsClient.BeginExportRequestRateByInterval @@ -75,7 +75,7 @@ func (client *LogAnalyticsClient) BeginExportRequestRateByInterval(ctx context.C // show throttling activities. // If the operation fails it returns an *azcore.ResponseError type. 
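The LogAnalytics exports follow the same long-running pattern under the 2024-03-01 pin. A sketch of requesting a request-rate export; the RequestRateByIntervalInput field names and the IntervalInMins constant are assumptions from the v5 models, and the SAS URI is a placeholder:

package main

import (
	"context"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	ctx := context.Background()
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewLogAnalyticsClient("my-sub", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	now := time.Now().UTC()
	poller, err := client.BeginExportRequestRateByInterval(ctx, "eastus", armcompute.RequestRateByIntervalInput{
		// Field names are assumptions from the v5 models; the SAS URI is a placeholder.
		BlobContainerSasURI: to.Ptr("https://myaccount.blob.core.windows.net/logs?sv=placeholder"),
		FromTime:            to.Ptr(now.Add(-24 * time.Hour)),
		ToTime:              to.Ptr(now),
		IntervalLength:      to.Ptr(armcompute.IntervalInMinsFiveMins),
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := poller.PollUntilDone(ctx, nil); err != nil {
		log.Fatal(err)
	}
}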
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *LogAnalyticsClient) exportRequestRateByInterval(ctx context.Context, location string, parameters RequestRateByIntervalInput, options *LogAnalyticsClientBeginExportRequestRateByIntervalOptions) (*http.Response, error) { var err error const operationName = "LogAnalyticsClient.BeginExportRequestRateByInterval" @@ -113,7 +113,7 @@ func (client *LogAnalyticsClient) exportRequestRateByIntervalCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -126,7 +126,7 @@ func (client *LogAnalyticsClient) exportRequestRateByIntervalCreateRequest(ctx c // window. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location upon which virtual-machine-sizes is queried. // - parameters - Parameters supplied to the LogAnalytics getThrottledRequests Api. // - options - LogAnalyticsClientBeginExportThrottledRequestsOptions contains the optional parameters for the LogAnalyticsClient.BeginExportThrottledRequests @@ -152,7 +152,7 @@ func (client *LogAnalyticsClient) BeginExportThrottledRequests(ctx context.Conte // ExportThrottledRequests - Export logs that show total throttled Api requests for this subscription in the given time window. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *LogAnalyticsClient) exportThrottledRequests(ctx context.Context, location string, parameters ThrottledRequestsInput, options *LogAnalyticsClientBeginExportThrottledRequestsOptions) (*http.Response, error) { var err error const operationName = "LogAnalyticsClient.BeginExportThrottledRequests" @@ -190,7 +190,7 @@ func (client *LogAnalyticsClient) exportThrottledRequestsCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go index 5b5a3197b..1fad0acd2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go @@ -368,7 +368,7 @@ type CapacityReservationGroupInstanceView struct { // READ-ONLY; List of instance view of the capacity reservations under the capacity reservation group. CapacityReservations []*CapacityReservationInstanceViewWithName - // READ-ONLY; List of the subscriptions that the capacity reservation group is shared with. Note: Minimum api-version: 2023-09-01. + // READ-ONLY; List of the subscriptions that the capacity reservation group is shared with. Note: Minimum api-version: 2024-03-01. 
// Please refer to https://aka.ms/computereservationsharing for more details. SharedSubscriptionIDs []*SubResourceReadOnly } @@ -388,7 +388,7 @@ type CapacityReservationGroupProperties struct { // Specifies the settings to enable sharing across subscriptions for the capacity reservation group resource. Pls. keep in // mind the capacity reservation group resource generally can be shared across // subscriptions belonging to a single azure AAD tenant or cross AAD tenant if there is a trust relationship established between - // the AAD tenants. Note: Minimum api-version: 2023-09-01. Please refer to + // the AAD tenants. Note: Minimum api-version: 2024-03-01. Please refer to // https://aka.ms/computereservationsharing for more details. SharingProfile *ResourceSharingProfile @@ -1037,6 +1037,9 @@ type CreationData struct { // disabled after enabled. PerformancePlus *bool + // If this field is set on a snapshot and createOption is CopyStart, the snapshot will be copied at a quicker speed. + ProvisionedBandwidthCopySpeed *ProvisionedBandwidthCopyOption + // If createOption is ImportSecure, this is the URI of a blob to be imported into VM guest state. SecurityDataURI *string @@ -1061,11 +1064,13 @@ type CreationData struct { // DataDisk - Describes a data disk. type DataDisk struct { - // REQUIRED; Specifies how the virtual machine should be created. Possible values are: Attach. This value is used when you - // are using a specialized disk to create the virtual machine. FromImage. This value is used - // when you are using an image to create the virtual machine. If you are using a platform image, you should also use the imageReference - // element described above. If you are using a marketplace image, you - // should also use the plan element previously described. + // REQUIRED; Specifies how the virtual machine disk should be created. Possible values are Attach: This value is used when + // you are using a specialized disk to create the virtual machine. FromImage: This value is + // used when you are using an image to create the virtual machine data disk. If you are using a platform image, you should + // also use the imageReference element described above. If you are using a + // marketplace image, you should also use the plan element previously described. Empty: This value is used when creating an + // empty data disk. Copy: This value is used to create a data disk from a snapshot + // or another disk. Restore: This value is used to create a data disk from a disk restore point. CreateOption *DiskCreateOptionTypes // REQUIRED; Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and @@ -1107,6 +1112,9 @@ type DataDisk struct { // The disk name. Name *string + // The source resource identifier. It can be a snapshot, or disk restore point from which to create a disk. + SourceResource *APIEntityReference + // Specifies whether the data disk is in process of detachment from the VirtualMachine/VirtualMachineScaleset ToBeDetached *bool @@ -1150,10 +1158,25 @@ type DataDisksToAttach struct { // REQUIRED; ID of the managed data disk. DiskID *string + // Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The defaulting behavior is: None for + // Standard storage. ReadOnly for Premium storage. + Caching *CachingTypes + + // Specifies whether data disk should be deleted or detached upon VM deletion. Possible values are: Delete. If this value + // is used, the data disk is deleted when VM is deleted. Detach. 
If this value is + // used, the data disk is retained after VM is deleted. The default value is set to Detach. + DeleteOption *DiskDeleteOptionTypes + + // Specifies the customer managed disk encryption set resource id for the managed disk. + DiskEncryptionSet *DiskEncryptionSetParameters + // The logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be // unique for each data disk attached to a VM. If not specified, lun would be auto // assigned. Lun *int32 + + // Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool } // DataDisksToDetach - Describes the data disk to be detached. @@ -1405,11 +1428,12 @@ type DiffDiskSettings struct { // Specifies the ephemeral disk settings for operating system disk. Option *DiffDiskOptions - // Specifies the ephemeral disk placement for operating system disk. Possible values are: CacheDisk, ResourceDisk. The defaulting - // behavior is: CacheDisk if one is configured for the VM size otherwise - // ResourceDisk is used. Refer to the VM size documentation for Windows VM at https://docs.microsoft.com/azure/virtual-machines/windows/sizes + // Specifies the ephemeral disk placement for operating system disk. Possible values are: CacheDisk, ResourceDisk, NvmeDisk. + // The defaulting behavior is: CacheDisk if one is configured for the VM size + // otherwise ResourceDisk or NvmeDisk is used. Refer to the VM size documentation for Windows VM at https://docs.microsoft.com/azure/virtual-machines/windows/sizes // and Linux VM at - // https://docs.microsoft.com/azure/virtual-machines/linux/sizes to check which VM sizes exposes a cache disk. + // https://docs.microsoft.com/azure/virtual-machines/linux/sizes to check which VM sizes exposes a cache disk. Minimum api-version + // for NvmeDisk: 2024-03-01. Placement *DiffDiskPlacement } @@ -2072,6 +2096,12 @@ type EncryptionSettingsElement struct { KeyEncryptionKey *KeyVaultAndKeyReference } +// EventGridAndResourceGraph - Specifies eventGridAndResourceGraph related Scheduled Event related configurations. +type EventGridAndResourceGraph struct { + // Specifies if event grid and resource graph is enabled for Scheduled event related configurations. + Enable *bool +} + // ExtendedLocation - The complex type of the extended location. type ExtendedLocation struct { // The name of the extended location. @@ -2382,13 +2412,17 @@ type GalleryArtifactVersionFullSource struct { // The resource Id of the source Community Gallery Image. Only required when using Community Gallery Image as a source. CommunityGalleryImageID *string - // The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource. + // The id of the gallery artifact version source. ID *string + + // The resource Id of the source virtual machine. Only required when capturing a virtual machine to source this Gallery Image + // Version. + VirtualMachineID *string } // GalleryArtifactVersionSource - The gallery artifact version source. type GalleryArtifactVersionSource struct { - // The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource. + // The id of the gallery artifact version source. ID *string } @@ -2423,7 +2457,7 @@ type GalleryDiskImage struct { // GalleryDiskImageSource - The source for the disk image. type GalleryDiskImageSource struct { - // The id of the gallery artifact version source. 
Can specify a disk uri, snapshot uri, user image or storage account resource. + // The id of the gallery artifact version source. ID *string // The Storage Account Id that contains the vhd blob being used as a source for this artifact version. @@ -3442,11 +3476,11 @@ type NetworkProfile struct { // disks, see About disks and VHDs for Azure virtual machines // [https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview]. type OSDisk struct { - // REQUIRED; Specifies how the virtual machine should be created. Possible values are: Attach. This value is used when you - // are using a specialized disk to create the virtual machine. FromImage. This value is used - // when you are using an image to create the virtual machine. If you are using a platform image, you should also use the imageReference - // element described above. If you are using a marketplace image, you - // should also use the plan element previously described. + // REQUIRED; Specifies how the virtual machine disk should be created. Possible values are Attach: This value is used when + // you are using a specialized disk to create the virtual machine. FromImage: This value is + // used when you are using an image to create the virtual machine. If you are using a platform image, you should also use + // the imageReference element described above. If you are using a marketplace image, + // you should also use the plan element previously described. CreateOption *DiskCreateOptionTypes // Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The defaulting behavior is: None for @@ -4067,7 +4101,7 @@ type ProximityPlacementGroupUpdate struct { Tags map[string]*string } -// ProxyAgentSettings - Specifies ProxyAgent settings while creating the virtual machine. Minimum api-version: 2023-09-01. +// ProxyAgentSettings - Specifies ProxyAgent settings while creating the virtual machine. Minimum api-version: 2024-03-01. type ProxyAgentSettings struct { // Specifies whether ProxyAgent feature should be enabled on the virtual machine or virtual machine scale set. Enabled *bool @@ -4424,7 +4458,7 @@ type ResourceSKUsResult struct { type ResourceSharingProfile struct { // Specifies an array of subscription resource IDs that capacity reservation group is shared with. Note: Minimum api-version: - // 2023-09-01. Please refer to https://aka.ms/computereservationsharing for more + // 2024-03-01. Please refer to https://aka.ms/computereservationsharing for more // details. SubscriptionIDs []*SubResource } @@ -5114,6 +5148,24 @@ type ScaleInPolicy struct { Rules []*VirtualMachineScaleSetScaleInRules } +type ScheduledEventsAdditionalPublishingTargets struct { + // The configuration parameters used while creating eventGridAndResourceGraph Scheduled Event setting. + EventGridAndResourceGraph *EventGridAndResourceGraph +} + +// ScheduledEventsPolicy - Specifies Redeploy, Reboot and ScheduledEventsAdditionalPublishingTargets Scheduled Event related +// configurations. +type ScheduledEventsPolicy struct { + // The configuration parameters used while publishing scheduledEventsAdditionalPublishingTargets. + ScheduledEventsAdditionalPublishingTargets *ScheduledEventsAdditionalPublishingTargets + + // The configuration parameters used while creating userInitiatedReboot scheduled event setting creation. + UserInitiatedReboot *UserInitiatedReboot + + // The configuration parameters used while creating userInitiatedRedeploy scheduled event setting creation. 
+ UserInitiatedRedeploy *UserInitiatedRedeploy +} + type ScheduledEventsProfile struct { // Specifies OS Image Scheduled Event related configurations. OSImageNotificationProfile *OSImageNotificationProfile @@ -5143,7 +5195,7 @@ type SecurityProfile struct { // Specifies the Managed Identity used by ADE to get access token for keyvault operations. EncryptionIdentity *EncryptionIdentity - // Specifies ProxyAgent settings while creating the virtual machine. Minimum api-version: 2023-09-01. + // Specifies ProxyAgent settings while creating the virtual machine. Minimum api-version: 2024-03-01. ProxyAgentSettings *ProxyAgentSettings // Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. The @@ -5935,6 +5987,18 @@ type UserAssignedIdentitiesValue struct { PrincipalID *string } +// UserInitiatedReboot - Specifies Reboot related Scheduled Event related configurations. +type UserInitiatedReboot struct { + // Specifies Reboot Scheduled Event related configurations. + AutomaticallyApprove *bool +} + +// UserInitiatedRedeploy - Specifies Redeploy related Scheduled Event related configurations. +type UserInitiatedRedeploy struct { + // Specifies Redeploy Scheduled Event related configurations. + AutomaticallyApprove *bool +} + // VMDiskSecurityProfile - Specifies the security profile settings for the managed disk. Note: It can only be set for Confidential // VMs. type VMDiskSecurityProfile struct { @@ -6779,6 +6843,10 @@ type VirtualMachineProperties struct { // 2018-04-01. ProximityPlacementGroup *SubResource + // Specifies Redeploy, Reboot and ScheduledEventsAdditionalPublishingTargets Scheduled Event related configurations for the + // virtual machine. + ScheduledEventsPolicy *ScheduledEventsPolicy + // Specifies Scheduled Event related configurations. ScheduledEventsProfile *ScheduledEventsProfile @@ -7589,6 +7657,9 @@ type VirtualMachineScaleSetProperties struct { // Specifies the policies applied when scaling in Virtual Machines in the Virtual Machine Scale Set. ScaleInPolicy *ScaleInPolicy + // The ScheduledEventsPolicy. + ScheduledEventsPolicy *ScheduledEventsPolicy + // When true this limits the scale set to a single placement group, of max size 100 virtual machines. NOTE: If singlePlacementGroup // is true, it may be modified to false. However, if singlePlacementGroup // is false, it may not be modified to true. @@ -7673,6 +7744,9 @@ type VirtualMachineScaleSetReimageParameters struct { // is reimaged to the existing version of OS Disk. ExactVersion *string + // Parameter to force update ephemeral OS disk for a virtual machine scale set VM + ForceUpdateOSDiskForEphemeral *bool + // The virtual machine scale set instance ids. Omitting the virtual machine scale set instance ids will result in the operation // being performed on all virtual machines in the virtual machine scale set. InstanceIDs []*string @@ -7866,6 +7940,9 @@ type VirtualMachineScaleSetUpdateOSDisk struct { // delete option for Ephemeral OS Disk. DeleteOption *DiskDeleteOptionTypes + // Specifies the ephemeral disk Settings for the operating system disk used by the virtual machine scale set. + DiffDiskSettings *DiffDiskSettings + // Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a // virtual machine image. 
// diskSizeGB is the number of bytes x 1024^3 for the disk and the value cannot be larger than 1023 @@ -8274,8 +8351,8 @@ type VirtualMachineScaleSetVMProfile struct { UserData *string // READ-ONLY; Specifies the time in which this VM profile for the Virtual Machine Scale Set was created. Minimum API version - // for this property is 2023-09-01. This value will be added to VMSS Flex VM tags when - // creating/updating the VMSS VM Profile with minimum api-version 2023-09-01. + // for this property is 2024-03-01. This value will be added to VMSS Flex VM tags when + // creating/updating the VMSS VM Profile with minimum api-version 2024-03-01. TimeCreated *time.Time } @@ -8372,6 +8449,9 @@ type VirtualMachineScaleSetVMReimageParameters struct { // is reimaged to the existing version of OS Disk. ExactVersion *string + // Parameter to force update ephemeral OS disk for a virtual machine scale set VM + ForceUpdateOSDiskForEphemeral *bool + // Specifies information required for reimaging the non-ephemeral OS disk. OSProfile *OSProfileProvisioningData diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go index 3c63478d0..1bdfcb442 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go @@ -2404,6 +2404,7 @@ func (c CreationData) MarshalJSON() ([]byte, error) { populate(objectMap, "imageReference", c.ImageReference) populate(objectMap, "logicalSectorSize", c.LogicalSectorSize) populate(objectMap, "performancePlus", c.PerformancePlus) + populate(objectMap, "provisionedBandwidthCopySpeed", c.ProvisionedBandwidthCopySpeed) populate(objectMap, "securityDataUri", c.SecurityDataURI) populate(objectMap, "sourceResourceId", c.SourceResourceID) populate(objectMap, "sourceUri", c.SourceURI) @@ -2440,6 +2441,9 @@ func (c *CreationData) UnmarshalJSON(data []byte) error { case "performancePlus": err = unpopulate(val, "PerformancePlus", &c.PerformancePlus) delete(rawMsg, key) + case "provisionedBandwidthCopySpeed": + err = unpopulate(val, "ProvisionedBandwidthCopySpeed", &c.ProvisionedBandwidthCopySpeed) + delete(rawMsg, key) case "securityDataUri": err = unpopulate(val, "SecurityDataURI", &c.SecurityDataURI) delete(rawMsg, key) @@ -2480,6 +2484,7 @@ func (d DataDisk) MarshalJSON() ([]byte, error) { populate(objectMap, "lun", d.Lun) populate(objectMap, "managedDisk", d.ManagedDisk) populate(objectMap, "name", d.Name) + populate(objectMap, "sourceResource", d.SourceResource) populate(objectMap, "toBeDetached", d.ToBeDetached) populate(objectMap, "vhd", d.Vhd) populate(objectMap, "writeAcceleratorEnabled", d.WriteAcceleratorEnabled) @@ -2528,6 +2533,9 @@ func (d *DataDisk) UnmarshalJSON(data []byte) error { case "name": err = unpopulate(val, "Name", &d.Name) delete(rawMsg, key) + case "sourceResource": + err = unpopulate(val, "SourceResource", &d.SourceResource) + delete(rawMsg, key) case "toBeDetached": err = unpopulate(val, "ToBeDetached", &d.ToBeDetached) delete(rawMsg, key) @@ -2606,8 +2614,12 @@ func (d *DataDiskImageEncryption) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type DataDisksToAttach. 
func (d DataDisksToAttach) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "caching", d.Caching) + populate(objectMap, "deleteOption", d.DeleteOption) + populate(objectMap, "diskEncryptionSet", d.DiskEncryptionSet) populate(objectMap, "diskId", d.DiskID) populate(objectMap, "lun", d.Lun) + populate(objectMap, "writeAcceleratorEnabled", d.WriteAcceleratorEnabled) return json.Marshal(objectMap) } @@ -2620,12 +2632,24 @@ func (d *DataDisksToAttach) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "caching": + err = unpopulate(val, "Caching", &d.Caching) + delete(rawMsg, key) + case "deleteOption": + err = unpopulate(val, "DeleteOption", &d.DeleteOption) + delete(rawMsg, key) + case "diskEncryptionSet": + err = unpopulate(val, "DiskEncryptionSet", &d.DiskEncryptionSet) + delete(rawMsg, key) case "diskId": err = unpopulate(val, "DiskID", &d.DiskID) delete(rawMsg, key) case "lun": err = unpopulate(val, "Lun", &d.Lun) delete(rawMsg, key) + case "writeAcceleratorEnabled": + err = unpopulate(val, "WriteAcceleratorEnabled", &d.WriteAcceleratorEnabled) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", d, err) @@ -4745,6 +4769,33 @@ func (e *EncryptionSettingsElement) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type EventGridAndResourceGraph. +func (e EventGridAndResourceGraph) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "enable", e.Enable) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EventGridAndResourceGraph. +func (e *EventGridAndResourceGraph) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "enable": + err = unpopulate(val, "Enable", &e.Enable) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ExtendedLocation. func (e ExtendedLocation) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -5488,6 +5539,7 @@ func (g GalleryArtifactVersionFullSource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "communityGalleryImageId", g.CommunityGalleryImageID) populate(objectMap, "id", g.ID) + populate(objectMap, "virtualMachineId", g.VirtualMachineID) return json.Marshal(objectMap) } @@ -5506,6 +5558,9 @@ func (g *GalleryArtifactVersionFullSource) UnmarshalJSON(data []byte) error { case "id": err = unpopulate(val, "ID", &g.ID) delete(rawMsg, key) + case "virtualMachineId": + err = unpopulate(val, "VirtualMachineID", &g.VirtualMachineID) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", g, err) @@ -12303,6 +12358,68 @@ func (s *ScaleInPolicy) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ScheduledEventsAdditionalPublishingTargets. 
+func (s ScheduledEventsAdditionalPublishingTargets) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "eventGridAndResourceGraph", s.EventGridAndResourceGraph) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ScheduledEventsAdditionalPublishingTargets. +func (s *ScheduledEventsAdditionalPublishingTargets) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "eventGridAndResourceGraph": + err = unpopulate(val, "EventGridAndResourceGraph", &s.EventGridAndResourceGraph) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ScheduledEventsPolicy. +func (s ScheduledEventsPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "scheduledEventsAdditionalPublishingTargets", s.ScheduledEventsAdditionalPublishingTargets) + populate(objectMap, "userInitiatedReboot", s.UserInitiatedReboot) + populate(objectMap, "userInitiatedRedeploy", s.UserInitiatedRedeploy) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ScheduledEventsPolicy. +func (s *ScheduledEventsPolicy) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "scheduledEventsAdditionalPublishingTargets": + err = unpopulate(val, "ScheduledEventsAdditionalPublishingTargets", &s.ScheduledEventsAdditionalPublishingTargets) + delete(rawMsg, key) + case "userInitiatedReboot": + err = unpopulate(val, "UserInitiatedReboot", &s.UserInitiatedReboot) + delete(rawMsg, key) + case "userInitiatedRedeploy": + err = unpopulate(val, "UserInitiatedRedeploy", &s.UserInitiatedRedeploy) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ScheduledEventsProfile. func (s ScheduledEventsProfile) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -14448,6 +14565,60 @@ func (u *UserAssignedIdentitiesValue) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type UserInitiatedReboot. +func (u UserInitiatedReboot) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "automaticallyApprove", u.AutomaticallyApprove) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UserInitiatedReboot. 
+func (u *UserInitiatedReboot) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "automaticallyApprove": + err = unpopulate(val, "AutomaticallyApprove", &u.AutomaticallyApprove) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type UserInitiatedRedeploy. +func (u UserInitiatedRedeploy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "automaticallyApprove", u.AutomaticallyApprove) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UserInitiatedRedeploy. +func (u *UserInitiatedRedeploy) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "automaticallyApprove": + err = unpopulate(val, "AutomaticallyApprove", &u.AutomaticallyApprove) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type VMDiskSecurityProfile. func (v VMDiskSecurityProfile) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -16135,6 +16306,7 @@ func (v VirtualMachineProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "priority", v.Priority) populate(objectMap, "provisioningState", v.ProvisioningState) populate(objectMap, "proximityPlacementGroup", v.ProximityPlacementGroup) + populate(objectMap, "scheduledEventsPolicy", v.ScheduledEventsPolicy) populate(objectMap, "scheduledEventsProfile", v.ScheduledEventsProfile) populate(objectMap, "securityProfile", v.SecurityProfile) populate(objectMap, "storageProfile", v.StorageProfile) @@ -16211,6 +16383,9 @@ func (v *VirtualMachineProperties) UnmarshalJSON(data []byte) error { case "proximityPlacementGroup": err = unpopulate(val, "ProximityPlacementGroup", &v.ProximityPlacementGroup) delete(rawMsg, key) + case "scheduledEventsPolicy": + err = unpopulate(val, "ScheduledEventsPolicy", &v.ScheduledEventsPolicy) + delete(rawMsg, key) case "scheduledEventsProfile": err = unpopulate(val, "ScheduledEventsProfile", &v.ScheduledEventsProfile) delete(rawMsg, key) @@ -17729,6 +17904,7 @@ func (v VirtualMachineScaleSetProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "proximityPlacementGroup", v.ProximityPlacementGroup) populate(objectMap, "resiliencyPolicy", v.ResiliencyPolicy) populate(objectMap, "scaleInPolicy", v.ScaleInPolicy) + populate(objectMap, "scheduledEventsPolicy", v.ScheduledEventsPolicy) populate(objectMap, "singlePlacementGroup", v.SinglePlacementGroup) populate(objectMap, "spotRestorePolicy", v.SpotRestorePolicy) populateDateTimeRFC3339(objectMap, "timeCreated", v.TimeCreated) @@ -17787,6 +17963,9 @@ func (v *VirtualMachineScaleSetProperties) UnmarshalJSON(data []byte) error { case "scaleInPolicy": err = unpopulate(val, "ScaleInPolicy", &v.ScaleInPolicy) delete(rawMsg, key) + case "scheduledEventsPolicy": + err = unpopulate(val, "ScheduledEventsPolicy", &v.ScheduledEventsPolicy) + delete(rawMsg, key) case "singlePlacementGroup": err = unpopulate(val, 
"SinglePlacementGroup", &v.SinglePlacementGroup) delete(rawMsg, key) @@ -17933,6 +18112,7 @@ func (v *VirtualMachineScaleSetPublicIPAddressConfigurationProperties) Unmarshal func (v VirtualMachineScaleSetReimageParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "exactVersion", v.ExactVersion) + populate(objectMap, "forceUpdateOSDiskForEphemeral", v.ForceUpdateOSDiskForEphemeral) populate(objectMap, "instanceIds", v.InstanceIDs) populate(objectMap, "osProfile", v.OSProfile) populate(objectMap, "tempDisk", v.TempDisk) @@ -17951,6 +18131,9 @@ func (v *VirtualMachineScaleSetReimageParameters) UnmarshalJSON(data []byte) err case "exactVersion": err = unpopulate(val, "ExactVersion", &v.ExactVersion) delete(rawMsg, key) + case "forceUpdateOSDiskForEphemeral": + err = unpopulate(val, "ForceUpdateOSDiskForEphemeral", &v.ForceUpdateOSDiskForEphemeral) + delete(rawMsg, key) case "instanceIds": err = unpopulate(val, "InstanceIDs", &v.InstanceIDs) delete(rawMsg, key) @@ -18348,6 +18531,7 @@ func (v VirtualMachineScaleSetUpdateOSDisk) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "caching", v.Caching) populate(objectMap, "deleteOption", v.DeleteOption) + populate(objectMap, "diffDiskSettings", v.DiffDiskSettings) populate(objectMap, "diskSizeGB", v.DiskSizeGB) populate(objectMap, "image", v.Image) populate(objectMap, "managedDisk", v.ManagedDisk) @@ -18371,6 +18555,9 @@ func (v *VirtualMachineScaleSetUpdateOSDisk) UnmarshalJSON(data []byte) error { case "deleteOption": err = unpopulate(val, "DeleteOption", &v.DeleteOption) delete(rawMsg, key) + case "diffDiskSettings": + err = unpopulate(val, "DiffDiskSettings", &v.DiffDiskSettings) + delete(rawMsg, key) case "diskSizeGB": err = unpopulate(val, "DiskSizeGB", &v.DiskSizeGB) delete(rawMsg, key) @@ -19319,6 +19506,7 @@ func (v *VirtualMachineScaleSetVMProtectionPolicy) UnmarshalJSON(data []byte) er func (v VirtualMachineScaleSetVMReimageParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "exactVersion", v.ExactVersion) + populate(objectMap, "forceUpdateOSDiskForEphemeral", v.ForceUpdateOSDiskForEphemeral) populate(objectMap, "osProfile", v.OSProfile) populate(objectMap, "tempDisk", v.TempDisk) return json.Marshal(objectMap) @@ -19336,6 +19524,9 @@ func (v *VirtualMachineScaleSetVMReimageParameters) UnmarshalJSON(data []byte) e case "exactVersion": err = unpopulate(val, "ExactVersion", &v.ExactVersion) delete(rawMsg, key) + case "forceUpdateOSDiskForEphemeral": + err = unpopulate(val, "ForceUpdateOSDiskForEphemeral", &v.ForceUpdateOSDiskForEphemeral) + delete(rawMsg, key) case "osProfile": err = unpopulate(val, "OSProfile", &v.OSProfile) delete(rawMsg, key) @@ -19765,7 +19956,7 @@ func populateAny(m map[string]any, k string, v any) { } func unpopulate(data json.RawMessage, fn string, v any) error { - if data == nil { + if data == nil || string(data) == "null" { return nil } if err := json.Unmarshal(data, v); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/operations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/operations_client.go index f7fded74a..7c438a561 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/operations_client.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/operations_client.go @@ -39,7 +39,7 @@ func NewOperationsClient(credential azcore.TokenCredential, options *arm.ClientO // NewListPager - Gets a list of compute operations. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. func (client *OperationsClient) NewListPager(options *OperationsClientListOptions) *runtime.Pager[OperationsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[OperationsClientListResponse]{ @@ -73,7 +73,7 @@ func (client *OperationsClient) listCreateRequest(ctx context.Context, options * return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/options.go index 0c363663a..5ea3446d0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/options.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/options.go @@ -84,6 +84,13 @@ type CapacityReservationGroupsClientListBySubscriptionOptions struct { // VM Instance or both resource Ids which are associated to capacity // reservation group in the response. Expand *ExpandTypesForGetCapacityReservationGroups + + // The query option to fetch Capacity Reservation Group Resource Ids. + // 'CreatedInSubscription' enables fetching Resource Ids for all capacity reservation group resources created in the subscription. + // 'SharedWithSubscription' enables fetching Resource Ids for all capacity reservation group resources shared with the subscription. + // 'All' enables fetching Resource Ids for all capacity reservation group resources shared with the subscription and created + // in the subscription. + ResourceIDsOnly *ResourceIDOptionsForGetCapacityReservationGroups } // CapacityReservationGroupsClientUpdateOptions contains the optional parameters for the CapacityReservationGroupsClient.Update diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go index 947d83736..c18659203 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go @@ -47,7 +47,7 @@ func NewProximityPlacementGroupsClient(subscriptionID string, credential azcore. // CreateOrUpdate - Create or update a proximity placement group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. 
// - proximityPlacementGroupName - The name of the proximity placement group. // - parameters - Parameters supplied to the Create Proximity Placement Group operation. @@ -95,7 +95,7 @@ func (client *ProximityPlacementGroupsClient) createOrUpdateCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -116,7 +116,7 @@ func (client *ProximityPlacementGroupsClient) createOrUpdateHandleResponse(resp // Delete - Delete a proximity placement group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - proximityPlacementGroupName - The name of the proximity placement group. // - options - ProximityPlacementGroupsClientDeleteOptions contains the optional parameters for the ProximityPlacementGroupsClient.Delete @@ -162,7 +162,7 @@ func (client *ProximityPlacementGroupsClient) deleteCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -171,7 +171,7 @@ func (client *ProximityPlacementGroupsClient) deleteCreateRequest(ctx context.Co // Get - Retrieves information about a proximity placement group . // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - proximityPlacementGroupName - The name of the proximity placement group. // - options - ProximityPlacementGroupsClientGetOptions contains the optional parameters for the ProximityPlacementGroupsClient.Get @@ -218,10 +218,10 @@ func (client *ProximityPlacementGroupsClient) getCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2024-03-01") if options != nil && options.IncludeColocationStatus != nil { reqQP.Set("includeColocationStatus", *options.IncludeColocationStatus) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -238,7 +238,7 @@ func (client *ProximityPlacementGroupsClient) getHandleResponse(resp *http.Respo // NewListByResourceGroupPager - Lists all proximity placement groups in a resource group. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - ProximityPlacementGroupsClientListByResourceGroupOptions contains the optional parameters for the ProximityPlacementGroupsClient.NewListByResourceGroupPager // method. 
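Note on the getCreateRequest hunk above: moving reqQP.Set("api-version", ...) ahead of the conditional includeColocationStatus parameter is cosmetic. url.Values.Encode sorts keys alphabetically, so the request URL is byte-identical either way; a minimal stdlib check:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// url.Values.Encode sorts keys, so the order of Set calls in the
	// generated request builders does not change the wire format.
	before := url.Values{}
	before.Set("includeColocationStatus", "true")
	before.Set("api-version", "2024-03-01")

	after := url.Values{}
	after.Set("api-version", "2024-03-01")
	after.Set("includeColocationStatus", "true")

	fmt.Println(before.Encode())                   // api-version=2024-03-01&includeColocationStatus=true
	fmt.Println(before.Encode() == after.Encode()) // true
}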
@@ -281,7 +281,7 @@ func (client *ProximityPlacementGroupsClient) listByResourceGroupCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -298,7 +298,7 @@ func (client *ProximityPlacementGroupsClient) listByResourceGroupHandleResponse( // NewListBySubscriptionPager - Lists all proximity placement groups in a subscription. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - ProximityPlacementGroupsClientListBySubscriptionOptions contains the optional parameters for the ProximityPlacementGroupsClient.NewListBySubscriptionPager // method. func (client *ProximityPlacementGroupsClient) NewListBySubscriptionPager(options *ProximityPlacementGroupsClientListBySubscriptionOptions) *runtime.Pager[ProximityPlacementGroupsClientListBySubscriptionResponse] { @@ -336,7 +336,7 @@ func (client *ProximityPlacementGroupsClient) listBySubscriptionCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -354,7 +354,7 @@ func (client *ProximityPlacementGroupsClient) listBySubscriptionHandleResponse(r // Update - Update a proximity placement group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - proximityPlacementGroupName - The name of the proximity placement group. // - parameters - Parameters supplied to the Update Proximity Placement Group operation. 
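The one behavioral change in models_serde.go earlier in this diff is the unpopulate guard: a field whose raw JSON is the literal null is now skipped instead of being handed to json.Unmarshal, which protects unmarshalers that parse the raw bytes themselves. A standalone sketch of the new semantics; the rfc3339 type below is a stand-in for the SDK's internal timestamp wrapper, not part of the package:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

// unpopulate mirrors the updated vendored helper: a nil or literal
// null raw message leaves the target value untouched.
func unpopulate(data json.RawMessage, fn string, v any) error {
	if data == nil || string(data) == "null" {
		return nil
	}
	if err := json.Unmarshal(data, v); err != nil {
		return fmt.Errorf("struct field %s: %v", fn, err)
	}
	return nil
}

// rfc3339 parses its raw bytes directly, so under the old guard
// (data == nil only) a bare null reached time.Parse and errored.
type rfc3339 time.Time

func (t *rfc3339) UnmarshalJSON(data []byte) error {
	parsed, err := time.Parse(time.RFC3339, strings.Trim(string(data), `"`))
	*t = rfc3339(parsed)
	return err
}

func main() {
	var ts rfc3339
	// New guard: an explicit null is treated like an absent key.
	fmt.Println(unpopulate(json.RawMessage(`null`), "TimeCreated", &ts)) // <nil>
}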
@@ -402,7 +402,7 @@ func (client *ProximityPlacementGroupsClient) updateCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/resourceskus_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/resourceskus_client.go index e0dd1afa4..f716c1ccf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/resourceskus_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/resourceskus_client.go @@ -83,10 +83,10 @@ func (client *ResourceSKUsClient) listCreateRequest(ctx context.Context, options return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2021-07-01") if options != nil && options.Filter != nil { reqQP.Set("$filter", *options.Filter) } + reqQP.Set("api-version", "2021-07-01") if options != nil && options.IncludeExtendedLocations != nil { reqQP.Set("includeExtendedLocations", *options.IncludeExtendedLocations) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/responses.go similarity index 100% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/response_types.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/responses.go diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepointcollections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepointcollections_client.go index b07477e6d..f53e3a7d4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepointcollections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepointcollections_client.go @@ -48,7 +48,7 @@ func NewRestorePointCollectionsClient(subscriptionID string, credential azcore.T // for more details. When updating a restore point collection, only tags may be modified. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. // - parameters - Parameters supplied to the Create or Update restore point collection operation. 
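For the scheduled-events surface added to models.go and models_serde.go earlier in this diff, a hedged sketch of wiring the new policy and round-tripping it through the generated marshalers; it hangs off VirtualMachineProperties.ScheduledEventsPolicy or VirtualMachineScaleSetProperties.ScheduledEventsPolicy, both added above:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func ptr[T any](v T) *T { return &v }

func main() {
	// Approve user-initiated reboot/redeploy events automatically and
	// publish scheduled events to Event Grid and Resource Graph.
	policy := armcompute.ScheduledEventsPolicy{
		UserInitiatedReboot:   &armcompute.UserInitiatedReboot{AutomaticallyApprove: ptr(true)},
		UserInitiatedRedeploy: &armcompute.UserInitiatedRedeploy{AutomaticallyApprove: ptr(true)},
		ScheduledEventsAdditionalPublishingTargets: &armcompute.ScheduledEventsAdditionalPublishingTargets{
			EventGridAndResourceGraph: &armcompute.EventGridAndResourceGraph{Enable: ptr(true)},
		},
	}
	out, err := json.Marshal(policy) // dispatches to the generated MarshalJSON above
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}

The local ptr helper is only for brevity; the SDK's azcore to.Ptr does the same.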
@@ -96,7 +96,7 @@ func (client *RestorePointCollectionsClient) createOrUpdateCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -118,7 +118,7 @@ func (client *RestorePointCollectionsClient) createOrUpdateHandleResponse(resp * // points. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the Restore Point Collection. // - options - RestorePointCollectionsClientBeginDeleteOptions contains the optional parameters for the RestorePointCollectionsClient.BeginDelete @@ -144,7 +144,7 @@ func (client *RestorePointCollectionsClient) BeginDelete(ctx context.Context, re // points. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *RestorePointCollectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, restorePointCollectionName string, options *RestorePointCollectionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "RestorePointCollectionsClient.BeginDelete" @@ -186,7 +186,7 @@ func (client *RestorePointCollectionsClient) deleteCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -195,7 +195,7 @@ func (client *RestorePointCollectionsClient) deleteCreateRequest(ctx context.Con // Get - The operation to get the restore point collection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. // - options - RestorePointCollectionsClientGetOptions contains the optional parameters for the RestorePointCollectionsClient.Get @@ -245,7 +245,7 @@ func (client *RestorePointCollectionsClient) getCreateRequest(ctx context.Contex if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -262,7 +262,7 @@ func (client *RestorePointCollectionsClient) getHandleResponse(resp *http.Respon // NewListPager - Gets the list of restore point collections in a resource group. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - RestorePointCollectionsClientListOptions contains the optional parameters for the RestorePointCollectionsClient.NewListPager // method. 
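DataDisksToAttach (models.go above) now carries caching, delete behavior, a disk encryption set, and the write-accelerator flag alongside DiskID and Lun. A sketch of the richer attach payload; the CachingTypes and DiskDeleteOptionTypes constants are assumed from constants.go, which is not part of this excerpt:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func ptr[T any](v T) *T { return &v }

func main() {
	disk := armcompute.DataDisksToAttach{
		DiskID: ptr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/disks/<disk>"),
		Lun:    ptr(int32(1)),
		// New in this regeneration: settings that previously required a
		// separate VM update can ride along with the attach request.
		Caching:                 ptr(armcompute.CachingTypesReadOnly),
		DeleteOption:            ptr(armcompute.DiskDeleteOptionTypesDetach),
		DiskEncryptionSet:       &armcompute.DiskEncryptionSetParameters{ID: ptr("<disk-encryption-set-id>")},
		WriteAcceleratorEnabled: ptr(false),
	}
	b, _ := json.Marshal(disk)
	fmt.Println(string(b)) // caching/deleteOption/diskEncryptionSet/writeAcceleratorEnabled now serialize
}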
@@ -305,7 +305,7 @@ func (client *RestorePointCollectionsClient) listCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -324,7 +324,7 @@ func (client *RestorePointCollectionsClient) listHandleResponse(resp *http.Respo // to get the next page of restore point collections. Do this till nextLink is not null to fetch all // the restore point collections. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - RestorePointCollectionsClientListAllOptions contains the optional parameters for the RestorePointCollectionsClient.NewListAllPager // method. func (client *RestorePointCollectionsClient) NewListAllPager(options *RestorePointCollectionsClientListAllOptions) *runtime.Pager[RestorePointCollectionsClientListAllResponse] { @@ -362,7 +362,7 @@ func (client *RestorePointCollectionsClient) listAllCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -380,7 +380,7 @@ func (client *RestorePointCollectionsClient) listAllHandleResponse(resp *http.Re // Update - The operation to update the restore point collection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. // - parameters - Parameters supplied to the Update restore point collection operation. @@ -428,7 +428,7 @@ func (client *RestorePointCollectionsClient) updateCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepoints_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepoints_client.go index 7b6364f92..dffafe2da 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepoints_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepoints_client.go @@ -47,7 +47,7 @@ func NewRestorePointsClient(subscriptionID string, credential azcore.TokenCreden // BeginCreate - The operation to create the restore point. Updating properties of an existing restore point is not allowed // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. // - restorePointName - The name of the restore point. 
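The restore point collection client above only changes its api-version; the long-running-operation calling pattern is untouched. A minimal sketch of driving BeginDelete, assuming default azidentity credentials and placeholder resource names:

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewRestorePointCollectionsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// BeginDelete returns a poller; PollUntilDone blocks until the
	// service reports a terminal state. Requests go out with
	// api-version=2024-03-01 after this regeneration.
	poller, err := client.BeginDelete(context.Background(), "<resource-group>", "<collection-name>", nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
		log.Fatal(err)
	}
}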
@@ -74,7 +74,7 @@ func (client *RestorePointsClient) BeginCreate(ctx context.Context, resourceGrou // Create - The operation to create the restore point. Updating properties of an existing restore point is not allowed // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *RestorePointsClient) create(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, parameters RestorePoint, options *RestorePointsClientBeginCreateOptions) (*http.Response, error) { var err error const operationName = "RestorePointsClient.BeginCreate" @@ -120,7 +120,7 @@ func (client *RestorePointsClient) createCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -132,7 +132,7 @@ func (client *RestorePointsClient) createCreateRequest(ctx context.Context, reso // BeginDelete - The operation to delete the restore point. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the Restore Point Collection. // - restorePointName - The name of the restore point. @@ -158,7 +158,7 @@ func (client *RestorePointsClient) BeginDelete(ctx context.Context, resourceGrou // Delete - The operation to delete the restore point. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *RestorePointsClient) deleteOperation(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, options *RestorePointsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "RestorePointsClient.BeginDelete" @@ -204,7 +204,7 @@ func (client *RestorePointsClient) deleteCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -213,7 +213,7 @@ func (client *RestorePointsClient) deleteCreateRequest(ctx context.Context, reso // Get - The operation to get the restore point. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. // - restorePointName - The name of the restore point. 
@@ -267,7 +267,7 @@ func (client *RestorePointsClient) getCreateRequest(ctx context.Context, resourc if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go index 4bf2d0960..ee8172bae 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go @@ -47,7 +47,7 @@ func NewSharedGalleriesClient(subscriptionID string, credential azcore.TokenCred // Get - Get a shared gallery by subscription id or tenant id. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - options - SharedGalleriesClientGetOptions contains the optional parameters for the SharedGalleriesClient.Get method. @@ -93,7 +93,7 @@ func (client *SharedGalleriesClient) getCreateRequest(ctx context.Context, locat return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -110,7 +110,7 @@ func (client *SharedGalleriesClient) getHandleResponse(resp *http.Response) (Sha // NewListPager - List shared galleries by subscription id or tenant id. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - options - SharedGalleriesClientListOptions contains the optional parameters for the SharedGalleriesClient.NewListPager // method. @@ -153,7 +153,7 @@ func (client *SharedGalleriesClient) listCreateRequest(ctx context.Context, loca return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") if options != nil && options.SharedTo != nil { reqQP.Set("sharedTo", string(*options.SharedTo)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go index ad549b140..8f0ef79cf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go @@ -47,7 +47,7 @@ func NewSharedGalleryImagesClient(subscriptionID string, credential azcore.Token // Get - Get a shared gallery image by subscription id or tenant id. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. @@ -98,7 +98,7 @@ func (client *SharedGalleryImagesClient) getCreateRequest(ctx context.Context, l return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -115,7 +115,7 @@ func (client *SharedGalleryImagesClient) getHandleResponse(resp *http.Response) // NewListPager - List shared gallery images by subscription id or tenant id. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - options - SharedGalleryImagesClientListOptions contains the optional parameters for the SharedGalleryImagesClient.NewListPager @@ -163,7 +163,7 @@ func (client *SharedGalleryImagesClient) listCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") if options != nil && options.SharedTo != nil { reqQP.Set("sharedTo", string(*options.SharedTo)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go index 5c64dc844..729b0e3c5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go @@ -47,7 +47,7 @@ func NewSharedGalleryImageVersionsClient(subscriptionID string, credential azcor // Get - Get a shared gallery image version by subscription id or tenant id. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. @@ -106,7 +106,7 @@ func (client *SharedGalleryImageVersionsClient) getCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -123,7 +123,7 @@ func (client *SharedGalleryImageVersionsClient) getHandleResponse(resp *http.Res // NewListPager - List shared gallery image versions by subscription id or tenant id. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. 
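The shared gallery clients move from 2022-08-03 to 2023-07-03; paging is unchanged. A sketch of listing image versions, assuming page.Value and the Name field from the armcompute models (not shown in this excerpt):

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewSharedGalleryImageVersionsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Each NextPage call now carries api-version=2023-07-03.
	pager := client.NewListPager("eastus", "<gallery-unique-name>", "<image-definition>", nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, version := range page.Value {
			if version.Name != nil {
				log.Println(*version.Name)
			}
		}
	}
}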
@@ -176,7 +176,7 @@ func (client *SharedGalleryImageVersionsClient) listCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") if options != nil && options.SharedTo != nil { reqQP.Set("sharedTo", string(*options.SharedTo)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/snapshots_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/snapshots_client.go index a0d263613..8c73f7709 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/snapshots_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/snapshots_client.go @@ -47,7 +47,7 @@ func NewSnapshotsClient(subscriptionID string, credential azcore.TokenCredential // BeginCreateOrUpdate - Creates or updates a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -75,7 +75,7 @@ func (client *SnapshotsClient) BeginCreateOrUpdate(ctx context.Context, resource // CreateOrUpdate - Creates or updates a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) createOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot, options *SnapshotsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginCreateOrUpdate" @@ -117,7 +117,7 @@ func (client *SnapshotsClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, snapshot); err != nil { @@ -129,7 +129,7 @@ func (client *SnapshotsClient) createOrUpdateCreateRequest(ctx context.Context, // BeginDelete - Deletes a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -155,7 +155,7 @@ func (client *SnapshotsClient) BeginDelete(ctx context.Context, resourceGroupNam // Delete - Deletes a snapshot. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) deleteOperation(ctx context.Context, resourceGroupName string, snapshotName string, options *SnapshotsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginDelete" @@ -197,7 +197,7 @@ func (client *SnapshotsClient) deleteCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() return req, nil } @@ -205,7 +205,7 @@ func (client *SnapshotsClient) deleteCreateRequest(ctx context.Context, resource // Get - Gets information about a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -253,7 +253,7 @@ func (client *SnapshotsClient) getCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -271,7 +271,7 @@ func (client *SnapshotsClient) getHandleResponse(resp *http.Response) (Snapshots // BeginGrantAccess - Grants access to a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -300,7 +300,7 @@ func (client *SnapshotsClient) BeginGrantAccess(ctx context.Context, resourceGro // GrantAccess - Grants access to a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) grantAccess(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData, options *SnapshotsClientBeginGrantAccessOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginGrantAccess" @@ -342,7 +342,7 @@ func (client *SnapshotsClient) grantAccessCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, grantAccessData); err != nil { @@ -353,7 +353,7 @@ func (client *SnapshotsClient) grantAccessCreateRequest(ctx context.Context, res // NewListPager - Lists snapshots under a subscription. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - options - SnapshotsClientListOptions contains the optional parameters for the SnapshotsClient.NewListPager method. 
func (client *SnapshotsClient) NewListPager(options *SnapshotsClientListOptions) *runtime.Pager[SnapshotsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[SnapshotsClientListResponse]{ @@ -390,7 +390,7 @@ func (client *SnapshotsClient) listCreateRequest(ctx context.Context, options *S return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -407,7 +407,7 @@ func (client *SnapshotsClient) listHandleResponse(resp *http.Response) (Snapshot // NewListByResourceGroupPager - Lists snapshots under a resource group. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - options - SnapshotsClientListByResourceGroupOptions contains the optional parameters for the SnapshotsClient.NewListByResourceGroupPager // method. @@ -450,7 +450,7 @@ func (client *SnapshotsClient) listByResourceGroupCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -468,7 +468,7 @@ func (client *SnapshotsClient) listByResourceGroupHandleResponse(resp *http.Resp // BeginRevokeAccess - Revokes access to a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -496,7 +496,7 @@ func (client *SnapshotsClient) BeginRevokeAccess(ctx context.Context, resourceGr // RevokeAccess - Revokes access to a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) revokeAccess(ctx context.Context, resourceGroupName string, snapshotName string, options *SnapshotsClientBeginRevokeAccessOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginRevokeAccess" @@ -538,7 +538,7 @@ func (client *SnapshotsClient) revokeAccessCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() return req, nil } @@ -546,7 +546,7 @@ func (client *SnapshotsClient) revokeAccessCreateRequest(ctx context.Context, re // BeginUpdate - Updates (patches) a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -573,7 +573,7 @@ func (client *SnapshotsClient) BeginUpdate(ctx context.Context, resourceGroupNam // Update - Updates (patches) a snapshot. 
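
// Hypothetical caller-side sketch (assumed import path and placeholder IDs;
// not code from this diff): the snapshot Begin* methods above are long-running
// operations returning a runtime.Poller, and their Go surface is untouched by
// the 2023-04-02 -> 2023-10-02 bump. Granting read access, end to end:
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	ctx := context.Background()
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewSnapshotsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	poller, err := client.BeginGrantAccess(ctx, "<resource-group>", "<snapshot>",
		armcompute.GrantAccessData{
			Access:            to.Ptr(armcompute.AccessLevelRead),
			DurationInSeconds: to.Ptr[int32](3600),
		}, nil)
	if err != nil {
		log.Fatal(err)
	}
	res, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*res.AccessSAS) // short-lived read SAS for the snapshot
}
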
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) update(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate, options *SnapshotsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginUpdate" @@ -615,7 +615,7 @@ func (client *SnapshotsClient) updateCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, snapshot); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sshpublickeys_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sshpublickeys_client.go index a577d55d2..58d8d18ba 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sshpublickeys_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sshpublickeys_client.go @@ -47,7 +47,7 @@ func NewSSHPublicKeysClient(subscriptionID string, credential azcore.TokenCreden // Create - Creates a new SSH public key resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. // - parameters - Parameters supplied to create the SSH public key. @@ -94,7 +94,7 @@ func (client *SSHPublicKeysClient) createCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -115,7 +115,7 @@ func (client *SSHPublicKeysClient) createHandleResponse(resp *http.Response) (SS // Delete - Delete an SSH public key. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. // - options - SSHPublicKeysClientDeleteOptions contains the optional parameters for the SSHPublicKeysClient.Delete method. @@ -160,7 +160,7 @@ func (client *SSHPublicKeysClient) deleteCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -171,7 +171,7 @@ func (client *SSHPublicKeysClient) deleteCreateRequest(ctx context.Context, reso // SSH public key resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. 
// - options - SSHPublicKeysClientGenerateKeyPairOptions contains the optional parameters for the SSHPublicKeysClient.GenerateKeyPair @@ -218,7 +218,7 @@ func (client *SSHPublicKeysClient) generateKeyPairCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.Parameters != nil { @@ -242,7 +242,7 @@ func (client *SSHPublicKeysClient) generateKeyPairHandleResponse(resp *http.Resp // Get - Retrieves information about an SSH public key. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. // - options - SSHPublicKeysClientGetOptions contains the optional parameters for the SSHPublicKeysClient.Get method. @@ -288,7 +288,7 @@ func (client *SSHPublicKeysClient) getCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -306,7 +306,7 @@ func (client *SSHPublicKeysClient) getHandleResponse(resp *http.Response) (SSHPu // NewListByResourceGroupPager - Lists all of the SSH public keys in the specified resource group. Use the nextLink property // in the response to get the next page of SSH public keys. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - SSHPublicKeysClientListByResourceGroupOptions contains the optional parameters for the SSHPublicKeysClient.NewListByResourceGroupPager // method. @@ -349,7 +349,7 @@ func (client *SSHPublicKeysClient) listByResourceGroupCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -367,7 +367,7 @@ func (client *SSHPublicKeysClient) listByResourceGroupHandleResponse(resp *http. // NewListBySubscriptionPager - Lists all of the SSH public keys in the subscription. Use the nextLink property in the response // to get the next page of SSH public keys. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - SSHPublicKeysClientListBySubscriptionOptions contains the optional parameters for the SSHPublicKeysClient.NewListBySubscriptionPager // method. func (client *SSHPublicKeysClient) NewListBySubscriptionPager(options *SSHPublicKeysClientListBySubscriptionOptions) *runtime.Pager[SSHPublicKeysClientListBySubscriptionResponse] { @@ -405,7 +405,7 @@ func (client *SSHPublicKeysClient) listBySubscriptionCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -423,7 +423,7 @@ func (client *SSHPublicKeysClient) listBySubscriptionHandleResponse(resp *http.R // Update - Updates a new SSH public key resource. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. // - parameters - Parameters supplied to update the SSH public key. @@ -470,7 +470,7 @@ func (client *SSHPublicKeysClient) updateCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/time_rfc3339.go index 4f75ccd6f..ae4e62dd4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/time_rfc3339.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/time_rfc3339.go @@ -19,12 +19,16 @@ import ( ) // Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) +var tzOffsetRegex = regexp.MustCompile(`(?:Z|z|\+|-)(?:\d+:\d+)*"*$`) const ( - utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` - utcDateTime = "2006-01-02T15:04:05.999999999" - dateTimeJSON = `"` + time.RFC3339Nano + `"` + utcDateTime = "2006-01-02T15:04:05.999999999" + utcDateTimeJSON = `"` + utcDateTime + `"` + utcDateTimeNoT = "2006-01-02 15:04:05.999999999" + utcDateTimeJSONNoT = `"` + utcDateTimeNoT + `"` + dateTimeNoT = `2006-01-02 15:04:05.999999999Z07:00` + dateTimeJSON = `"` + time.RFC3339Nano + `"` + dateTimeJSONNoT = `"` + dateTimeNoT + `"` ) type dateTimeRFC3339 time.Time @@ -40,17 +44,33 @@ func (t dateTimeRFC3339) MarshalText() ([]byte, error) { } func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcDateTimeJSON - if tzOffsetRegex.Match(data) { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = dateTimeJSON + } else if tzOffset { + layout = dateTimeJSONNoT + } else if hasT { + layout = utcDateTimeJSON + } else { + layout = utcDateTimeJSONNoT } return t.Parse(layout, string(data)) } func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { - layout := utcDateTime - if tzOffsetRegex.Match(data) { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = time.RFC3339Nano + } else if tzOffset { + layout = dateTimeNoT + } else if hasT { + layout = utcDateTime + } else { + layout = utcDateTimeNoT } return t.Parse(layout, string(data)) } @@ -61,6 +81,10 @@ func (t *dateTimeRFC3339) Parse(layout, value string) error { return err } +func (t dateTimeRFC3339) String() string { + return time.Time(t).Format(time.RFC3339Nano) +} + func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { if t == nil { return @@ -74,7 +98,7 @@ func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { } func unpopulateDateTimeRFC3339(data json.RawMessage, fn string, t 
**time.Time) error { - if data == nil || strings.EqualFold(string(data), "null") { + if data == nil || string(data) == "null" { return nil } var aux dateTimeRFC3339 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/usage_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/usage_client.go index afb6852b0..e251486e9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/usage_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/usage_client.go @@ -47,7 +47,7 @@ func NewUsageClient(subscriptionID string, credential azcore.TokenCredential, op // NewListPager - Gets, for the specified location, the current compute resource usage information as well as the limits for // compute resources under the subscription. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location for which resource usage is queried. // - options - UsageClientListOptions contains the optional parameters for the UsageClient.NewListPager method. func (client *UsageClient) NewListPager(location string, options *UsageClientListOptions) *runtime.Pager[UsageClientListResponse] { @@ -89,7 +89,7 @@ func (client *UsageClient) listCreateRequest(ctx context.Context, location strin return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go index a8e90c4b1..a337d2453 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go @@ -48,7 +48,7 @@ func NewVirtualMachineExtensionImagesClient(subscriptionID string, credential az // Get - Gets a virtual machine extension image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - options - VirtualMachineExtensionImagesClientGetOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.Get // method. @@ -102,7 +102,7 @@ func (client *VirtualMachineExtensionImagesClient) getCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -120,7 +120,7 @@ func (client *VirtualMachineExtensionImagesClient) getHandleResponse(resp *http. // ListTypes - Gets a list of virtual machine extension image types. // If the operation fails it returns an *azcore.ResponseError type. 
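
// Illustrative reduction (runnable Go, separate from the diff) of the
// time_rfc3339.go rewrite above: the parser now recognizes four timestamp
// shapes instead of two, because Azure can omit the trailing offset and can
// use a space instead of 'T'. The regex groups also became non-capturing,
// (?:...), since only Match is called, and the JSON "null" check became an
// exact comparison, which is what encoding/json emits. pickLayout is a name
// introduced here to mirror the new UnmarshalText branch structure.
package main

import (
	"fmt"
	"regexp"
	"strings"
	"time"
)

var tzOffsetRegex = regexp.MustCompile(`(?:Z|z|\+|-)(?:\d+:\d+)*"*$`)

const (
	utcDateTime    = "2006-01-02T15:04:05.999999999"
	utcDateTimeNoT = "2006-01-02 15:04:05.999999999"
	dateTimeNoT    = "2006-01-02 15:04:05.999999999Z07:00"
)

func pickLayout(s string) string {
	tzOffset := tzOffsetRegex.MatchString(s)
	hasT := strings.Contains(s, "T") || strings.Contains(s, "t")
	switch {
	case tzOffset && hasT:
		return time.RFC3339Nano
	case tzOffset:
		return dateTimeNoT
	case hasT:
		return utcDateTime
	default:
		return utcDateTimeNoT
	}
}

func main() {
	for _, s := range []string{
		"2024-03-01T10:00:00Z", // offset and 'T': RFC3339Nano
		"2024-03-01 10:00:00Z", // offset, no 'T': dateTimeNoT
		"2024-03-01T10:00:00",  // 'T', no offset: utcDateTime
		"2024-03-01 10:00:00",  // neither: utcDateTimeNoT
	} {
		ts, err := time.Parse(pickLayout(s), s)
		fmt.Println(s, "=>", ts.UTC(), err)
	}
}
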
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - options - VirtualMachineExtensionImagesClientListTypesOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.ListTypes // method. @@ -166,7 +166,7 @@ func (client *VirtualMachineExtensionImagesClient) listTypesCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -184,7 +184,7 @@ func (client *VirtualMachineExtensionImagesClient) listTypesHandleResponse(resp // ListVersions - Gets a list of virtual machine extension image versions. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - options - VirtualMachineExtensionImagesClientListVersionsOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.ListVersions // method. @@ -237,13 +237,13 @@ func (client *VirtualMachineExtensionImagesClient) listVersionsCreateRequest(ctx if options != nil && options.Filter != nil { reqQP.Set("$filter", *options.Filter) } - if options != nil && options.Top != nil { - reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) - } if options != nil && options.Orderby != nil { reqQP.Set("$orderby", *options.Orderby) } - reqQP.Set("api-version", "2023-09-01") + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensions_client.go index 9d658bb4e..bd62e8137 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensions_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineExtensionsClient(subscriptionID string, credential azcore. // BeginCreateOrUpdate - The operation to create or update the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the extension should be created or updated. // - vmExtensionName - The name of the virtual machine extension. @@ -74,7 +74,7 @@ func (client *VirtualMachineExtensionsClient) BeginCreateOrUpdate(ctx context.Co // CreateOrUpdate - The operation to create or update the extension. // If the operation fails it returns an *azcore.ResponseError type. 
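
// Illustrative note in runnable form (separate from the diff): several hunks,
// like the listVersions one above, only reorder reqQP.Set calls, moving $top
// below $orderby or api-version above conditional parameters. url.Values.Encode
// sorts by key, so these moves are codegen layout churn with no wire-format
// effect.
package main

import (
	"fmt"
	"net/url"
)

func main() {
	a := url.Values{}
	a.Set("$top", "5")
	a.Set("$orderby", "name")
	a.Set("api-version", "2024-03-01")

	b := url.Values{}
	b.Set("$orderby", "name")
	b.Set("api-version", "2024-03-01")
	b.Set("$top", "5")

	fmt.Println(a.Encode() == b.Encode()) // true: Encode emits keys sorted
}
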
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineExtensionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension, options *VirtualMachineExtensionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineExtensionsClient.BeginCreateOrUpdate" @@ -120,7 +120,7 @@ func (client *VirtualMachineExtensionsClient) createOrUpdateCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { @@ -132,7 +132,7 @@ func (client *VirtualMachineExtensionsClient) createOrUpdateCreateRequest(ctx co // BeginDelete - The operation to delete the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the extension should be deleted. // - vmExtensionName - The name of the virtual machine extension. @@ -158,7 +158,7 @@ func (client *VirtualMachineExtensionsClient) BeginDelete(ctx context.Context, r // Delete - The operation to delete the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineExtensionsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, options *VirtualMachineExtensionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineExtensionsClient.BeginDelete" @@ -204,7 +204,7 @@ func (client *VirtualMachineExtensionsClient) deleteCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -213,7 +213,7 @@ func (client *VirtualMachineExtensionsClient) deleteCreateRequest(ctx context.Co // Get - The operation to get the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine containing the extension. // - vmExtensionName - The name of the virtual machine extension. @@ -268,7 +268,7 @@ func (client *VirtualMachineExtensionsClient) getCreateRequest(ctx context.Conte if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -286,7 +286,7 @@ func (client *VirtualMachineExtensionsClient) getHandleResponse(resp *http.Respo // List - The operation to get all extensions of a Virtual Machine. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine containing the extension. // - options - VirtualMachineExtensionsClientListOptions contains the optional parameters for the VirtualMachineExtensionsClient.List @@ -336,7 +336,7 @@ func (client *VirtualMachineExtensionsClient) listCreateRequest(ctx context.Cont if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -354,7 +354,7 @@ func (client *VirtualMachineExtensionsClient) listHandleResponse(resp *http.Resp // BeginUpdate - The operation to update the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the extension should be updated. // - vmExtensionName - The name of the virtual machine extension. @@ -381,7 +381,7 @@ func (client *VirtualMachineExtensionsClient) BeginUpdate(ctx context.Context, r // Update - The operation to update the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineExtensionsClient) update(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtensionUpdate, options *VirtualMachineExtensionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineExtensionsClient.BeginUpdate" @@ -427,7 +427,7 @@ func (client *VirtualMachineExtensionsClient) updateCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go index 1c5ab27e5..d2557d0d4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go @@ -48,7 +48,7 @@ func NewVirtualMachineImagesClient(subscriptionID string, credential azcore.Toke // Get - Gets a virtual machine image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - publisherName - A valid image publisher. // - offer - A valid image publisher offer. 
@@ -110,7 +110,7 @@ func (client *VirtualMachineImagesClient) getCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -128,7 +128,7 @@ func (client *VirtualMachineImagesClient) getHandleResponse(resp *http.Response) // List - Gets a list of all virtual machine image versions for the specified location, publisher, offer, and SKU. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - publisherName - A valid image publisher. // - offer - A valid image publisher offer. @@ -188,13 +188,13 @@ func (client *VirtualMachineImagesClient) listCreateRequest(ctx context.Context, if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - if options != nil && options.Top != nil { - reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) - } if options != nil && options.Orderby != nil { reqQP.Set("$orderby", *options.Orderby) } - reqQP.Set("api-version", "2023-09-01") + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -212,7 +212,7 @@ func (client *VirtualMachineImagesClient) listHandleResponse(resp *http.Response // ListByEdgeZone - Gets a list of all virtual machine image versions for the specified edge zone // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - options - VirtualMachineImagesClientListByEdgeZoneOptions contains the optional parameters for the VirtualMachineImagesClient.ListByEdgeZone @@ -259,7 +259,7 @@ func (client *VirtualMachineImagesClient) listByEdgeZoneCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -277,7 +277,7 @@ func (client *VirtualMachineImagesClient) listByEdgeZoneHandleResponse(resp *htt // ListOffers - Gets a list of virtual machine image offers for the specified location and publisher. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - publisherName - A valid image publisher. 
// - options - VirtualMachineImagesClientListOffersOptions contains the optional parameters for the VirtualMachineImagesClient.ListOffers @@ -324,7 +324,7 @@ func (client *VirtualMachineImagesClient) listOffersCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -342,7 +342,7 @@ func (client *VirtualMachineImagesClient) listOffersHandleResponse(resp *http.Re // ListPublishers - Gets a list of virtual machine image publishers for the specified Azure location. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - options - VirtualMachineImagesClientListPublishersOptions contains the optional parameters for the VirtualMachineImagesClient.ListPublishers // method. @@ -384,7 +384,7 @@ func (client *VirtualMachineImagesClient) listPublishersCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -402,7 +402,7 @@ func (client *VirtualMachineImagesClient) listPublishersHandleResponse(resp *htt // ListSKUs - Gets a list of virtual machine image SKUs for the specified location, publisher, and offer. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - publisherName - A valid image publisher. // - offer - A valid image publisher offer. @@ -454,7 +454,7 @@ func (client *VirtualMachineImagesClient) listSKUsCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go index 4151b10d3..8558798f5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go @@ -48,7 +48,7 @@ func NewVirtualMachineImagesEdgeZoneClient(subscriptionID string, credential azc // Get - Gets a virtual machine image in an edge zone. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - publisherName - A valid image publisher. 
@@ -115,7 +115,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) getCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -133,7 +133,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) getHandleResponse(resp *http.R // List - Gets a list of all virtual machine image versions for the specified location, edge zone, publisher, offer, and SKU. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - publisherName - A valid image publisher. @@ -198,13 +198,13 @@ func (client *VirtualMachineImagesEdgeZoneClient) listCreateRequest(ctx context. if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - if options != nil && options.Top != nil { - reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) - } if options != nil && options.Orderby != nil { reqQP.Set("$orderby", *options.Orderby) } - reqQP.Set("api-version", "2023-09-01") + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -222,7 +222,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listHandleResponse(resp *http. // ListOffers - Gets a list of virtual machine image offers for the specified location, edge zone and publisher. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - publisherName - A valid image publisher. @@ -274,7 +274,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listOffersCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -292,7 +292,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listOffersHandleResponse(resp // ListPublishers - Gets a list of virtual machine image publishers for the specified Azure location and edge zone. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. 
// - options - VirtualMachineImagesEdgeZoneClientListPublishersOptions contains the optional parameters for the VirtualMachineImagesEdgeZoneClient.ListPublishers @@ -339,7 +339,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listPublishersCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -357,7 +357,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listPublishersHandleResponse(r // ListSKUs - Gets a list of virtual machine image SKUs for the specified location, edge zone, publisher, and offer. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - publisherName - A valid image publisher. @@ -414,7 +414,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listSKUsCreateRequest(ctx cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineruncommands_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineruncommands_client.go index 348c59ade..c5ef4894c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineruncommands_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineruncommands_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineRunCommandsClient(subscriptionID string, credential azcore // BeginCreateOrUpdate - The operation to create or update the run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the run command should be created or updated. // - runCommandName - The name of the virtual machine run command. @@ -74,7 +74,7 @@ func (client *VirtualMachineRunCommandsClient) BeginCreateOrUpdate(ctx context.C // CreateOrUpdate - The operation to create or update the run command. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineRunCommandsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, runCommand VirtualMachineRunCommand, options *VirtualMachineRunCommandsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineRunCommandsClient.BeginCreateOrUpdate" @@ -120,7 +120,7 @@ func (client *VirtualMachineRunCommandsClient) createOrUpdateCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, runCommand); err != nil { @@ -132,7 +132,7 @@ func (client *VirtualMachineRunCommandsClient) createOrUpdateCreateRequest(ctx c // BeginDelete - The operation to delete the run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the run command should be deleted. // - runCommandName - The name of the virtual machine run command. @@ -158,7 +158,7 @@ func (client *VirtualMachineRunCommandsClient) BeginDelete(ctx context.Context, // Delete - The operation to delete the run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineRunCommandsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, options *VirtualMachineRunCommandsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineRunCommandsClient.BeginDelete" @@ -204,7 +204,7 @@ func (client *VirtualMachineRunCommandsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -213,7 +213,7 @@ func (client *VirtualMachineRunCommandsClient) deleteCreateRequest(ctx context.C // Get - Gets specific run command for a subscription in a location. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location upon which run commands is queried. // - commandID - The command id. // - options - VirtualMachineRunCommandsClientGetOptions contains the optional parameters for the VirtualMachineRunCommandsClient.Get @@ -260,7 +260,7 @@ func (client *VirtualMachineRunCommandsClient) getCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -278,7 +278,7 @@ func (client *VirtualMachineRunCommandsClient) getHandleResponse(resp *http.Resp // GetByVirtualMachine - The operation to get the run command. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine containing the run command. // - runCommandName - The name of the virtual machine run command. @@ -333,7 +333,7 @@ func (client *VirtualMachineRunCommandsClient) getByVirtualMachineCreateRequest( if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -350,7 +350,7 @@ func (client *VirtualMachineRunCommandsClient) getByVirtualMachineHandleResponse // NewListPager - Lists all available run commands for a subscription in a location. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location upon which run commands is queried. // - options - VirtualMachineRunCommandsClientListOptions contains the optional parameters for the VirtualMachineRunCommandsClient.NewListPager // method. @@ -393,7 +393,7 @@ func (client *VirtualMachineRunCommandsClient) listCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -410,7 +410,7 @@ func (client *VirtualMachineRunCommandsClient) listHandleResponse(resp *http.Res // NewListByVirtualMachinePager - The operation to get all run commands of a Virtual Machine. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine containing the run command. // - options - VirtualMachineRunCommandsClientListByVirtualMachineOptions contains the optional parameters for the VirtualMachineRunCommandsClient.NewListByVirtualMachinePager @@ -461,7 +461,7 @@ func (client *VirtualMachineRunCommandsClient) listByVirtualMachineCreateRequest if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -479,7 +479,7 @@ func (client *VirtualMachineRunCommandsClient) listByVirtualMachineHandleRespons // BeginUpdate - The operation to update the run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the run command should be updated. // - runCommandName - The name of the virtual machine run command. @@ -506,7 +506,7 @@ func (client *VirtualMachineRunCommandsClient) BeginUpdate(ctx context.Context, // Update - The operation to update the run command. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineRunCommandsClient) update(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, runCommand VirtualMachineRunCommandUpdate, options *VirtualMachineRunCommandsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineRunCommandsClient.BeginUpdate" @@ -552,7 +552,7 @@ func (client *VirtualMachineRunCommandsClient) updateCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, runCommand); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go index b665b49f5..dd55aabc3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go @@ -48,7 +48,7 @@ func NewVirtualMachinesClient(subscriptionID string, credential azcore.TokenCred // BeginAssessPatches - Assess patches on the VM. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginAssessPatchesOptions contains the optional parameters for the VirtualMachinesClient.BeginAssessPatches @@ -74,7 +74,7 @@ func (client *VirtualMachinesClient) BeginAssessPatches(ctx context.Context, res // AssessPatches - Assess patches on the VM. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) assessPatches(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginAssessPatchesOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginAssessPatches" @@ -116,7 +116,7 @@ func (client *VirtualMachinesClient) assessPatchesCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -125,7 +125,7 @@ func (client *VirtualMachinesClient) assessPatchesCreateRequest(ctx context.Cont // BeginAttachDetachDataDisks - Attach and detach data disks to/from the virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - parameters - Parameters supplied to the attach and detach data disks operation on the virtual machine. 
@@ -152,7 +152,7 @@ func (client *VirtualMachinesClient) BeginAttachDetachDataDisks(ctx context.Cont // AttachDetachDataDisks - Attach and detach data disks to/from the virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) attachDetachDataDisks(ctx context.Context, resourceGroupName string, vmName string, parameters AttachDetachDataDisksRequest, options *VirtualMachinesClientBeginAttachDetachDataDisksOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginAttachDetachDataDisks" @@ -194,7 +194,7 @@ func (client *VirtualMachinesClient) attachDetachDataDisksCreateRequest(ctx cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -207,7 +207,7 @@ func (client *VirtualMachinesClient) attachDetachDataDisksCreateRequest(ctx cont // similar VMs. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - parameters - Parameters supplied to the Capture Virtual Machine operation. @@ -235,7 +235,7 @@ func (client *VirtualMachinesClient) BeginCapture(ctx context.Context, resourceG // VMs. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) capture(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters, options *VirtualMachinesClientBeginCaptureOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginCapture" @@ -277,7 +277,7 @@ func (client *VirtualMachinesClient) captureCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -290,7 +290,7 @@ func (client *VirtualMachinesClient) captureCreateRequest(ctx context.Context, r // before invoking this operation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginConvertToManagedDisksOptions contains the optional parameters for the VirtualMachinesClient.BeginConvertToManagedDisks @@ -316,7 +316,7 @@ func (client *VirtualMachinesClient) BeginConvertToManagedDisks(ctx context.Cont // before invoking this operation. // If the operation fails it returns an *azcore.ResponseError type. 
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachinesClient) convertToManagedDisks(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginConvertToManagedDisksOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachinesClient.BeginConvertToManagedDisks"
@@ -358,7 +358,7 @@ func (client *VirtualMachinesClient) convertToManagedDisksCreateRequest(ctx cont
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -368,7 +368,7 @@ func (client *VirtualMachinesClient) convertToManagedDisksCreateRequest(ctx cont
 // during virtual machine creation.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - parameters - Parameters supplied to the Create Virtual Machine operation.
@@ -395,7 +395,7 @@ func (client *VirtualMachinesClient) BeginCreateOrUpdate(ctx context.Context, re
 // virtual machine creation.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachinesClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachine, options *VirtualMachinesClientBeginCreateOrUpdateOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachinesClient.BeginCreateOrUpdate"
@@ -437,15 +437,15 @@ func (client *VirtualMachinesClient) createOrUpdateCreateRequest(ctx context.Con
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
 	if options != nil && options.IfMatch != nil {
 		req.Raw().Header["If-Match"] = []string{*options.IfMatch}
 	}
 	if options != nil && options.IfNoneMatch != nil {
 		req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch}
 	}
-	req.Raw().Header["Accept"] = []string{"application/json"}
 	if err := runtime.MarshalAsJSON(req, parameters); err != nil {
 		return nil, err
 	}
@@ -456,7 +456,7 @@ func (client *VirtualMachinesClient) createOrUpdateCreateRequest(ctx context.Con
 // resources that this virtual machine uses.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientBeginDeallocateOptions contains the optional parameters for the VirtualMachinesClient.BeginDeallocate
@@ -482,7 +482,7 @@ func (client *VirtualMachinesClient) BeginDeallocate(ctx context.Context, resour
 // that this virtual machine uses.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachinesClient) deallocate(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginDeallocateOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachinesClient.BeginDeallocate"
@@ -524,10 +524,10 @@ func (client *VirtualMachinesClient) deallocateCreateRequest(ctx context.Context
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2024-03-01")
 	if options != nil && options.Hibernate != nil {
 		reqQP.Set("hibernate", strconv.FormatBool(*options.Hibernate))
 	}
-	reqQP.Set("api-version", "2023-09-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -536,7 +536,7 @@ func (client *VirtualMachinesClient) deallocateCreateRequest(ctx context.Context
 // BeginDelete - The operation to delete a virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientBeginDeleteOptions contains the optional parameters for the VirtualMachinesClient.BeginDelete
@@ -561,7 +561,7 @@ func (client *VirtualMachinesClient) BeginDelete(ctx context.Context, resourceGr
 // Delete - The operation to delete a virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachinesClient) deleteOperation(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginDeleteOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachinesClient.BeginDelete"
@@ -603,10 +603,10 @@ func (client *VirtualMachinesClient) deleteCreateRequest(ctx context.Context, re
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2024-03-01")
 	if options != nil && options.ForceDeletion != nil {
 		reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion))
 	}
-	reqQP.Set("api-version", "2023-09-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -619,7 +619,7 @@ func (client *VirtualMachinesClient) deleteCreateRequest(ctx context.Context, re
 // [https://docs.microsoft.com/azure/virtual-machines/linux/capture-image].
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientGeneralizeOptions contains the optional parameters for the VirtualMachinesClient.Generalize
@@ -665,7 +665,7 @@ func (client *VirtualMachinesClient) generalizeCreateRequest(ctx context.Context
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -674,7 +674,7 @@ func (client *VirtualMachinesClient) generalizeCreateRequest(ctx context.Context
 // Get - Retrieves information about the model view or the instance view of a virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientGetOptions contains the optional parameters for the VirtualMachinesClient.Get method.
@@ -723,7 +723,7 @@ func (client *VirtualMachinesClient) getCreateRequest(ctx context.Context, resou
 	if options != nil && options.Expand != nil {
 		reqQP.Set("$expand", string(*options.Expand))
 	}
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -741,7 +741,7 @@ func (client *VirtualMachinesClient) getHandleResponse(resp *http.Response) (Vir
 // BeginInstallPatches - Installs patches on the VM.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - installPatchesInput - Input for InstallPatches as directly received by the API
@@ -768,7 +768,7 @@ func (client *VirtualMachinesClient) BeginInstallPatches(ctx context.Context, re
 // InstallPatches - Installs patches on the VM.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachinesClient) installPatches(ctx context.Context, resourceGroupName string, vmName string, installPatchesInput VirtualMachineInstallPatchesParameters, options *VirtualMachinesClientBeginInstallPatchesOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachinesClient.BeginInstallPatches"
@@ -810,7 +810,7 @@ func (client *VirtualMachinesClient) installPatchesCreateRequest(ctx context.Con
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	if err := runtime.MarshalAsJSON(req, installPatchesInput); err != nil {
@@ -822,7 +822,7 @@ func (client *VirtualMachinesClient) installPatchesCreateRequest(ctx context.Con
 // InstanceView - Retrieves information about the run-time state of a virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientInstanceViewOptions contains the optional parameters for the VirtualMachinesClient.InstanceView
@@ -869,7 +869,7 @@ func (client *VirtualMachinesClient) instanceViewCreateRequest(ctx context.Conte
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -887,7 +887,7 @@ func (client *VirtualMachinesClient) instanceViewHandleResponse(resp *http.Respo
 // NewListPager - Lists all of the virtual machines in the specified resource group. Use the nextLink property in the response
 // to get the next page of virtual machines.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - options - VirtualMachinesClientListOptions contains the optional parameters for the VirtualMachinesClient.NewListPager
 //   method.
@@ -930,13 +930,13 @@ func (client *VirtualMachinesClient) listCreateRequest(ctx context.Context, reso
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	if options != nil && options.Filter != nil {
-		reqQP.Set("$filter", *options.Filter)
-	}
 	if options != nil && options.Expand != nil {
 		reqQP.Set("$expand", string(*options.Expand))
 	}
-	reqQP.Set("api-version", "2023-09-01")
+	if options != nil && options.Filter != nil {
+		reqQP.Set("$filter", *options.Filter)
+	}
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -954,7 +954,7 @@ func (client *VirtualMachinesClient) listHandleResponse(resp *http.Response) (Vi
 // NewListAllPager - Lists all of the virtual machines in the specified subscription. Use the nextLink property in the response
 // to get the next page of virtual machines.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - options - VirtualMachinesClientListAllOptions contains the optional parameters for the VirtualMachinesClient.NewListAllPager
 //   method.
 func (client *VirtualMachinesClient) NewListAllPager(options *VirtualMachinesClientListAllOptions) *runtime.Pager[VirtualMachinesClientListAllResponse] {
@@ -992,15 +992,15 @@ func (client *VirtualMachinesClient) listAllCreateRequest(ctx context.Context, o
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
-	if options != nil && options.StatusOnly != nil {
-		reqQP.Set("statusOnly", *options.StatusOnly)
+	if options != nil && options.Expand != nil {
+		reqQP.Set("$expand", string(*options.Expand))
 	}
 	if options != nil && options.Filter != nil {
 		reqQP.Set("$filter", *options.Filter)
 	}
-	if options != nil && options.Expand != nil {
-		reqQP.Set("$expand", string(*options.Expand))
+	reqQP.Set("api-version", "2024-03-01")
+	if options != nil && options.StatusOnly != nil {
+		reqQP.Set("statusOnly", *options.StatusOnly)
 	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
@@ -1018,7 +1018,7 @@ func (client *VirtualMachinesClient) listAllHandleResponse(resp *http.Response)
 // NewListAvailableSizesPager - Lists all available virtual machine sizes to which the specified virtual machine can be resized.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientListAvailableSizesOptions contains the optional parameters for the VirtualMachinesClient.NewListAvailableSizesPager
@@ -1067,7 +1067,7 @@ func (client *VirtualMachinesClient) listAvailableSizesCreateRequest(ctx context
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -1084,7 +1084,7 @@ func (client *VirtualMachinesClient) listAvailableSizesHandleResponse(resp *http
 // NewListByLocationPager - Gets all the virtual machines under the specified subscription for the specified location.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - location - The location for which virtual machines under the subscription are queried.
 // - options - VirtualMachinesClientListByLocationOptions contains the optional parameters for the VirtualMachinesClient.NewListByLocationPager
 //   method.
@@ -1127,7 +1127,7 @@ func (client *VirtualMachinesClient) listByLocationCreateRequest(ctx context.Con
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -1145,7 +1145,7 @@ func (client *VirtualMachinesClient) listByLocationHandleResponse(resp *http.Res
 // BeginPerformMaintenance - The operation to perform maintenance on a virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientBeginPerformMaintenanceOptions contains the optional parameters for the VirtualMachinesClient.BeginPerformMaintenance
@@ -1170,7 +1170,7 @@ func (client *VirtualMachinesClient) BeginPerformMaintenance(ctx context.Context
 // PerformMaintenance - The operation to perform maintenance on a virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachinesClient) performMaintenance(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginPerformMaintenanceOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachinesClient.BeginPerformMaintenance"
@@ -1212,7 +1212,7 @@ func (client *VirtualMachinesClient) performMaintenanceCreateRequest(ctx context
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -1222,7 +1222,7 @@ func (client *VirtualMachinesClient) performMaintenanceCreateRequest(ctx context
 // provisioned resources. You are still charged for this virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientBeginPowerOffOptions contains the optional parameters for the VirtualMachinesClient.BeginPowerOff
@@ -1248,7 +1248,7 @@ func (client *VirtualMachinesClient) BeginPowerOff(ctx context.Context, resource
 // resources. You are still charged for this virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachinesClient) powerOff(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginPowerOffOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachinesClient.BeginPowerOff"
@@ -1290,10 +1290,10 @@ func (client *VirtualMachinesClient) powerOffCreateRequest(ctx context.Context,
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2024-03-01")
 	if options != nil && options.SkipShutdown != nil {
 		reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown))
 	}
-	reqQP.Set("api-version", "2023-09-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -1302,7 +1302,7 @@ func (client *VirtualMachinesClient) powerOffCreateRequest(ctx context.Context,
 // BeginReapply - The operation to reapply a virtual machine's state.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientBeginReapplyOptions contains the optional parameters for the VirtualMachinesClient.BeginReapply
@@ -1327,7 +1327,7 @@ func (client *VirtualMachinesClient) BeginReapply(ctx context.Context, resourceG
 // Reapply - The operation to reapply a virtual machine's state.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachinesClient) reapply(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginReapplyOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachinesClient.BeginReapply"
@@ -1369,7 +1369,7 @@ func (client *VirtualMachinesClient) reapplyCreateRequest(ctx context.Context, r
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -1378,7 +1378,7 @@ func (client *VirtualMachinesClient) reapplyCreateRequest(ctx context.Context, r
 // BeginRedeploy - Shuts down the virtual machine, moves it to a new node, and powers it back on.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientBeginRedeployOptions contains the optional parameters for the VirtualMachinesClient.BeginRedeploy
@@ -1403,7 +1403,7 @@ func (client *VirtualMachinesClient) BeginRedeploy(ctx context.Context, resource
 // Redeploy - Shuts down the virtual machine, moves it to a new node, and powers it back on.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachinesClient) redeploy(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginRedeployOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachinesClient.BeginRedeploy"
@@ -1445,7 +1445,7 @@ func (client *VirtualMachinesClient) redeployCreateRequest(ctx context.Context,
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -1458,7 +1458,7 @@ func (client *VirtualMachinesClient) redeployCreateRequest(ctx context.Context,
 // will be deleted after reimage. The deleteOption of the OS disk should be updated accordingly before performing the reimage.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientBeginReimageOptions contains the optional parameters for the VirtualMachinesClient.BeginReimage
@@ -1487,7 +1487,7 @@ func (client *VirtualMachinesClient) BeginReimage(ctx context.Context, resourceG
 // will be deleted after reimage. The deleteOption of the OS disk should be updated accordingly before performing the reimage.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachinesClient) reimage(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginReimageOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachinesClient.BeginReimage"
@@ -1529,7 +1529,7 @@ func (client *VirtualMachinesClient) reimageCreateRequest(ctx context.Context, r
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	if options != nil && options.Parameters != nil {
@@ -1544,7 +1544,7 @@ func (client *VirtualMachinesClient) reimageCreateRequest(ctx context.Context, r
 // BeginRestart - The operation to restart a virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientBeginRestartOptions contains the optional parameters for the VirtualMachinesClient.BeginRestart
@@ -1569,7 +1569,7 @@ func (client *VirtualMachinesClient) BeginRestart(ctx context.Context, resourceG
 // Restart - The operation to restart a virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachinesClient) restart(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginRestartOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachinesClient.BeginRestart"
@@ -1611,7 +1611,7 @@ func (client *VirtualMachinesClient) restartCreateRequest(ctx context.Context, r
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -1620,7 +1620,7 @@ func (client *VirtualMachinesClient) restartCreateRequest(ctx context.Context, r
 // RetrieveBootDiagnosticsData - The operation to retrieve SAS URIs for a virtual machine's boot diagnostic logs.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientRetrieveBootDiagnosticsDataOptions contains the optional parameters for the VirtualMachinesClient.RetrieveBootDiagnosticsData
@@ -1667,10 +1667,10 @@ func (client *VirtualMachinesClient) retrieveBootDiagnosticsDataCreateRequest(ct
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2024-03-01")
 	if options != nil && options.SasURIExpirationTimeInMinutes != nil {
 		reqQP.Set("sasUriExpirationTimeInMinutes", strconv.FormatInt(int64(*options.SasURIExpirationTimeInMinutes), 10))
 	}
-	reqQP.Set("api-version", "2023-09-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -1688,7 +1688,7 @@ func (client *VirtualMachinesClient) retrieveBootDiagnosticsDataHandleResponse(r
 // BeginRunCommand - Run command on the VM.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - parameters - Parameters supplied to the Run command operation.
@@ -1715,7 +1715,7 @@ func (client *VirtualMachinesClient) BeginRunCommand(ctx context.Context, resour
 // RunCommand - Run command on the VM.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachinesClient) runCommand(ctx context.Context, resourceGroupName string, vmName string, parameters RunCommandInput, options *VirtualMachinesClientBeginRunCommandOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachinesClient.BeginRunCommand"
@@ -1757,7 +1757,7 @@ func (client *VirtualMachinesClient) runCommandCreateRequest(ctx context.Context
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json, text/json"}
 	if err := runtime.MarshalAsJSON(req, parameters); err != nil {
@@ -1769,7 +1769,7 @@ func (client *VirtualMachinesClient) runCommandCreateRequest(ctx context.Context
 // SimulateEviction - The operation to simulate the eviction of spot virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientSimulateEvictionOptions contains the optional parameters for the VirtualMachinesClient.SimulateEviction
@@ -1815,7 +1815,7 @@ func (client *VirtualMachinesClient) simulateEvictionCreateRequest(ctx context.C
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -1824,7 +1824,7 @@ func (client *VirtualMachinesClient) simulateEvictionCreateRequest(ctx context.C
 // BeginStart - The operation to start a virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - options - VirtualMachinesClientBeginStartOptions contains the optional parameters for the VirtualMachinesClient.BeginStart
@@ -1849,7 +1849,7 @@ func (client *VirtualMachinesClient) BeginStart(ctx context.Context, resourceGro
 // Start - The operation to start a virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachinesClient) start(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginStartOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachinesClient.BeginStart"
@@ -1891,7 +1891,7 @@ func (client *VirtualMachinesClient) startCreateRequest(ctx context.Context, res
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -1900,7 +1900,7 @@ func (client *VirtualMachinesClient) startCreateRequest(ctx context.Context, res
 // BeginUpdate - The operation to update a virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmName - The name of the virtual machine.
 // - parameters - Parameters supplied to the Update Virtual Machine operation.
@@ -1926,7 +1926,7 @@ func (client *VirtualMachinesClient) BeginUpdate(ctx context.Context, resourceGr
 // Update - The operation to update a virtual machine.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachinesClient) update(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachineUpdate, options *VirtualMachinesClientBeginUpdateOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachinesClient.BeginUpdate"
@@ -1968,15 +1968,15 @@ func (client *VirtualMachinesClient) updateCreateRequest(ctx context.Context, re
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
 	if options != nil && options.IfMatch != nil {
 		req.Raw().Header["If-Match"] = []string{*options.IfMatch}
 	}
 	if options != nil && options.IfNoneMatch != nil {
 		req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch}
 	}
-	req.Raw().Header["Accept"] = []string{"application/json"}
 	if err := runtime.MarshalAsJSON(req, parameters); err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetextensions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetextensions_client.go
index 585b42657..81ddfad6a 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetextensions_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetextensions_client.go
@@ -47,7 +47,7 @@ func NewVirtualMachineScaleSetExtensionsClient(subscriptionID string, credential
 // BeginCreateOrUpdate - The operation to create or update an extension.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set where the extension should be create or updated.
 // - vmssExtensionName - The name of the VM scale set extension.
@@ -74,7 +74,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) BeginCreateOrUpdate(ctx co
 // CreateOrUpdate - The operation to create or update an extension.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachineScaleSetExtensionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtension, options *VirtualMachineScaleSetExtensionsClientBeginCreateOrUpdateOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachineScaleSetExtensionsClient.BeginCreateOrUpdate"
@@ -120,7 +120,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) createOrUpdateCreateReques
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil {
@@ -132,7 +132,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) createOrUpdateCreateReques
 // BeginDelete - The operation to delete the extension.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set where the extension should be deleted.
 // - vmssExtensionName - The name of the VM scale set extension.
@@ -158,7 +158,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) BeginDelete(ctx context.Co
 // Delete - The operation to delete the extension.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachineScaleSetExtensionsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, options *VirtualMachineScaleSetExtensionsClientBeginDeleteOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachineScaleSetExtensionsClient.BeginDelete"
@@ -204,7 +204,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) deleteCreateRequest(ctx co
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -213,7 +213,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) deleteCreateRequest(ctx co
 // Get - The operation to get the extension.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set containing the extension.
 // - vmssExtensionName - The name of the VM scale set extension.
@@ -268,7 +268,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) getCreateRequest(ctx conte
 	if options != nil && options.Expand != nil {
 		reqQP.Set("$expand", *options.Expand)
 	}
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -285,7 +285,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) getHandleResponse(resp *ht
 // NewListPager - Gets a list of all extensions in a VM scale set.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set containing the extension.
 // - options - VirtualMachineScaleSetExtensionsClientListOptions contains the optional parameters for the VirtualMachineScaleSetExtensionsClient.NewListPager
@@ -333,7 +333,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) listCreateRequest(ctx cont
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -351,7 +351,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) listHandleResponse(resp *h
 // BeginUpdate - The operation to update an extension.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set where the extension should be updated.
 // - vmssExtensionName - The name of the VM scale set extension.
@@ -378,7 +378,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) BeginUpdate(ctx context.Co
 // Update - The operation to update an extension.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachineScaleSetExtensionsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtensionUpdate, options *VirtualMachineScaleSetExtensionsClientBeginUpdateOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachineScaleSetExtensionsClient.BeginUpdate"
@@ -424,7 +424,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) updateCreateRequest(ctx co
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetrollingupgrades_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetrollingupgrades_client.go
index 9ef8a1902..41e01d5d7 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetrollingupgrades_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetrollingupgrades_client.go
@@ -47,7 +47,7 @@ func NewVirtualMachineScaleSetRollingUpgradesClient(subscriptionID string, crede
 // BeginCancel - Cancels the current virtual machine scale set rolling upgrade.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set.
 // - options - VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions contains the optional parameters for the VirtualMachineScaleSetRollingUpgradesClient.BeginCancel
@@ -72,7 +72,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) BeginCancel(ctx conte
 // Cancel - Cancels the current virtual machine scale set rolling upgrade.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachineScaleSetRollingUpgradesClient) cancel(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachineScaleSetRollingUpgradesClient.BeginCancel"
@@ -114,7 +114,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) cancelCreateRequest(c
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -123,7 +123,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) cancelCreateRequest(c
 // GetLatest - Gets the status of the latest virtual machine scale set rolling upgrade.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set.
 // - options - VirtualMachineScaleSetRollingUpgradesClientGetLatestOptions contains the optional parameters for the VirtualMachineScaleSetRollingUpgradesClient.GetLatest
@@ -170,7 +170,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) getLatestCreateReques
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -190,7 +190,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) getLatestHandleRespon
 // are not affected.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set.
 // - options - VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions contains the optional parameters
@@ -217,7 +217,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) BeginStartExtensionUp
 // are not affected.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachineScaleSetRollingUpgradesClient) startExtensionUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachineScaleSetRollingUpgradesClient.BeginStartExtensionUpgrade"
@@ -259,7 +259,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) startExtensionUpgrade
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -270,7 +270,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) startExtensionUpgrade
 // affected.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set.
 // - options - VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions contains the optional parameters for the
@@ -297,7 +297,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) BeginStartOSUpgrade(c
 // affected.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachineScaleSetRollingUpgradesClient) startOSUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachineScaleSetRollingUpgradesClient.BeginStartOSUpgrade"
@@ -339,7 +339,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) startOSUpgradeCreateR
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go
index eb6506c15..a03ff0c50 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go
@@ -49,7 +49,7 @@ func NewVirtualMachineScaleSetsClient(subscriptionID string, credential azcore.T
 // scale set.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set.
 // - options - VirtualMachineScaleSetsClientBeginApproveRollingUpgradeOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginApproveRollingUpgrade
@@ -75,7 +75,7 @@ func (client *VirtualMachineScaleSetsClient) BeginApproveRollingUpgrade(ctx cont
 // set.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachineScaleSetsClient) approveRollingUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginApproveRollingUpgradeOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachineScaleSetsClient.BeginApproveRollingUpgrade"
@@ -117,7 +117,7 @@ func (client *VirtualMachineScaleSetsClient) approveRollingUpgradeCreateRequest(
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	if options != nil && options.VMInstanceIDs != nil {
@@ -132,7 +132,7 @@ func (client *VirtualMachineScaleSetsClient) approveRollingUpgradeCreateRequest(
 // ConvertToSinglePlacementGroup - Converts SinglePlacementGroup property to false for a existing virtual machine scale set.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the virtual machine scale set to create or update.
 // - parameters - The input object for ConvertToSinglePlacementGroup API.
@@ -179,7 +179,7 @@ func (client *VirtualMachineScaleSetsClient) convertToSinglePlacementGroupCreate
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	if err := runtime.MarshalAsJSON(req, parameters); err != nil {
@@ -191,7 +191,7 @@ func (client *VirtualMachineScaleSetsClient) convertToSinglePlacementGroupCreate
 // BeginCreateOrUpdate - Create or update a VM scale set.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set to create or update.
 // - parameters - The scale set object.
@@ -217,7 +217,7 @@ func (client *VirtualMachineScaleSetsClient) BeginCreateOrUpdate(ctx context.Con
 // CreateOrUpdate - Create or update a VM scale set.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachineScaleSetsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSet, options *VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachineScaleSetsClient.BeginCreateOrUpdate"
@@ -259,15 +259,15 @@ func (client *VirtualMachineScaleSetsClient) createOrUpdateCreateRequest(ctx con
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
 	if options != nil && options.IfMatch != nil {
 		req.Raw().Header["If-Match"] = []string{*options.IfMatch}
 	}
 	if options != nil && options.IfNoneMatch != nil {
 		req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch}
 	}
-	req.Raw().Header["Accept"] = []string{"application/json"}
 	if err := runtime.MarshalAsJSON(req, parameters); err != nil {
 		return nil, err
 	}
@@ -279,7 +279,7 @@ func (client *VirtualMachineScaleSetsClient) createOrUpdateCreateRequest(ctx con
 // scale set deallocates.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set.
 // - options - VirtualMachineScaleSetsClientBeginDeallocateOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginDeallocate
@@ -306,7 +306,7 @@ func (client *VirtualMachineScaleSetsClient) BeginDeallocate(ctx context.Context
 // scale set deallocates.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachineScaleSetsClient) deallocate(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginDeallocateOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachineScaleSetsClient.BeginDeallocate"
@@ -348,10 +348,10 @@ func (client *VirtualMachineScaleSetsClient) deallocateCreateRequest(ctx context
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2024-03-01")
 	if options != nil && options.Hibernate != nil {
 		reqQP.Set("hibernate", strconv.FormatBool(*options.Hibernate))
 	}
-	reqQP.Set("api-version", "2023-09-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	if options != nil && options.VMInstanceIDs != nil {
@@ -366,7 +366,7 @@ func (client *VirtualMachineScaleSetsClient) deallocateCreateRequest(ctx context
 // BeginDelete - Deletes a VM scale set.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set.
 // - options - VirtualMachineScaleSetsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginDelete
@@ -391,7 +391,7 @@ func (client *VirtualMachineScaleSetsClient) BeginDelete(ctx context.Context, re
 // Delete - Deletes a VM scale set.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachineScaleSetsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginDeleteOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachineScaleSetsClient.BeginDelete"
@@ -433,10 +433,10 @@ func (client *VirtualMachineScaleSetsClient) deleteCreateRequest(ctx context.Con
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2024-03-01")
 	if options != nil && options.ForceDeletion != nil {
 		reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion))
 	}
-	reqQP.Set("api-version", "2023-09-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -445,7 +445,7 @@ func (client *VirtualMachineScaleSetsClient) deleteCreateRequest(ctx context.Con
 // BeginDeleteInstances - Deletes virtual machines in a VM scale set.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set.
 // - vmInstanceIDs - A list of virtual machine instance IDs from the VM scale set.
@@ -471,7 +471,7 @@ func (client *VirtualMachineScaleSetsClient) BeginDeleteInstances(ctx context.Co
 // DeleteInstances - Deletes virtual machines in a VM scale set.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 func (client *VirtualMachineScaleSetsClient) deleteInstances(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, options *VirtualMachineScaleSetsClientBeginDeleteInstancesOptions) (*http.Response, error) {
 	var err error
 	const operationName = "VirtualMachineScaleSetsClient.BeginDeleteInstances"
@@ -513,10 +513,10 @@ func (client *VirtualMachineScaleSetsClient) deleteInstancesCreateRequest(ctx co
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2024-03-01")
 	if options != nil && options.ForceDeletion != nil {
 		reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion))
 	}
-	reqQP.Set("api-version", "2023-09-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	if err := runtime.MarshalAsJSON(req, vmInstanceIDs); err != nil {
@@ -529,7 +529,7 @@ func (client *VirtualMachineScaleSetsClient) deleteInstancesCreateRequest(ctx co
 // service fabric virtual machine scale set.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set.
 // - platformUpdateDomain - The platform update domain for which a manual recovery walk is requested
@@ -577,14 +577,14 @@ func (client *VirtualMachineScaleSetsClient) forceRecoveryServiceFabricPlatformU
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
+	if options != nil && options.PlacementGroupID != nil {
+		reqQP.Set("placementGroupId", *options.PlacementGroupID)
+	}
 	reqQP.Set("platformUpdateDomain", strconv.FormatInt(int64(platformUpdateDomain), 10))
 	if options != nil && options.Zone != nil {
 		reqQP.Set("zone", *options.Zone)
 	}
-	if options != nil && options.PlacementGroupID != nil {
-		reqQP.Set("placementGroupId", *options.PlacementGroupID)
-	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -602,7 +602,7 @@ func (client *VirtualMachineScaleSetsClient) forceRecoveryServiceFabricPlatformU
 // Get - Display information about a virtual machine scale set.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set.
 // - options - VirtualMachineScaleSetsClientGetOptions contains the optional parameters for the VirtualMachineScaleSetsClient.Get
@@ -649,10 +649,10 @@ func (client *VirtualMachineScaleSetsClient) getCreateRequest(ctx context.Contex
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
 	if options != nil && options.Expand != nil {
 		reqQP.Set("$expand", string(*options.Expand))
 	}
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -670,7 +670,7 @@ func (client *VirtualMachineScaleSetsClient) getHandleResponse(resp *http.Respon
 // GetInstanceView - Gets the status of a VM scale set instance.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set.
 // - options - VirtualMachineScaleSetsClientGetInstanceViewOptions contains the optional parameters for the VirtualMachineScaleSetsClient.GetInstanceView
@@ -717,7 +717,7 @@ func (client *VirtualMachineScaleSetsClient) getInstanceViewCreateRequest(ctx co
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2023-09-01")
+	reqQP.Set("api-version", "2024-03-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -734,7 +734,7 @@ func (client *VirtualMachineScaleSetsClient) getInstanceViewHandleResponse(resp
 // NewGetOSUpgradeHistoryPager - Gets list of OS upgrades on a VM scale set instance.
 //
-// Generated from API version 2023-09-01
+// Generated from API version 2024-03-01
 // - resourceGroupName - The name of the resource group.
 // - vmScaleSetName - The name of the VM scale set.
// - options - VirtualMachineScaleSetsClientGetOSUpgradeHistoryOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewGetOSUpgradeHistoryPager @@ -782,7 +782,7 @@ func (client *VirtualMachineScaleSetsClient) getOSUpgradeHistoryCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -799,7 +799,7 @@ func (client *VirtualMachineScaleSetsClient) getOSUpgradeHistoryHandleResponse(r // NewListPager - Gets a list of all VM scale sets under a resource group. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - VirtualMachineScaleSetsClientListOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewListPager // method. @@ -842,7 +842,7 @@ func (client *VirtualMachineScaleSetsClient) listCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -861,7 +861,7 @@ func (client *VirtualMachineScaleSetsClient) listHandleResponse(resp *http.Respo // nextLink property in the response to get the next page of VM Scale Sets. Do this till nextLink is // null to fetch all the VM Scale Sets. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - VirtualMachineScaleSetsClientListAllOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewListAllPager // method. func (client *VirtualMachineScaleSetsClient) NewListAllPager(options *VirtualMachineScaleSetsClientListAllOptions) *runtime.Pager[VirtualMachineScaleSetsClientListAllResponse] { @@ -899,7 +899,7 @@ func (client *VirtualMachineScaleSetsClient) listAllCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -916,7 +916,7 @@ func (client *VirtualMachineScaleSetsClient) listAllHandleResponse(resp *http.Re // NewListByLocationPager - Gets all the VM scale sets under the specified subscription for the specified location. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location for which VM scale sets under the subscription are queried. // - options - VirtualMachineScaleSetsClientListByLocationOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewListByLocationPager // method. @@ -959,7 +959,7 @@ func (client *VirtualMachineScaleSetsClient) listByLocationCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -977,7 +977,7 @@ func (client *VirtualMachineScaleSetsClient) listByLocationHandleResponse(resp * // NewListSKUsPager - Gets a list of SKUs available for your VM scale set, including the minimum and maximum VM instances // allowed for each SKU. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientListSKUsOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewListSKUsPager @@ -1025,7 +1025,7 @@ func (client *VirtualMachineScaleSetsClient) listSKUsCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1045,7 +1045,7 @@ func (client *VirtualMachineScaleSetsClient) listSKUsHandleResponse(resp *http.R // details: https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginPerformMaintenance @@ -1072,7 +1072,7 @@ func (client *VirtualMachineScaleSetsClient) BeginPerformMaintenance(ctx context // details: https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) performMaintenance(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginPerformMaintenance" @@ -1114,7 +1114,7 @@ func (client *VirtualMachineScaleSetsClient) performMaintenanceCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1131,7 +1131,7 @@ func (client *VirtualMachineScaleSetsClient) performMaintenanceCreateRequest(ctx // avoid charges. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginPowerOffOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginPowerOff @@ -1158,7 +1158,7 @@ func (client *VirtualMachineScaleSetsClient) BeginPowerOff(ctx context.Context, // avoid charges. // If the operation fails it returns an *azcore.ResponseError type. 
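// For context, BeginPowerOff and the other Begin* methods touched here are long-running
// operations: the client returns a poller that the caller drives to completion. A minimal
// usage sketch; the subscription ID, resource names, and DefaultAzureCredential are
// illustrative assumptions, not taken from this diff.
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewVirtualMachineScaleSetsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	// skipShutdown is the optional query parameter handled in the next hunk;
	// false requests a graceful shutdown before powering off.
	skip := false
	poller, err := client.BeginPowerOff(context.Background(), "my-rg", "my-vmss",
		&armcompute.VirtualMachineScaleSetsClientBeginPowerOffOptions{SkipShutdown: &skip})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
		log.Fatal(err)
	}
}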
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) powerOff(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginPowerOffOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginPowerOff" @@ -1200,10 +1200,10 @@ func (client *VirtualMachineScaleSetsClient) powerOffCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2024-03-01") if options != nil && options.SkipShutdown != nil { reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1218,7 +1218,7 @@ func (client *VirtualMachineScaleSetsClient) powerOffCreateRequest(ctx context.C // BeginReapply - Reapplies the Virtual Machine Scale Set Virtual Machine Profile to the Virtual Machine Instances // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginReapplyOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginReapply @@ -1244,7 +1244,7 @@ func (client *VirtualMachineScaleSetsClient) BeginReapply(ctx context.Context, r // Reapply - Reapplies the Virtual Machine Scale Set Virtual Machine Profile to the Virtual Machine Instances // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) reapply(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReapplyOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginReapply" @@ -1286,7 +1286,7 @@ func (client *VirtualMachineScaleSetsClient) reapplyCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1296,7 +1296,7 @@ func (client *VirtualMachineScaleSetsClient) reapplyCreateRequest(ctx context.Co // them back on. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginRedeployOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginRedeploy @@ -1322,7 +1322,7 @@ func (client *VirtualMachineScaleSetsClient) BeginRedeploy(ctx context.Context, // back on. // If the operation fails it returns an *azcore.ResponseError type. 
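// The powerOff hunk above only moves reqQP.Set("api-version", ...) ahead of the optional
// skipShutdown parameter (the delete hunks do the same with forceDeletion). The reordering
// is cosmetic: url.Values.Encode sorts keys, so the encoded query string is byte-for-byte
// identical either way. A quick standard-library demonstration:
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Old order: optional parameter set first, api-version last.
	before := url.Values{}
	before.Set("skipShutdown", "true")
	before.Set("api-version", "2024-03-01")

	// New order: api-version set first.
	after := url.Values{}
	after.Set("api-version", "2024-03-01")
	after.Set("skipShutdown", "true")

	// Encode sorts by key, so both print: api-version=2024-03-01&skipShutdown=true
	fmt.Println(before.Encode())
	fmt.Println(after.Encode())
}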
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) redeploy(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginRedeployOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginRedeploy" @@ -1364,7 +1364,7 @@ func (client *VirtualMachineScaleSetsClient) redeployCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1381,7 +1381,7 @@ func (client *VirtualMachineScaleSetsClient) redeployCreateRequest(ctx context.C // reset to initial state. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginReimageOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginReimage @@ -1408,7 +1408,7 @@ func (client *VirtualMachineScaleSetsClient) BeginReimage(ctx context.Context, r // reset to initial state. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) reimage(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReimageOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginReimage" @@ -1450,7 +1450,7 @@ func (client *VirtualMachineScaleSetsClient) reimageCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMScaleSetReimageInput != nil { @@ -1466,7 +1466,7 @@ func (client *VirtualMachineScaleSetsClient) reimageCreateRequest(ctx context.Co // is only supported for managed disks. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginReimageAllOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginReimageAll @@ -1492,7 +1492,7 @@ func (client *VirtualMachineScaleSetsClient) BeginReimageAll(ctx context.Context // is only supported for managed disks. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) reimageAll(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReimageAllOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginReimageAll" @@ -1534,7 +1534,7 @@ func (client *VirtualMachineScaleSetsClient) reimageAllCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1549,7 +1549,7 @@ func (client *VirtualMachineScaleSetsClient) reimageAllCreateRequest(ctx context // BeginRestart - Restarts one or more virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginRestartOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginRestart @@ -1574,7 +1574,7 @@ func (client *VirtualMachineScaleSetsClient) BeginRestart(ctx context.Context, r // Restart - Restarts one or more virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) restart(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginRestartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginRestart" @@ -1616,7 +1616,7 @@ func (client *VirtualMachineScaleSetsClient) restartCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1631,7 +1631,7 @@ func (client *VirtualMachineScaleSetsClient) restartCreateRequest(ctx context.Co // BeginSetOrchestrationServiceState - Changes ServiceState property for a given service // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the virtual machine scale set to create or update. // - parameters - The input object for SetOrchestrationServiceState API. @@ -1657,7 +1657,7 @@ func (client *VirtualMachineScaleSetsClient) BeginSetOrchestrationServiceState(c // SetOrchestrationServiceState - Changes ServiceState property for a given service // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) setOrchestrationServiceState(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters OrchestrationServiceStateInput, options *VirtualMachineScaleSetsClientBeginSetOrchestrationServiceStateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginSetOrchestrationServiceState" @@ -1699,7 +1699,7 @@ func (client *VirtualMachineScaleSetsClient) setOrchestrationServiceStateCreateR return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -1711,7 +1711,7 @@ func (client *VirtualMachineScaleSetsClient) setOrchestrationServiceStateCreateR // BeginStart - Starts one or more virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginStartOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginStart @@ -1736,7 +1736,7 @@ func (client *VirtualMachineScaleSetsClient) BeginStart(ctx context.Context, res // Start - Starts one or more virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) start(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginStartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginStart" @@ -1778,7 +1778,7 @@ func (client *VirtualMachineScaleSetsClient) startCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1793,7 +1793,7 @@ func (client *VirtualMachineScaleSetsClient) startCreateRequest(ctx context.Cont // BeginUpdate - Update a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set to create or update. // - parameters - The scale set object. @@ -1819,7 +1819,7 @@ func (client *VirtualMachineScaleSetsClient) BeginUpdate(ctx context.Context, re // Update - Update a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSetUpdate, options *VirtualMachineScaleSetsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginUpdate" @@ -1861,15 +1861,15 @@ func (client *VirtualMachineScaleSetsClient) updateCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { req.Raw().Header["If-Match"] = []string{*options.IfMatch} } if options != nil && options.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} } - req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err } @@ -1879,7 +1879,7 @@ func (client *VirtualMachineScaleSetsClient) updateCreateRequest(ctx context.Con // BeginUpdateInstances - Upgrades one or more virtual machines to the latest SKU set in the VM scale set model. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - vmInstanceIDs - A list of virtual machine instance IDs from the VM scale set. @@ -1905,7 +1905,7 @@ func (client *VirtualMachineScaleSetsClient) BeginUpdateInstances(ctx context.Co // UpdateInstances - Upgrades one or more virtual machines to the latest SKU set in the VM scale set model. // If the operation fails it returns an *azcore.ResponseError type. 
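// The updateCreateRequest hunk above likewise moves the Accept header assignment ahead of
// the conditional If-Match/If-None-Match headers. req.Raw().Header is an http.Header, i.e.
// a map[string][]string, so assignment order cannot affect the outgoing request. The ETag
// value below is a made-up illustration:
package main

import (
	"fmt"
	"net/http"
)

func main() {
	a := http.Header{}
	a["Accept"] = []string{"application/json"}
	a["If-Match"] = []string{`"0x8D0EXAMPLE"`}

	b := http.Header{}
	b["If-Match"] = []string{`"0x8D0EXAMPLE"`}
	b["Accept"] = []string{"application/json"}

	// Same contents regardless of insertion order; net/http writes
	// headers from the map, not from the order they were assigned.
	fmt.Println(a.Get("Accept") == b.Get("Accept"))     // true
	fmt.Println(a.Get("If-Match") == b.Get("If-Match")) // true
}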
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) updateInstances(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, options *VirtualMachineScaleSetsClientBeginUpdateInstancesOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginUpdateInstances" @@ -1947,7 +1947,7 @@ func (client *VirtualMachineScaleSetsClient) updateInstancesCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, vmInstanceIDs); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmextensions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmextensions_client.go index ff2347623..c16821d83 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmextensions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmextensions_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineScaleSetVMExtensionsClient(subscriptionID string, credenti // BeginCreateOrUpdate - The operation to create or update the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -75,7 +75,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) BeginCreateOrUpdate(ctx // CreateOrUpdate - The operation to create or update the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMExtensionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, extensionParameters VirtualMachineScaleSetVMExtension, options *VirtualMachineScaleSetVMExtensionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMExtensionsClient.BeginCreateOrUpdate" @@ -125,7 +125,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) createOrUpdateCreateRequ return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { @@ -137,7 +137,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) createOrUpdateCreateRequ // BeginDelete - The operation to delete the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -164,7 +164,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) BeginDelete(ctx context. // Delete - The operation to delete the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMExtensionsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, options *VirtualMachineScaleSetVMExtensionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMExtensionsClient.BeginDelete" @@ -214,7 +214,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) deleteCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -223,7 +223,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) deleteCreateRequest(ctx // Get - The operation to get the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -283,7 +283,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) getCreateRequest(ctx con if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -301,7 +301,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) getHandleResponse(resp * // List - The operation to get all extensions of an instance in Virtual Machine Scaleset. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -356,7 +356,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) listCreateRequest(ctx co if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -374,7 +374,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) listHandleResponse(resp // BeginUpdate - The operation to update the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. 
@@ -402,7 +402,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) BeginUpdate(ctx context. // Update - The operation to update the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMExtensionsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, extensionParameters VirtualMachineScaleSetVMExtensionUpdate, options *VirtualMachineScaleSetVMExtensionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMExtensionsClient.BeginUpdate" @@ -452,7 +452,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) updateCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmruncommands_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmruncommands_client.go index 9736973fc..85e67fc55 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmruncommands_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmruncommands_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineScaleSetVMRunCommandsClient(subscriptionID string, credent // BeginCreateOrUpdate - The operation to create or update the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -75,7 +75,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) BeginCreateOrUpdate(ctx // CreateOrUpdate - The operation to create or update the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMRunCommandsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommand, options *VirtualMachineScaleSetVMRunCommandsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMRunCommandsClient.BeginCreateOrUpdate" @@ -125,7 +125,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) createOrUpdateCreateReq return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, runCommand); err != nil { @@ -137,7 +137,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) createOrUpdateCreateReq // BeginDelete - The operation to delete the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -164,7 +164,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) BeginDelete(ctx context // Delete - The operation to delete the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMRunCommandsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, options *VirtualMachineScaleSetVMRunCommandsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMRunCommandsClient.BeginDelete" @@ -214,7 +214,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) deleteCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -223,7 +223,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) deleteCreateRequest(ctx // Get - The operation to get the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -283,7 +283,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) getCreateRequest(ctx co if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -300,7 +300,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) getHandleResponse(resp // NewListPager - The operation to get all run commands of an instance in Virtual Machine Scaleset. 
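// NewListPager, referenced above, returns an azcore pager; the api-version bump does not
// change how iteration works. A sketch, with placeholder names and DefaultAzureCredential
// as assumptions:
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewVirtualMachineScaleSetVMRunCommandsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	// List every run command on instance "0" of the scale set, page by page.
	pager := client.NewListPager("my-rg", "my-vmss", "0", nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, cmd := range page.Value {
			fmt.Println(*cmd.Name)
		}
	}
}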
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -356,7 +356,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) listCreateRequest(ctx c if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -374,7 +374,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) listHandleResponse(resp // BeginUpdate - The operation to update the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -402,7 +402,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) BeginUpdate(ctx context // Update - The operation to update the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMRunCommandsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommandUpdate, options *VirtualMachineScaleSetVMRunCommandsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMRunCommandsClient.BeginUpdate" @@ -452,7 +452,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) updateCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, runCommand); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go index e1da5fc11..379ae96b2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go @@ -48,7 +48,7 @@ func NewVirtualMachineScaleSetVMsClient(subscriptionID string, credential azcore // BeginApproveRollingUpgrade - Approve upgrade on deferred rolling upgrade for OS disk on a VM scale set instance. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. 
@@ -74,7 +74,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginApproveRollingUpgrade(ctx co // ApproveRollingUpgrade - Approve upgrade on deferred rolling upgrade for OS disk on a VM scale set instance. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) approveRollingUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginApproveRollingUpgradeOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginApproveRollingUpgrade" @@ -120,7 +120,7 @@ func (client *VirtualMachineScaleSetVMsClient) approveRollingUpgradeCreateReques return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -129,7 +129,7 @@ func (client *VirtualMachineScaleSetVMsClient) approveRollingUpgradeCreateReques // BeginAttachDetachDataDisks - Attach and detach data disks to/from a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -157,7 +157,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginAttachDetachDataDisks(ctx co // AttachDetachDataDisks - Attach and detach data disks to/from a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) attachDetachDataDisks(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters AttachDetachDataDisksRequest, options *VirtualMachineScaleSetVMsClientBeginAttachDetachDataDisksOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginAttachDetachDataDisks" @@ -203,7 +203,7 @@ func (client *VirtualMachineScaleSetVMsClient) attachDetachDataDisksCreateReques return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -217,7 +217,7 @@ func (client *VirtualMachineScaleSetVMsClient) attachDetachDataDisksCreateReques // machine once it is deallocated. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -245,7 +245,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginDeallocate(ctx context.Conte // machine once it is deallocated. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) deallocate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginDeallocateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginDeallocate" @@ -291,7 +291,7 @@ func (client *VirtualMachineScaleSetVMsClient) deallocateCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -300,7 +300,7 @@ func (client *VirtualMachineScaleSetVMsClient) deallocateCreateRequest(ctx conte // BeginDelete - Deletes a virtual machine from a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -326,7 +326,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginDelete(ctx context.Context, // Delete - Deletes a virtual machine from a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginDelete" @@ -372,10 +372,10 @@ func (client *VirtualMachineScaleSetVMsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2024-03-01") if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -384,7 +384,7 @@ func (client *VirtualMachineScaleSetVMsClient) deleteCreateRequest(ctx context.C // Get - Gets a virtual machine from a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -439,7 +439,7 @@ func (client *VirtualMachineScaleSetVMsClient) getCreateRequest(ctx context.Cont if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -457,7 +457,7 @@ func (client *VirtualMachineScaleSetVMsClient) getHandleResponse(resp *http.Resp // GetInstanceView - Gets the status of a virtual machine from a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. 
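// The getCreateRequest hunk above shows the optional $expand parameter being written before
// the relocated api-version. Callers opt in through the options struct; the sketch below
// assumes the InstanceViewTypes constants in this package and placeholder resource names.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewVirtualMachineScaleSetVMsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Ask for the instance view to be embedded in the response ($expand=instanceView).
	expand := armcompute.InstanceViewTypesInstanceView
	vm, err := client.Get(context.Background(), "my-rg", "my-vmss", "0",
		&armcompute.VirtualMachineScaleSetVMsClientGetOptions{Expand: &expand})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*vm.ID)
}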
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -509,7 +509,7 @@ func (client *VirtualMachineScaleSetVMsClient) getInstanceViewCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -526,7 +526,7 @@ func (client *VirtualMachineScaleSetVMsClient) getInstanceViewHandleResponse(res // NewListPager - Gets a list of all virtual machines in a VM scale sets. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - virtualMachineScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetVMsClientListOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.NewListPager @@ -574,16 +574,16 @@ func (client *VirtualMachineScaleSetVMsClient) listCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } if options != nil && options.Filter != nil { reqQP.Set("$filter", *options.Filter) } if options != nil && options.Select != nil { reqQP.Set("$select", *options.Select) } - if options != nil && options.Expand != nil { - reqQP.Set("$expand", *options.Expand) - } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -601,7 +601,7 @@ func (client *VirtualMachineScaleSetVMsClient) listHandleResponse(resp *http.Res // BeginPerformMaintenance - Performs maintenance on a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -627,7 +627,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginPerformMaintenance(ctx conte // PerformMaintenance - Performs maintenance on a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) performMaintenance(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginPerformMaintenanceOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginPerformMaintenance" @@ -673,7 +673,7 @@ func (client *VirtualMachineScaleSetVMsClient) performMaintenanceCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -684,7 +684,7 @@ func (client *VirtualMachineScaleSetVMsClient) performMaintenanceCreateRequest(c // charges. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -712,7 +712,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginPowerOff(ctx context.Context // charges. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) powerOff(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginPowerOffOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginPowerOff" @@ -758,10 +758,10 @@ func (client *VirtualMachineScaleSetVMsClient) powerOffCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2024-03-01") if options != nil && options.SkipShutdown != nil { reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -771,7 +771,7 @@ func (client *VirtualMachineScaleSetVMsClient) powerOffCreateRequest(ctx context // back on. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -798,7 +798,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginRedeploy(ctx context.Context // on. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) redeploy(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginRedeployOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginRedeploy" @@ -844,7 +844,7 @@ func (client *VirtualMachineScaleSetVMsClient) redeployCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -853,7 +853,7 @@ func (client *VirtualMachineScaleSetVMsClient) redeployCreateRequest(ctx context // BeginReimage - Reimages (upgrade the operating system) a specific virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -879,7 +879,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginReimage(ctx context.Context, // Reimage - Reimages (upgrade the operating system) a specific virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) reimage(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginReimageOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginReimage" @@ -925,7 +925,7 @@ func (client *VirtualMachineScaleSetVMsClient) reimageCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMScaleSetVMReimageInput != nil { @@ -941,7 +941,7 @@ func (client *VirtualMachineScaleSetVMsClient) reimageCreateRequest(ctx context. // is only supported for managed disks. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -968,7 +968,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginReimageAll(ctx context.Conte // is only supported for managed disks. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) reimageAll(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginReimageAllOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginReimageAll" @@ -1014,7 +1014,7 @@ func (client *VirtualMachineScaleSetVMsClient) reimageAllCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1023,7 +1023,7 @@ func (client *VirtualMachineScaleSetVMsClient) reimageAllCreateRequest(ctx conte // BeginRestart - Restarts a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -1049,7 +1049,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginRestart(ctx context.Context, // Restart - Restarts a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) restart(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginRestartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginRestart" @@ -1095,7 +1095,7 @@ func (client *VirtualMachineScaleSetVMsClient) restartCreateRequest(ctx context. 
return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1105,7 +1105,7 @@ func (client *VirtualMachineScaleSetVMsClient) restartCreateRequest(ctx context. // scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -1157,10 +1157,10 @@ func (client *VirtualMachineScaleSetVMsClient) retrieveBootDiagnosticsDataCreate return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2024-03-01") if options != nil && options.SasURIExpirationTimeInMinutes != nil { reqQP.Set("sasUriExpirationTimeInMinutes", strconv.FormatInt(int64(*options.SasURIExpirationTimeInMinutes), 10)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1178,7 +1178,7 @@ func (client *VirtualMachineScaleSetVMsClient) retrieveBootDiagnosticsDataHandle // BeginRunCommand - Run command on a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -1206,7 +1206,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginRunCommand(ctx context.Conte // RunCommand - Run command on a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) runCommand(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters RunCommandInput, options *VirtualMachineScaleSetVMsClientBeginRunCommandOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginRunCommand" @@ -1252,7 +1252,7 @@ func (client *VirtualMachineScaleSetVMsClient) runCommandCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -1264,7 +1264,7 @@ func (client *VirtualMachineScaleSetVMsClient) runCommandCreateRequest(ctx conte // SimulateEviction - The operation to simulate the eviction of spot virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. 
@@ -1315,7 +1315,7 @@ func (client *VirtualMachineScaleSetVMsClient) simulateEvictionCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1324,7 +1324,7 @@ func (client *VirtualMachineScaleSetVMsClient) simulateEvictionCreateRequest(ctx // BeginStart - Starts a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -1350,7 +1350,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginStart(ctx context.Context, r // Start - Starts a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) start(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginStartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginStart" @@ -1396,7 +1396,7 @@ func (client *VirtualMachineScaleSetVMsClient) startCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1405,7 +1405,7 @@ func (client *VirtualMachineScaleSetVMsClient) startCreateRequest(ctx context.Co // BeginUpdate - Updates a virtual machine of a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set where the extension should be create or updated. // - instanceID - The instance ID of the virtual machine. @@ -1432,7 +1432,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginUpdate(ctx context.Context, // Update - Updates a virtual machine of a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters VirtualMachineScaleSetVM, options *VirtualMachineScaleSetVMsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginUpdate" @@ -1478,15 +1478,15 @@ func (client *VirtualMachineScaleSetVMsClient) updateCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { req.Raw().Header["If-Match"] = []string{*options.IfMatch} } if options != nil && options.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} } - req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinesizes_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinesizes_client.go index 02aadc8c2..80a7316ac 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinesizes_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinesizes_client.go @@ -46,7 +46,7 @@ func NewVirtualMachineSizesClient(subscriptionID string, credential azcore.Token // NewListPager - This API is deprecated. Use Resources Skus [https://docs.microsoft.com/rest/api/compute/resourceskus/list] // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location upon which virtual-machine-sizes is queried. // - options - VirtualMachineSizesClientListOptions contains the optional parameters for the VirtualMachineSizesClient.NewListPager // method. @@ -90,7 +90,7 @@ func (client *VirtualMachineSizesClient) listCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go index 1841d146f..f86286051 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -48,8 +48,8 @@ duplication. .Net People, Take note on X509: This uses x509.Certificates and private keys. x509 does not store private keys. .Net -has some x509.Certificate2 thing that has private keys, but that is just some bullcrap that .Net -added, it doesn't exist in real life. As such I've put a PEM decoder into here. 
+has an x509.Certificate2 abstraction that has private keys, but that is just a strange invention. +As such I've put a PEM decoder into here. */ // TODO(msal): This should have example code for each method on client using Go's example doc framework. diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go index e346ff3df..392e5e43f 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go @@ -217,11 +217,13 @@ func WithClaims(claims string) interface { func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface { AcquireSilentOption AcquireInteractiveOption + AcquireByUsernamePasswordOption options.CallOption } { return struct { AcquireSilentOption AcquireInteractiveOption + AcquireByUsernamePasswordOption options.CallOption }{ CallOption: options.NewCallOption( @@ -231,6 +233,8 @@ func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface { t.authnScheme = authnScheme case *interactiveAuthOptions: t.authnScheme = authnScheme + case *acquireTokenByUsernamePasswordOptions: + t.authnScheme = authnScheme default: return fmt.Errorf("unexpected options type %T", a) } @@ -349,6 +353,7 @@ func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts // acquireTokenByUsernamePasswordOptions contains optional configuration for AcquireTokenByUsernamePassword type acquireTokenByUsernamePasswordOptions struct { claims, tenantID string + authnScheme AuthenticationScheme } // AcquireByUsernamePasswordOption is implemented by options for AcquireTokenByUsernamePassword @@ -374,6 +379,9 @@ func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []s authParams.Claims = o.claims authParams.Username = username authParams.Password = password + if o.authnScheme != nil { + authParams.AuthnScheme = o.authnScheme + } token, err := pca.base.Token.UsernamePassword(ctx, authParams) if err != nil { diff --git a/vendor/github.com/Code-Hex/go-generics-cache/cache.go b/vendor/github.com/Code-Hex/go-generics-cache/cache.go index df7572848..e9f6dea83 100644 --- a/vendor/github.com/Code-Hex/go-generics-cache/cache.go +++ b/vendor/github.com/Code-Hex/go-generics-cache/cache.go @@ -23,6 +23,8 @@ type Interface[K comparable, V any] interface { Keys() []K // Delete deletes the item with provided key from the cache. Delete(key K) + // Len returns the number of items in the cache. + Len() int } var ( @@ -44,9 +46,13 @@ type Item[K comparable, V any] struct { InitialReferenceCount int } +func (item *Item[K, V]) hasExpiration() bool { + return !item.Expiration.IsZero() +} + // Expired returns true if the item has expired. func (item *Item[K, V]) Expired() bool { - if item.Expiration.IsZero() { + if !item.hasExpiration() { return false } return nowFunc().After(item.Expiration) @@ -105,8 +111,9 @@ func newItem[K comparable, V any](key K, val V, opts ...ItemOption) *Item[K, V] type Cache[K comparable, V any] struct { cache Interface[K, *Item[K, V]] // mu is used to do lock in some method process. - mu sync.Mutex - janitor *janitor + mu sync.Mutex + janitor *janitor + expManager *expirationManager[K] } // Option is an option for cache.
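Reviewer note on the apps/public/public.go hunk above: WithAuthenticationScheme previously applied only to the silent and interactive flows, and this bump also threads it through AcquireTokenByUsernamePassword. A minimal sketch of the newly possible call, with client construction elided; the scope, username, and password values are placeholders, and `scheme` stands for any caller-supplied implementation of public.AuthenticationScheme:

```go
package main

import (
	"context"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)

// acquireROPC shows the option now accepted by the username/password flow.
// Everything except the WithAuthenticationScheme wiring is placeholder input.
func acquireROPC(ctx context.Context, pca public.Client, scheme public.AuthenticationScheme) (public.AuthResult, error) {
	return pca.AcquireTokenByUsernamePassword(ctx,
		[]string{"https://graph.microsoft.com/.default"},
		"user@contoso.example",
		"placeholder-password",
		public.WithAuthenticationScheme(scheme), // previously silent/interactive only
	)
}
```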
@@ -188,15 +195,16 @@ func NewContext[K comparable, V any](ctx context.Context, opts ...Option[K, V]) optFunc(o) } cache := &Cache[K, V]{ - cache: o.cache, - janitor: newJanitor(ctx, o.janitorInterval), + cache: o.cache, + janitor: newJanitor(ctx, o.janitorInterval), + expManager: newExpirationManager[K](), } cache.janitor.run(cache.DeleteExpired) return cache } // Get looks up a key's value from the cache. -func (c *Cache[K, V]) Get(key K) (value V, ok bool) { +func (c *Cache[K, V]) Get(key K) (zero V, ok bool) { c.mu.Lock() defer c.mu.Unlock() item, ok := c.cache.Get(key) @@ -208,7 +216,24 @@ func (c *Cache[K, V]) Get(key K) (value V, ok bool) { // Returns nil if the item has been expired. // Do not delete here and leave it to an external process such as Janitor. if item.Expired() { - return value, false + return zero, false + } + + return item.Value, true +} + +// GetOrSet atomically gets a key's value from the cache, or if the +// key is not present, sets the given value. +// The loaded result is true if the value was loaded, false if stored. +func (c *Cache[K, V]) GetOrSet(key K, val V, opts ...ItemOption) (actual V, loaded bool) { + c.mu.Lock() + defer c.mu.Unlock() + item, ok := c.cache.Get(key) + + if !ok || item.Expired() { + item := newItem(key, val, opts...) + c.cache.Set(key, item) + return val, false } return item.Value, true @@ -217,17 +242,30 @@ func (c *Cache[K, V]) Get(key K) (value V, ok bool) { // DeleteExpired all expired items from the cache. func (c *Cache[K, V]) DeleteExpired() { c.mu.Lock() - keys := c.cache.Keys() + l := c.expManager.len() c.mu.Unlock() - for _, key := range keys { - c.mu.Lock() + evict := func() bool { + key := c.expManager.pop() // if is expired, delete it and return nil instead item, ok := c.cache.Get(key) - if ok && item.Expired() { - c.cache.Delete(key) + if ok { + if item.Expired() { + c.cache.Delete(key) + return false + } + c.expManager.update(key, item.Expiration) } + return true + } + + for i := 0; i < l; i++ { + c.mu.Lock() + shouldBreak := evict() c.mu.Unlock() + if shouldBreak { + break + } } } @@ -236,6 +274,9 @@ func (c *Cache[K, V]) Set(key K, val V, opts ...ItemOption) { c.mu.Lock() defer c.mu.Unlock() item := newItem(key, val, opts...) + if item.hasExpiration() { + c.expManager.update(key, item.Expiration) + } c.cache.Set(key, item) } @@ -251,6 +292,14 @@ func (c *Cache[K, V]) Delete(key K) { c.mu.Lock() defer c.mu.Unlock() c.cache.Delete(key) + c.expManager.remove(key) +} + +// Len returns the number of items in the cache. +func (c *Cache[K, V]) Len() int { + c.mu.Lock() + defer c.mu.Unlock() + return c.cache.Len() } // Contains reports whether key is within cache. 
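Reviewer note on the cache.go changes above: callers gain an atomic GetOrSet plus a Len accessor, and every Set with an expiration now also registers the key with the expiration manager introduced in the next file, so DeleteExpired pops keys in expiry order instead of scanning every key. A small usage sketch against the updated API; key names and durations are arbitrary:

```go
package main

import (
	"fmt"
	"time"

	cache "github.com/Code-Hex/go-generics-cache"
)

func main() {
	c := cache.New[string, int]()

	// First call stores 1 and reports loaded == false; the expiration is
	// also tracked by the new heap-based expiration manager.
	v, loaded := c.GetOrSet("visits", 1, cache.WithExpiration(time.Minute))
	fmt.Println(v, loaded) // 1 false

	// Second call finds the live entry and leaves it untouched.
	v, loaded = c.GetOrSet("visits", 99)
	fmt.Println(v, loaded) // 1 true

	// Len is newly part of both Cache and the policy Interface.
	fmt.Println(c.Len()) // 1
}
```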
diff --git a/vendor/github.com/Code-Hex/go-generics-cache/expiration.go b/vendor/github.com/Code-Hex/go-generics-cache/expiration.go new file mode 100644 index 000000000..44ee4eabf --- /dev/null +++ b/vendor/github.com/Code-Hex/go-generics-cache/expiration.go @@ -0,0 +1,92 @@ +package cache + +import ( + "container/heap" + "time" +) + +type expirationManager[K comparable] struct { + queue expirationQueue[K] + mapping map[K]*expirationKey[K] +} + +func newExpirationManager[K comparable]() *expirationManager[K] { + q := make(expirationQueue[K], 0) + heap.Init(&q) + return &expirationManager[K]{ + queue: q, + mapping: make(map[K]*expirationKey[K]), + } +} + +func (m *expirationManager[K]) update(key K, expiration time.Time) { + if e, ok := m.mapping[key]; ok { + e.expiration = expiration + heap.Fix(&m.queue, e.index) + } else { + v := &expirationKey[K]{ + key: key, + expiration: expiration, + } + heap.Push(&m.queue, v) + m.mapping[key] = v + } +} + +func (m *expirationManager[K]) len() int { + return m.queue.Len() +} + +func (m *expirationManager[K]) pop() K { + v := heap.Pop(&m.queue) + key := v.(*expirationKey[K]).key + delete(m.mapping, key) + return key +} + +func (m *expirationManager[K]) remove(key K) { + if e, ok := m.mapping[key]; ok { + heap.Remove(&m.queue, e.index) + delete(m.mapping, key) + } +} + +type expirationKey[K comparable] struct { + key K + expiration time.Time + index int +} + +// expirationQueue implements heap.Interface and holds CacheItems. +type expirationQueue[K comparable] []*expirationKey[K] + +var _ heap.Interface = (*expirationQueue[int])(nil) + +func (pq expirationQueue[K]) Len() int { return len(pq) } + +func (pq expirationQueue[K]) Less(i, j int) bool { + // We want Pop to give us the least based on expiration time, not the greater + return pq[i].expiration.Before(pq[j].expiration) +} + +func (pq expirationQueue[K]) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +func (pq *expirationQueue[K]) Push(x interface{}) { + n := len(*pq) + item := x.(*expirationKey[K]) + item.index = n + *pq = append(*pq, item) +} + +func (pq *expirationQueue[K]) Pop() interface{} { + old := *pq + n := len(old) + item := old[n-1] + item.index = -1 // For safety + *pq = old[0 : n-1] + return item +} diff --git a/vendor/github.com/Code-Hex/go-generics-cache/policy/lfu/lfu.go b/vendor/github.com/Code-Hex/go-generics-cache/policy/lfu/lfu.go index d722f932a..8ac2f9a67 100644 --- a/vendor/github.com/Code-Hex/go-generics-cache/policy/lfu/lfu.go +++ b/vendor/github.com/Code-Hex/go-generics-cache/policy/lfu/lfu.go @@ -71,8 +71,10 @@ func (c *Cache[K, V]) Set(key K, val V) { } if len(c.items) == c.cap { - evictedEntry := heap.Pop(c.queue).(*entry[K, V]) - delete(c.items, evictedEntry.key) + evictedEntry := heap.Pop(c.queue) + if evictedEntry != nil { + delete(c.items, evictedEntry.(*entry[K, V]).key) + } } e := newEntry(key, val) diff --git a/vendor/github.com/Code-Hex/go-generics-cache/policy/lfu/priority_queue.go b/vendor/github.com/Code-Hex/go-generics-cache/policy/lfu/priority_queue.go index 575890c93..27c96fcba 100644 --- a/vendor/github.com/Code-Hex/go-generics-cache/policy/lfu/priority_queue.go +++ b/vendor/github.com/Code-Hex/go-generics-cache/policy/lfu/priority_queue.go @@ -50,6 +50,9 @@ func (q priorityQueue[K, V]) Less(i, j 
int) bool { } func (q priorityQueue[K, V]) Swap(i, j int) { + if len(q) < 2 { + return + } q[i], q[j] = q[j], q[i] q[i].index = i q[j].index = j @@ -64,13 +67,13 @@ func (q *priorityQueue[K, V]) Push(x interface{}) { func (q *priorityQueue[K, V]) Pop() interface{} { old := *q n := len(old) + if n == 0 { + return nil // Return nil if the queue is empty to prevent panic + } entry := old[n-1] old[n-1] = nil // avoid memory leak entry.index = -1 // for safety new := old[0 : n-1] - for i := 0; i < len(new); i++ { - new[i].index = i - } *q = new return entry } diff --git a/vendor/github.com/Code-Hex/go-generics-cache/policy/simple/simple.go b/vendor/github.com/Code-Hex/go-generics-cache/policy/simple/simple.go index 2ed4cdbc4..cb15ccdd2 100644 --- a/vendor/github.com/Code-Hex/go-generics-cache/policy/simple/simple.go +++ b/vendor/github.com/Code-Hex/go-generics-cache/policy/simple/simple.go @@ -59,3 +59,8 @@ func (c *Cache[K, _]) Keys() []K { func (c *Cache[K, V]) Delete(key K) { delete(c.items, key) } + +// Len returns the number of items in the cache. +func (c *Cache[K, V]) Len() int { + return len(c.items) +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go new file mode 100644 index 000000000..d3992a4f7 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go @@ -0,0 +1,55 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +const ( + bmsProjectIDEnv = "BMS_PROJECT_ID" + bmsRegionEnv = "BMS_REGION" + bmsInstanceIDEnv = "BMS_INSTANCE_ID" +) + +// onBareMetalSolution checks if the code is running on a Google Cloud Bare Metal Solution (BMS) by verifying +// the presence and non-empty values of BMS_PROJECT_ID, BMS_REGION, and BMS_INSTANCE_ID environment variables. +// For more information on Google Cloud Bare Metal Solution, see: https://cloud.google.com/bare-metal/docs +func (d *Detector) onBareMetalSolution() bool { + projectID, projectIDExists := d.os.LookupEnv(bmsProjectIDEnv) + region, regionExists := d.os.LookupEnv(bmsRegionEnv) + instanceID, instanceIDExists := d.os.LookupEnv(bmsInstanceIDEnv) + return projectIDExists && regionExists && instanceIDExists && projectID != "" && region != "" && instanceID != "" +} + +// BareMetalSolutionInstanceID returns the instance ID from the BMS_INSTANCE_ID environment variable. +func (d *Detector) BareMetalSolutionInstanceID() (string, error) { + if instanceID, found := d.os.LookupEnv(bmsInstanceIDEnv); found { + return instanceID, nil + } + return "", errEnvVarNotFound +} + +// BareMetalSolutionCloudRegion returns the region from the BMS_REGION environment variable. 
+func (d *Detector) BareMetalSolutionCloudRegion() (string, error) { + if region, found := d.os.LookupEnv(bmsRegionEnv); found { + return region, nil + } + return "", errEnvVarNotFound +} + +// BareMetalSolutionProjectID returns the project ID from the BMS_PROJECT_ID environment variable. +func (d *Detector) BareMetalSolutionProjectID() (string, error) { + if project, found := d.os.LookupEnv(bmsProjectIDEnv); found { + return project, nil + } + return "", errEnvVarNotFound +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go index 372621553..2cc62de09 100644 --- a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go @@ -40,11 +40,14 @@ const ( CloudFunctions AppEngineStandard AppEngineFlex + BareMetalSolution ) // CloudPlatform returns the platform on which this program is running. func (d *Detector) CloudPlatform() Platform { switch { + case d.onBareMetalSolution(): + return BareMetalSolution case d.onGKE(): return GKE case d.onCloudFunctions(): diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml index 7b503d26a..faedfe937 100644 --- a/vendor/github.com/Microsoft/go-winio/.golangci.yml +++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml @@ -1,7 +1,3 @@ -run: - skip-dirs: - - pkg/etw/sample - linters: enable: # style @@ -20,9 +16,13 @@ linters: - gofmt # files are gofmt'ed - gosec # security - nilerr # returns nil even with non-nil error + - thelper # test helpers without t.Helper() - unparam # unused function params issues: + exclude-dirs: + - pkg/etw/sample + exclude-rules: # err is very often shadowed in nested scopes - linters: @@ -69,9 +69,7 @@ linters-settings: # struct order is often for Win32 compat # also, ignore pointer bytes/GC issues for now until performance becomes an issue - fieldalignment - check-shadowing: true nolintlint: - allow-leading-space: false require-explanation: true require-specific: true revive: diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go index 09621c884..b54341daa 100644 --- a/vendor/github.com/Microsoft/go-winio/backup.go +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -10,14 +10,14 @@ import ( "io" "os" "runtime" - "syscall" "unicode/utf16" + "github.com/Microsoft/go-winio/internal/fs" "golang.org/x/sys/windows" ) -//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead -//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite +//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite const ( BackupData = uint32(iota + 1) @@ -104,7 +104,7 @@ func (r *BackupStreamReader) Next() 
(*BackupHeader, error) { if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { return nil, err } - hdr.Name = syscall.UTF16ToString(name) + hdr.Name = windows.UTF16ToString(name) } if wsi.StreamID == BackupSparseBlock { if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { @@ -205,7 +205,7 @@ func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { // Read reads a backup stream from the file by calling the Win32 API BackupRead(). func (r *BackupFileReader) Read(b []byte) (int, error) { var bytesRead uint32 - err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) if err != nil { return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} } @@ -220,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) { // the underlying file. func (r *BackupFileReader) Close() error { if r.ctx != 0 { - _ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + _ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) runtime.KeepAlive(r.f) r.ctx = 0 } @@ -244,7 +244,7 @@ func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { // Write restores a portion of the file using the provided backup stream. func (w *BackupFileWriter) Write(b []byte) (int, error) { var bytesWritten uint32 - err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) if err != nil { return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err} } @@ -259,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) { // close the underlying file. func (w *BackupFileWriter) Close() error { if w.ctx != 0 { - _ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + _ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) runtime.KeepAlive(w.f) w.ctx = 0 } @@ -271,17 +271,14 @@ func (w *BackupFileWriter) Close() error { // // If the file opened was a directory, it cannot be used with Readdir(). 
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { - winPath, err := syscall.UTF16FromString(path) - if err != nil { - return nil, err - } - h, err := syscall.CreateFile(&winPath[0], - access, - share, + h, err := fs.CreateFile(path, + fs.AccessMask(access), + fs.FileShareMode(share), nil, - createmode, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, - 0) + fs.FileCreationDisposition(createmode), + fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT, + 0, + ) if err != nil { err = &os.PathError{Op: "open", Path: path, Err: err} return nil, err diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go index 175a99d3f..fe82a180d 100644 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -15,26 +15,11 @@ import ( "golang.org/x/sys/windows" ) -//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx -//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort -//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus -//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes -//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult - -type atomicBool int32 - -func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } -func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } -func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } - -//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg -func (b *atomicBool) swap(new bool) bool { - var newInt int32 - if new { - newInt = 1 - } - return atomic.SwapInt32((*int32)(b), newInt) == 1 -} +//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult var ( ErrFileClosed = errors.New("file has already been closed") @@ -50,7 +35,7 @@ func (*timeoutError) Temporary() bool { return true } type timeoutChan chan struct{} var ioInitOnce sync.Once -var ioCompletionPort syscall.Handle +var ioCompletionPort windows.Handle // ioResult contains the result of an asynchronous IO operation. type ioResult struct { @@ -60,12 +45,12 @@ type ioResult struct { // ioOperation represents an outstanding asynchronous Win32 IO. 
type ioOperation struct { - o syscall.Overlapped + o windows.Overlapped ch chan ioResult } func initIO() { - h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff) if err != nil { panic(err) } @@ -76,10 +61,10 @@ func initIO() { // win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. // It takes ownership of this handle and will close it if it is garbage collected. type win32File struct { - handle syscall.Handle + handle windows.Handle wg sync.WaitGroup wgLock sync.RWMutex - closing atomicBool + closing atomic.Bool socket bool readDeadline deadlineHandler writeDeadline deadlineHandler @@ -90,11 +75,11 @@ type deadlineHandler struct { channel timeoutChan channelLock sync.RWMutex timer *time.Timer - timedout atomicBool + timedout atomic.Bool } // makeWin32File makes a new win32File from an existing file handle. -func makeWin32File(h syscall.Handle) (*win32File, error) { +func makeWin32File(h windows.Handle) (*win32File, error) { f := &win32File{handle: h} ioInitOnce.Do(initIO) _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) @@ -110,7 +95,12 @@ func makeWin32File(h syscall.Handle) (*win32File, error) { return f, nil } +// Deprecated: use NewOpenFile instead. func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + return NewOpenFile(windows.Handle(h)) +} + +func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) { // If we return the result of makeWin32File directly, it can result in an // interface-wrapped nil, rather than a nil interface value. f, err := makeWin32File(h) @@ -124,13 +114,13 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { func (f *win32File) closeHandle() { f.wgLock.Lock() // Atomically set that we are closing, releasing the resources only once. - if !f.closing.swap(true) { + if !f.closing.Swap(true) { f.wgLock.Unlock() // cancel all IO and wait for it to complete _ = cancelIoEx(f.handle, nil) f.wg.Wait() // at this point, no new IO can start - syscall.Close(f.handle) + windows.Close(f.handle) f.handle = 0 } else { f.wgLock.Unlock() @@ -145,14 +135,14 @@ func (f *win32File) Close() error { // IsClosed checks if the file has been closed. func (f *win32File) IsClosed() bool { - return f.closing.isSet() + return f.closing.Load() } // prepareIO prepares for a new IO operation. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. func (f *win32File) prepareIO() (*ioOperation, error) { f.wgLock.RLock() - if f.closing.isSet() { + if f.closing.Load() { f.wgLock.RUnlock() return nil, ErrFileClosed } @@ -164,12 +154,12 @@ func (f *win32File) prepareIO() (*ioOperation, error) { } // ioCompletionProcessor processes completed async IOs forever. -func ioCompletionProcessor(h syscall.Handle) { +func ioCompletionProcessor(h windows.Handle) { for { var bytes uint32 var key uintptr var op *ioOperation - err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE) if op == nil { panic(err) } @@ -182,11 +172,11 @@ func ioCompletionProcessor(h syscall.Handle) { // asyncIO processes the return value from ReadFile or WriteFile, blocking until // the operation has actually completed. 
func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno + if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno return int(bytes), err } - if f.closing.isSet() { + if f.closing.Load() { _ = cancelIoEx(f.handle, &c.o) } @@ -201,8 +191,8 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er select { case r = <-c.ch: err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno - if f.closing.isSet() { + if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno + if f.closing.Load() { err = ErrFileClosed } } else if err != nil && f.socket { @@ -214,7 +204,7 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er _ = cancelIoEx(f.handle, &c.o) r = <-c.ch err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno + if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno err = ErrTimeout } } @@ -235,23 +225,22 @@ func (f *win32File) Read(b []byte) (int, error) { } defer f.wg.Done() - if f.readDeadline.timedout.isSet() { + if f.readDeadline.timedout.Load() { return 0, ErrTimeout } var bytes uint32 - err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + err = windows.ReadFile(f.handle, b, &bytes, &c.o) n, err := f.asyncIO(c, &f.readDeadline, bytes, err) runtime.KeepAlive(b) // Handle EOF conditions. if err == nil && n == 0 && len(b) != 0 { return 0, io.EOF - } else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno + } else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno return 0, io.EOF - } else { - return n, err } + return n, err } // Write writes to a file handle. @@ -262,12 +251,12 @@ func (f *win32File) Write(b []byte) (int, error) { } defer f.wg.Done() - if f.writeDeadline.timedout.isSet() { + if f.writeDeadline.timedout.Load() { return 0, ErrTimeout } var bytes uint32 - err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + err = windows.WriteFile(f.handle, b, &bytes, &c.o) n, err := f.asyncIO(c, &f.writeDeadline, bytes, err) runtime.KeepAlive(b) return n, err @@ -282,7 +271,7 @@ func (f *win32File) SetWriteDeadline(deadline time.Time) error { } func (f *win32File) Flush() error { - return syscall.FlushFileBuffers(f.handle) + return windows.FlushFileBuffers(f.handle) } func (f *win32File) Fd() uintptr { @@ -299,7 +288,7 @@ func (d *deadlineHandler) set(deadline time.Time) error { } d.timer = nil } - d.timedout.setFalse() + d.timedout.Store(false) select { case <-d.channel: @@ -314,7 +303,7 @@ func (d *deadlineHandler) set(deadline time.Time) error { } timeoutIO := func() { - d.timedout.setTrue() + d.timedout.Store(true) close(d.channel) } diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go index 702950e72..c860eb991 100644 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -18,9 +18,18 @@ type FileBasicInfo struct { _ uint32 // padding } +// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing +// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64 +// alignment is necessary to pass this as FILE_BASIC_INFO. 
+type alignedFileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64 + FileAttributes uint32 + _ uint32 // padding +} + // GetFileBasicInfo retrieves times and attributes for a file. func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { - bi := &FileBasicInfo{} + bi := &alignedFileBasicInfo{} if err := windows.GetFileInformationByHandleEx( windows.Handle(f.Fd()), windows.FileBasicInfo, @@ -30,16 +39,21 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) - return bi, nil + // Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the + // public API of this module. The data may be unnecessarily aligned. + return (*FileBasicInfo)(unsafe.Pointer(bi)), nil } // SetFileBasicInfo sets times and attributes for a file. func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { + // Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is + // suitable to pass to SetFileInformationByHandle. + biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi)) if err := windows.SetFileInformationByHandle( windows.Handle(f.Fd()), windows.FileBasicInfo, - (*byte)(unsafe.Pointer(bi)), - uint32(unsafe.Sizeof(*bi)), + (*byte)(unsafe.Pointer(&biAligned)), + uint32(unsafe.Sizeof(biAligned)), ); err != nil { return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} } diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index c88191658..c4fdd9d4a 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -10,7 +10,6 @@ import ( "io" "net" "os" - "syscall" "time" "unsafe" @@ -181,13 +180,13 @@ type HvsockConn struct { var _ net.Conn = &HvsockConn{} func newHVSocket() (*win32File, error) { - fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1) + fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1) if err != nil { return nil, os.NewSyscallError("socket", err) } f, err := makeWin32File(fd) if err != nil { - syscall.Close(fd) + windows.Close(fd) return nil, err } f.socket = true @@ -197,16 +196,24 @@ func newHVSocket() (*win32File, error) { // ListenHvsock listens for connections on the specified hvsock address.
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { l := &HvsockListener{addr: *addr} - sock, err := newHVSocket() + + var sock *win32File + sock, err = newHVSocket() if err != nil { return nil, l.opErr("listen", err) } + defer func() { + if err != nil { + _ = sock.Close() + } + }() + sa := addr.raw() - err = socket.Bind(windows.Handle(sock.handle), &sa) + err = socket.Bind(sock.handle, &sa) if err != nil { return nil, l.opErr("listen", os.NewSyscallError("socket", err)) } - err = syscall.Listen(sock.handle, 16) + err = windows.Listen(sock.handle, 16) if err != nil { return nil, l.opErr("listen", os.NewSyscallError("listen", err)) } @@ -246,7 +253,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { var addrbuf [addrlen * 2]byte var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) + err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) } @@ -263,7 +270,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) // initialize the accepted socket and update its properties with those of the listening socket - if err = windows.Setsockopt(windows.Handle(sock.handle), + if err = windows.Setsockopt(sock.handle, windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT, (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil { return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err)) @@ -334,7 +341,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock }() sa := addr.raw() - err = socket.Bind(windows.Handle(sock.handle), &sa) + err = socket.Bind(sock.handle, &sa) if err != nil { return nil, conn.opErr(op, os.NewSyscallError("bind", err)) } @@ -347,7 +354,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock var bytes uint32 for i := uint(0); i <= d.Retries; i++ { err = socket.ConnectEx( - windows.Handle(sock.handle), + sock.handle, &sa, nil, // sendBuf 0, // sendDataLen @@ -367,7 +374,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock // update the connection properties, so shutdown can be used if err = windows.Setsockopt( - windows.Handle(sock.handle), + sock.handle, windows.SOL_SOCKET, windows.SO_UPDATE_CONNECT_CONTEXT, nil, // optvalue @@ -378,7 +385,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock // get the local name var sal rawHvsockAddr - err = socket.GetSockName(windows.Handle(sock.handle), &sal) + err = socket.GetSockName(sock.handle, &sal) if err != nil { return nil, conn.opErr(op, os.NewSyscallError("getsockname", err)) } @@ -421,7 +428,7 @@ func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { return ctx.Err() } -// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall. +// assumes error is a plain, unwrapped windows.Errno provided by direct syscall. 
func canRedial(err error) bool { //nolint:errorlint // guaranteed to be an Errno switch err { @@ -447,9 +454,9 @@ func (conn *HvsockConn) Read(b []byte) (int, error) { return 0, conn.opErr("read", err) } defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} var flags, bytes uint32 - err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) if err != nil { var eno windows.Errno @@ -482,9 +489,9 @@ func (conn *HvsockConn) write(b []byte) (int, error) { return 0, conn.opErr("write", err) } defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} var bytes uint32 - err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + err = windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) if err != nil { var eno windows.Errno @@ -511,7 +518,7 @@ func (conn *HvsockConn) shutdown(how int) error { return socket.ErrSocketClosed } - err := syscall.Shutdown(conn.sock.handle, how) + err := windows.Shutdown(conn.sock.handle, how) if err != nil { // If the connection was closed, shutdowns fail with "not connected" if errors.Is(err, windows.WSAENOTCONN) || @@ -525,7 +532,7 @@ func (conn *HvsockConn) shutdown(how int) error { // CloseRead shuts down the read end of the socket, preventing future read operations. func (conn *HvsockConn) CloseRead() error { - err := conn.shutdown(syscall.SHUT_RD) + err := conn.shutdown(windows.SHUT_RD) if err != nil { return conn.opErr("closeread", err) } @@ -535,7 +542,7 @@ func (conn *HvsockConn) CloseRead() error { // CloseWrite shuts down the write end of the socket, preventing future write operations and // notifying the other endpoint that no more data will be written. func (conn *HvsockConn) CloseWrite() error { - err := conn.shutdown(syscall.SHUT_WR) + err := conn.shutdown(windows.SHUT_WR) if err != nil { return conn.opErr("closewrite", err) } diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go index 509b3ec64..0cd9621df 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go @@ -11,12 +11,14 @@ import ( //go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew -//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW +//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW const NullHandle windows.Handle = 0 // AccessMask defines standard, specific, and generic rights. // +// Used with CreateFile and NtCreateFile (and co.). 
+// // Bitmask: // 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 // 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 @@ -47,6 +49,12 @@ const ( // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters FILE_ANY_ACCESS AccessMask = 0 + GENERIC_READ AccessMask = 0x8000_0000 + GENERIC_WRITE AccessMask = 0x4000_0000 + GENERIC_EXECUTE AccessMask = 0x2000_0000 + GENERIC_ALL AccessMask = 0x1000_0000 + ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000 + // Specific Object Access // from ntioapi.h @@ -124,14 +132,32 @@ const ( TRUNCATE_EXISTING FileCreationDisposition = 0x05 ) +// Create disposition values for NtCreate* +type NTFileCreationDisposition uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // From ntioapi.h + + FILE_SUPERSEDE NTFileCreationDisposition = 0x00 + FILE_OPEN NTFileCreationDisposition = 0x01 + FILE_CREATE NTFileCreationDisposition = 0x02 + FILE_OPEN_IF NTFileCreationDisposition = 0x03 + FILE_OVERWRITE NTFileCreationDisposition = 0x04 + FILE_OVERWRITE_IF NTFileCreationDisposition = 0x05 + FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05 +) + // CreateFile and co. take flags or attributes together as one parameter. // Define alias until we can use generics to allow both - +// // https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants type FileFlagOrAttribute uint32 //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( // from winnt.h +const ( + // from winnt.h + FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 @@ -145,17 +171,51 @@ const ( // from winnt.h FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 ) +// NtCreate* functions take a dedicated CreateOptions parameter. +// +// https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile +// +// https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file +type NTCreateOptions uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // From ntioapi.h + + FILE_DIRECTORY_FILE NTCreateOptions = 0x0000_0001 + FILE_WRITE_THROUGH NTCreateOptions = 0x0000_0002 + FILE_SEQUENTIAL_ONLY NTCreateOptions = 0x0000_0004 + FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008 + + FILE_SYNCHRONOUS_IO_ALERT NTCreateOptions = 0x0000_0010 + FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020 + FILE_NON_DIRECTORY_FILE NTCreateOptions = 0x0000_0040 + FILE_CREATE_TREE_CONNECTION NTCreateOptions = 0x0000_0080 + + FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100 + FILE_NO_EA_KNOWLEDGE NTCreateOptions = 0x0000_0200 + FILE_DISABLE_TUNNELING NTCreateOptions = 0x0000_0400 + FILE_RANDOM_ACCESS NTCreateOptions = 0x0000_0800 + + FILE_DELETE_ON_CLOSE NTCreateOptions = 0x0000_1000 + FILE_OPEN_BY_FILE_ID NTCreateOptions = 0x0000_2000 + FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000 + FILE_NO_COMPRESSION NTCreateOptions = 0x0000_8000 +) + type FileSQSFlag = FileFlagOrAttribute //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
-const ( // from winbase.h +const ( + // from winbase.h + SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) - SECURITY_SQOS_PRESENT FileSQSFlag = 0x00100000 - SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000 + SECURITY_SQOS_PRESENT FileSQSFlag = 0x0010_0000 + SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000 ) // GetFinalPathNameByHandle flags diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go index e2f7bb24e..a94e234c7 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) return e } @@ -45,7 +42,7 @@ var ( procCreateFileW = modkernel32.NewProc("CreateFileW") ) -func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { +func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(name) if err != nil { @@ -54,8 +51,8 @@ func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall. 
return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile) } -func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) +func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) handle = windows.Handle(r0) if handle == windows.InvalidHandle { err = errnoErr(e1) diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go index aeb7b7250..88580d974 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go @@ -156,9 +156,7 @@ func connectEx( bytesSent *uint32, overlapped *windows.Overlapped, ) (err error) { - // todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN - r1, _, e1 := syscall.Syscall9(connectExFunc.addr, - 7, + r1, _, e1 := syscall.SyscallN(connectExFunc.addr, uintptr(s), uintptr(name), uintptr(namelen), @@ -166,8 +164,8 @@ func connectEx( uintptr(sendDataLen), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), - 0, - 0) + ) + if r1 == 0 { if e1 != 0 { err = error(e1) diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go index 6d2e1a9e4..e1504126a 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
return e } @@ -48,7 +45,7 @@ var ( ) func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socketError { err = errnoErr(e1) } @@ -56,7 +53,7 @@ func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { } func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) if r1 == socketError { err = errnoErr(e1) } @@ -64,7 +61,7 @@ func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err err } func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) if r1 == socketError { err = errnoErr(e1) } diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go index 7ad505702..42ebc019f 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go +++ b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go @@ -62,7 +62,7 @@ func (b *WString) Free() { // ResizeTo grows the buffer to at least c and returns the new capacity, freeing the // previous buffer back into pool. 
func (b *WString) ResizeTo(c uint32) uint32 { - // allready sufficient (or n is 0) + // already sufficient (or n is 0) if c <= b.Cap() { return b.Cap() } diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index 25cc81103..a2da6639d 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -11,7 +11,6 @@ import ( "net" "os" "runtime" - "syscall" "time" "unsafe" @@ -20,20 +19,44 @@ import ( "github.com/Microsoft/go-winio/internal/fs" ) -//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe -//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW -//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo -//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW -//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc -//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile +//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateNamedPipeW +//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe +//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile //sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb //sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U //sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl +type PipeConn interface { + net.Conn + Disconnect() error + Flush() error +} + +// type aliases for mkwinsyscall code +type ( + ntAccessMask = fs.AccessMask + ntFileShareMode = fs.FileShareMode + ntFileCreationDisposition = 
fs.NTFileCreationDisposition + ntFileOptions = fs.NTCreateOptions +) + type ioStatusBlock struct { Status, Information uintptr } +// typedef struct _OBJECT_ATTRIBUTES { +// ULONG Length; +// HANDLE RootDirectory; +// PUNICODE_STRING ObjectName; +// ULONG Attributes; +// PVOID SecurityDescriptor; +// PVOID SecurityQualityOfService; +// } OBJECT_ATTRIBUTES; +// +// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes type objectAttributes struct { Length uintptr RootDirectory uintptr @@ -49,6 +72,17 @@ type unicodeString struct { Buffer uintptr } +// typedef struct _SECURITY_DESCRIPTOR { +// BYTE Revision; +// BYTE Sbz1; +// SECURITY_DESCRIPTOR_CONTROL Control; +// PSID Owner; +// PSID Group; +// PACL Sacl; +// PACL Dacl; +// } SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR; +// +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor type securityDescriptor struct { Revision byte Sbz1 byte @@ -80,6 +114,8 @@ type win32Pipe struct { path string } +var _ PipeConn = (*win32Pipe)(nil) + type win32MessageBytePipe struct { win32Pipe writeClosed bool @@ -103,6 +139,10 @@ func (f *win32Pipe) SetDeadline(t time.Time) error { return f.SetWriteDeadline(t) } +func (f *win32Pipe) Disconnect() error { + return disconnectNamedPipe(f.win32File.handle) +} + // CloseWrite closes the write side of a message pipe in byte mode. func (f *win32MessageBytePipe) CloseWrite() error { if f.writeClosed { @@ -146,7 +186,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) { // zero-byte message, ensure that all future Read() calls // also return EOF. f.readEOF = true - } else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno + } else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno // ERROR_MORE_DATA indicates that the pipe's read mode is message mode // and the message still has more bytes. Treat this as a success, since // this package presents all named pipes as byte streams. @@ -164,21 +204,20 @@ func (s pipeAddress) String() string { } // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. -func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) { +func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) { for { select { case <-ctx.Done(): - return syscall.Handle(0), ctx.Err() + return windows.Handle(0), ctx.Err() default: - wh, err := fs.CreateFile(*path, + h, err := fs.CreateFile(*path, access, 0, // mode nil, // security attributes fs.OPEN_EXISTING, - fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS, + fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel), 0, // template file handle ) - h := syscall.Handle(wh) if err == nil { return h, nil } @@ -214,15 +253,33 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { // DialPipeContext attempts to connect to a named pipe by `path` until `ctx` // cancellation or timeout. func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { - return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) + return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE)) } +// PipeImpLevel is an enumeration of impersonation levels that may be set +// when calling DialPipeAccessImpLevel.
+type PipeImpLevel uint32 + +const ( + PipeImpLevelAnonymous = PipeImpLevel(fs.SECURITY_ANONYMOUS) + PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION) + PipeImpLevelImpersonation = PipeImpLevel(fs.SECURITY_IMPERSONATION) + PipeImpLevelDelegation = PipeImpLevel(fs.SECURITY_DELEGATION) +) + // DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` // cancellation or timeout. func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { + return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous) +} + +// DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with +// `access` at `impLevel` until `ctx` cancellation or timeout. The other +// DialPipe* implementations use PipeImpLevelAnonymous. +func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) { var err error - var h syscall.Handle - h, err = tryDialPipe(ctx, &path, fs.AccessMask(access)) + var h windows.Handle + h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel) if err != nil { return nil, err } @@ -235,7 +292,7 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, f, err := makeWin32File(h) if err != nil { - syscall.Close(h) + windows.Close(h) return nil, err } @@ -255,7 +312,7 @@ type acceptResponse struct { } type win32PipeListener struct { - firstHandle syscall.Handle + firstHandle windows.Handle path string config PipeConfig acceptCh chan (chan acceptResponse) @@ -263,8 +320,8 @@ type win32PipeListener struct { doneCh chan int } -func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { - path16, err := syscall.UTF16FromString(path) +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) { + path16, err := windows.UTF16FromString(path) if err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } @@ -280,16 +337,20 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy ).Err(); err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } - defer localFree(ntPath.Buffer) + defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck oa.ObjectName = &ntPath oa.Attributes = windows.OBJ_CASE_INSENSITIVE // The security descriptor is only needed for the first pipe. if first { if sd != nil { + //todo: does `sdb` need to be allocated on the heap, or can go allocate it? 
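// Illustration (not part of the vendored diff): a minimal, Windows-only sketch of
// the DialPipeAccessImpLevel API added above. The pipe name below is hypothetical,
// and the access mask is spelled as raw Win32 bits (GENERIC_READ|GENERIC_WRITE)
// so the sketch does not reach into the package's internal fs module.
package main

import (
	"context"
	"log"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	const access = 0x80000000 | 0x40000000 // GENERIC_READ | GENERIC_WRITE

	// Dial with an explicit impersonation level; per the diff, the other
	// DialPipe* helpers default to PipeImpLevelAnonymous.
	conn, err := winio.DialPipeAccessImpLevel(ctx, `\\.\pipe\example`, access, winio.PipeImpLevelImpersonation)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Printf("connected to %s", conn.RemoteAddr())
}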
l := uint32(len(sd)) - sdb := localAlloc(0, l) - defer localFree(sdb) + sdb, err := windows.LocalAlloc(0, l) + if err != nil { + return 0, fmt.Errorf("LocalAlloc for security descriptor of length %d: %w", l, err) + } + defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) } else { @@ -298,7 +359,7 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { return 0, fmt.Errorf("getting default named pipe ACL: %w", err) } - defer localFree(dacl) + defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck sdb := &securityDescriptor{ Revision: 1, @@ -314,27 +375,27 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy typ |= windows.FILE_PIPE_MESSAGE_TYPE } - disposition := uint32(windows.FILE_OPEN) - access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + disposition := fs.FILE_OPEN + access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE if first { - disposition = windows.FILE_CREATE + disposition = fs.FILE_CREATE // By not asking for read or write access, the named pipe file system // will put this pipe into an initially disconnected state, blocking // client connections until the next call with first == false. - access = syscall.SYNCHRONIZE + access = fs.SYNCHRONIZE } timeout := int64(-50 * 10000) // 50ms var ( - h syscall.Handle + h windows.Handle iosb ioStatusBlock ) err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, + fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE, disposition, 0, typ, @@ -359,7 +420,7 @@ func (l *win32PipeListener) makeServerPipe() (*win32File, error) { } f, err := makeWin32File(h) if err != nil { - syscall.Close(h) + windows.Close(h) return nil, err } return f, nil @@ -418,7 +479,7 @@ func (l *win32PipeListener) listenerRoutine() { closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno } } - syscall.Close(l.firstHandle) + windows.Close(l.firstHandle) l.firstHandle = 0 // Notify Close() and Accept() callers that the handle has been closed.
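// Illustration (not part of the vendored diff): how a server might use the new
// PipeConn surface from this file. ListenPipe is the package's existing listener
// constructor; the pipe name is hypothetical and error handling is abbreviated.
package main

import (
	"log"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	l, err := winio.ListenPipe(`\\.\pipe\example`, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	conn, err := l.Accept()
	if err != nil {
		log.Fatal(err)
	}
	if pc, ok := conn.(winio.PipeConn); ok {
		_ = pc.Flush()      // let buffered writes reach the client first
		_ = pc.Disconnect() // server-side DisconnectNamedPipe; the handle stays open
	}
	_ = conn.Close()
}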
close(l.doneCh) diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go index 0ff9dac90..d9b90b6e8 100644 --- a/vendor/github.com/Microsoft/go-winio/privilege.go +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -9,7 +9,6 @@ import ( "fmt" "runtime" "sync" - "syscall" "unicode/utf16" "golang.org/x/sys/windows" @@ -18,8 +17,8 @@ import ( //sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges //sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf //sys revertToSelf() (err error) = advapi32.RevertToSelf -//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken -//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h windows.Handle) = GetCurrentThread //sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW //sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW //sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW @@ -29,7 +28,7 @@ const ( SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED //revive:disable-next-line:var-naming ALL_CAPS - ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED + ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED SeBackupPrivilege = "SeBackupPrivilege" SeRestorePrivilege = "SeRestorePrivilege" @@ -177,7 +176,7 @@ func newThreadToken() (windows.Token, error) { } var token windows.Token - err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) + err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token) if err != nil { rerr := revertToSelf() if rerr != nil { diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go index 5550ef6b6..c3685e98e 100644 --- a/vendor/github.com/Microsoft/go-winio/sd.go +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -5,7 +5,7 @@ package winio import ( "errors" - "syscall" + "fmt" "unsafe" "golang.org/x/sys/windows" @@ -15,10 +15,6 @@ import ( //sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW //sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW //sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW -//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW -//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = 
advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW -//sys localFree(mem uintptr) = LocalFree -//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength type AccountLookupError struct { Name string @@ -64,7 +60,7 @@ func LookupSidByName(name string) (sid string, err error) { var sidSize, sidNameUse, refDomainSize uint32 err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno + if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno return "", &AccountLookupError{name, err} } sidBuffer := make([]byte, sidSize) @@ -78,8 +74,8 @@ func LookupSidByName(name string) (sid string, err error) { if err != nil { return "", &AccountLookupError{name, err} } - sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) - localFree(uintptr(unsafe.Pointer(strBuffer))) + sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) + _, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer))) return sid, nil } @@ -100,7 +96,7 @@ func LookupNameBySid(sid string) (name string, err error) { if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { return "", &AccountLookupError{sid, err} } - defer localFree(uintptr(unsafe.Pointer(sidPtr))) + defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck var nameSize, refDomainSize, sidNameUse uint32 err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) @@ -120,25 +116,18 @@ func LookupNameBySid(sid string) (name string, err error) { } func SddlToSecurityDescriptor(sddl string) ([]byte, error) { - var sdBuffer uintptr - err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) + sd, err := windows.SecurityDescriptorFromString(sddl) if err != nil { - return nil, &SddlConversionError{sddl, err} + return nil, &SddlConversionError{Sddl: sddl, Err: err} } - defer localFree(sdBuffer) - sd := make([]byte, getSecurityDescriptorLength(sdBuffer)) - copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) - return sd, nil + b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length()) + return b, nil } func SecurityDescriptorToSddl(sd []byte) (string, error) { - var sddl *uint16 - // The returned string length seems to include an arbitrary number of terminating NULs. - // Don't use it. 
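// Illustration (not part of the vendored diff): the rewritten SDDL helpers above
// now delegate to golang.org/x/sys/windows instead of hand-rolled advapi32 stubs.
// A round-trip sketch under that assumption; the SDDL string (a protected DACL
// granting GENERIC_ALL to Everyone) is only an example.
package main

import (
	"fmt"
	"log"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;WD)")
	if err != nil {
		log.Fatal(err)
	}
	sddl, err := winio.SecurityDescriptorToSddl(sd)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sddl) // the descriptor rendered back to (normalized) SDDL
}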
- err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) - if err != nil { - return "", err + if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l { + return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE) } - defer localFree(uintptr(unsafe.Pointer(sddl))) - return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil + s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0])) + return s.String(), nil } diff --git a/vendor/github.com/Microsoft/go-winio/tools.go b/vendor/github.com/Microsoft/go-winio/tools.go deleted file mode 100644 index 2aa045843..000000000 --- a/vendor/github.com/Microsoft/go-winio/tools.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build tools - -package winio - -import _ "golang.org/x/tools/cmd/stringer" diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 469b16f63..89b66eda8 100644 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) return e } @@ -45,38 +42,34 @@ var ( modntdll = windows.NewLazySystemDLL("ntdll.dll") modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") - procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") - procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procBackupRead = modkernel32.NewProc("BackupRead") - procBackupWrite = modkernel32.NewProc("BackupWrite") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procLocalFree 
= modkernel32.NewProc("LocalFree") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") - procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") - procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") ) func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { @@ -84,7 +77,7 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou if releaseAll { _p0 = 1 } - r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) success = r0 != 0 if true { err = errnoErr(e1) @@ -92,33 +85,8 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou return } -func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if err != nil { - return - } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) -} - -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str))) if r1 == 0 { err = errnoErr(e1) } @@ -126,21 +94,15 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision } func convertStringSidToSid(str *uint16, sid **byte) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } return } -func getSecurityDescriptorLength(sd uintptr) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) - len = uint32(r0) - return -} - func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level)) if r1 == 0 { err = errnoErr(e1) } @@ -157,7 +119,7 @@ func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSiz } func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) if r1 == 0 { err = errnoErr(e1) } @@ -165,7 +127,7 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS } func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, 
uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) if r1 == 0 { err = errnoErr(e1) } @@ -182,7 +144,7 @@ func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, } func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId))) if r1 == 0 { err = errnoErr(e1) } @@ -199,7 +161,7 @@ func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size * } func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -221,19 +183,19 @@ func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err err } func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { err = errnoErr(e1) } return } -func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { +func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { var _p0 uint32 if openAsSelf { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -241,14 +203,14 @@ func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, } func revertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) if r1 == 0 { err = errnoErr(e1) } return } -func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, 
processSecurity bool, context *uintptr) (err error) { +func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { var _p0 *byte if len(b) > 0 { _p0 = &b[0] @@ -261,14 +223,14 @@ func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, proce if processSecurity { _p2 = 1 } - r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } return } -func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { +func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { var _p0 *byte if len(b) > 0 { _p0 = &b[0] @@ -281,39 +243,39 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p if processSecurity { _p2 = 1 } - r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + r1, _, e1 := syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } return } -func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) +func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) { + r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } return } -func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) +func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) { + r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } return } -func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) - newport = syscall.Handle(r0) +func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount)) + newport = windows.Handle(r0) if newport == 0 { err = errnoErr(e1) } return } -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize 
uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(name) if err != nil { @@ -322,96 +284,93 @@ func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances ui return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) } -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { err = errnoErr(e1) } return } -func getCurrentThread() (h syscall.Handle) { - r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) - h = syscall.Handle(r0) - return -} - -func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) +func disconnectNamedPipe(pipe windows.Handle) (err error) { + r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) if r1 == 0 { err = errnoErr(e1) } return } -func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) - if r1 == 0 { - err = errnoErr(e1) - } +func getCurrentThread() (h windows.Handle) { + r0, _, _ := syscall.SyscallN(procGetCurrentThread.Addr()) + h = windows.Handle(r0) return } -func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) +func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), 
uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) if r1 == 0 { err = errnoErr(e1) } return } -func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) - ptr = uintptr(r0) +func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) + if r1 == 0 { + err = errnoErr(e1) + } return } -func localFree(mem uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) +func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout)) + if r1 == 0 { + err = errnoErr(e1) + } return } -func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) +func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } return } -func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) +func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { + r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout))) status = ntStatus(r0) return } func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl))) status = ntStatus(r0) return } func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { - r0, _, _ := 
syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved)) status = ntStatus(r0) return } func rtlNtStatusToDosError(status ntStatus) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status)) if r0 != 0 { winerr = syscall.Errno(r0) } return } -func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { +func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { var _p0 uint32 if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index b6d122c15..3aa0224a2 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -298,6 +298,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -331,6 +337,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -807,6 +822,12 @@ var awsPartition = partition{ }, "airflow": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -831,6 +852,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -840,6 +864,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -849,6 +876,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -865,6 +895,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -923,6 +956,9 @@ var awsPartition = partition{ endpointKey{ Region: 
"ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -981,6 +1017,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -1036,6 +1075,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -1048,6 +1090,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -1900,6 +1948,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -1924,6 +1975,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -4783,9 +4837,15 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, endpointKey{ Region: "bedrock-ap-northeast-1", }: endpoint{ @@ -4794,6 +4854,14 @@ var awsPartition = partition{ Region: "ap-northeast-1", }, }, + endpointKey{ + Region: "bedrock-ap-south-1", + }: endpoint{ + Hostname: "bedrock.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: "bedrock-ap-southeast-1", }: endpoint{ @@ -4802,6 +4870,14 @@ var awsPartition = partition{ Region: "ap-southeast-1", }, }, + endpointKey{ + Region: "bedrock-ap-southeast-2", + }: endpoint{ + Hostname: "bedrock.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, endpointKey{ Region: "bedrock-eu-central-1", }: endpoint{ @@ -4810,6 +4886,22 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "bedrock-eu-west-1", + }: endpoint{ + Hostname: "bedrock.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "bedrock-eu-west-3", + }: endpoint{ + Hostname: "bedrock.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, endpointKey{ Region: "bedrock-fips-us-east-1", }: endpoint{ @@ -4834,6 +4926,14 @@ var awsPartition = partition{ Region: "ap-northeast-1", }, }, + endpointKey{ + Region: "bedrock-runtime-ap-south-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: "bedrock-runtime-ap-southeast-1", }: endpoint{ @@ -4842,6 +4942,14 @@ var awsPartition = partition{ Region: "ap-southeast-1", }, }, + endpointKey{ + Region: "bedrock-runtime-ap-southeast-2", + }: endpoint{ + Hostname: "bedrock-runtime.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, endpointKey{ Region: "bedrock-runtime-eu-central-1", }: endpoint{ @@ -4850,6 +4958,22 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ 
+ Region: "bedrock-runtime-eu-west-1", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-west-3", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, endpointKey{ Region: "bedrock-runtime-fips-us-east-1", }: endpoint{ @@ -4901,6 +5025,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -4925,6 +5055,9 @@ var awsPartition = partition{ }, "braket": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, @@ -6852,6 +6985,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -6861,6 +6997,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6876,6 +7015,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6988,6 +7130,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -6997,6 +7142,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7012,6 +7160,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7901,6 +8052,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -13536,6 +13705,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fms-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -13662,6 +13840,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "fms-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", }: endpoint{ @@ -14047,6 +14234,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fsx-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + 
Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -14080,6 +14276,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-prod-ca-central-1", }: endpoint{ @@ -14089,6 +14294,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-prod-ca-west-1", + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-prod-us-east-1", }: endpoint{ @@ -14188,6 +14402,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "prod-ca-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "prod-us-east-1", }: endpoint{ @@ -17039,6 +17271,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kafka-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17072,6 +17313,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "kafka-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -18806,6 +19056,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19105,6 +19358,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19980,12 +20236,30 @@ var awsPartition = partition{ }, "media-pipelines-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -20189,6 +20463,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -20244,6 +20521,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: 
"ap-southeast-1", }: endpoint{}, @@ -20298,6 +20578,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -22057,6 +22340,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "oidc.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -22129,6 +22420,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "oidc.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -23214,6 +23513,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "portal.sso.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -23286,6 +23593,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "portal.sso.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -25277,6 +25592,12 @@ var awsPartition = partition{ }, "resource-explorer-2": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -25289,6 +25610,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -25298,15 +25622,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -25316,6 +25655,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -25812,33 +26157,81 @@ var awsPartition = partition{ }, "rum": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ 
Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -26608,6 +27001,44 @@ var awsPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "s3-control.af-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.af-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "s3-control.ap-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{ @@ -26684,6 +27115,25 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "s3-control.ap-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -26722,6 +27172,44 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "s3-control.ap-southeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "s3-control.ap-southeast-4.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-4.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: 
"ca-central-1", }: endpoint{ @@ -26771,6 +27259,55 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "s3-control.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -26790,6 +27327,25 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "s3-control.eu-central-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-central-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -26809,6 +27365,44 @@ var awsPartition = partition{ Region: "eu-north-1", }, }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "s3-control.eu-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "s3-control.eu-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -26866,6 +27460,63 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "s3-control.il-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.il-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + 
CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "s3-control.me-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.me-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "s3-control.me-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.me-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, endpointKey{ Region: "sa-east-1", }: endpoint{ @@ -28178,21 +28829,85 @@ var awsPartition = partition{ }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-2", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-1", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, }, }, "servicecatalog": service{ @@ -28640,6 +29355,36 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.api.aws", + }, + 
endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -29872,6 +30617,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -30838,6 +31586,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -30865,6 +31616,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -32423,6 +33177,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "transfer-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -32456,6 +33219,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "transfer-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -32682,6 +33454,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -32706,6 +33493,63 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: 
endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -32718,15 +33562,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + }, }, }, "voice-chime": service{ @@ -32886,6 +33754,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -32907,6 +33781,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -33915,6 +34792,23 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "wafv2.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -34159,6 +35053,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "wafv2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", }: endpoint{ @@ -35760,6 +36663,19 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "entitlement.marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "entitlement-marketplace.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "es": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36197,7 +37113,7 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-northwest-1", }: endpoint{ - Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + Hostname: "mediaconvert.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", }, @@ -37925,12 +38841,44 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{ + Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", + Protocols: 
[]string{"http", "https"}, + + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ + Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, }, }, }, @@ -38014,6 +38962,22 @@ var awsusgovPartition = partition{ }, "bedrock": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "bedrock-runtime-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-runtime.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -38629,9 +39593,39 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "data-ats.iot": service{ @@ -39511,6 +40505,15 @@ var awsusgovPartition = partition{ }, "email": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "email-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -39520,6 +40523,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -39533,22 +40545,82 @@ var awsusgovPartition = partition{ }, "emr-containers": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "emr-containers.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "emr-containers.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"emr-containers.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers.us-gov-west-1.amazonaws.com", + }, }, }, "emr-serverless": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "emr-serverless.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "emr-serverless.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless.us-gov-west-1.amazonaws.com", + }, }, }, "es": service{ @@ -40949,6 +42021,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "license-manager-user-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "logs": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -42718,12 +43800,74 @@ var awsusgovPartition = partition{ }, "signer": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "signer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "signer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-verification-us-gov-east-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-gov-west-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "verification-us-gov-east-1", + }: endpoint{ + Hostname: "verification.signer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "verification-us-gov-west-1", + }: endpoint{ + Hostname: "verification.signer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "simspaceweaver": service{ @@ -43551,6 +44695,46 @@ var awsusgovPartition = partition{ }, }, }, + "verifiedpermissions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: 
"verifiedpermissions-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "waf-regional": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44313,6 +45497,55 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-prod-us-iso-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-iso-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + }, + }, + }, "glacier": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44812,6 +46045,131 @@ var awsisoPartition = partition{ }, }, }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Hostname: "s3-control.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ 
+ Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{ + Hostname: "s3-control.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44827,6 +46185,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "sns": service{ @@ -45345,6 +46706,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "glacier": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -45432,6 +46800,20 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "metering.marketplace": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -45651,6 +47033,82 @@ var awsisobPartition = partition{ }, }, }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ + Hostname: "s3-control.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + 
Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 466742536..2bf54dd1e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.51.7" +const SDKVersion = "1.53.9" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 058334053..2ca0b19db 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -122,8 +122,8 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri } func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { + // If it's empty, and not ec2, generate an empty value + if !value.IsNil() && value.Len() == 0 && !q.isEC2 { v.Set(prefix, "") return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index e571e38a6..4f0147de2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -4006,6 +4006,11 @@ func (c *EC2) CancelSpotFleetRequestsRequest(input *CancelSpotFleetRequestsInput // enters the cancelled_running state and the instances continue to run until // they are interrupted or you terminate them manually. // +// Restrictions +// +// - You can delete up to 100 fleets in a single request. If you exceed the +// specified number, no fleets are deleted. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5318,10 +5323,10 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // that you specify 2. Broadcast and multicast are not supported. For more // information about NetBIOS node types, see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt). 
// -// - ipv6-preferred-lease-time - A value (in seconds, minutes, hours, or -// years) for how frequently a running instance with an IPv6 assigned to -// it goes through DHCPv6 lease renewal. Acceptable values are between 140 -// and 2147483647 seconds (approximately 68 years). If no value is entered, +// - ipv6-address-preferred-lease-time - A value (in seconds, minutes, hours, +// or years) for how frequently a running instance with an IPv6 assigned +// to it goes through DHCPv6 lease renewal. Acceptable values are between +// 140 and 2147483647 seconds (approximately 68 years). If no value is entered, // the default lease time is 140 seconds. If you use long-term addressing // for EC2 instances, you can increase the lease time and avoid frequent // lease renewal requests. Lease renewal typically occurs when half of the @@ -6548,10 +6553,10 @@ func (c *EC2) CreateLaunchTemplateRequest(input *CreateLaunchTemplateInput) (req // see Launch an instance from a launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) // in the Amazon Elastic Compute Cloud User Guide. // -// If you want to clone an existing launch template as the basis for creating -// a new launch template, you can use the Amazon EC2 console. The API, SDKs, -// and CLI do not support cloning a template. For more information, see Create -// a launch template from an existing launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template-from-existing-launch-template) +// To clone an existing launch template as the basis for a new launch template, +// use the Amazon EC2 console. The API, SDKs, and CLI do not support cloning +// a template. For more information, see Create a launch template from an existing +// launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template-from-existing-launch-template) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6625,15 +6630,17 @@ func (c *EC2) CreateLaunchTemplateVersionRequest(input *CreateLaunchTemplateVers // CreateLaunchTemplateVersion API operation for Amazon Elastic Compute Cloud. // -// Creates a new version of a launch template. You can specify an existing version -// of launch template from which to base the new version. +// Creates a new version of a launch template. You must specify an existing +// launch template, either by name or ID. You can determine whether the new +// version inherits parameters from a source version, and add or overwrite parameters +// as needed. // // Launch template versions are numbered in the order in which they are created. -// You cannot specify, change, or replace the numbering of launch template versions. +// You can't specify, change, or replace the numbering of launch template versions. // // Launch templates are immutable; after you create a launch template, you can't // modify it. Instead, you can create a new version of the launch template that -// includes any changes you require. +// includes the changes that you require. // // For more information, see Modify a launch template (manage launch template // versions) (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#manage-launch-template-versions) @@ -11804,17 +11811,22 @@ func (c *EC2) DeleteFleetsRequest(input *DeleteFleetsInput) (req *request.Reques // manually. 
// // For instant fleets, EC2 Fleet must terminate the instances when the fleet -// is deleted. A deleted instant fleet with running instances is not supported. +// is deleted. Up to 1000 instances can be terminated in a single request to +// delete instant fleets. A deleted instant fleet with running instances is +// not supported. // // Restrictions // -// - You can delete up to 25 instant fleets in a single request. If you exceed -// this number, no instant fleets are deleted and an error is returned. There -// is no restriction on the number of fleets of type maintain or request -// that can be deleted in a single request. +// - You can delete up to 25 fleets of type instant in a single request. +// +// - You can delete up to 100 fleets of type maintain or request in a single +// request. +// +// - You can delete up to 125 fleets in a single request, provided you do +// not exceed the quota for each fleet type, as specified above. // -// - Up to 1000 instances can be terminated in a single request to delete -// instant fleets. +// - If you exceed the specified number of fleets to delete, no fleets are +// deleted. // // For more information, see Delete an EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#delete-fleet) // in the Amazon EC2 User Guide. @@ -15561,9 +15573,10 @@ func (c *EC2) DeleteTransitGatewayRouteTableRequest(input *DeleteTransitGatewayR // DeleteTransitGatewayRouteTable API operation for Amazon Elastic Compute Cloud. // -// Deletes the specified transit gateway route table. You must disassociate -// the route table from any transit gateway route tables before you can delete -// it. +// Deletes the specified transit gateway route table. If there are any route +// tables associated with the transit gateway route table, you must first run +// DisassociateRouteTable before you can delete the transit gateway route table. +// This removes any route tables associated with the transit gateway route table. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -22199,6 +22212,9 @@ func (c *EC2) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Re // AMI are terminated, specifying the ID of the image will eventually return // an error indicating that the AMI ID cannot be found. // +// We strongly recommend using only paginated requests. Unpaginated requests +// are susceptible to throttling and timeouts. +// // The order of the elements in the response, including those within nested // structures, might vary. Applications should not assume the elements appear // in a particular order. @@ -23467,9 +23483,9 @@ func (c *EC2) DescribeInstanceTypeOfferingsRequest(input *DescribeInstanceTypeOf // DescribeInstanceTypeOfferings API operation for Amazon Elastic Compute Cloud. // -// Returns a list of all instance types offered. The results can be filtered -// by location (Region or Availability Zone). If no location is specified, the -// instance types offered in the current Region are returned. +// Lists the instance types that are offered for the specified location. If +// no location is specified, the default is to list the instance types that +// are offered in the current Region. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -23599,8 +23615,8 @@ func (c *EC2) DescribeInstanceTypesRequest(input *DescribeInstanceTypesInput) (r // DescribeInstanceTypes API operation for Amazon Elastic Compute Cloud. // -// Describes the details of the instance types that are offered in a location. -// The results can be filtered by the attributes of the instance types. +// Describes the specified instance types. By default, all instance types for +// the current Region are described. Alternatively, you can filter the results. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -23751,6 +23767,9 @@ func (c *EC2) DescribeInstancesRequest(input *DescribeInstancesInput) (req *requ // If you describe instances and specify only instance IDs that are in an unaffected // zone, the call works normally. // +// We strongly recommend using only paginated requests. Unpaginated requests +// are susceptible to throttling and timeouts. +// // The order of the elements in the response, including those within nested // structures, might vary. Applications should not assume the elements appear // in a particular order. @@ -27463,6 +27482,9 @@ func (c *EC2) DescribeNetworkInterfacesRequest(input *DescribeNetworkInterfacesI // you use pagination or one of the following filters: group-id, mac-address, // private-dns-name, private-ip-address, private-dns-name, subnet-id, or vpc-id. // +// We strongly recommend using only paginated requests. Unpaginated requests +// are susceptible to throttling and timeouts. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -29749,6 +29771,9 @@ func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *requ // For more information about EBS snapshots, see Amazon EBS snapshots (https://docs.aws.amazon.com/ebs/latest/userguide/ebs-snapshots.html) // in the Amazon EBS User Guide. // +// We strongly recommend using only paginated requests. Unpaginated requests +// are susceptible to throttling and timeouts. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -30939,6 +30964,9 @@ func (c *EC2) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Reques // For more information about tags, see Tag your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) // in the Amazon Elastic Compute Cloud User Guide. // +// We strongly recommend using only paginated requests. Unpaginated requests +// are susceptible to throttling and timeouts. +// // The order of the elements in the response, including those within nested // structures, might vary. Applications should not assume the elements appear // in a particular order. @@ -33803,6 +33831,9 @@ func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request. // For more information about EBS volumes, see Amazon EBS volumes (https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volumes.html) // in the Amazon EBS User Guide. // +// We strongly recommend using only paginated requests. Unpaginated requests +// are susceptible to throttling and timeouts. 
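//
// Illustrative usage for the pagination guidance added in this bump (it
// appears on DescribeImages, DescribeInstances, DescribeNetworkInterfaces,
// DescribeSnapshots, DescribeTags, and DescribeVolumes): the SDK already
// ships Pages helpers for these operations. A minimal sketch, assuming a
// pre-configured *session.Session named sess and the usual aws-sdk-go
// imports:
//
//	svc := ec2.New(sess)
//	err := svc.DescribeVolumesPages(&ec2.DescribeVolumesInput{},
//		func(page *ec2.DescribeVolumesOutput, lastPage bool) bool {
//			fmt.Println(len(page.Volumes), "volumes on this page")
//			return true // keep paging until the last page
//		})
//	if err != nil {
//		log.Fatal(err)
//	}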
+// // The order of the elements in the response, including those within nested // structures, might vary. Applications should not assume the elements appear // in a particular order. @@ -36560,6 +36591,87 @@ func (c *EC2) DisableImageDeprecationWithContext(ctx aws.Context, input *Disable return out, req.Send() } +const opDisableImageDeregistrationProtection = "DisableImageDeregistrationProtection" + +// DisableImageDeregistrationProtectionRequest generates a "aws/request.Request" representing the +// client's request for the DisableImageDeregistrationProtection operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableImageDeregistrationProtection for more information on using the DisableImageDeregistrationProtection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DisableImageDeregistrationProtectionRequest method. +// req, resp := client.DisableImageDeregistrationProtectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableImageDeregistrationProtection +func (c *EC2) DisableImageDeregistrationProtectionRequest(input *DisableImageDeregistrationProtectionInput) (req *request.Request, output *DisableImageDeregistrationProtectionOutput) { + op := &request.Operation{ + Name: opDisableImageDeregistrationProtection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableImageDeregistrationProtectionInput{} + } + + output = &DisableImageDeregistrationProtectionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisableImageDeregistrationProtection API operation for Amazon Elastic Compute Cloud. +// +// Disables deregistration protection for an AMI. When deregistration protection +// is disabled, the AMI can be deregistered. +// +// If you chose to include a 24-hour cooldown period when you enabled deregistration +// protection for the AMI, then, when you disable deregistration protection, +// you won’t immediately be able to deregister the AMI. +// +// For more information, see Protect an AMI from deregistration (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/deregister-ami.html#ami-deregistration-protection) +// in the Amazon EC2 User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisableImageDeregistrationProtection for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableImageDeregistrationProtection +func (c *EC2) DisableImageDeregistrationProtection(input *DisableImageDeregistrationProtectionInput) (*DisableImageDeregistrationProtectionOutput, error) { + req, out := c.DisableImageDeregistrationProtectionRequest(input) + return out, req.Send() +} + +// DisableImageDeregistrationProtectionWithContext is the same as DisableImageDeregistrationProtection with the addition of +// the ability to pass a context and additional request options. +// +// See DisableImageDeregistrationProtection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisableImageDeregistrationProtectionWithContext(ctx aws.Context, input *DisableImageDeregistrationProtectionInput, opts ...request.Option) (*DisableImageDeregistrationProtectionOutput, error) { + req, out := c.DisableImageDeregistrationProtectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDisableIpamOrganizationAdminAccount = "DisableIpamOrganizationAdminAccount" // DisableIpamOrganizationAdminAccountRequest generates a "aws/request.Request" representing the @@ -38893,6 +39005,86 @@ func (c *EC2) EnableImageDeprecationWithContext(ctx aws.Context, input *EnableIm return out, req.Send() } +const opEnableImageDeregistrationProtection = "EnableImageDeregistrationProtection" + +// EnableImageDeregistrationProtectionRequest generates a "aws/request.Request" representing the +// client's request for the EnableImageDeregistrationProtection operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableImageDeregistrationProtection for more information on using the EnableImageDeregistrationProtection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the EnableImageDeregistrationProtectionRequest method. +// req, resp := client.EnableImageDeregistrationProtectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableImageDeregistrationProtection +func (c *EC2) EnableImageDeregistrationProtectionRequest(input *EnableImageDeregistrationProtectionInput) (req *request.Request, output *EnableImageDeregistrationProtectionOutput) { + op := &request.Operation{ + Name: opEnableImageDeregistrationProtection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableImageDeregistrationProtectionInput{} + } + + output = &EnableImageDeregistrationProtectionOutput{} + req = c.newRequest(op, input, output) + return +} + +// EnableImageDeregistrationProtection API operation for Amazon Elastic Compute Cloud. +// +// Enables deregistration protection for an AMI. When deregistration protection +// is enabled, the AMI can't be deregistered. 
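//
// Illustrative usage for the new deregistration protection pair of
// operations: a minimal sketch, assuming an *ec2.EC2 client named svc; the
// WithCooldown field is inferred from the 24-hour cooldown wording in the
// doc text above:
//
//	_, err := svc.EnableImageDeregistrationProtection(
//		&ec2.EnableImageDeregistrationProtectionInput{
//			ImageId:      aws.String("ami-0123456789abcdef0"), // illustrative ID
//			WithCooldown: aws.Bool(true),                      // opt into the 24-hour cooldown
//		})
//	// Later, before the AMI can be deregistered again:
//	_, err = svc.DisableImageDeregistrationProtection(
//		&ec2.DisableImageDeregistrationProtectionInput{
//			ImageId: aws.String("ami-0123456789abcdef0"),
//		})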
+// +// To allow the AMI to be deregistered, you must first disable deregistration +// protection using DisableImageDeregistrationProtection. +// +// For more information, see Protect an AMI from deregistration (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/deregister-ami.html#ami-deregistration-protection) +// in the Amazon EC2 User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation EnableImageDeregistrationProtection for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableImageDeregistrationProtection +func (c *EC2) EnableImageDeregistrationProtection(input *EnableImageDeregistrationProtectionInput) (*EnableImageDeregistrationProtectionOutput, error) { + req, out := c.EnableImageDeregistrationProtectionRequest(input) + return out, req.Send() +} + +// EnableImageDeregistrationProtectionWithContext is the same as EnableImageDeregistrationProtection with the addition of +// the ability to pass a context and additional request options. +// +// See EnableImageDeregistrationProtection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableImageDeregistrationProtectionWithContext(ctx aws.Context, input *EnableImageDeregistrationProtectionInput, opts ...request.Option) (*EnableImageDeregistrationProtectionOutput, error) { + req, out := c.EnableImageDeregistrationProtectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opEnableIpamOrganizationAdminAccount = "EnableIpamOrganizationAdminAccount" // EnableIpamOrganizationAdminAccountRequest generates a "aws/request.Request" representing the @@ -40523,6 +40715,9 @@ func (c *EC2) GetConsoleScreenshotRequest(input *GetConsoleScreenshotInput) (req // // The returned content is Base64-encoded. // +// For more information, see Instance console output (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/troubleshoot-unreachable-instance.html#instance-console-console-output) +// in the Amazon EC2 User Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -41235,6 +41430,80 @@ func (c *EC2) GetInstanceMetadataDefaultsWithContext(ctx aws.Context, input *Get return out, req.Send() } +const opGetInstanceTpmEkPub = "GetInstanceTpmEkPub" + +// GetInstanceTpmEkPubRequest generates a "aws/request.Request" representing the +// client's request for the GetInstanceTpmEkPub operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetInstanceTpmEkPub for more information on using the GetInstanceTpmEkPub +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// // Example sending a request using the GetInstanceTpmEkPubRequest method. +// req, resp := client.GetInstanceTpmEkPubRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetInstanceTpmEkPub +func (c *EC2) GetInstanceTpmEkPubRequest(input *GetInstanceTpmEkPubInput) (req *request.Request, output *GetInstanceTpmEkPubOutput) { + op := &request.Operation{ + Name: opGetInstanceTpmEkPub, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetInstanceTpmEkPubInput{} + } + + output = &GetInstanceTpmEkPubOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetInstanceTpmEkPub API operation for Amazon Elastic Compute Cloud. +// +// Gets the public endorsement key associated with the Nitro Trusted Platform +// Module (NitroTPM) for the specified instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetInstanceTpmEkPub for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetInstanceTpmEkPub +func (c *EC2) GetInstanceTpmEkPub(input *GetInstanceTpmEkPubInput) (*GetInstanceTpmEkPubOutput, error) { + req, out := c.GetInstanceTpmEkPubRequest(input) + return out, req.Send() +} + +// GetInstanceTpmEkPubWithContext is the same as GetInstanceTpmEkPub with the addition of +// the ability to pass a context and additional request options. +// +// See GetInstanceTpmEkPub for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetInstanceTpmEkPubWithContext(ctx aws.Context, input *GetInstanceTpmEkPubInput, opts ...request.Option) (*GetInstanceTpmEkPubOutput, error) { + req, out := c.GetInstanceTpmEkPubRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetInstanceTypesFromInstanceRequirements = "GetInstanceTypesFromInstanceRequirements" // GetInstanceTypesFromInstanceRequirementsRequest generates a "aws/request.Request" representing the @@ -47291,9 +47560,9 @@ func (c *EC2) ModifyInstanceMetadataDefaultsRequest(input *ModifyInstanceMetadat // level in the specified Amazon Web Services Region. // // To remove a parameter's account-level default setting, specify no-preference. -// At instance launch, the value will come from the AMI, or from the launch -// parameter if specified. For more information, see Order of precedence for -// instance metadata options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-options.html#instance-metadata-options-order-of-precedence) +// If an account-level setting is cleared with no-preference, then the instance +// launch considers the other instance metadata settings. For more information, +// see Order of precedence for instance metadata options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-options.html#instance-metadata-options-order-of-precedence) // in the Amazon EC2 User Guide. 
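//
// Illustrative usage for clearing an account-level IMDS default back to
// no-preference, per the revised ModifyInstanceMetadataDefaults wording
// above. A minimal sketch, assuming an *ec2.EC2 client named svc; the
// HttpTokens field name follows the generated input type for this
// operation:
//
//	_, err := svc.ModifyInstanceMetadataDefaults(
//		&ec2.ModifyInstanceMetadataDefaultsInput{
//			HttpTokens: aws.String("no-preference"), // fall back to AMI/launch-parameter settings
//		})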
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -62333,12 +62602,12 @@ type AssociatedRole struct { // The name of the Amazon S3 bucket in which the Amazon S3 object is stored. CertificateS3BucketName *string `locationName:"certificateS3BucketName" type:"string"` - // The key of the Amazon S3 object ey where the certificate, certificate chain, - // and encrypted private key bundle is stored. The object key is formated as - // follows: role_arn/certificate_arn. + // The key of the Amazon S3 object where the certificate, certificate chain, + // and encrypted private key bundle are stored. The object key is formatted + // as follows: role_arn/certificate_arn. CertificateS3ObjectKey *string `locationName:"certificateS3ObjectKey" type:"string"` - // The ID of the KMS customer master key (CMK) used to encrypt the private key. + // The ID of the KMS key used to encrypt the private key. EncryptionKmsKeyId *string `locationName:"encryptionKmsKeyId" type:"string"` } @@ -65615,6 +65884,8 @@ type CancelSpotFleetRequestsInput struct { // The IDs of the Spot Fleet requests. // + // Constraint: You can specify up to 100 IDs in a single request. + // // SpotFleetRequestIds is a required field SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list" required:"true"` @@ -71954,11 +72225,11 @@ type CreateFleetError struct { _ struct{} `type:"structure"` // The error code that indicates why the instance could not be launched. For - // more information about error codes, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html). + // more information about error codes, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html). ErrorCode *string `locationName:"errorCode" type:"string"` // The error message that describes why the instance could not be launched. - // For more information about error messages, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html). + // For more information about error messages, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html). ErrorMessage *string `locationName:"errorMessage" type:"string"` // The launch templates and overrides that were used for launching the instances. @@ -74389,14 +74660,14 @@ type CreateLaunchTemplateVersionInput struct { // The ID of the launch template. // - // You must specify either the LaunchTemplateId or the LaunchTemplateName, but - // not both. + // You must specify either the launch template ID or the launch template name, + // but not both. LaunchTemplateId *string `type:"string"` // The name of the launch template. // - // You must specify the LaunchTemplateName or the LaunchTemplateId, but not - // both. + // You must specify either the launch template ID or the launch template name, + // but not both. LaunchTemplateName *string `min:"3" type:"string"` // If true, and if a Systems Manager parameter is specified for ImageId, the @@ -74407,11 +74678,17 @@ type CreateLaunchTemplateVersionInput struct { // Default: false ResolveAlias *bool `type:"boolean"` - // The version number of the launch template version on which to base the new - // version. The new version inherits the same launch parameters as the source - // version, except for parameters that you specify in LaunchTemplateData. Snapshots + // The version of the launch template on which to base the new version. 
Snapshots // applied to the block device mapping are ignored when creating a new version // unless they are explicitly included. + // + // If you specify this parameter, the new version inherits the launch parameters + // from the source version. If you specify additional launch parameters for + // the new version, they overwrite any corresponding launch parameters inherited + // from the source version. + // + // If you omit this parameter, the new version contains only the launch parameters + // that you specify for the new version. SourceVersion *string `type:"string"` // A description for the version of the launch template. @@ -84237,6 +84514,9 @@ type DeleteFleetsInput struct { // The IDs of the EC2 Fleets. // + // Constraints: In a single request, you can specify up to 25 instant fleet + // IDs and up to 100 maintain or request fleet IDs. + // // FleetIds is a required field FleetIds []*string `locationName:"FleetId" type:"list" required:"true"` @@ -85291,14 +85571,14 @@ type DeleteLaunchTemplateInput struct { // The ID of the launch template. // - // You must specify either the LaunchTemplateId or the LaunchTemplateName, but - // not both. + // You must specify either the launch template ID or the launch template name, + // but not both. LaunchTemplateId *string `type:"string"` // The name of the launch template. // - // You must specify either the LaunchTemplateName or the LaunchTemplateId, but - // not both. + // You must specify either the launch template ID or the launch template name, + // but not both. LaunchTemplateName *string `min:"3" type:"string"` } @@ -85393,14 +85673,14 @@ type DeleteLaunchTemplateVersionsInput struct { // The ID of the launch template. // - // You must specify either the LaunchTemplateId or the LaunchTemplateName, but - // not both. + // You must specify either the launch template ID or the launch template name, + // but not both. LaunchTemplateId *string `type:"string"` // The name of the launch template. // - // You must specify either the LaunchTemplateName or the LaunchTemplateId, but - // not both. + // You must specify either the launch template ID or the launch template name, + // but not both. LaunchTemplateName *string `min:"3" type:"string"` // The version numbers of one or more launch template versions to delete. You @@ -96821,6 +97101,9 @@ type DescribeImageAttributeOutput struct { // The boot mode. BootMode *AttributeValue `locationName:"bootMode" type:"structure"` + // Indicates whether deregistration protection is enabled for the AMI. + DeregistrationProtection *AttributeValue `locationName:"deregistrationProtection" type:"structure"` + // A description for the AMI. Description *AttributeValue `locationName:"description" type:"structure"` @@ -96900,6 +97183,12 @@ func (s *DescribeImageAttributeOutput) SetBootMode(v *AttributeValue) *DescribeI return s } +// SetDeregistrationProtection sets the DeregistrationProtection field's value. +func (s *DescribeImageAttributeOutput) SetDeregistrationProtection(v *AttributeValue) *DescribeImageAttributeOutput { + s.DeregistrationProtection = v + return s +} + // SetDescription sets the Description field's value. func (s *DescribeImageAttributeOutput) SetDescription(v *AttributeValue) *DescribeImageAttributeOutput { s.Description = v @@ -98574,14 +98863,26 @@ type DescribeInstanceTypeOfferingsInput struct { // One or more filters. Filter names and values are case-sensitive. // - // * location - This depends on the location type. 
For example, if the location - // type is region (default), the location is the Region code (for example, - // us-east-2.) + // * instance-type - The instance type. For a list of possible values, see + // Instance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Instance.html). // - // * instance-type - The instance type. For example, c5.2xlarge. + // * location - The location. For a list of possible identifiers, see Regions + // and Zones (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The location type. + // + // * availability-zone - The Availability Zone. When you specify a location + // filter, it must be an Availability Zone for the current Region. + // + // * availability-zone-id - The AZ ID. When you specify a location filter, + // it must be an AZ ID for the current Region. + // + // * outpost - The Outpost ARN. When you specify a location filter, it must + // be an Outpost ARN for the current Region. + // + // * region - The current Region. If you specify a location filter, it must + // match the current Region. LocationType *string `type:"string" enum:"LocationType"` // The maximum number of items to return for this request. To get the next page @@ -98658,7 +98959,7 @@ func (s *DescribeInstanceTypeOfferingsInput) SetNextToken(v string) *DescribeIns type DescribeInstanceTypeOfferingsOutput struct { _ struct{} `type:"structure"` - // The instance types offered. + // The instance types offered in the location. InstanceTypeOfferings []*InstanceTypeOffering `locationName:"instanceTypeOfferingSet" locationNameList:"item" type:"list"` // The token to include in another request to get the next page of items. This @@ -98850,8 +99151,7 @@ type DescribeInstanceTypesInput struct { // can be configured for the instance type. For example, "1" or "1,2". Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` - // The instance types. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) - // in the Amazon EC2 User Guide. + // The instance types. InstanceTypes []*string `locationName:"InstanceType" type:"list" enum:"InstanceType"` // The maximum number of items to return for this request. To get the next page @@ -98928,8 +99228,7 @@ func (s *DescribeInstanceTypesInput) SetNextToken(v string) *DescribeInstanceTyp type DescribeInstanceTypesOutput struct { _ struct{} `type:"structure"` - // The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) - // in the Amazon EC2 User Guide. + // The instance type. InstanceTypes []*InstanceTypeInfo `locationName:"instanceTypeSet" locationNameList:"item" type:"list"` // The token to include in another request to get the next page of items. This @@ -100661,7 +100960,8 @@ type DescribeLaunchTemplateVersionsInput struct { // The ID of the launch template. // // To describe one or more versions of a specified launch template, you must - // specify either the LaunchTemplateId or the LaunchTemplateName, but not both. + // specify either the launch template ID or the launch template name, but not + // both. // // To describe all the latest or default launch template versions in your account, // you must omit this parameter. @@ -100670,7 +100970,8 @@ type DescribeLaunchTemplateVersionsInput struct { // The name of the launch template. 
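//
// A sketch of describing the latest version by name (svc is an assumed
// *ec2.EC2 client; the template name is illustrative):
//
//	out, err := svc.DescribeLaunchTemplateVersions(&ec2.DescribeLaunchTemplateVersionsInput{
//	    LaunchTemplateName: aws.String("my-template"),
//	    Versions:           []*string{aws.String("$Latest")},
//	})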
// // To describe one or more versions of a specified launch template, you must - // specify either the LaunchTemplateName or the LaunchTemplateId, but not both. + // specify either the launch template name or the launch template ID, but not + // both. // // To describe all the latest or default launch template versions in your account, // you must omit this parameter. @@ -103334,6 +103635,11 @@ func (s *DescribeNetworkInterfaceAttributeInput) SetNetworkInterfaceId(v string) type DescribeNetworkInterfaceAttributeOutput struct { _ struct{} `type:"structure"` + // Indicates whether to assign a public IPv4 address to a network interface. + // This option can be enabled for any network interface but will only apply + // to the primary network interface (eth0). + AssociatePublicIpAddress *bool `locationName:"associatePublicIpAddress" type:"boolean"` + // The attachment (if any) of the network interface. Attachment *NetworkInterfaceAttachment `locationName:"attachment" type:"structure"` @@ -103368,6 +103674,12 @@ func (s DescribeNetworkInterfaceAttributeOutput) GoString() string { return s.String() } +// SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value. +func (s *DescribeNetworkInterfaceAttributeOutput) SetAssociatePublicIpAddress(v bool) *DescribeNetworkInterfaceAttributeOutput { + s.AssociatePublicIpAddress = &v + return s +} + // SetAttachment sets the Attachment field's value. func (s *DescribeNetworkInterfaceAttributeOutput) SetAttachment(v *NetworkInterfaceAttachment) *DescribeNetworkInterfaceAttributeOutput { s.Attachment = v @@ -103729,7 +104041,7 @@ func (s *DescribeNetworkInterfacesInput) SetNextToken(v string) *DescribeNetwork type DescribeNetworkInterfacesOutput struct { _ struct{} `type:"structure"` - // Information about one or more network interfaces. + // Information about the network interfaces. NetworkInterfaces []*NetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` // The token to include in another request to get the next page of items. This @@ -107826,13 +108138,8 @@ type DescribeTagsInput struct { // // * resource-id - The ID of the resource. // - // * resource-type - The resource type (customer-gateway | dedicated-host - // | dhcp-options | elastic-ip | fleet | fpga-image | host-reservation | - // image | instance | internet-gateway | key-pair | launch-template | natgateway - // | network-acl | network-interface | placement-group | reserved-instances - // | route-table | security-group | snapshot | spot-instances-request | subnet - // | volume | vpc | vpc-endpoint | vpc-endpoint-service | vpc-peering-connection - // | vpn-connection | vpn-gateway). + // * resource-type - The resource type. For a list of possible values, see + // TagSpecification (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TagSpecification.html). // // * tag: - The key/value combination of the tag. For example, specify // "tag:Owner" for the filter name and "TeamA" for the filter value to find @@ -109610,6 +109917,12 @@ type DescribeTransitGatewaysInput struct { // | modifying | pending). // // * transit-gateway-id - The ID of the transit gateway. + // + // * tag-key - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. 
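//
// For example, the tag filter described above might be expressed as this
// sketch (svc is an assumed *ec2.EC2 client):
//
//	out, err := svc.DescribeTransitGateways(&ec2.DescribeTransitGatewaysInput{
//	    Filters: []*ec2.Filter{{
//	        Name:   aws.String("tag:Owner"),
//	        Values: []*string{aws.String("TeamA")},
//	    }},
//	})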
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return with a single call. To retrieve the @@ -114674,6 +114987,95 @@ func (s *DisableImageDeprecationOutput) SetReturn(v bool) *DisableImageDeprecati return s } +type DisableImageDeregistrationProtectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AMI. + // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableImageDeregistrationProtectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableImageDeregistrationProtectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableImageDeregistrationProtectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableImageDeregistrationProtectionInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DisableImageDeregistrationProtectionInput) SetDryRun(v bool) *DisableImageDeregistrationProtectionInput { + s.DryRun = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *DisableImageDeregistrationProtectionInput) SetImageId(v string) *DisableImageDeregistrationProtectionInput { + s.ImageId = &v + return s +} + +type DisableImageDeregistrationProtectionOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *string `locationName:"return" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableImageDeregistrationProtectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableImageDeregistrationProtectionOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. 
+func (s *DisableImageDeregistrationProtectionOutput) SetReturn(v string) *DisableImageDeregistrationProtectionOutput { + s.Return = &v + return s +} + type DisableImageInput struct { _ struct{} `type:"structure"` @@ -119646,6 +120048,105 @@ func (s *EnableImageDeprecationOutput) SetReturn(v bool) *EnableImageDeprecation return s } +type EnableImageDeregistrationProtectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AMI. + // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` + + // If true, enforces deregistration protection for 24 hours after deregistration + // protection is disabled. + WithCooldown *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableImageDeregistrationProtectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableImageDeregistrationProtectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableImageDeregistrationProtectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableImageDeregistrationProtectionInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *EnableImageDeregistrationProtectionInput) SetDryRun(v bool) *EnableImageDeregistrationProtectionInput { + s.DryRun = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *EnableImageDeregistrationProtectionInput) SetImageId(v string) *EnableImageDeregistrationProtectionInput { + s.ImageId = &v + return s +} + +// SetWithCooldown sets the WithCooldown field's value. +func (s *EnableImageDeregistrationProtectionInput) SetWithCooldown(v bool) *EnableImageDeregistrationProtectionInput { + s.WithCooldown = &v + return s +} + +type EnableImageDeregistrationProtectionOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *string `locationName:"return" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableImageDeregistrationProtectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableImageDeregistrationProtectionOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *EnableImageDeregistrationProtectionOutput) SetReturn(v string) *EnableImageDeregistrationProtectionOutput { + s.Return = &v + return s +} + type EnableImageInput struct { _ struct{} `type:"structure"` @@ -126190,6 +126691,155 @@ func (s *GetInstanceMetadataDefaultsOutput) SetAccountLevel(v *InstanceMetadataD return s } +type GetInstanceTpmEkPubInput struct { + _ struct{} `type:"structure"` + + // Specify this parameter to verify whether the request will succeed, without + // actually making the request. If the request will succeed, the response is + // DryRunOperation. Otherwise, the response is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the instance for which to get the public endorsement key. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // The required public endorsement key format. Specify der for a DER-encoded + // public key that is compatible with OpenSSL. Specify tpmt for a TPM 2.0 format + // that is compatible with tpm2-tools. The returned key is base64 encoded. + // + // KeyFormat is a required field + KeyFormat *string `type:"string" required:"true" enum:"EkPubKeyFormat"` + + // The required public endorsement key type. + // + // KeyType is a required field + KeyType *string `type:"string" required:"true" enum:"EkPubKeyType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetInstanceTpmEkPubInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetInstanceTpmEkPubInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetInstanceTpmEkPubInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetInstanceTpmEkPubInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.KeyFormat == nil { + invalidParams.Add(request.NewErrParamRequired("KeyFormat")) + } + if s.KeyType == nil { + invalidParams.Add(request.NewErrParamRequired("KeyType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetInstanceTpmEkPubInput) SetDryRun(v bool) *GetInstanceTpmEkPubInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GetInstanceTpmEkPubInput) SetInstanceId(v string) *GetInstanceTpmEkPubInput { + s.InstanceId = &v + return s +} + +// SetKeyFormat sets the KeyFormat field's value. +func (s *GetInstanceTpmEkPubInput) SetKeyFormat(v string) *GetInstanceTpmEkPubInput { + s.KeyFormat = &v + return s +} + +// SetKeyType sets the KeyType field's value. 
+func (s *GetInstanceTpmEkPubInput) SetKeyType(v string) *GetInstanceTpmEkPubInput { + s.KeyType = &v + return s +} + +type GetInstanceTpmEkPubOutput struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The public endorsement key format. + KeyFormat *string `locationName:"keyFormat" type:"string" enum:"EkPubKeyFormat"` + + // The public endorsement key type. + KeyType *string `locationName:"keyType" type:"string" enum:"EkPubKeyType"` + + // The public endorsement key material. + // + // KeyValue is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetInstanceTpmEkPubOutput's + // String and GoString methods. + KeyValue *string `locationName:"keyValue" type:"string" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetInstanceTpmEkPubOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetInstanceTpmEkPubOutput) GoString() string { + return s.String() +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GetInstanceTpmEkPubOutput) SetInstanceId(v string) *GetInstanceTpmEkPubOutput { + s.InstanceId = &v + return s +} + +// SetKeyFormat sets the KeyFormat field's value. +func (s *GetInstanceTpmEkPubOutput) SetKeyFormat(v string) *GetInstanceTpmEkPubOutput { + s.KeyFormat = &v + return s +} + +// SetKeyType sets the KeyType field's value. +func (s *GetInstanceTpmEkPubOutput) SetKeyType(v string) *GetInstanceTpmEkPubOutput { + s.KeyType = &v + return s +} + +// SetKeyValue sets the KeyValue field's value. +func (s *GetInstanceTpmEkPubOutput) SetKeyValue(v string) *GetInstanceTpmEkPubOutput { + s.KeyValue = &v + return s +} + type GetInstanceTypesFromInstanceRequirementsInput struct { _ struct{} `type:"structure"` @@ -131795,6 +132445,9 @@ type Image struct { // the seconds to the nearest minute. DeprecationTime *string `locationName:"deprecationTime" type:"string"` + // Indicates whether deregistration protection is enabled for the AMI. + DeregistrationProtection *string `locationName:"deregistrationProtection" type:"string"` + // The description of the AMI that was provided during image creation. Description *string `locationName:"description" type:"string"` @@ -131828,6 +132481,13 @@ type Image struct { // images. KernelId *string `locationName:"kernelId" type:"string"` + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the AMI was last used to launch an EC2 instance. When the AMI is used + // to launch an instance, there is a 24-hour delay before that usage is reported. + // + // lastLaunchedTime data is available starting April 2017. + LastLaunchedTime *string `locationName:"lastLaunchedTime" type:"string"` + // The name of the AMI that was provided during image creation. Name *string `locationName:"name" type:"string"` @@ -131947,6 +132607,12 @@ func (s *Image) SetDeprecationTime(v string) *Image { return s } +// SetDeregistrationProtection sets the DeregistrationProtection field's value. 
+func (s *Image) SetDeregistrationProtection(v string) *Image { + s.DeregistrationProtection = &v + return s +} + // SetDescription sets the Description field's value. func (s *Image) SetDescription(v string) *Image { s.Description = &v @@ -132001,6 +132667,12 @@ func (s *Image) SetKernelId(v string) *Image { return s } +// SetLastLaunchedTime sets the LastLaunchedTime field's value. +func (s *Image) SetLastLaunchedTime(v string) *Image { + s.LastLaunchedTime = &v + return s +} + // SetName sets the Name field's value. func (s *Image) SetName(v string) *Image { s.Name = &v @@ -138635,6 +139307,10 @@ type InstanceTypeInfo struct { // Indicates whether NitroTPM is supported. NitroTpmSupport *string `locationName:"nitroTpmSupport" type:"string" enum:"NitroTpmSupport"` + // Indicates whether a local Precision Time Protocol (PTP) hardware clock (PHC) + // is supported. + PhcSupport *string `locationName:"phcSupport" type:"string" enum:"PhcSupport"` + // Describes the placement group settings for the instance type. PlacementGroupInfo *PlacementGroupInfo `locationName:"placementGroupInfo" type:"structure"` @@ -138808,6 +139484,12 @@ func (s *InstanceTypeInfo) SetNitroTpmSupport(v string) *InstanceTypeInfo { return s } +// SetPhcSupport sets the PhcSupport field's value. +func (s *InstanceTypeInfo) SetPhcSupport(v string) *InstanceTypeInfo { + s.PhcSupport = &v + return s +} + // SetPlacementGroupInfo sets the PlacementGroupInfo field's value. func (s *InstanceTypeInfo) SetPlacementGroupInfo(v *PlacementGroupInfo) *InstanceTypeInfo { s.PlacementGroupInfo = v @@ -144097,7 +144779,11 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { // A description for the network interface. Description *string `type:"string"` - // The device index for the network interface attachment. + // The device index for the network interface attachment. Each network interface + // requires a device index. If you create a launch template that includes secondary + // network interfaces but not a primary network interface, then you must add + // a primary network interface as a launch parameter when you launch an instance + // from the template. DeviceIndex *int64 `type:"integer"` // Configure ENA Express settings for your launch template. @@ -144823,30 +145509,27 @@ func (s *LaunchTemplatePrivateDnsNameOptionsRequest) SetHostnameType(v string) * return s } -// The launch template to use. You must specify either the launch template ID -// or launch template name in the request, but not both. +// Describes the launch template to use. type LaunchTemplateSpecification struct { _ struct{} `type:"structure"` // The ID of the launch template. // - // You must specify the LaunchTemplateId or the LaunchTemplateName, but not - // both. + // You must specify either the launch template ID or the launch template name, + // but not both. LaunchTemplateId *string `type:"string"` // The name of the launch template. // - // You must specify the LaunchTemplateName or the LaunchTemplateId, but not - // both. + // You must specify either the launch template ID or the launch template name, + // but not both. LaunchTemplateName *string `type:"string"` // The launch template version number, $Latest, or $Default. // - // If the value is $Latest, Amazon EC2 uses the latest version of the launch - // template. + // A value of $Latest uses the latest version of the launch template. // - // If the value is $Default, Amazon EC2 uses the default version of the launch - // template. 
+ // A value of $Default uses the default version of the launch template. // // Default: The default version of the launch template. Version *string `type:"string"` @@ -150096,11 +150779,10 @@ type ModifyInstanceMetadataDefaultsInput struct { // instance metadata can't be accessed. HttpEndpoint *string `type:"string" enum:"DefaultInstanceMetadataEndpointState"` - // The maximum number of hops that the metadata token can travel. - // - // Minimum: 1 + // The maximum number of hops that the metadata token can travel. To indicate + // no preference, specify -1. // - // Maximum: 64 + // Possible values: Integers from 1 to 64, and -1 to indicate no preference HttpPutResponseHopLimit *int64 `type:"integer"` // Indicates whether IMDSv2 is required. @@ -151241,14 +151923,14 @@ type ModifyLaunchTemplateInput struct { // The ID of the launch template. // - // You must specify either the LaunchTemplateId or the LaunchTemplateName, but - // not both. + // You must specify either the launch template ID or the launch template name, + // but not both. LaunchTemplateId *string `type:"string"` // The name of the launch template. // - // You must specify either the LaunchTemplateName or the LaunchTemplateId, but - // not both. + // You must specify either the launch template ID or the launch template name, + // but not both. LaunchTemplateName *string `min:"3" type:"string"` } @@ -151636,6 +152318,11 @@ func (s *ModifyManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *Mod type ModifyNetworkInterfaceAttributeInput struct { _ struct{} `type:"structure"` + // Indicates whether to assign a public IPv4 address to a network interface. + // This option can be enabled for any network interface but will only apply + // to the primary network interface (eth0). + AssociatePublicIpAddress *bool `type:"boolean"` + // Information about the interface attachment. If modifying the delete on termination // attribute, you must specify the ID of the interface attachment. Attachment *NetworkInterfaceAttachmentChanges `locationName:"attachment" type:"structure"` @@ -151722,6 +152409,12 @@ func (s *ModifyNetworkInterfaceAttributeInput) Validate() error { return nil } +// SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value. +func (s *ModifyNetworkInterfaceAttributeInput) SetAssociatePublicIpAddress(v bool) *ModifyNetworkInterfaceAttributeInput { + s.AssociatePublicIpAddress = &v + return s +} + // SetAttachment sets the Attachment field's value. func (s *ModifyNetworkInterfaceAttributeInput) SetAttachment(v *NetworkInterfaceAttachmentChanges) *ModifyNetworkInterfaceAttributeInput { s.Attachment = v @@ -159717,11 +160410,14 @@ func (s *NeuronInfo) SetTotalNeuronDeviceMemoryInMiB(v int64) *NeuronInfo { return s } +// Describes a DHCP configuration option. type NewDhcpConfiguration struct { _ struct{} `type:"structure"` - Key *string `locationName:"key" type:"string"` + // The name of a DHCP option. + Key *string `type:"string"` + // The values for the DHCP option. Values []*string `locationName:"Value" locationNameList:"item" type:"list"` } @@ -167247,8 +167943,7 @@ type RequestLaunchTemplateData struct { // The monitoring for the instance. Monitoring *LaunchTemplatesMonitoringRequest `type:"structure"` - // One or more network interfaces. If you specify a network interface, you must - // specify any security groups and subnets as part of the network interface. + // The network interfaces for the instance. 
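//
// A sketch of a primary network interface specification, assuming the
// optional AssociatePublicIpAddress and SubnetId fields (the subnet ID is
// illustrative):
//
//	NetworkInterfaces: []*ec2.LaunchTemplateInstanceNetworkInterfaceSpecificationRequest{{
//	    DeviceIndex:              aws.Int64(0),
//	    AssociatePublicIpAddress: aws.Bool(true),
//	    SubnetId:                 aws.String("subnet-0123456789abcdef0"),
//	}},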
NetworkInterfaces []*LaunchTemplateInstanceNetworkInterfaceSpecificationRequest `locationName:"NetworkInterface" locationNameList:"InstanceNetworkInterfaceSpecification" type:"list"` // The placement for the instance. @@ -167265,12 +167960,17 @@ type RequestLaunchTemplateData struct { // in the Amazon Elastic Compute Cloud User Guide. RamDiskId *string `type:"string"` - // One or more security group IDs. You can create a security group using CreateSecurityGroup - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html). + // The IDs of the security groups. + // + // If you specify a network interface, you must specify any security groups + // as part of the network interface instead of using this parameter. SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` - // One or more security group names. For a nondefault VPC, you must use security + // The names of the security groups. For a nondefault VPC, you must use security // group IDs instead. + // + // If you specify a network interface, you must specify any security groups + // as part of the network interface instead of using this parameter. SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"` // The tags to apply to the resources that are created during instance launch. @@ -171917,26 +172617,15 @@ type RunInstancesInput struct { // Default: false EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` - // Deprecated. + // An elastic GPU to associate with the instance. // - // Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads - // that require graphics acceleration, we recommend that you use Amazon EC2 - // G4ad, G4dn, or G5 instances. + // Amazon Elastic Graphics reached end of life on January 8, 2024. ElasticGpuSpecification []*ElasticGpuSpecification `locationNameList:"item" type:"list"` - // An elastic inference accelerator to associate with the instance. Elastic - // inference accelerators are a resource you can attach to your Amazon EC2 instances - // to accelerate your Deep Learning (DL) inference workloads. + // An elastic inference accelerator to associate with the instance. // - // You cannot specify accelerators from different generations in the same request. - // - // Starting April 15, 2023, Amazon Web Services will not onboard new customers - // to Amazon Elastic Inference (EI), and will help current customers migrate - // their workloads to options that offer better price and performance. After - // April 15, 2023, new customers will not be able to launch instances with Amazon - // EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, - // customers who have used Amazon EI at least once during the past 30-day period - // are considered current customers and will be able to continue using the service. + // Amazon Elastic Inference (EI) is no longer available to new customers. For + // more information, see Amazon Elastic Inference FAQs (http://aws.amazon.com/machine-learning/elastic-inference/faqs/). ElasticInferenceAccelerators []*ElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"` // If you’re launching an instance into a dual-stack or IPv6-only subnet, @@ -172027,9 +172716,8 @@ type RunInstancesInput struct { // you choose an AMI that is configured to allow users another way to log in. KeyName *string `type:"string"` - // The launch template to use to launch the instances. 
Any parameters that you - // specify in RunInstances override the same parameters in the launch template. - // You can specify either the name or ID of a launch template, but not both. + // The launch template. Any additional parameters that you specify for the new + // instance overwrite the corresponding parameters included in the launch template. LaunchTemplate *LaunchTemplateSpecification `type:"structure"` // The license configurations. @@ -172069,9 +172757,7 @@ type RunInstancesInput struct { // Specifies whether detailed monitoring is enabled for the instance. Monitoring *RunInstancesMonitoringEnabled `type:"structure"` - // The network interfaces to associate with the instance. If you specify a network - // interface, you must specify any security groups and subnets as part of the - // network interface. + // The network interfaces to associate with the instance. NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterface" locationNameList:"item" type:"list"` // The placement for the instance. @@ -172108,13 +172794,13 @@ type RunInstancesInput struct { // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html). // // If you specify a network interface, you must specify any security groups - // as part of the network interface. + // as part of the network interface instead of using this parameter. SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` // [Default VPC] The names of the security groups. // // If you specify a network interface, you must specify any security groups - // as part of the network interface. + // as part of the network interface instead of using this parameter. // // Default: Amazon EC2 uses the default security group. SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"` @@ -172122,7 +172808,7 @@ type RunInstancesInput struct { // The ID of the subnet to launch the instance into. // // If you specify a network interface, you must specify any subnets as part - // of the network interface. + // of the network interface instead of using this parameter. SubnetId *string `type:"string"` // The tags to apply to the resources that are created during instance launch. @@ -176547,11 +177233,11 @@ type SpotFleetLaunchSpecification struct { // Enable or disable monitoring for the instances. Monitoring *SpotFleetMonitoring `locationName:"monitoring" type:"structure"` - // One or more network interfaces. If you specify a network interface, you must - // specify subnet IDs and security group IDs using the network interface. + // The network interfaces. // - // SpotFleetLaunchSpecification currently does not support Elastic Fabric Adapter - // (EFA). To specify an EFA, you must use LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html). + // SpotFleetLaunchSpecification does not support Elastic Fabric Adapter (EFA). + // You must use LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html) + // instead. NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` // The placement information. @@ -176564,6 +177250,9 @@ type SpotFleetLaunchSpecification struct { RamdiskId *string `locationName:"ramdiskId" type:"string"` // The security groups. 
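//
// For example (a sketch; the group ID is illustrative):
//
//	SecurityGroups: []*ec2.GroupIdentifier{{GroupId: aws.String("sg-0123456789abcdef0")}},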
+ // + // If you specify a network interface, you must specify any security groups + // as part of the network interface instead of using this parameter. SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` // The maximum price per unit hour that you are willing to pay for a Spot Instance. @@ -176578,6 +177267,9 @@ type SpotFleetLaunchSpecification struct { // The IDs of the subnets in which to launch the instances. To specify multiple // subnets, separate them using commas; for example, "subnet-1234abcdeexample1, // subnet-0987cdef6example2". + // + // If you specify a network interface, you must specify any subnets as part + // of the network interface instead of using this parameter. SubnetId *string `locationName:"subnetId" type:"string"` // The tags to apply during creation. @@ -187617,6 +188309,9 @@ func (s *VgwTelemetry) SetStatusMessage(v string) *VgwTelemetry { type Volume struct { _ struct{} `type:"structure"` + // + // This parameter is not returned by CreateVolume. + // // Information about the volume attachments. Attachments []*VolumeAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"` @@ -187629,6 +188324,9 @@ type Volume struct { // Indicates whether the volume is encrypted. Encrypted *bool `locationName:"encrypted" type:"boolean"` + // + // This parameter is not returned by CreateVolume. + // // Indicates whether the volume was created using fast snapshot restore. FastRestored *bool `locationName:"fastRestored" type:"boolean"` @@ -187654,6 +188352,9 @@ type Volume struct { // The snapshot from which the volume was created, if applicable. SnapshotId *string `locationName:"snapshotId" type:"string"` + // + // This parameter is not returned by CreateVolume. + // // Reserved for future use. 
SseType *string `locationName:"sseType" type:"string" enum:"SSEType"` @@ -192283,6 +192984,38 @@ func Ec2InstanceConnectEndpointState_Values() []string { } } +const ( + // EkPubKeyFormatDer is a EkPubKeyFormat enum value + EkPubKeyFormatDer = "der" + + // EkPubKeyFormatTpmt is a EkPubKeyFormat enum value + EkPubKeyFormatTpmt = "tpmt" +) + +// EkPubKeyFormat_Values returns all elements of the EkPubKeyFormat enum +func EkPubKeyFormat_Values() []string { + return []string{ + EkPubKeyFormatDer, + EkPubKeyFormatTpmt, + } +} + +const ( + // EkPubKeyTypeRsa2048 is a EkPubKeyType enum value + EkPubKeyTypeRsa2048 = "rsa-2048" + + // EkPubKeyTypeEccSecP384 is a EkPubKeyType enum value + EkPubKeyTypeEccSecP384 = "ecc-sec-p384" +) + +// EkPubKeyType_Values returns all elements of the EkPubKeyType enum +func EkPubKeyType_Values() []string { + return []string{ + EkPubKeyTypeRsa2048, + EkPubKeyTypeEccSecP384, + } +} + const ( // ElasticGpuStateAttached is a ElasticGpuState enum value ElasticGpuStateAttached = "ATTACHED" @@ -193039,6 +193772,9 @@ const ( // ImageAttributeNameImdsSupport is a ImageAttributeName enum value ImageAttributeNameImdsSupport = "imdsSupport" + + // ImageAttributeNameDeregistrationProtection is a ImageAttributeName enum value + ImageAttributeNameDeregistrationProtection = "deregistrationProtection" ) // ImageAttributeName_Values returns all elements of the ImageAttributeName enum @@ -193056,6 +193792,7 @@ func ImageAttributeName_Values() []string { ImageAttributeNameUefiData, ImageAttributeNameLastLaunchedTime, ImageAttributeNameImdsSupport, + ImageAttributeNameDeregistrationProtection, } } @@ -195848,6 +196585,45 @@ const ( // InstanceTypeR7izMetal32xl is a InstanceType enum value InstanceTypeR7izMetal32xl = "r7iz.metal-32xl" + + // InstanceTypeC7gdMetal is a InstanceType enum value + InstanceTypeC7gdMetal = "c7gd.metal" + + // InstanceTypeM7gdMetal is a InstanceType enum value + InstanceTypeM7gdMetal = "m7gd.metal" + + // InstanceTypeR7gdMetal is a InstanceType enum value + InstanceTypeR7gdMetal = "r7gd.metal" + + // InstanceTypeG6Xlarge is a InstanceType enum value + InstanceTypeG6Xlarge = "g6.xlarge" + + // InstanceTypeG62xlarge is a InstanceType enum value + InstanceTypeG62xlarge = "g6.2xlarge" + + // InstanceTypeG64xlarge is a InstanceType enum value + InstanceTypeG64xlarge = "g6.4xlarge" + + // InstanceTypeG68xlarge is a InstanceType enum value + InstanceTypeG68xlarge = "g6.8xlarge" + + // InstanceTypeG612xlarge is a InstanceType enum value + InstanceTypeG612xlarge = "g6.12xlarge" + + // InstanceTypeG616xlarge is a InstanceType enum value + InstanceTypeG616xlarge = "g6.16xlarge" + + // InstanceTypeG624xlarge is a InstanceType enum value + InstanceTypeG624xlarge = "g6.24xlarge" + + // InstanceTypeG648xlarge is a InstanceType enum value + InstanceTypeG648xlarge = "g6.48xlarge" + + // InstanceTypeGr64xlarge is a InstanceType enum value + InstanceTypeGr64xlarge = "gr6.4xlarge" + + // InstanceTypeGr68xlarge is a InstanceType enum value + InstanceTypeGr68xlarge = "gr6.8xlarge" ) // InstanceType_Values returns all elements of the InstanceType enum @@ -196636,6 +197412,19 @@ func InstanceType_Values() []string { InstanceTypeR7iMetal48xl, InstanceTypeR7izMetal16xl, InstanceTypeR7izMetal32xl, + InstanceTypeC7gdMetal, + InstanceTypeM7gdMetal, + InstanceTypeR7gdMetal, + InstanceTypeG6Xlarge, + InstanceTypeG62xlarge, + InstanceTypeG64xlarge, + InstanceTypeG68xlarge, + InstanceTypeG612xlarge, + InstanceTypeG616xlarge, + InstanceTypeG624xlarge, + InstanceTypeG648xlarge, + 
InstanceTypeGr64xlarge, + InstanceTypeGr68xlarge, } } @@ -197991,6 +198780,9 @@ const ( // NetworkInterfaceAttributeAttachment is a NetworkInterfaceAttribute enum value NetworkInterfaceAttributeAttachment = "attachment" + + // NetworkInterfaceAttributeAssociatePublicIpAddress is a NetworkInterfaceAttribute enum value + NetworkInterfaceAttributeAssociatePublicIpAddress = "associatePublicIpAddress" ) // NetworkInterfaceAttribute_Values returns all elements of the NetworkInterfaceAttribute enum @@ -198000,6 +198792,7 @@ func NetworkInterfaceAttribute_Values() []string { NetworkInterfaceAttributeGroupSet, NetworkInterfaceAttributeSourceDestCheck, NetworkInterfaceAttributeAttachment, + NetworkInterfaceAttributeAssociatePublicIpAddress, } } @@ -198363,6 +199156,22 @@ func PermissionGroup_Values() []string { } } +const ( + // PhcSupportUnsupported is a PhcSupport enum value + PhcSupportUnsupported = "unsupported" + + // PhcSupportSupported is a PhcSupport enum value + PhcSupportSupported = "supported" +) + +// PhcSupport_Values returns all elements of the PhcSupport enum +func PhcSupport_Values() []string { + return []string{ + PhcSupportUnsupported, + PhcSupportSupported, + } +} + const ( // PlacementGroupStatePending is a PlacementGroupState enum value PlacementGroupStatePending = "pending" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go index 4be7c4786..5a1c44764 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go @@ -1934,6 +1934,10 @@ type EC2API interface { DisableImageDeprecationWithContext(aws.Context, *ec2.DisableImageDeprecationInput, ...request.Option) (*ec2.DisableImageDeprecationOutput, error) DisableImageDeprecationRequest(*ec2.DisableImageDeprecationInput) (*request.Request, *ec2.DisableImageDeprecationOutput) + DisableImageDeregistrationProtection(*ec2.DisableImageDeregistrationProtectionInput) (*ec2.DisableImageDeregistrationProtectionOutput, error) + DisableImageDeregistrationProtectionWithContext(aws.Context, *ec2.DisableImageDeregistrationProtectionInput, ...request.Option) (*ec2.DisableImageDeregistrationProtectionOutput, error) + DisableImageDeregistrationProtectionRequest(*ec2.DisableImageDeregistrationProtectionInput) (*request.Request, *ec2.DisableImageDeregistrationProtectionOutput) + DisableIpamOrganizationAdminAccount(*ec2.DisableIpamOrganizationAdminAccountInput) (*ec2.DisableIpamOrganizationAdminAccountOutput, error) DisableIpamOrganizationAdminAccountWithContext(aws.Context, *ec2.DisableIpamOrganizationAdminAccountInput, ...request.Option) (*ec2.DisableIpamOrganizationAdminAccountOutput, error) DisableIpamOrganizationAdminAccountRequest(*ec2.DisableIpamOrganizationAdminAccountInput) (*request.Request, *ec2.DisableIpamOrganizationAdminAccountOutput) @@ -2054,6 +2058,10 @@ type EC2API interface { EnableImageDeprecationWithContext(aws.Context, *ec2.EnableImageDeprecationInput, ...request.Option) (*ec2.EnableImageDeprecationOutput, error) EnableImageDeprecationRequest(*ec2.EnableImageDeprecationInput) (*request.Request, *ec2.EnableImageDeprecationOutput) + EnableImageDeregistrationProtection(*ec2.EnableImageDeregistrationProtectionInput) (*ec2.EnableImageDeregistrationProtectionOutput, error) + EnableImageDeregistrationProtectionWithContext(aws.Context, 
*ec2.EnableImageDeregistrationProtectionInput, ...request.Option) (*ec2.EnableImageDeregistrationProtectionOutput, error) + EnableImageDeregistrationProtectionRequest(*ec2.EnableImageDeregistrationProtectionInput) (*request.Request, *ec2.EnableImageDeregistrationProtectionOutput) + EnableIpamOrganizationAdminAccount(*ec2.EnableIpamOrganizationAdminAccountInput) (*ec2.EnableIpamOrganizationAdminAccountOutput, error) EnableIpamOrganizationAdminAccountWithContext(aws.Context, *ec2.EnableIpamOrganizationAdminAccountInput, ...request.Option) (*ec2.EnableIpamOrganizationAdminAccountOutput, error) EnableIpamOrganizationAdminAccountRequest(*ec2.EnableIpamOrganizationAdminAccountInput) (*request.Request, *ec2.EnableIpamOrganizationAdminAccountOutput) @@ -2175,6 +2183,10 @@ type EC2API interface { GetInstanceMetadataDefaultsWithContext(aws.Context, *ec2.GetInstanceMetadataDefaultsInput, ...request.Option) (*ec2.GetInstanceMetadataDefaultsOutput, error) GetInstanceMetadataDefaultsRequest(*ec2.GetInstanceMetadataDefaultsInput) (*request.Request, *ec2.GetInstanceMetadataDefaultsOutput) + GetInstanceTpmEkPub(*ec2.GetInstanceTpmEkPubInput) (*ec2.GetInstanceTpmEkPubOutput, error) + GetInstanceTpmEkPubWithContext(aws.Context, *ec2.GetInstanceTpmEkPubInput, ...request.Option) (*ec2.GetInstanceTpmEkPubOutput, error) + GetInstanceTpmEkPubRequest(*ec2.GetInstanceTpmEkPubInput) (*request.Request, *ec2.GetInstanceTpmEkPubOutput) + GetInstanceTypesFromInstanceRequirements(*ec2.GetInstanceTypesFromInstanceRequirementsInput) (*ec2.GetInstanceTypesFromInstanceRequirementsOutput, error) GetInstanceTypesFromInstanceRequirementsWithContext(aws.Context, *ec2.GetInstanceTypesFromInstanceRequirementsInput, ...request.Option) (*ec2.GetInstanceTypesFromInstanceRequirementsOutput, error) GetInstanceTypesFromInstanceRequirementsRequest(*ec2.GetInstanceTypesFromInstanceRequirementsInput) (*request.Request, *ec2.GetInstanceTypesFromInstanceRequirementsOutput) diff --git a/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go b/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go index a51a0d417..c4c13e83a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go @@ -18889,7 +18889,7 @@ type Blueprint struct { // This parameter only applies to Lightsail for Research resources. AppCategory *string `locationName:"appCategory" type:"string" enum:"AppCategory"` - // The ID for the virtual private server image (app_wordpress_4_4 or app_lamp_7_0). + // The ID for the virtual private server image (app_wordpress_x_x or app_lamp_x_x). BlueprintId *string `locationName:"blueprintId" type:"string"` // The description of the blueprint. @@ -19452,7 +19452,7 @@ func (s *BucketState) SetMessage(v string) *BucketState { type Bundle struct { _ struct{} `type:"structure"` - // The bundle ID (micro_1_0). + // The bundle ID (micro_x_x). BundleId *string `locationName:"bundleId" type:"string"` // The number of vCPUs included in the bundle (2). @@ -19461,7 +19461,7 @@ type Bundle struct { // The size of the SSD (30). DiskSizeInGb *int64 `locationName:"diskSizeInGb" type:"integer"` - // The Amazon EC2 instance type (t2.micro). + // The instance type (micro). InstanceType *string `locationName:"instanceType" type:"string"` // A Boolean value indicating whether the bundle is active. 
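// A sketch of calling the new GetInstanceTpmEkPub operation through the
// EC2API interface additions earlier in this diff, using the der key format
// and rsa-2048 key type enum values; the instance ID is illustrative and svc
// is any value satisfying ec2iface.EC2API:
//
//	out, err := svc.GetInstanceTpmEkPub(&ec2.GetInstanceTpmEkPubInput{
//	    InstanceId: aws.String("i-0123456789abcdef0"),
//	    KeyFormat:  aws.String(ec2.EkPubKeyFormatDer),
//	    KeyType:    aws.String(ec2.EkPubKeyTypeRsa2048),
//	})
//	if err == nil {
//	    fmt.Println(aws.StringValue(out.KeyFormat)) // KeyValue is sensitive; avoid logging it
//	}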
@@ -23300,6 +23300,12 @@ type CreateDistributionInput struct { // An array of objects that describe the per-path cache behavior for the distribution. CacheBehaviors []*CacheBehaviorPerPath `locationName:"cacheBehaviors" type:"list"` + // The name of the SSL/TLS certificate that you want to attach to the distribution. + // + // Use the GetCertificates (https://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_GetCertificates.html) + // action to get a list of certificate names that you can specify. + CertificateName *string `locationName:"certificateName" type:"string"` + // An object that describes the default cache behavior for the distribution. // // DefaultCacheBehavior is a required field @@ -23329,6 +23335,9 @@ type CreateDistributionInput struct { // // Use the TagResource action to tag a resource after it's created. Tags []*Tag `locationName:"tags" type:"list"` + + // The minimum TLS protocol version for the SSL/TLS certificate. + ViewerMinimumTlsProtocolVersion *string `locationName:"viewerMinimumTlsProtocolVersion" type:"string" enum:"ViewerMinimumTlsProtocolVersionEnum"` } // String returns the string representation. @@ -23389,6 +23398,12 @@ func (s *CreateDistributionInput) SetCacheBehaviors(v []*CacheBehaviorPerPath) * return s } +// SetCertificateName sets the CertificateName field's value. +func (s *CreateDistributionInput) SetCertificateName(v string) *CreateDistributionInput { + s.CertificateName = &v + return s +} + // SetDefaultCacheBehavior sets the DefaultCacheBehavior field's value. func (s *CreateDistributionInput) SetDefaultCacheBehavior(v *CacheBehavior) *CreateDistributionInput { s.DefaultCacheBehavior = v @@ -23419,6 +23434,12 @@ func (s *CreateDistributionInput) SetTags(v []*Tag) *CreateDistributionInput { return s } +// SetViewerMinimumTlsProtocolVersion sets the ViewerMinimumTlsProtocolVersion field's value. +func (s *CreateDistributionInput) SetViewerMinimumTlsProtocolVersion(v string) *CreateDistributionInput { + s.ViewerMinimumTlsProtocolVersion = &v + return s +} + type CreateDistributionOutput struct { _ struct{} `type:"structure"` @@ -23881,7 +23902,7 @@ type CreateInstancesFromSnapshotInput struct { AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` // The bundle of specification information for your virtual private server (or - // instance), including the pricing plan (micro_1_0). + // instance), including the pricing plan (micro_x_x). // // BundleId is a required field BundleId *string `locationName:"bundleId" type:"string" required:"true"` @@ -23904,7 +23925,8 @@ type CreateInstancesFromSnapshotInput struct { // The IP address type for the instance. // - // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. + // The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack + // for IPv4 and IPv6. // // The default value is dualstack. IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"` @@ -24145,7 +24167,7 @@ type CreateInstancesInput struct { // AvailabilityZone is a required field AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` - // The ID for a virtual private server image (app_wordpress_4_4 or app_lamp_7_0). + // The ID for a virtual private server image (app_wordpress_x_x or app_lamp_x_x). // Use the get blueprints operation to return a list of available images (or // blueprints). 
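//
// A sketch of creating an instance (svc is an assumed *lightsail.Lightsail
// client; the zone, blueprint, bundle, and name values are illustrative):
//
//	_, err := svc.CreateInstances(&lightsail.CreateInstancesInput{
//	    AvailabilityZone: aws.String("us-east-2a"),
//	    BlueprintId:      aws.String("amazon_linux_2023"),
//	    BundleId:         aws.String("micro_x_x"),
//	    InstanceNames:    []*string{aws.String("Amazon_Linux_2023-1")},
//	})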
// @@ -24158,7 +24180,7 @@ type CreateInstancesInput struct { BlueprintId *string `locationName:"blueprintId" type:"string" required:"true"` // The bundle of specification information for your virtual private server (or - // instance), including the pricing plan (micro_1_0). + // instance), including the pricing plan (medium_x_x). // // BundleId is a required field BundleId *string `locationName:"bundleId" type:"string" required:"true"` @@ -24179,7 +24201,8 @@ type CreateInstancesInput struct { // The IP address type for the instance. // - // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. + // The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack + // for IPv4 and IPv6. // // The default value is dualstack. IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"` @@ -24498,7 +24521,8 @@ type CreateLoadBalancerInput struct { // The IP address type for the load balancer. // - // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. + // The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack + // for IPv4 and IPv6. // // The default value is dualstack. IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"` @@ -36131,6 +36155,12 @@ type InputOrigin struct { // The AWS Region name of the origin resource. RegionName *string `locationName:"regionName" type:"string" enum:"RegionName"` + + // The amount of time, in seconds, that the distribution waits for a response + // after forwarding a request to the origin. The minimum timeout is 1 second, + // the maximum is 60 seconds, and the default (if you don't specify otherwise) + // is 30 seconds. + ResponseTimeout *int64 `locationName:"responseTimeout" type:"integer"` } // String returns the string representation. @@ -36169,6 +36199,12 @@ func (s *InputOrigin) SetRegionName(v string) *InputOrigin { return s } +// SetResponseTimeout sets the ResponseTimeout field's value. +func (s *InputOrigin) SetResponseTimeout(v int64) *InputOrigin { + s.ResponseTimeout = &v + return s +} + // Describes an instance (a virtual private server). type Instance struct { _ struct{} `type:"structure"` @@ -36179,13 +36215,13 @@ type Instance struct { // The Amazon Resource Name (ARN) of the instance (arn:aws:lightsail:us-east-2:123456789101:Instance/244ad76f-8aad-4741-809f-12345EXAMPLE). Arn *string `locationName:"arn" type:"string"` - // The blueprint ID (os_amlinux_2016_03). + // The blueprint ID (amazon_linux_2023). BlueprintId *string `locationName:"blueprintId" type:"string"` - // The friendly name of the blueprint (Amazon Linux). + // The friendly name of the blueprint (Amazon Linux 2023). BlueprintName *string `locationName:"blueprintName" type:"string"` - // The bundle for the instance (micro_1_0). + // The bundle for the instance (micro_x_x). BundleId *string `locationName:"bundleId" type:"string"` // The timestamp when the instance was created (1479734909.17) in Unix time @@ -36197,7 +36233,8 @@ type Instance struct { // The IP address type of the instance. // - // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. + // The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack + // for IPv4 and IPv6. IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"` // The IPv6 addresses of the instance. @@ -36213,7 +36250,7 @@ type Instance struct { // The metadata options for the Amazon Lightsail instance. 
MetadataOptions *InstanceMetadataOptions `locationName:"metadataOptions" type:"structure"` - // The name the user gave the instance (Amazon_Linux-1GB-Ohio-1). + // The name the user gave the instance (Amazon_Linux_2023-1). Name *string `locationName:"name" type:"string"` // Information about the public ports and monthly data transfer rates for the @@ -37052,6 +37089,10 @@ type InstancePortInfo struct { // an instance could not be reached. When you specify icmp as the protocol, // you must specify the ICMP type using the fromPort parameter, and ICMP // code using the toPort parameter. + // + // * icmp6 - Internet Control Message Protocol (ICMP) for IPv6. When you + // specify icmp6 as the protocol, you must specify the ICMP type using the + // fromPort parameter, and ICMP code using the toPort parameter. Protocol *string `locationName:"protocol" type:"string" enum:"NetworkProtocol"` // The last port in a range of open ports on an instance. @@ -37228,6 +37269,10 @@ type InstancePortState struct { // an instance could not be reached. When you specify icmp as the protocol, // you must specify the ICMP type using the fromPort parameter, and ICMP // code using the toPort parameter. + // + // * icmp6 - Internet Control Message Protocol (ICMP) for IPv6. When you + // specify icmp6 as the protocol, you must specify the ICMP type using the + // fromPort parameter, and ICMP code using the toPort parameter. Protocol *string `locationName:"protocol" type:"string" enum:"NetworkProtocol"` // Specifies whether the instance port is open or closed. @@ -37325,12 +37370,12 @@ type InstanceSnapshot struct { // An array of disk objects containing information about all block storage disks. FromAttachedDisks []*Disk `locationName:"fromAttachedDisks" type:"list"` - // The blueprint ID from which you created the snapshot (os_debian_8_3). A blueprint - // is a virtual private server (or instance) image used to create instances - // quickly. + // The blueprint ID from which you created the snapshot (amazon_linux_2023). + // A blueprint is a virtual private server (or instance) image used to create + // instances quickly. FromBlueprintId *string `locationName:"fromBlueprintId" type:"string"` - // The bundle ID from which you created the snapshot (micro_1_0). + // The bundle ID from which you created the snapshot (micro_x_x). FromBundleId *string `locationName:"fromBundleId" type:"string"` // The Amazon Resource Name (ARN) of the instance from which the snapshot was @@ -37492,10 +37537,10 @@ func (s *InstanceSnapshot) SetTags(v []*Tag) *InstanceSnapshot { type InstanceSnapshotInfo struct { _ struct{} `type:"structure"` - // The blueprint ID from which the source instance (os_debian_8_3). + // The blueprint ID from which the source instance was created (amazon_linux_2023). FromBlueprintId *string `locationName:"fromBlueprintId" type:"string"` - // The bundle ID from which the source instance was created (micro_1_0). + // The bundle ID from which the source instance was created (micro_x_x). FromBundleId *string `locationName:"fromBundleId" type:"string"` // A list of objects describing the disks that were attached to the source instance. @@ -37885,6 +37930,10 @@ type LightsailDistribution struct { // The tag keys and optional values for the resource. For more information about // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags).
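A small, assumed usage of the icmp6 semantics documented above: for ICMP protocols, fromPort carries the ICMP type and toPort the ICMP code, so an ICMPv6 echo request (ping) is type 128, code 0. The instance name is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	ping6 := &lightsail.PortInfo{
		Protocol: aws.String(lightsail.NetworkProtocolIcmpv6),
		FromPort: aws.Int64(128), // ICMP type: echo request
		ToPort:   aws.Int64(0),   // ICMP code
	}
	open := &lightsail.OpenInstancePublicPortsInput{
		InstanceName: aws.String("my-instance"), // placeholder
		PortInfo:     ping6,
	}
	fmt.Println(open)
}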
Tags []*Tag `locationName:"tags" type:"list"` + + // The minimum TLS protocol version that the distribution can use to communicate + // with viewers. + ViewerMinimumTlsProtocolVersion *string `locationName:"viewerMinimumTlsProtocolVersion" type:"string"` } // String returns the string representation. @@ -38025,6 +38074,12 @@ func (s *LightsailDistribution) SetTags(v []*Tag) *LightsailDistribution { return s } +// SetViewerMinimumTlsProtocolVersion sets the ViewerMinimumTlsProtocolVersion field's value. +func (s *LightsailDistribution) SetViewerMinimumTlsProtocolVersion(v string) *LightsailDistribution { + s.ViewerMinimumTlsProtocolVersion = &v + return s +} + // Describes a load balancer. type LoadBalancer struct { _ struct{} `type:"structure"` @@ -38060,7 +38115,8 @@ type LoadBalancer struct { // The IP address type of the load balancer. // - // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. + // The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack + // for IPv4 and IPv6. IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"` // The AWS Region where your load balancer was created (us-east-2a). Lightsail @@ -39588,6 +39644,12 @@ type Origin struct { // The resource type of the origin resource (Instance). ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The amount of time, in seconds, that the distribution waits for a response + // after forwarding a request to the origin. The minimum timeout is 1 second, + // the maximum is 60 seconds, and the default (if you don't specify otherwise) + // is 30 seconds. + ResponseTimeout *int64 `locationName:"responseTimeout" type:"integer"` } // String returns the string representation. @@ -39632,6 +39694,12 @@ func (s *Origin) SetResourceType(v string) *Origin { return s } +// SetResponseTimeout sets the ResponseTimeout field's value. +func (s *Origin) SetResponseTimeout(v int64) *Origin { + s.ResponseTimeout = &v + return s +} + // The password data for the Windows Server-based instance, including the ciphertext // and the key pair name. type PasswordData struct { @@ -39932,6 +40000,10 @@ type PortInfo struct { // an instance could not be reached. When you specify icmp as the protocol, // you must specify the ICMP type using the fromPort parameter, and ICMP // code using the toPort parameter. + // + // * icmp6 - Internet Control Message Protocol (ICMP) for IPv6. When you + // specify icmp6 as the protocol, you must specify the ICMP type using the + // fromPort parameter, and ICMP code using the toPort parameter. Protocol *string `locationName:"protocol" type:"string" enum:"NetworkProtocol"` // The last port in a range of open ports on an instance. @@ -42549,9 +42621,21 @@ func (s *Session) SetUrl(v string) *Session { type SetIpAddressTypeInput struct { _ struct{} `type:"structure"` + // Required parameter to accept the instance bundle update when changing to, + // and from, IPv6-only. + // + // An instance bundle will change when switching from dual-stack or ipv4, to + // ipv6. It also changes when switching from ipv6, to dual-stack or ipv4. + // + // You must include this parameter in the command to update the bundle. For + // example, if you switch from dual-stack to ipv6, the bundle will be updated, + // and billing for the IPv6-only instance bundle begins immediately. + AcceptBundleUpdate *bool `locationName:"acceptBundleUpdate" type:"boolean"` + // The IP address type to set for the specified resource. 
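A hedged sketch of the new acceptBundleUpdate flag described above: switching a resource to IPv6-only changes its instance bundle, so the request must acknowledge the update explicitly. The resource name is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	input := &lightsail.SetIpAddressTypeInput{
		ResourceName:  aws.String("my-instance"), // placeholder
		ResourceType:  aws.String(lightsail.ResourceTypeInstance),
		IpAddressType: aws.String(lightsail.IpAddressTypeIpv6),
	}
	// Acknowledge the bundle change that an IPv6-only switch implies;
	// without this the update is expected to be rejected.
	input.SetAcceptBundleUpdate(true)
	fmt.Println(input)
}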
// - // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. + // The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack + // for IPv4 and IPv6. // // IpAddressType is a required field IpAddressType *string `locationName:"ipAddressType" type:"string" required:"true" enum:"IpAddressType"` @@ -42610,6 +42694,12 @@ func (s *SetIpAddressTypeInput) Validate() error { return nil } +// SetAcceptBundleUpdate sets the AcceptBundleUpdate field's value. +func (s *SetIpAddressTypeInput) SetAcceptBundleUpdate(v bool) *SetIpAddressTypeInput { + s.AcceptBundleUpdate = &v + return s +} + // SetIpAddressType sets the IpAddressType field's value. func (s *SetIpAddressTypeInput) SetIpAddressType(v string) *SetIpAddressTypeInput { s.IpAddressType = &v @@ -44859,6 +44949,14 @@ type UpdateDistributionInput struct { // An array of objects that describe the per-path cache behavior for the distribution. CacheBehaviors []*CacheBehaviorPerPath `locationName:"cacheBehaviors" type:"list"` + // The name of the SSL/TLS certificate that you want to attach to the distribution. + // + // Only certificates with a status of ISSUED can be attached to a distribution. + // + // Use the GetCertificates (https://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_GetCertificates.html) + // action to get a list of certificate names that you can specify. + CertificateName *string `locationName:"certificateName" type:"string"` + // An object that describes the default cache behavior for the distribution. DefaultCacheBehavior *CacheBehavior `locationName:"defaultCacheBehavior" type:"structure"` @@ -44878,6 +44976,17 @@ type UpdateDistributionInput struct { // // The distribution pulls, caches, and serves content from the origin. Origin *InputOrigin `locationName:"origin" type:"structure"` + + // Indicates whether the default SSL/TLS certificate is attached to the distribution. + // The default value is true. When true, the distribution uses the default domain + // name such as d111111abcdef8.cloudfront.net. + // + // Set this value to false to attach a new certificate to the distribution. + UseDefaultCertificate *bool `locationName:"useDefaultCertificate" type:"boolean"` + + // Use this parameter to update the minimum TLS protocol version for the SSL/TLS + // certificate that's attached to the distribution. + ViewerMinimumTlsProtocolVersion *string `locationName:"viewerMinimumTlsProtocolVersion" type:"string" enum:"ViewerMinimumTlsProtocolVersionEnum"` } // String returns the string representation. @@ -44923,6 +45032,12 @@ func (s *UpdateDistributionInput) SetCacheBehaviors(v []*CacheBehaviorPerPath) * return s } +// SetCertificateName sets the CertificateName field's value. +func (s *UpdateDistributionInput) SetCertificateName(v string) *UpdateDistributionInput { + s.CertificateName = &v + return s +} + // SetDefaultCacheBehavior sets the DefaultCacheBehavior field's value. func (s *UpdateDistributionInput) SetDefaultCacheBehavior(v *CacheBehavior) *UpdateDistributionInput { s.DefaultCacheBehavior = v @@ -44947,6 +45062,18 @@ func (s *UpdateDistributionInput) SetOrigin(v *InputOrigin) *UpdateDistributionI return s } +// SetUseDefaultCertificate sets the UseDefaultCertificate field's value. +func (s *UpdateDistributionInput) SetUseDefaultCertificate(v bool) *UpdateDistributionInput { + s.UseDefaultCertificate = &v + return s +} + +// SetViewerMinimumTlsProtocolVersion sets the ViewerMinimumTlsProtocolVersion field's value. 
+func (s *UpdateDistributionInput) SetViewerMinimumTlsProtocolVersion(v string) *UpdateDistributionInput { + s.ViewerMinimumTlsProtocolVersion = &v + return s +} + type UpdateDistributionOutput struct { _ struct{} `type:"structure"` @@ -46691,6 +46818,9 @@ const ( // IpAddressTypeIpv4 is a IpAddressType enum value IpAddressTypeIpv4 = "ipv4" + + // IpAddressTypeIpv6 is a IpAddressType enum value + IpAddressTypeIpv6 = "ipv6" ) // IpAddressType_Values returns all elements of the IpAddressType enum @@ -46698,6 +46828,7 @@ func IpAddressType_Values() []string { return []string{ IpAddressTypeDualstack, IpAddressTypeIpv4, + IpAddressTypeIpv6, } } @@ -47297,6 +47428,9 @@ const ( // NetworkProtocolIcmp is a NetworkProtocol enum value NetworkProtocolIcmp = "icmp" + + // NetworkProtocolIcmpv6 is a NetworkProtocol enum value + NetworkProtocolIcmpv6 = "icmpv6" ) // NetworkProtocol_Values returns all elements of the NetworkProtocol enum @@ -47306,6 +47440,7 @@ func NetworkProtocol_Values() []string { NetworkProtocolAll, NetworkProtocolUdp, NetworkProtocolIcmp, + NetworkProtocolIcmpv6, } } @@ -48188,3 +48323,27 @@ func TreatMissingData_Values() []string { TreatMissingDataMissing, } } + +const ( + // ViewerMinimumTlsProtocolVersionEnumTlsv112016 is a ViewerMinimumTlsProtocolVersionEnum enum value + ViewerMinimumTlsProtocolVersionEnumTlsv112016 = "TLSv1.1_2016" + + // ViewerMinimumTlsProtocolVersionEnumTlsv122018 is a ViewerMinimumTlsProtocolVersionEnum enum value + ViewerMinimumTlsProtocolVersionEnumTlsv122018 = "TLSv1.2_2018" + + // ViewerMinimumTlsProtocolVersionEnumTlsv122019 is a ViewerMinimumTlsProtocolVersionEnum enum value + ViewerMinimumTlsProtocolVersionEnumTlsv122019 = "TLSv1.2_2019" + + // ViewerMinimumTlsProtocolVersionEnumTlsv122021 is a ViewerMinimumTlsProtocolVersionEnum enum value + ViewerMinimumTlsProtocolVersionEnumTlsv122021 = "TLSv1.2_2021" +) + +// ViewerMinimumTlsProtocolVersionEnum_Values returns all elements of the ViewerMinimumTlsProtocolVersionEnum enum +func ViewerMinimumTlsProtocolVersionEnum_Values() []string { + return []string{ + ViewerMinimumTlsProtocolVersionEnumTlsv112016, + ViewerMinimumTlsProtocolVersionEnumTlsv122018, + ViewerMinimumTlsProtocolVersionEnumTlsv122019, + ViewerMinimumTlsProtocolVersionEnumTlsv122021, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go index 04f6c811b..827bd5194 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go @@ -179,8 +179,8 @@ func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req // // Creates and returns access and refresh tokens for clients and applications // that are authenticated using IAM entities. The access token can be used to -// fetch short-term credentials for the assigned AWS accounts or to access application -// APIs using bearer authentication. +// fetch short-term credentials for the assigned Amazon Web Services accounts +// or to access application APIs using bearer authentication. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -331,6 +331,13 @@ func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *reques // Indicates that an error from the service occurred while trying to process // a request. 
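Pulling the UpdateDistributionInput additions above together, a plausible certificate switch looks like this; the distribution and certificate names are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	input := &lightsail.UpdateDistributionInput{
		DistributionName: aws.String("my-distribution"), // placeholder
	}
	// Stop using the default d111111abcdef8.cloudfront.net certificate,
	// attach a named certificate (it must have a status of ISSUED), and
	// raise the minimum viewer TLS protocol version.
	input.SetUseDefaultCertificate(false).
		SetCertificateName("my-issued-cert").
		SetViewerMinimumTlsProtocolVersion(lightsail.ViewerMinimumTlsProtocolVersionEnumTlsv122021)
	fmt.Println(input)
}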
// +// - InvalidRedirectUriException +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) { req, out := c.RegisterClientRequest(input) @@ -619,6 +626,15 @@ type CreateTokenInput struct { // type is currently unsupported for the CreateToken API. Code *string `locationName:"code" type:"string"` + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + // + // CodeVerifier is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"` + // Used only when calling this API for the Device Code grant type. This short-term // code is used to identify this authorization request. This comes from the // result of the StartDeviceAuthorization API. @@ -718,6 +734,12 @@ func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput { return s } +// SetCodeVerifier sets the CodeVerifier field's value. +func (s *CreateTokenInput) SetCodeVerifier(v string) *CreateTokenInput { + s.CodeVerifier = &v + return s +} + // SetDeviceCode sets the DeviceCode field's value. func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput { s.DeviceCode = &v @@ -751,7 +773,8 @@ func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput { type CreateTokenOutput struct { _ struct{} `type:"structure"` - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateTokenOutput's @@ -863,6 +886,15 @@ type CreateTokenWithIAMInput struct { // persisted in the Authorization Code GrantOptions for the application. Code *string `locationName:"code" type:"string"` + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + // + // CodeVerifier is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"` + // Supports the following OAuth grant types: Authorization Code, Refresh Token, // JWT Bearer, and Token Exchange. Specify one of the following values, depending // on the grant type that you want: @@ -982,6 +1014,12 @@ func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput { return s } +// SetCodeVerifier sets the CodeVerifier field's value. +func (s *CreateTokenWithIAMInput) SetCodeVerifier(v string) *CreateTokenWithIAMInput { + s.CodeVerifier = &v + return s +} + // SetGrantType sets the GrantType field's value. 
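A hypothetical PKCE redemption using the new codeVerifier field documented above; the client, secret, and code values are placeholders, and the verifier is the RFC 7636 example value:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
	// A real client would generate this randomly and send its S256
	// challenge during the earlier authorization request.
	verifier := "dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk"
	input := &ssooidc.CreateTokenInput{
		ClientId:     aws.String("example-client-id"),     // placeholder
		ClientSecret: aws.String("example-client-secret"), // placeholder
		GrantType:    aws.String("authorization_code"),
		Code:         aws.String("example-authorization-code"),
		RedirectUri:  aws.String("http://127.0.0.1:8080/callback"),
	}
	input.SetCodeVerifier(verifier)
	fmt.Println(input) // CodeVerifier is printed as "sensitive"
}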
func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput { s.GrantType = &v @@ -1027,7 +1065,8 @@ func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWith type CreateTokenWithIAMOutput struct { _ struct{} `type:"structure"` - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's @@ -1495,6 +1534,78 @@ func (s *InvalidGrantException) RequestID() string { return s.RespMetadata.RequestID } +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +type InvalidRedirectUriException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_redirect_uri. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRedirectUriException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRedirectUriException) GoString() string { + return s.String() +} + +func newErrorInvalidRedirectUriException(v protocol.ResponseMetadata) error { + return &InvalidRedirectUriException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRedirectUriException) Code() string { + return "InvalidRedirectUriException" +} + +// Message returns the exception's message. +func (s *InvalidRedirectUriException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRedirectUriException) OrigErr() error { + return nil +} + +func (s *InvalidRedirectUriException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRedirectUriException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRedirectUriException) RequestID() string { + return s.RespMetadata.RequestID +} + // Indicates that something is wrong with the input to the request. For example, // a required parameter might be missing or out of range. 
type InvalidRequestException struct { @@ -1731,6 +1842,25 @@ type RegisterClientInput struct { // ClientType is a required field ClientType *string `locationName:"clientType" type:"string" required:"true"` + // This IAM Identity Center application ARN is used to define administrator-managed + // configuration for public client access to resources. At authorization, the + // scopes, grants, and redirect URI available to this client will be restricted + // by this application resource. + EntitledApplicationArn *string `locationName:"entitledApplicationArn" type:"string"` + + // The list of OAuth 2.0 grant types that are defined by the client. This list + // is used to restrict the token granting flows available to the client. + GrantTypes []*string `locationName:"grantTypes" type:"list"` + + // The IAM Identity Center Issuer URL associated with an instance of IAM Identity + // Center. This value is needed for user access to resources through the client. + IssuerUrl *string `locationName:"issuerUrl" type:"string"` + + // The list of redirect URI that are defined by the client. At completion of + // authorization, this list is used to restrict what locations the user agent + // can be redirected back to. + RedirectUris []*string `locationName:"redirectUris" type:"list"` + // The list of scopes that are defined by the client. Upon authorization, this // list is used to restrict permissions when granting an access token. Scopes []*string `locationName:"scopes" type:"list"` @@ -1782,6 +1912,30 @@ func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput { return s } +// SetEntitledApplicationArn sets the EntitledApplicationArn field's value. +func (s *RegisterClientInput) SetEntitledApplicationArn(v string) *RegisterClientInput { + s.EntitledApplicationArn = &v + return s +} + +// SetGrantTypes sets the GrantTypes field's value. +func (s *RegisterClientInput) SetGrantTypes(v []*string) *RegisterClientInput { + s.GrantTypes = v + return s +} + +// SetIssuerUrl sets the IssuerUrl field's value. +func (s *RegisterClientInput) SetIssuerUrl(v string) *RegisterClientInput { + s.IssuerUrl = &v + return s +} + +// SetRedirectUris sets the RedirectUris field's value. +func (s *RegisterClientInput) SetRedirectUris(v []*string) *RegisterClientInput { + s.RedirectUris = v + return s +} + // SetScopes sets the Scopes field's value. func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput { s.Scopes = v diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go index e6242e492..cadf4584d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go @@ -57,6 +57,13 @@ const ( // makes a CreateToken request with an invalid grant type. ErrCodeInvalidGrantException = "InvalidGrantException" + // ErrCodeInvalidRedirectUriException for service response error code + // "InvalidRedirectUriException". + // + // Indicates that one or more redirect URI in the request is not supported for + // this operation. + ErrCodeInvalidRedirectUriException = "InvalidRedirectUriException" + // ErrCodeInvalidRequestException for service response error code // "InvalidRequestException". 
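Assumed usage of the new RegisterClientInput fields above, restricting a public client to specific grants and redirect locations; every literal below (client name, grant-type strings, redirect URI, issuer URL) is a placeholder, not a value taken from this change:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
	input := &ssooidc.RegisterClientInput{
		ClientName: aws.String("my-cli"), // placeholder
		ClientType: aws.String("public"),
	}
	// Restrict the token flows and the locations the user agent may be
	// redirected back to; tie the client to one Identity Center instance.
	input.SetGrantTypes([]*string{
		aws.String("authorization_code"),
		aws.String("refresh_token"),
	}).SetRedirectUris([]*string{
		aws.String("http://127.0.0.1:8080/callback"),
	}).SetIssuerUrl("https://identitycenter.amazonaws.com/ssoins-1234567890abcdef")
	fmt.Println(input)
}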
// @@ -106,6 +113,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidClientException": newErrorInvalidClientException, "InvalidClientMetadataException": newErrorInvalidClientMetadataException, "InvalidGrantException": newErrorInvalidGrantException, + "InvalidRedirectUriException": newErrorInvalidRedirectUriException, "InvalidRequestException": newErrorInvalidRequestException, "InvalidRequestRegionException": newErrorInvalidRequestRegionException, "InvalidScopeException": newErrorInvalidScopeException, diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 8bf0e5b78..33c88305c 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) - [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index a9e0d45c9..78bddf1ce 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -19,10 +19,13 @@ const ( // Store the primes in an array as well. // // The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. +// contiguous array for the assembly code. var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 @@ -33,19 +36,31 @@ type Digest struct { n int // how much of mem is used } -// New creates a new Digest that computes the 64-bit xxHash algorithm. +// New creates a new Digest with a zero seed. func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { var d Digest - d.Reset() + d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 d.total = 0 d.n = 0 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index 9216e0a40..78f95f256 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -6,7 +6,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. 
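The seed plumbing added above can be sanity-checked with a short program: New stays equivalent to NewWithSeed(0), and ResetWithSeed lets one Digest be reused under a different seed:

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	d := xxhash.NewWithSeed(42)
	d.Write([]byte("hello"))
	fmt.Println(d.Sum64()) // seeded streaming digest

	// Reusing the Digest with the zero seed matches the one-shot Sum64.
	d.ResetWithSeed(0)
	d.Write([]byte("hello"))
	fmt.Println(d.Sum64() == xxhash.Sum64([]byte("hello"))) // true
}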
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed. // //go:noescape func Sum64(b []byte) uint64 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 26df13bba..118e49e81 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -3,7 +3,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. func Sum64(b []byte) uint64 { // A simpler version would be // d := New() diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index e86f1b5fd..05f5e7dfe 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -5,7 +5,7 @@ package xxhash -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 1c1638fd8..cf9d42aed 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -33,7 +33,7 @@ import ( // // See https://github.com/golang/go/issues/42739 for discussion. -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go index 4da2bd363..7d3e1536b 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: udpa/annotations/migrate.proto diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go index 1b72b067f..38196d5eb 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,21 +32,57 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on MigrateAnnotation with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. 
func (m *MigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MigrateAnnotationMultiError, or nil if none found. +func (m *MigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *MigrateAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for Rename + if len(errors) > 0 { + return MigrateAnnotationMultiError(errors) + } + return nil } +// MigrateAnnotationMultiError is an error wrapping multiple validation errors +// returned by MigrateAnnotation.ValidateAll() if the designated constraints +// aren't met. +type MigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MigrateAnnotationMultiError) AllErrors() []error { return m } + // MigrateAnnotationValidationError is the validation error returned by // MigrateAnnotation.Validate if the designated constraints aren't met. type MigrateAnnotationValidationError struct { @@ -104,19 +141,54 @@ var _ interface { // Validate checks the field values on FieldMigrateAnnotation with the rules // defined in the proto definition for this message. If any rules are -// violated, an error is returned. +// violated, the first error encountered is returned, or nil if there are no violations. func (m *FieldMigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldMigrateAnnotationMultiError, or nil if none found. +func (m *FieldMigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldMigrateAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for Rename // no validation rules for OneofPromotion + if len(errors) > 0 { + return FieldMigrateAnnotationMultiError(errors) + } + return nil } +// FieldMigrateAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldMigrateAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldMigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldMigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldMigrateAnnotationMultiError) AllErrors() []error { return m } + // FieldMigrateAnnotationValidationError is the validation error returned by // FieldMigrateAnnotation.Validate if the designated constraints aren't met. type FieldMigrateAnnotationValidationError struct { @@ -175,17 +247,52 @@ var _ interface { // Validate checks the field values on FileMigrateAnnotation with the rules // defined in the proto definition for this message. If any rules are -// violated, an error is returned. 
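A hedged sketch of the Validate/ValidateAll split generated above: Validate stops at the first violation, while ValidateAll gathers every violation into a MultiError. MigrateAnnotation declares no rules, so both calls return nil here; the point is the shape of the pattern (the package path is assumed from the go_package option):

package main

import (
	"fmt"

	"github.com/cncf/xds/go/udpa/annotations"
)

func main() {
	m := &annotations.MigrateAnnotation{Rename: "new_name"}
	if err := m.Validate(); err != nil { // stops at the first violation
		fmt.Println("validate:", err)
	}
	if err := m.ValidateAll(); err != nil { // gathers all violations
		if multi, ok := err.(annotations.MigrateAnnotationMultiError); ok {
			for _, e := range multi.AllErrors() {
				fmt.Println("violation:", e)
			}
		}
	}
}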
+// violated, the first error encountered is returned, or nil if there are no violations. func (m *FileMigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FileMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FileMigrateAnnotationMultiError, or nil if none found. +func (m *FileMigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FileMigrateAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for MoveToPackage + if len(errors) > 0 { + return FileMigrateAnnotationMultiError(errors) + } + return nil } +// FileMigrateAnnotationMultiError is an error wrapping multiple validation +// errors returned by FileMigrateAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FileMigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FileMigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FileMigrateAnnotationMultiError) AllErrors() []error { return m } + // FileMigrateAnnotationValidationError is the validation error returned by // FileMigrateAnnotation.Validate if the designated constraints aren't met. type FileMigrateAnnotationValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go index c06e280ab..719577895 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: udpa/annotations/security.proto @@ -121,10 +121,10 @@ var file_udpa_annotations_security_proto_rawDesc = []byte{ 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x42, - 0x31, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, - 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, - 0x08, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x31, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x08, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, + 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go index 64058ccdd..acc9bd7a1 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,23 +32,59 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on FieldSecurityAnnotation with the rules // defined in the proto definition for this message. If any rules are -// violated, an error is returned. +// violated, the first error encountered is returned, or nil if there are no violations. func (m *FieldSecurityAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldSecurityAnnotation with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldSecurityAnnotationMultiError, or nil if none found. +func (m *FieldSecurityAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldSecurityAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for ConfigureForUntrustedDownstream // no validation rules for ConfigureForUntrustedUpstream + if len(errors) > 0 { + return FieldSecurityAnnotationMultiError(errors) + } + return nil } +// FieldSecurityAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldSecurityAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldSecurityAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldSecurityAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m FieldSecurityAnnotationMultiError) AllErrors() []error { return m } + // FieldSecurityAnnotationValidationError is the validation error returned by // FieldSecurityAnnotation.Validate if the designated constraints aren't met. type FieldSecurityAnnotationValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go index f8fc82294..8631b8568 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: udpa/annotations/sensitive.proto diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go index dd4fea9b2..f3fa61974 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,4 +32,5 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go index ac7238e55..f2fdc3ca3 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: udpa/annotations/status.proto diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go index 9af17c92f..5633a8383 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,23 +32,59 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on StatusAnnotation with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. func (m *StatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// StatusAnnotationMultiError, or nil if none found. 
+func (m *StatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *StatusAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for WorkInProgress // no validation rules for PackageVersionStatus + if len(errors) > 0 { + return StatusAnnotationMultiError(errors) + } + return nil } +// StatusAnnotationMultiError is an error wrapping multiple validation errors +// returned by StatusAnnotation.ValidateAll() if the designated constraints +// aren't met. +type StatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StatusAnnotationMultiError) AllErrors() []error { return m } + // StatusAnnotationValidationError is the validation error returned by // StatusAnnotation.Validate if the designated constraints aren't met. type StatusAnnotationValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go index 68a101a3f..df83e0a2e 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: udpa/annotations/versioning.proto diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go index e88144cc1..5fd86baff 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,21 +32,57 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on VersioningAnnotation with the rules // defined in the proto definition for this message. If any rules are -// violated, an error is returned. +// violated, the first error encountered is returned, or nil if there are no violations. func (m *VersioningAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on VersioningAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// VersioningAnnotationMultiError, or nil if none found. +func (m *VersioningAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *VersioningAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for PreviousMessageType + if len(errors) > 0 { + return VersioningAnnotationMultiError(errors) + } + return nil } +// VersioningAnnotationMultiError is an error wrapping multiple validation +// errors returned by VersioningAnnotation.ValidateAll() if the designated +// constraints aren't met. 
+type VersioningAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m VersioningAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m VersioningAnnotationMultiError) AllErrors() []error { return m } + // VersioningAnnotationValidationError is the validation error returned by // VersioningAnnotation.Validate if the designated constraints aren't met. type VersioningAnnotationValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go index 0cdd47f75..ad24b1f7f 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: xds/annotations/v3/migrate.proto diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go index c74f35897..d57d77824 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,21 +32,57 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on MigrateAnnotation with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. func (m *MigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MigrateAnnotationMultiError, or nil if none found. +func (m *MigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *MigrateAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for Rename + if len(errors) > 0 { + return MigrateAnnotationMultiError(errors) + } + return nil } +// MigrateAnnotationMultiError is an error wrapping multiple validation errors +// returned by MigrateAnnotation.ValidateAll() if the designated constraints +// aren't met. +type MigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m MigrateAnnotationMultiError) AllErrors() []error { return m } + // MigrateAnnotationValidationError is the validation error returned by // MigrateAnnotation.Validate if the designated constraints aren't met. type MigrateAnnotationValidationError struct { @@ -104,19 +141,54 @@ var _ interface { // Validate checks the field values on FieldMigrateAnnotation with the rules // defined in the proto definition for this message. If any rules are -// violated, an error is returned. +// violated, the first error encountered is returned, or nil if there are no violations. func (m *FieldMigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldMigrateAnnotationMultiError, or nil if none found. +func (m *FieldMigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldMigrateAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for Rename // no validation rules for OneofPromotion + if len(errors) > 0 { + return FieldMigrateAnnotationMultiError(errors) + } + return nil } +// FieldMigrateAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldMigrateAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldMigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldMigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldMigrateAnnotationMultiError) AllErrors() []error { return m } + // FieldMigrateAnnotationValidationError is the validation error returned by // FieldMigrateAnnotation.Validate if the designated constraints aren't met. type FieldMigrateAnnotationValidationError struct { @@ -175,17 +247,52 @@ var _ interface { // Validate checks the field values on FileMigrateAnnotation with the rules // defined in the proto definition for this message. If any rules are -// violated, an error is returned. +// violated, the first error encountered is returned, or nil if there are no violations. func (m *FileMigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FileMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FileMigrateAnnotationMultiError, or nil if none found. +func (m *FileMigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FileMigrateAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for MoveToPackage + if len(errors) > 0 { + return FileMigrateAnnotationMultiError(errors) + } + return nil } +// FileMigrateAnnotationMultiError is an error wrapping multiple validation +// errors returned by FileMigrateAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FileMigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m FileMigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FileMigrateAnnotationMultiError) AllErrors() []error { return m } + // FileMigrateAnnotationValidationError is the validation error returned by // FileMigrateAnnotation.Validate if the designated constraints aren't met. type FileMigrateAnnotationValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go index a50efc41b..61df6890b 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: xds/annotations/v3/security.proto @@ -121,10 +121,10 @@ var file_xds_annotations_v3_security_proto_rawDesc = []byte{ 0x32, 0x2b, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, - 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x42, 0x33, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, - 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x76, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x42, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, + 0x01, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, + 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go index 3bee0479f..ac0143f27 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,23 +32,59 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on FieldSecurityAnnotation with the rules // defined in the proto definition for this message. If any rules are -// violated, an error is returned. +// violated, the first error encountered is returned, or nil if there are no violations. func (m *FieldSecurityAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldSecurityAnnotation with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldSecurityAnnotationMultiError, or nil if none found. +func (m *FieldSecurityAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldSecurityAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for ConfigureForUntrustedDownstream // no validation rules for ConfigureForUntrustedUpstream + if len(errors) > 0 { + return FieldSecurityAnnotationMultiError(errors) + } + return nil } +// FieldSecurityAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldSecurityAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldSecurityAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldSecurityAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldSecurityAnnotationMultiError) AllErrors() []error { return m } + // FieldSecurityAnnotationValidationError is the validation error returned by // FieldSecurityAnnotation.Validate if the designated constraints aren't met. type FieldSecurityAnnotationValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go index 1fbfafa82..274eace05 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: xds/annotations/v3/sensitive.proto diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go index 7f368572a..c101d3acc 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,4 +32,5 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go index 842025bd7..2497e0b2f 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
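Every regenerated *.pb.validate.go file now imports "sort" and pins it with a blank assignment, so files whose rules never sort anything (like sensitive.pb.validate.go above, where the guard is the only functional change) still compile. The idiom in isolation (a standalone sketch, not taken from this diff):

package example

import "sort"

// Go rejects unused imports; assigning one exported symbol to the blank
// identifier marks the package as used even if nothing else references it.
var _ = sort.Sort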
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: xds/annotations/v3/status.proto diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go index a8ebf097d..a87dbee8d 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,21 +32,57 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on FileStatusAnnotation with the rules // defined in the proto definition for this message. If any rules are -// violated, an error is returned. +// violated, the first error encountered is returned, or nil if there are no violations. func (m *FileStatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FileStatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FileStatusAnnotationMultiError, or nil if none found. +func (m *FileStatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FileStatusAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for WorkInProgress + if len(errors) > 0 { + return FileStatusAnnotationMultiError(errors) + } + return nil } +// FileStatusAnnotationMultiError is an error wrapping multiple validation +// errors returned by FileStatusAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FileStatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FileStatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FileStatusAnnotationMultiError) AllErrors() []error { return m } + // FileStatusAnnotationValidationError is the validation error returned by // FileStatusAnnotation.Validate if the designated constraints aren't met. type FileStatusAnnotationValidationError struct { @@ -104,17 +141,52 @@ var _ interface { // Validate checks the field values on MessageStatusAnnotation with the rules // defined in the proto definition for this message. If any rules are -// violated, an error is returned. +// violated, the first error encountered is returned, or nil if there are no violations. func (m *MessageStatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MessageStatusAnnotation with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MessageStatusAnnotationMultiError, or nil if none found. 
+func (m *MessageStatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *MessageStatusAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for WorkInProgress + if len(errors) > 0 { + return MessageStatusAnnotationMultiError(errors) + } + return nil } +// MessageStatusAnnotationMultiError is an error wrapping multiple validation +// errors returned by MessageStatusAnnotation.ValidateAll() if the designated +// constraints aren't met. +type MessageStatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MessageStatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MessageStatusAnnotationMultiError) AllErrors() []error { return m } + // MessageStatusAnnotationValidationError is the validation error returned by // MessageStatusAnnotation.Validate if the designated constraints aren't met. type MessageStatusAnnotationValidationError struct { @@ -173,17 +245,52 @@ var _ interface { // Validate checks the field values on FieldStatusAnnotation with the rules // defined in the proto definition for this message. If any rules are -// violated, an error is returned. +// violated, the first error encountered is returned, or nil if there are no violations. func (m *FieldStatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldStatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldStatusAnnotationMultiError, or nil if none found. +func (m *FieldStatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldStatusAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for WorkInProgress + if len(errors) > 0 { + return FieldStatusAnnotationMultiError(errors) + } + return nil } +// FieldStatusAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldStatusAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldStatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldStatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldStatusAnnotationMultiError) AllErrors() []error { return m } + // FieldStatusAnnotationValidationError is the validation error returned by // FieldStatusAnnotation.Validate if the designated constraints aren't met. type FieldStatusAnnotationValidationError struct { @@ -241,20 +348,55 @@ var _ interface { } = FieldStatusAnnotationValidationError{} // Validate checks the field values on StatusAnnotation with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. 
func (m *StatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// StatusAnnotationMultiError, or nil if none found. +func (m *StatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *StatusAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for WorkInProgress // no validation rules for PackageVersionStatus + if len(errors) > 0 { + return StatusAnnotationMultiError(errors) + } + return nil } +// StatusAnnotationMultiError is an error wrapping multiple validation errors +// returned by StatusAnnotation.ValidateAll() if the designated constraints +// aren't met. +type StatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StatusAnnotationMultiError) AllErrors() []error { return m } + // StatusAnnotationValidationError is the validation error returned by // StatusAnnotation.Validate if the designated constraints aren't met. type StatusAnnotationValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go index 5412c812a..2307dc874 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: xds/annotations/v3/versioning.proto diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go index 80c53b21c..042c266e1 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,21 +32,57 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on VersioningAnnotation with the rules // defined in the proto definition for this message. If any rules are -// violated, an error is returned. +// violated, the first error encountered is returned, or nil if there are no violations. func (m *VersioningAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on VersioningAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// VersioningAnnotationMultiError, or nil if none found. 
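For rule-less messages such as StatusAnnotation, the scaffolding above can never append to errors, so both entry points return nil regardless of field values. A quick sketch (standalone; the import alias is mine):

package main

import (
	"fmt"

	xdsannv3 "github.com/cncf/xds/go/xds/annotations/v3"
)

func main() {
	ann := &xdsannv3.StatusAnnotation{WorkInProgress: true}
	fmt.Println(ann.Validate(), ann.ValidateAll()) // <nil> <nil>: no rules declared
}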
+func (m *VersioningAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *VersioningAnnotation) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for PreviousMessageType + if len(errors) > 0 { + return VersioningAnnotationMultiError(errors) + } + return nil } +// VersioningAnnotationMultiError is an error wrapping multiple validation +// errors returned by VersioningAnnotation.ValidateAll() if the designated +// constraints aren't met. +type VersioningAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m VersioningAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m VersioningAnnotationMultiError) AllErrors() []error { return m } + // VersioningAnnotationValidationError is the validation error returned by // VersioningAnnotation.Validate if the designated constraints aren't met. type VersioningAnnotationValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go index 5a22c3266..3c361216c 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: xds/core/v3/authority.proto @@ -81,12 +81,12 @@ var file_xds_core_v3_authority_proto_rawDesc = []byte{ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x56, - 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, - 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0xd2, 0xc6, - 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go index 06b55362d..94317c2af 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go +++ 
b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,25 +32,65 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on Authority with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. func (m *Authority) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Authority with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in AuthorityMultiError, or nil +// if none found. +func (m *Authority) ValidateAll() error { + return m.validate(true) +} + +func (m *Authority) validate(all bool) error { if m == nil { return nil } + var errors []error + if utf8.RuneCountInString(m.GetName()) < 1 { - return AuthorityValidationError{ + err := AuthorityValidationError{ field: "Name", reason: "value length must be at least 1 runes", } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return AuthorityMultiError(errors) } return nil } +// AuthorityMultiError is an error wrapping multiple validation errors returned +// by Authority.ValidateAll() if the designated constraints aren't met. +type AuthorityMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AuthorityMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AuthorityMultiError) AllErrors() []error { return m } + // AuthorityValidationError is the validation error returned by // Authority.Validate if the designated constraints aren't met. type AuthorityValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go index e915cdb9d..d7be5c4d2 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: xds/core/v3/cidr.proto @@ -9,9 +9,9 @@ package v3 import ( _ "github.com/cncf/xds/go/xds/annotations/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -28,8 +28,8 @@ type CidrRange struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"` - PrefixLen *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"` + AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"` + PrefixLen *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"` } func (x *CidrRange) Reset() { @@ -71,7 +71,7 @@ func (x *CidrRange) GetAddressPrefix() string { return "" } -func (x *CidrRange) GetPrefixLen() *wrappers.UInt32Value { +func (x *CidrRange) GetPrefixLen() *wrapperspb.UInt32Value { if x != nil { return x.PrefixLen } @@ -97,12 +97,12 @@ var file_xds_core_v3_cidr_proto_rawDesc = []byte{ 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x4c, 0x65, 0x6e, 0x42, 0x56, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, - 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, - 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, - 0x65, 0x2f, 0x76, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x78, 0x4c, 0x65, 0x6e, 0x42, 0x56, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, + 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } @@ -120,8 +120,8 @@ func file_xds_core_v3_cidr_proto_rawDescGZIP() []byte { var file_xds_core_v3_cidr_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_xds_core_v3_cidr_proto_goTypes = []interface{}{ - (*CidrRange)(nil), // 0: xds.core.v3.CidrRange - (*wrappers.UInt32Value)(nil), // 1: google.protobuf.UInt32Value + (*CidrRange)(nil), // 0: xds.core.v3.CidrRange + (*wrapperspb.UInt32Value)(nil), // 1: google.protobuf.UInt32Value } var file_xds_core_v3_cidr_proto_depIdxs = []int32{ 1, // 0: 
xds.core.v3.CidrRange.prefix_len:type_name -> google.protobuf.UInt32Value diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go index eb48b32ba..43327f56b 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,36 +32,80 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on CidrRange with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. func (m *CidrRange) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CidrRange with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CidrRangeMultiError, or nil +// if none found. +func (m *CidrRange) ValidateAll() error { + return m.validate(true) +} + +func (m *CidrRange) validate(all bool) error { if m == nil { return nil } + var errors []error + if utf8.RuneCountInString(m.GetAddressPrefix()) < 1 { - return CidrRangeValidationError{ + err := CidrRangeValidationError{ field: "AddressPrefix", reason: "value length must be at least 1 runes", } + if !all { + return err + } + errors = append(errors, err) } if wrapper := m.GetPrefixLen(); wrapper != nil { if wrapper.GetValue() > 128 { - return CidrRangeValidationError{ + err := CidrRangeValidationError{ field: "PrefixLen", reason: "value must be less than or equal to 128", } + if !all { + return err + } + errors = append(errors, err) } } + if len(errors) > 0 { + return CidrRangeMultiError(errors) + } + return nil } +// CidrRangeMultiError is an error wrapping multiple validation errors returned +// by CidrRange.ValidateAll() if the designated constraints aren't met. +type CidrRangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CidrRangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CidrRangeMultiError) AllErrors() []error { return m } + // CidrRangeValidationError is the validation error returned by // CidrRange.Validate if the designated constraints aren't met. type CidrRangeValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go index e91c6abe7..52b520af4 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
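cidr.pb.go above also migrates from the deprecated github.com/golang/protobuf/ptypes/wrappers alias to google.golang.org/protobuf/types/known/wrapperspb, and the validator gains the all-errors path. A sketch exercising both CidrRange rules (standalone; aliases are mine, not from this diff):

package main

import (
	"fmt"

	xdscorev3 "github.com/cncf/xds/go/xds/core/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Violates both rules: empty address_prefix and prefix_len > 128.
	r := &xdscorev3.CidrRange{
		AddressPrefix: "",
		PrefixLen:     wrapperspb.UInt32(200),
	}

	fmt.Println(r.Validate())    // fail-fast: AddressPrefix violation only
	fmt.Println(r.ValidateAll()) // both violations, joined with "; "
}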
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: xds/core/v3/collection_entry.proto @@ -9,9 +9,9 @@ package v3 import ( _ "github.com/cncf/xds/go/xds/annotations/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -109,9 +109,9 @@ type CollectionEntry_InlineEntry struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - Resource *any1.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Resource *anypb.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` } func (x *CollectionEntry_InlineEntry) Reset() { @@ -160,7 +160,7 @@ func (x *CollectionEntry_InlineEntry) GetVersion() string { return "" } -func (x *CollectionEntry_InlineEntry) GetResource() *any1.Any { +func (x *CollectionEntry_InlineEntry) GetResource() *anypb.Any { if x != nil { return x.Resource } @@ -201,12 +201,12 @@ var file_xds_core_v3_collection_entry_proto_rawDesc = []byte{ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x19, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, - 0x42, 0x5c, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, - 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14, 0x43, 0x6f, 0x6c, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, - 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, + 0x42, 0x5c, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x42, 0x14, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, + 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } @@ -227,7 +227,7 @@ var file_xds_core_v3_collection_entry_proto_goTypes = []interface{}{ (*CollectionEntry)(nil), // 0: xds.core.v3.CollectionEntry (*CollectionEntry_InlineEntry)(nil), // 1: xds.core.v3.CollectionEntry.InlineEntry (*ResourceLocator)(nil), // 2: xds.core.v3.ResourceLocator - (*any1.Any)(nil), // 3: google.protobuf.Any + (*anypb.Any)(nil), // 3: google.protobuf.Any } var 
file_xds_core_v3_collection_entry_proto_depIdxs = []int32{ 2, // 0: xds.core.v3.CollectionEntry.locator:type_name -> xds.core.v3.ResourceLocator diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go index a81262530..610990b7f 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,21 +32,66 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on CollectionEntry with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. func (m *CollectionEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CollectionEntry with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CollectionEntryMultiError, or nil if none found. +func (m *CollectionEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *CollectionEntry) validate(all bool) error { if m == nil { return nil } - switch m.ResourceSpecifier.(type) { + var errors []error + oneofResourceSpecifierPresent := false + switch v := m.ResourceSpecifier.(type) { case *CollectionEntry_Locator: - - if v, ok := interface{}(m.GetLocator()).(interface{ Validate() error }); ok { + if v == nil { + err := CollectionEntryValidationError{ + field: "ResourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofResourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetLocator()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CollectionEntryValidationError{ + field: "Locator", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CollectionEntryValidationError{ + field: "Locator", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocator()).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { return CollectionEntryValidationError{ field: "Locator", @@ -56,8 +102,38 @@ func (m *CollectionEntry) Validate() error { } case *CollectionEntry_InlineEntry_: - - if v, ok := interface{}(m.GetInlineEntry()).(interface{ Validate() error }); ok { + if v == nil { + err := CollectionEntryValidationError{ + field: "ResourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofResourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetInlineEntry()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CollectionEntryValidationError{ + field: "InlineEntry", + reason: "embedded message failed validation", + cause: err, + }) + } 
+ case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CollectionEntryValidationError{ + field: "InlineEntry", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInlineEntry()).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { return CollectionEntryValidationError{ field: "InlineEntry", @@ -68,16 +144,43 @@ func (m *CollectionEntry) Validate() error { } default: - return CollectionEntryValidationError{ + _ = v // ensures v is used + } + if !oneofResourceSpecifierPresent { + err := CollectionEntryValidationError{ field: "ResourceSpecifier", reason: "value is required", } + if !all { + return err + } + errors = append(errors, err) + } + if len(errors) > 0 { + return CollectionEntryMultiError(errors) } return nil } +// CollectionEntryMultiError is an error wrapping multiple validation errors +// returned by CollectionEntry.ValidateAll() if the designated constraints +// aren't met. +type CollectionEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CollectionEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CollectionEntryMultiError) AllErrors() []error { return m } + // CollectionEntryValidationError is the validation error returned by // CollectionEntry.Validate if the designated constraints aren't met. type CollectionEntryValidationError struct { @@ -134,22 +237,59 @@ var _ interface { // Validate checks the field values on CollectionEntry_InlineEntry with the // rules defined in the proto definition for this message. If any rules are -// violated, an error is returned. +// violated, the first error encountered is returned, or nil if there are no violations. func (m *CollectionEntry_InlineEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CollectionEntry_InlineEntry with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CollectionEntry_InlineEntryMultiError, or nil if none found. 
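The new oneof handling above switches on the wrapper type and rejects a typed-nil value explicitly, because an interface holding a nil pointer is not itself nil. The underlying Go behavior in isolation (standalone sketch, not from this diff):

package main

import "fmt"

type wrapper struct{}

func main() {
	var p *wrapper        // nil pointer
	var i interface{} = p // interface now carries (*wrapper)(nil)

	fmt.Println(p == nil) // true
	fmt.Println(i == nil) // false: a plain nil check would miss this case
}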
+func (m *CollectionEntry_InlineEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *CollectionEntry_InlineEntry) validate(all bool) error { if m == nil { return nil } + var errors []error + if !_CollectionEntry_InlineEntry_Name_Pattern.MatchString(m.GetName()) { - return CollectionEntry_InlineEntryValidationError{ + err := CollectionEntry_InlineEntryValidationError{ field: "Name", reason: "value does not match regex pattern \"^[0-9a-zA-Z_\\\\-\\\\.~:]+$\"", } + if !all { + return err + } + errors = append(errors, err) } // no validation rules for Version - if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CollectionEntry_InlineEntryValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CollectionEntry_InlineEntryValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { return CollectionEntry_InlineEntryValidationError{ field: "Resource", @@ -159,9 +299,30 @@ func (m *CollectionEntry_InlineEntry) Validate() error { } } + if len(errors) > 0 { + return CollectionEntry_InlineEntryMultiError(errors) + } + return nil } +// CollectionEntry_InlineEntryMultiError is an error wrapping multiple +// validation errors returned by CollectionEntry_InlineEntry.ValidateAll() if +// the designated constraints aren't met. +type CollectionEntry_InlineEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CollectionEntry_InlineEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CollectionEntry_InlineEntryMultiError) AllErrors() []error { return m } + // CollectionEntry_InlineEntryValidationError is the validation error returned // by CollectionEntry_InlineEntry.Validate if the designated constraints // aren't met. diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go index f3f37162b..563775a1f 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
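The InlineEntry name gate above is a compiled regexp; unquoting the reason string gives the pattern ^[0-9a-zA-Z_\-\.~:]+$. Its behavior in isolation (standalone sketch; the sample names are mine):

package main

import (
	"fmt"
	"regexp"
)

var namePattern = regexp.MustCompile(`^[0-9a-zA-Z_\-\.~:]+$`)

func main() {
	fmt.Println(namePattern.MatchString("listener_0")) // true
	fmt.Println(namePattern.MatchString("bad name/x")) // false: ' ' and '/' fall outside the class
}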
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: xds/core/v3/context_params.proto @@ -84,13 +84,13 @@ var file_xds_core_v3_context_params_proto_rawDesc = []byte{ 0x6d, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5a, 0x0a, - 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, - 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, - 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5a, 0xd2, + 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x12, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, + 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go index 31277a628..1c9accaa3 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,21 +32,57 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on ContextParams with the rules defined in -// the proto definition for this message. If any rules are violated, an error -// is returned. +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. func (m *ContextParams) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ContextParams with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ContextParamsMultiError, or +// nil if none found. 
+func (m *ContextParams) ValidateAll() error { + return m.validate(true) +} + +func (m *ContextParams) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for Params + if len(errors) > 0 { + return ContextParamsMultiError(errors) + } + return nil } +// ContextParamsMultiError is an error wrapping multiple validation errors +// returned by ContextParams.ValidateAll() if the designated constraints +// aren't met. +type ContextParamsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ContextParamsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ContextParamsMultiError) AllErrors() []error { return m } + // ContextParamsValidationError is the validation error returned by // ContextParams.Validate if the designated constraints aren't met. type ContextParamsValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go index 41db466bd..476fa47c2 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: xds/core/v3/extension.proto @@ -8,9 +8,9 @@ package v3 import ( _ "github.com/envoyproxy/protoc-gen-validate/validate" - any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -27,8 +27,8 @@ type TypedExtensionConfig struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - TypedConfig *any1.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` } func (x *TypedExtensionConfig) Reset() { @@ -70,7 +70,7 @@ func (x *TypedExtensionConfig) GetName() string { return "" } -func (x *TypedExtensionConfig) GetTypedConfig() *any1.Any { +func (x *TypedExtensionConfig) GetTypedConfig() *anypb.Any { if x != nil { return x.TypedConfig } @@ -116,7 +116,7 @@ func file_xds_core_v3_extension_proto_rawDescGZIP() []byte { var file_xds_core_v3_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_xds_core_v3_extension_proto_goTypes = []interface{}{ (*TypedExtensionConfig)(nil), // 0: xds.core.v3.TypedExtensionConfig - (*any1.Any)(nil), // 1: google.protobuf.Any + (*anypb.Any)(nil), // 1: google.protobuf.Any } var file_xds_core_v3_extension_proto_depIdxs = []int32{ 1, // 0: xds.core.v3.TypedExtensionConfig.typed_config:type_name -> google.protobuf.Any diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go index 2acbda3c6..839f3fef7 
100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,37 +32,81 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on TypedExtensionConfig with the rules // defined in the proto definition for this message. If any rules are -// violated, an error is returned. +// violated, the first error encountered is returned, or nil if there are no violations. func (m *TypedExtensionConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TypedExtensionConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TypedExtensionConfigMultiError, or nil if none found. +func (m *TypedExtensionConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *TypedExtensionConfig) validate(all bool) error { if m == nil { return nil } + var errors []error + if utf8.RuneCountInString(m.GetName()) < 1 { - return TypedExtensionConfigValidationError{ + err := TypedExtensionConfigValidationError{ field: "Name", reason: "value length must be at least 1 runes", } + if !all { + return err + } + errors = append(errors, err) } if m.GetTypedConfig() == nil { - return TypedExtensionConfigValidationError{ + err := TypedExtensionConfigValidationError{ field: "TypedConfig", reason: "value is required", } + if !all { + return err + } + errors = append(errors, err) } if a := m.GetTypedConfig(); a != nil { } + if len(errors) > 0 { + return TypedExtensionConfigMultiError(errors) + } + return nil } +// TypedExtensionConfigMultiError is an error wrapping multiple validation +// errors returned by TypedExtensionConfig.ValidateAll() if the designated +// constraints aren't met. +type TypedExtensionConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TypedExtensionConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TypedExtensionConfigMultiError) AllErrors() []error { return m } + // TypedExtensionConfigValidationError is the validation error returned by // TypedExtensionConfig.Validate if the designated constraints aren't met. type TypedExtensionConfigValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go index 3b4c853dc..9402230d5 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
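extension.pb.go above completes the same ptypes/any → anypb migration, and TypedExtensionConfig requires both a non-empty name and a populated typed_config. A construction sketch (standalone; the extension name and the durationpb payload are placeholders I chose):

package main

import (
	"fmt"
	"time"

	xdscorev3 "github.com/cncf/xds/go/xds/core/v3"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Any proto message packs the same way; durationpb is just a stand-in payload.
	payload, err := anypb.New(durationpb.New(3 * time.Second))
	if err != nil {
		panic(err)
	}

	cfg := &xdscorev3.TypedExtensionConfig{
		Name:        "example.extension",
		TypedConfig: payload,
	}
	fmt.Println(cfg.ValidateAll()) // <nil>: name set and typed_config non-nil
}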
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: xds/core/v3/resource.proto @@ -8,9 +8,9 @@ package v3 import ( _ "github.com/cncf/xds/go/xds/annotations/v3" - any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -29,7 +29,7 @@ type Resource struct { Name *ResourceName `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - Resource *any1.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + Resource *anypb.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` } func (x *Resource) Reset() { @@ -78,7 +78,7 @@ func (x *Resource) GetVersion() string { return "" } -func (x *Resource) GetResource() *any1.Any { +func (x *Resource) GetResource() *anypb.Any { if x != nil { return x.Resource } @@ -105,12 +105,12 @@ var file_xds_core_v3_resource_proto_rawDesc = []byte{ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x55, - 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, - 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0xd2, 0xc6, 0xa4, - 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, + 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -129,7 +129,7 @@ var file_xds_core_v3_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_xds_core_v3_resource_proto_goTypes = []interface{}{ (*Resource)(nil), // 0: xds.core.v3.Resource (*ResourceName)(nil), // 1: xds.core.v3.ResourceName - (*any1.Any)(nil), // 2: google.protobuf.Any + (*anypb.Any)(nil), // 2: google.protobuf.Any } var file_xds_core_v3_resource_proto_depIdxs = []int32{ 1, // 0: xds.core.v3.Resource.name:type_name -> xds.core.v3.ResourceName diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go index 4e49352cc..dc972171c 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go @@ -11,6 +11,7 @@ import ( 
"net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,16 +32,51 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on Resource with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. func (m *Resource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Resource with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceMultiError, or nil +// if none found. +func (m *Resource) ValidateAll() error { + return m.validate(true) +} + +func (m *Resource) validate(all bool) error { if m == nil { return nil } - if v, ok := interface{}(m.GetName()).(interface{ Validate() error }); ok { + var errors []error + + if all { + switch v := interface{}(m.GetName()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Name", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Name", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetName()).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { return ResourceValidationError{ field: "Name", @@ -52,7 +88,26 @@ func (m *Resource) Validate() error { // no validation rules for Version - if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { return ResourceValidationError{ field: "Resource", @@ -62,9 +117,29 @@ func (m *Resource) Validate() error { } } + if len(errors) > 0 { + return ResourceMultiError(errors) + } + return nil } +// ResourceMultiError is an error wrapping multiple validation errors returned +// by Resource.ValidateAll() if the designated constraints aren't met. +type ResourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceMultiError) AllErrors() []error { return m } + // ResourceValidationError is the validation error returned by // Resource.Validate if the designated constraints aren't met. 
type ResourceValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go index 8123f1140..50fe599db 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: xds/core/v3/resource_locator.proto @@ -304,12 +304,12 @@ var file_xds_core_v3_resource_locator_proto_rawDesc = []byte{ 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x19, 0x0a, 0x17, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x5c, - 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, - 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, - 0x65, 0x2f, 0x76, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72, + 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, + 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go index ff91eecd7..1686e98d1 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,21 +32,40 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on ResourceLocator with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. func (m *ResourceLocator) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceLocator with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceLocatorMultiError, or nil if none found. 
+func (m *ResourceLocator) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceLocator) validate(all bool) error { if m == nil { return nil } + var errors []error + if _, ok := ResourceLocator_Scheme_name[int32(m.GetScheme())]; !ok { - return ResourceLocatorValidationError{ + err := ResourceLocatorValidationError{ field: "Scheme", reason: "value must be one of the defined enum values", } + if !all { + return err + } + errors = append(errors, err) } // no validation rules for Id @@ -53,16 +73,39 @@ func (m *ResourceLocator) Validate() error { // no validation rules for Authority if utf8.RuneCountInString(m.GetResourceType()) < 1 { - return ResourceLocatorValidationError{ + err := ResourceLocatorValidationError{ field: "ResourceType", reason: "value length must be at least 1 runes", } + if !all { + return err + } + errors = append(errors, err) } for idx, item := range m.GetDirectives() { _, _ = idx, item - if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceLocatorValidationError{ + field: fmt.Sprintf("Directives[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceLocatorValidationError{ + field: fmt.Sprintf("Directives[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { return ResourceLocatorValidationError{ field: fmt.Sprintf("Directives[%v]", idx), @@ -74,11 +117,39 @@ func (m *ResourceLocator) Validate() error { } - switch m.ContextParamSpecifier.(type) { - + switch v := m.ContextParamSpecifier.(type) { case *ResourceLocator_ExactContext: + if v == nil { + err := ResourceLocatorValidationError{ + field: "ContextParamSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } - if v, ok := interface{}(m.GetExactContext()).(interface{ Validate() error }); ok { + if all { + switch v := interface{}(m.GetExactContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceLocatorValidationError{ + field: "ExactContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceLocatorValidationError{ + field: "ExactContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExactContext()).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { return ResourceLocatorValidationError{ field: "ExactContext", @@ -88,11 +159,34 @@ func (m *ResourceLocator) Validate() error { } } + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ResourceLocatorMultiError(errors) } return nil } +// ResourceLocatorMultiError is an error wrapping multiple validation errors +// returned by ResourceLocator.ValidateAll() if the designated constraints +// aren't met. +type ResourceLocatorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ResourceLocatorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceLocatorMultiError) AllErrors() []error { return m } + // ResourceLocatorValidationError is the validation error returned by // ResourceLocator.Validate if the designated constraints aren't met. type ResourceLocatorValidationError struct { @@ -149,17 +243,61 @@ var _ interface { // Validate checks the field values on ResourceLocator_Directive with the rules // defined in the proto definition for this message. If any rules are -// violated, an error is returned. +// violated, the first error encountered is returned, or nil if there are no violations. func (m *ResourceLocator_Directive) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceLocator_Directive with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceLocator_DirectiveMultiError, or nil if none found. +func (m *ResourceLocator_Directive) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceLocator_Directive) validate(all bool) error { if m == nil { return nil } - switch m.Directive.(type) { + var errors []error + oneofDirectivePresent := false + switch v := m.Directive.(type) { case *ResourceLocator_Directive_Alt: - - if v, ok := interface{}(m.GetAlt()).(interface{ Validate() error }); ok { + if v == nil { + err := ResourceLocator_DirectiveValidationError{ + field: "Directive", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofDirectivePresent = true + + if all { + switch v := interface{}(m.GetAlt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceLocator_DirectiveValidationError{ + field: "Alt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceLocator_DirectiveValidationError{ + field: "Alt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAlt()).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { return ResourceLocator_DirectiveValidationError{ field: "Alt", @@ -170,32 +308,78 @@ func (m *ResourceLocator_Directive) Validate() error { } case *ResourceLocator_Directive_Entry: + if v == nil { + err := ResourceLocator_DirectiveValidationError{ + field: "Directive", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofDirectivePresent = true if utf8.RuneCountInString(m.GetEntry()) < 1 { - return ResourceLocator_DirectiveValidationError{ + err := ResourceLocator_DirectiveValidationError{ field: "Entry", reason: "value length must be at least 1 runes", } + if !all { + return err + } + errors = append(errors, err) } if !_ResourceLocator_Directive_Entry_Pattern.MatchString(m.GetEntry()) { - return ResourceLocator_DirectiveValidationError{ + err := ResourceLocator_DirectiveValidationError{ field: "Entry", reason: "value does not match regex pattern \"^[0-9a-zA-Z_\\\\-\\\\./~:]+$\"", } + if !all { + return err + } + errors = append(errors, err) } default: - return 
ResourceLocator_DirectiveValidationError{ + _ = v // ensures v is used + } + if !oneofDirectivePresent { + err := ResourceLocator_DirectiveValidationError{ field: "Directive", reason: "value is required", } + if !all { + return err + } + errors = append(errors, err) + } + if len(errors) > 0 { + return ResourceLocator_DirectiveMultiError(errors) } return nil } +// ResourceLocator_DirectiveMultiError is an error wrapping multiple validation +// errors returned by ResourceLocator_Directive.ValidateAll() if the +// designated constraints aren't met. +type ResourceLocator_DirectiveMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceLocator_DirectiveMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceLocator_DirectiveMultiError) AllErrors() []error { return m } + // ResourceLocator_DirectiveValidationError is the validation error returned by // ResourceLocator_Directive.Validate if the designated constraints aren't met. type ResourceLocator_DirectiveValidationError struct { diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go index 19e67f6ac..92d5fa853 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.32.0 // protoc v3.21.5 // source: xds/core/v3/resource_name.proto @@ -114,13 +114,13 @@ var file_xds_core_v3_resource_name_proto_rawDesc = []byte{ 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x59, 0x0a, - 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, - 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, - 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x59, 0xd2, + 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff 
--git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go index db525b978..270e921bc 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go @@ -11,6 +11,7 @@ import ( "net/mail" "net/url" "regexp" + "sort" "strings" "time" "unicode/utf8" @@ -31,28 +32,66 @@ var ( _ = (*url.URL)(nil) _ = (*mail.Address)(nil) _ = anypb.Any{} + _ = sort.Sort ) // Validate checks the field values on ResourceName with the rules defined in -// the proto definition for this message. If any rules are violated, an error -// is returned. +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. func (m *ResourceName) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceName with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceNameMultiError, or +// nil if none found. +func (m *ResourceName) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceName) validate(all bool) error { if m == nil { return nil } + var errors []error + // no validation rules for Id // no validation rules for Authority if utf8.RuneCountInString(m.GetResourceType()) < 1 { - return ResourceNameValidationError{ + err := ResourceNameValidationError{ field: "ResourceType", reason: "value length must be at least 1 runes", } + if !all { + return err + } + errors = append(errors, err) } - if v, ok := interface{}(m.GetContext()).(interface{ Validate() error }); ok { + if all { + switch v := interface{}(m.GetContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceNameValidationError{ + field: "Context", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceNameValidationError{ + field: "Context", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetContext()).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { return ResourceNameValidationError{ field: "Context", @@ -62,9 +101,29 @@ func (m *ResourceName) Validate() error { } } + if len(errors) > 0 { + return ResourceNameMultiError(errors) + } + return nil } +// ResourceNameMultiError is an error wrapping multiple validation errors +// returned by ResourceName.ValidateAll() if the designated constraints aren't met. +type ResourceNameMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceNameMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceNameMultiError) AllErrors() []error { return m } + // ResourceNameValidationError is the validation error returned by // ResourceName.Validate if the designated constraints aren't met. 
type ResourceNameValidationError struct { diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index eb84bc9a8..1d3102b2e 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,46 @@ # Change Log +## [v1.116.0] - 2024-05-16 + +- #693 - @guptado - Introduce VPC peering methods + +## [v1.115.0] - 2024-05-08 + +- #688 - @asaha2 - load balancers: support glb active-passive fail-over settings, currently in closed beta + +## [v1.114.0] - 2024-04-12 + +- #686 - @greeshmapill - APPS-8386: Add comments to mark deprecation of unused instance size fields +- #685 - @jcodybaker - APPS-8711: container termination controls +- #682 - @dependabot[bot] - Bump golang.org/x/net from 0.17.0 to 0.23.0 + +## [v1.113.0] - 2024-04-12 + +- #679 - @bhardwajRahul - Enable ui_connection parameter for Opensearch +- #678 - @bhardwajRahul - Enable Opensearch option in Godo + +## [v1.112.0] - 2024-04-08 + +- #672 - @dependabot[bot] - Bump google.golang.org/protobuf from 1.28.0 to 1.33.0 +- #675 - @bhardwajRahul - Add ListDatabaseEvents to Godo + +## [v1.111.0] - 2024-04-02 + +- #674 - @asaha2 - load balancers: introduce glb settings in godo, currently in closed beta + +## [v1.110.0] - 2024-03-14 + +- #667 - @dwilsondo - Include DBaaS metrics credential endpoint operations +- #670 - @guptado - [NETPROD-3583] Added name param in ListOption to get resource by name +- #671 - @greeshmapill - APPS-8383: Add deprecation intent and bandwidth allowance to app instance size spec + +## [v1.109.0] - 2024-02-09 + +- #668 - @greeshmapill - APPS-8315: Update app instance size spec +- #665 - @jcodybaker - APPS-8263: methods for managing App Platform dev DBs +- #663 - @dwilsondo - Include replica connection info on DBaaS clusters & DBaaS PG pools +- #662 - @ddatta-do - load balancer : add regional network as new LB type + ## [v1.108.0] - 2024-01-17 - #660 - @dweinshenker - Enable CRUD operations for replicas with storage_size_mib diff --git a/vendor/github.com/digitalocean/godo/apps.gen.go b/vendor/github.com/digitalocean/godo/apps.gen.go index 7ec24c334..f734ab022 100644 --- a/vendor/github.com/digitalocean/godo/apps.gen.go +++ b/vendor/github.com/digitalocean/godo/apps.gen.go @@ -98,6 +98,8 @@ type App struct { BuildConfig *AppBuildConfig `json:"build_config,omitempty"` // The id of the project for the app. This will be empty if there is a fleet (project) lookup failure. ProjectID string `json:"project_id,omitempty"` + // The dedicated egress ip addresses associated with the app. + DedicatedIps []*AppDedicatedIp `json:"dedicated_ips,omitempty"` } // AppAlertSpec Configuration of an alert for the app or a individual component. @@ -221,6 +223,26 @@ const ( AppDatabaseSpecEngine_MongoDB AppDatabaseSpecEngine = "MONGODB" ) +// AppDedicatedIp Represents a dedicated egress ip. +type AppDedicatedIp struct { + // The ip address of the dedicated egress ip. + Ip string `json:"ip,omitempty"` + // The id of the dedicated egress ip.
+ ID string `json:"id,omitempty"` + Status AppDedicatedIpStatus `json:"status,omitempty"` +} + +// AppDedicatedIpStatus the model 'AppDedicatedIpStatus' +type AppDedicatedIpStatus string + +// List of AppDedicatedIPStatus +const ( + APPDEDICATEDIPSTATUS_Unknown AppDedicatedIpStatus = "UNKNOWN" + APPDEDICATEDIPSTATUS_Assigning AppDedicatedIpStatus = "ASSIGNING" + APPDEDICATEDIPSTATUS_Assigned AppDedicatedIpStatus = "ASSIGNED" + APPDEDICATEDIPSTATUS_Removed AppDedicatedIpStatus = "REMOVED" +) + // AppDomainSpec struct for AppDomainSpec type AppDomainSpec struct { Domain string `json:"domain"` @@ -366,6 +388,7 @@ type AppJobSpec struct { Alerts []*AppAlertSpec `json:"alerts,omitempty"` // A list of configured log forwarding destinations. LogDestinations []*AppLogDestinationSpec `json:"log_destinations,omitempty"` + Termination *AppJobSpecTermination `json:"termination,omitempty"` } // AppJobSpecKind - UNSPECIFIED: Default job type, will auto-complete to POST_DEPLOY kind. - PRE_DEPLOY: Indicates a job that runs before an app deployment. - POST_DEPLOY: Indicates a job that runs after an app deployment. - FAILED_DEPLOY: Indicates a job that runs after a component fails to deploy. @@ -379,6 +402,12 @@ const ( AppJobSpecKind_FailedDeploy AppJobSpecKind = "FAILED_DEPLOY" ) +// AppJobSpecTermination struct for AppJobSpecTermination +type AppJobSpecTermination struct { + // The number of seconds to wait between sending a TERM signal to a container and issuing a KILL which causes immediate shutdown. Default: 120, Minimum 1, Maximum 600. + GracePeriodSeconds int32 `json:"grace_period_seconds,omitempty"` +} + // AppLogDestinationSpec struct for AppLogDestinationSpec type AppLogDestinationSpec struct { // Name of the log destination. @@ -462,22 +491,23 @@ type AppServiceSpec struct { // A list of configured alerts which apply to the component. Alerts []*AppAlertSpec `json:"alerts,omitempty"` // A list of configured log forwarding destinations. - LogDestinations []*AppLogDestinationSpec `json:"log_destinations,omitempty"` + LogDestinations []*AppLogDestinationSpec `json:"log_destinations,omitempty"` + Termination *AppServiceSpecTermination `json:"termination,omitempty"` } // AppServiceSpecHealthCheck struct for AppServiceSpecHealthCheck type AppServiceSpecHealthCheck struct { // Deprecated. Use http_path instead. Path string `json:"path,omitempty"` - // The number of seconds to wait before beginning health checks. Default: 0 seconds; start health checks as soon as the service starts. + // The number of seconds to wait before beginning health checks. Default: 0 seconds, Minimum 0, Maximum 3600. InitialDelaySeconds int32 `json:"initial_delay_seconds,omitempty"` - // The number of seconds to wait between health checks. Default: 10 seconds. + // The number of seconds to wait between health checks. Default: 10 seconds, Minimum 1, Maximum 300. PeriodSeconds int32 `json:"period_seconds,omitempty"` - // The number of seconds after which the check times out. Default: 1 second. + // The number of seconds after which the check times out. Default: 1 second, Minimum 1, Maximum 120. TimeoutSeconds int32 `json:"timeout_seconds,omitempty"` - // The number of successful health checks before considered healthy. Default: 1. + // The number of successful health checks before considered healthy. Default: 1, Minimum 1, Maximum 50. SuccessThreshold int32 `json:"success_threshold,omitempty"` - // The number of failed health checks before considered unhealthy. Default: 9. 
+ // The number of failed health checks before considered unhealthy. Default: 9, Minimum 1, Maximum 50. FailureThreshold int32 `json:"failure_threshold,omitempty"` // The route path used for the HTTP health check ping. If not set, the HTTP health check will be disabled and a TCP health check used instead. HTTPPath string `json:"http_path,omitempty"` @@ -485,6 +515,14 @@ type AppServiceSpecHealthCheck struct { Port int64 `json:"port,omitempty"` } +// AppServiceSpecTermination struct for AppServiceSpecTermination +type AppServiceSpecTermination struct { + // The number of seconds to wait between selecting a container instance for termination and issuing the TERM signal. Selecting a container instance for termination begins an asynchronous drain of new requests on upstream load-balancers. Default: 15 seconds, Minimum 1, Maximum 110. + DrainSeconds int32 `json:"drain_seconds,omitempty"` + // The number of seconds to wait between sending a TERM signal to a container and issuing a KILL which causes immediate shutdown. Default: 120, Minimum 1, Maximum 600. + GracePeriodSeconds int32 `json:"grace_period_seconds,omitempty"` +} + // AppSpec The desired configuration of an application. type AppSpec struct { // The name of the app. Must be unique across all apps in the same account. @@ -579,7 +617,14 @@ type AppWorkerSpec struct { // A list of configured alerts which apply to the component. Alerts []*AppAlertSpec `json:"alerts,omitempty"` // A list of configured log forwarding destinations. - LogDestinations []*AppLogDestinationSpec `json:"log_destinations,omitempty"` + LogDestinations []*AppLogDestinationSpec `json:"log_destinations,omitempty"` + Termination *AppWorkerSpecTermination `json:"termination,omitempty"` +} + +// AppWorkerSpecTermination struct for AppWorkerSpecTermination +type AppWorkerSpecTermination struct { + // The number of seconds to wait between sending a TERM signal to a container and issuing a KILL which causes immediate shutdown. Default: 120, Minimum 1, Maximum 600. 
+ GracePeriodSeconds int32 `json:"grace_period_seconds,omitempty"` } // Buildpack struct for Buildpack @@ -997,6 +1042,41 @@ type AppDomainValidation struct { TXTValue string `json:"txt_value,omitempty"` } +// GetAppDatabaseConnectionDetailsResponse struct for GetAppDatabaseConnectionDetailsResponse +type GetAppDatabaseConnectionDetailsResponse struct { + ConnectionDetails []*GetDatabaseConnectionDetailsResponse `json:"connection_details,omitempty"` +} + +// GetDatabaseConnectionDetailsResponse struct for GetDatabaseConnectionDetailsResponse +type GetDatabaseConnectionDetailsResponse struct { + Host string `json:"host,omitempty"` + Port int64 `json:"port,omitempty"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + DatabaseName string `json:"database_name,omitempty"` + SslMode string `json:"ssl_mode,omitempty"` + DatabaseURL string `json:"database_url,omitempty"` + ComponentName string `json:"component_name,omitempty"` + Pools []*GetDatabaseConnectionDetailsResponsePool `json:"pools,omitempty"` +} + +// GetDatabaseConnectionDetailsResponsePool struct for GetDatabaseConnectionDetailsResponsePool +type GetDatabaseConnectionDetailsResponsePool struct { + PoolName string `json:"pool_name,omitempty"` + Host string `json:"host,omitempty"` + Port int64 `json:"port,omitempty"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + DatabaseName string `json:"database_name,omitempty"` + SslMode string `json:"ssl_mode,omitempty"` + DatabaseURL string `json:"database_url,omitempty"` +} + +// GetDatabaseTrustedSourceResponse struct for GetDatabaseTrustedSourceResponse +type GetDatabaseTrustedSourceResponse struct { + IsEnabled bool `json:"is_enabled,omitempty"` +} + // GitHubSourceSpec struct for GitHubSourceSpec type GitHubSourceSpec struct { Repo string `json:"repo,omitempty"` @@ -1052,16 +1132,27 @@ const ( // AppInstanceSize struct for AppInstanceSize type AppInstanceSize struct { - Name string `json:"name,omitempty"` - Slug string `json:"slug,omitempty"` - CPUType AppInstanceSizeCPUType `json:"cpu_type,omitempty"` - CPUs string `json:"cpus,omitempty"` - MemoryBytes string `json:"memory_bytes,omitempty"` - USDPerMonth string `json:"usd_per_month,omitempty"` - USDPerSecond string `json:"usd_per_second,omitempty"` - TierSlug string `json:"tier_slug,omitempty"` - TierUpgradeTo string `json:"tier_upgrade_to,omitempty"` - TierDowngradeTo string `json:"tier_downgrade_to,omitempty"` + Name string `json:"name,omitempty"` + Slug string `json:"slug,omitempty"` + CPUType AppInstanceSizeCPUType `json:"cpu_type,omitempty"` + CPUs string `json:"cpus,omitempty"` + MemoryBytes string `json:"memory_bytes,omitempty"` + USDPerMonth string `json:"usd_per_month,omitempty"` + USDPerSecond string `json:"usd_per_second,omitempty"` + TierSlug string `json:"tier_slug,omitempty"` + // (Deprecated) The slug of the corresponding upgradable instance size on the higher tier. + TierUpgradeTo string `json:"tier_upgrade_to,omitempty"` + // (Deprecated) The slug of the corresponding downgradable instance size on the lower tier. + TierDowngradeTo string `json:"tier_downgrade_to,omitempty"` + // Indicates if the tier instance size can enable autoscaling. + Scalable bool `json:"scalable,omitempty"` + FeaturePreview bool `json:"feature_preview,omitempty"` + // Indicates if the tier instance size allows more than one instance. 
+ SingleInstanceOnly bool `json:"single_instance_only,omitempty"` + // Indicates if the tier instance size is intended for deprecation. + DeprecationIntent bool `json:"deprecation_intent,omitempty"` + // The bandwidth allowance in GiB for the tier instance size. + BandwidthAllowanceGib string `json:"bandwidth_allowance_gib,omitempty"` } // AppInstanceSizeCPUType the model 'AppInstanceSizeCPUType' @@ -1102,9 +1193,9 @@ type AppProposeResponse struct { Spec *AppSpec `json:"spec,omitempty"` // The monthly cost of the proposed app in USD. AppCost float32 `json:"app_cost,omitempty"` - // The monthly cost of the proposed app in USD using the next pricing plan tier. For example, if you propose an app that uses the Basic tier, the `app_tier_upgrade_cost` field displays the monthly cost of the app if it were to use the Professional tier. If the proposed app already uses the most expensive tier, the field is empty. + // (Deprecated) The monthly cost of the proposed app in USD using the next pricing plan tier. For example, if you propose an app that uses the Basic tier, the `app_tier_upgrade_cost` field displays the monthly cost of the app if it were to use the Professional tier. If the proposed app already uses the most expensive tier, the field is empty. AppTierUpgradeCost float32 `json:"app_tier_upgrade_cost,omitempty"` - // The monthly cost of the proposed app in USD using the previous pricing plan tier. For example, if you propose an app that uses the Professional tier, the `app_tier_downgrade_cost` field displays the monthly cost of the app if it were to use the Basic tier. If the proposed app already uses the lest expensive tier, the field is empty. + // (Deprecated) The monthly cost of the proposed app in USD using the previous pricing plan tier. For example, if you propose an app that uses the Professional tier, the `app_tier_downgrade_cost` field displays the monthly cost of the app if it were to use the Basic tier. If the proposed app already uses the least expensive tier, the field is empty. AppTierDowngradeCost float32 `json:"app_tier_downgrade_cost,omitempty"` // The number of existing starter tier apps the account has. ExistingStarterApps string `json:"existing_starter_apps,omitempty"` @@ -1127,6 +1218,17 @@ type AppRegion struct { Default bool `json:"default,omitempty"` } +// ResetDatabasePasswordRequest struct for ResetDatabasePasswordRequest +type ResetDatabasePasswordRequest struct { + AppID string `json:"app_id,omitempty"` + ComponentName string `json:"component_name,omitempty"` +} + +// ResetDatabasePasswordResponse struct for ResetDatabasePasswordResponse +type ResetDatabasePasswordResponse struct { + Deployment *Deployment `json:"deployment,omitempty"` +} + // AppStringMatch struct for AppStringMatch type AppStringMatch struct { // Exact string match. Only 1 of `exact`, `prefix`, or `regex` must be set.
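
Taken together, the apps.gen.go hunks above add plain data fields: per-component termination controls and capability/deprecation flags on instance sizes. A short sketch of how they read through godo's nil-safe generated accessors; the spec values are illustrative, not part of the diff:

package main

import (
	"fmt"

	"github.com/digitalocean/godo"
)

func main() {
	// Configure the new termination controls on a service component.
	svc := &godo.AppServiceSpec{
		Name: "web",
		Termination: &godo.AppServiceSpecTermination{
			DrainSeconds:       30,  // default 15, range 1-110
			GracePeriodSeconds: 180, // default 120, range 1-600
		},
	}
	fmt.Println("grace period:", svc.GetTermination().GetGracePeriodSeconds())

	// The generated accessors are nil-safe, so the new capability flags
	// can be read without guarding against a missing instance size.
	var size *godo.AppInstanceSize
	fmt.Println(size.GetScalable(), size.GetDeprecationIntent(), size.GetBandwidthAllowanceGib())
}
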
@@ -1144,6 +1246,18 @@ type AppTier struct { BuildSeconds string `json:"build_seconds,omitempty"` } +// ToggleDatabaseTrustedSourceRequest struct for ToggleDatabaseTrustedSourceRequest +type ToggleDatabaseTrustedSourceRequest struct { + AppID string `json:"app_id,omitempty"` + ComponentName string `json:"component_name,omitempty"` + Enable bool `json:"enable,omitempty"` +} + +// ToggleDatabaseTrustedSourceResponse struct for ToggleDatabaseTrustedSourceResponse +type ToggleDatabaseTrustedSourceResponse struct { + IsEnabled bool `json:"is_enabled,omitempty"` +} + // UpgradeBuildpackResponse struct for UpgradeBuildpackResponse type UpgradeBuildpackResponse struct { // The components that were affected by the upgrade. diff --git a/vendor/github.com/digitalocean/godo/apps.go b/vendor/github.com/digitalocean/godo/apps.go index 880ce0921..cd72f7408 100644 --- a/vendor/github.com/digitalocean/godo/apps.go +++ b/vendor/github.com/digitalocean/godo/apps.go @@ -56,6 +56,19 @@ type AppsService interface { ListBuildpacks(ctx context.Context) ([]*Buildpack, *Response, error) UpgradeBuildpack(ctx context.Context, appID string, opts UpgradeBuildpackOptions) (*UpgradeBuildpackResponse, *Response, error) + + GetAppDatabaseConnectionDetails(ctx context.Context, appID string) ([]*GetDatabaseConnectionDetailsResponse, *Response, error) + ResetDatabasePassword(ctx context.Context, appID string, component string) (*Deployment, *Response, error) + ToggleDatabaseTrustedSource( + ctx context.Context, + appID string, + component string, + opts ToggleDatabaseTrustedSourceOptions, + ) ( + *ToggleDatabaseTrustedSourceResponse, + *Response, + error, + ) } // AppLogs represent app logs. @@ -90,6 +103,12 @@ type UpgradeBuildpackOptions struct { TriggerDeployment bool `json:"trigger_deployment,omitempty"` } +// ToggleDatabaseTrustedSourceOptions provides optional parameters for ToggleDatabaseTrustedSource. +type ToggleDatabaseTrustedSourceOptions struct { + // Enable, if true, indicates the database should enable the trusted sources firewall. + Enable bool +} + type appRoot struct { App *App `json:"app"` } @@ -498,6 +517,60 @@ func (s *AppsServiceOp) UpgradeBuildpack(ctx context.Context, appID string, opts return root, resp, nil } +// GetAppDatabaseConnectionDetails retrieves credentials for databases associated with the app. +func (s *AppsServiceOp) GetAppDatabaseConnectionDetails(ctx context.Context, appID string) ([]*GetDatabaseConnectionDetailsResponse, *Response, error) { + path := fmt.Sprintf("%s/%s/database_connection_details", appsBasePath, appID) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(GetAppDatabaseConnectionDetailsResponse) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.ConnectionDetails, resp, nil +} + +// ResetDatabasePassword resets credentials for a database component associated with the app. 
+func (s *AppsServiceOp) ResetDatabasePassword(ctx context.Context, appID string, component string) (*Deployment, *Response, error) { + path := fmt.Sprintf("%s/%s/components/%s/reset_password", appsBasePath, appID, component) + req, err := s.client.NewRequest(ctx, http.MethodPost, path, nil) + if err != nil { + return nil, nil, err + } + root := new(deploymentRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Deployment, resp, nil +} + +// ToggleDatabaseTrustedSource enables/disables trusted sources on the specified dev database component. +func (s *AppsServiceOp) ToggleDatabaseTrustedSource( + ctx context.Context, + appID string, + component string, + opts ToggleDatabaseTrustedSourceOptions, +) ( + *ToggleDatabaseTrustedSourceResponse, + *Response, + error, +) { + path := fmt.Sprintf("%s/%s/components/%s/trusted_sources", appsBasePath, appID, component) + req, err := s.client.NewRequest(ctx, http.MethodPost, path, opts) + if err != nil { + return nil, nil, err + } + root := new(ToggleDatabaseTrustedSourceResponse) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root, resp, nil +} + // AppComponentType is an app component type. type AppComponentType string diff --git a/vendor/github.com/digitalocean/godo/apps_accessors.go b/vendor/github.com/digitalocean/godo/apps_accessors.go index 059d64731..734c27ea9 100644 --- a/vendor/github.com/digitalocean/godo/apps_accessors.go +++ b/vendor/github.com/digitalocean/godo/apps_accessors.go @@ -37,6 +37,14 @@ func (a *App) GetCreatedAt() time.Time { return a.CreatedAt } +// GetDedicatedIps returns the DedicatedIps field. +func (a *App) GetDedicatedIps() []*AppDedicatedIp { + if a == nil { + return nil + } + return a.DedicatedIps +} + // GetDefaultIngress returns the DefaultIngress field. func (a *App) GetDefaultIngress() string { if a == nil { @@ -557,6 +565,30 @@ func (a *AppDatabaseSpec) GetVersion() string { return a.Version } +// GetID returns the ID field. +func (a *AppDedicatedIp) GetID() string { + if a == nil { + return "" + } + return a.ID +} + +// GetIp returns the Ip field. +func (a *AppDedicatedIp) GetIp() string { + if a == nil { + return "" + } + return a.Ip +} + +// GetStatus returns the Status field. +func (a *AppDedicatedIp) GetStatus() AppDedicatedIpStatus { + if a == nil { + return "" + } + return a.Status +} + // GetCertificateExpiresAt returns the CertificateExpiresAt field. func (a *AppDomain) GetCertificateExpiresAt() time.Time { if a == nil { @@ -981,6 +1013,14 @@ func (a *AppIngressSpecRuleStringMatch) GetPrefix() string { return a.Prefix } +// GetBandwidthAllowanceGib returns the BandwidthAllowanceGib field. +func (a *AppInstanceSize) GetBandwidthAllowanceGib() string { + if a == nil { + return "" + } + return a.BandwidthAllowanceGib +} + // GetCPUs returns the CPUs field. func (a *AppInstanceSize) GetCPUs() string { if a == nil { @@ -997,6 +1037,22 @@ func (a *AppInstanceSize) GetCPUType() AppInstanceSizeCPUType { return a.CPUType } +// GetDeprecationIntent returns the DeprecationIntent field. +func (a *AppInstanceSize) GetDeprecationIntent() bool { + if a == nil { + return false + } + return a.DeprecationIntent +} + +// GetFeaturePreview returns the FeaturePreview field. +func (a *AppInstanceSize) GetFeaturePreview() bool { + if a == nil { + return false + } + return a.FeaturePreview +} + // GetMemoryBytes returns the MemoryBytes field. 
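
A possible call sequence for the three dev-database endpoints above, assuming a configured client; the token, app ID, and component name are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/digitalocean/godo"
)

func main() {
	ctx := context.Background()
	client := godo.NewFromToken("token") // placeholder credentials

	// List connection details for every database bound to the app.
	details, _, err := client.Apps.GetAppDatabaseConnectionDetails(ctx, "app-id")
	if err != nil {
		panic(err)
	}
	for _, d := range details {
		fmt.Println(d.GetComponentName(), d.GetHost(), d.GetPort())
	}

	// Disable the trusted-sources firewall on one component, then rotate
	// its credentials; the reset returns the deployment that applies it.
	opts := godo.ToggleDatabaseTrustedSourceOptions{Enable: false}
	if _, _, err := client.Apps.ToggleDatabaseTrustedSource(ctx, "app-id", "db", opts); err != nil {
		panic(err)
	}
	deployment, _, err := client.Apps.ResetDatabasePassword(ctx, "app-id", "db")
	if err != nil {
		panic(err)
	}
	fmt.Println("deployment:", deployment.ID)
}
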
func (a *AppInstanceSize) GetMemoryBytes() string { if a == nil { @@ -1013,6 +1069,22 @@ func (a *AppInstanceSize) GetName() string { return a.Name } +// GetScalable returns the Scalable field. +func (a *AppInstanceSize) GetScalable() bool { + if a == nil { + return false + } + return a.Scalable +} + +// GetSingleInstanceOnly returns the SingleInstanceOnly field. +func (a *AppInstanceSize) GetSingleInstanceOnly() bool { + if a == nil { + return false + } + return a.SingleInstanceOnly +} + // GetSlug returns the Slug field. func (a *AppInstanceSize) GetSlug() string { if a == nil { @@ -1189,6 +1261,22 @@ func (a *AppJobSpec) GetSourceDir() string { return a.SourceDir } +// GetTermination returns the Termination field. +func (a *AppJobSpec) GetTermination() *AppJobSpecTermination { + if a == nil { + return nil + } + return a.Termination +} + +// GetGracePeriodSeconds returns the GracePeriodSeconds field. +func (a *AppJobSpecTermination) GetGracePeriodSeconds() int32 { + if a == nil { + return 0 + } + return a.GracePeriodSeconds +} + // GetDatadog returns the Datadog field. func (a *AppLogDestinationSpec) GetDatadog() *AppLogDestinationSpecDataDog { if a == nil { @@ -1653,6 +1741,14 @@ func (a *AppServiceSpec) GetSourceDir() string { return a.SourceDir } +// GetTermination returns the Termination field. +func (a *AppServiceSpec) GetTermination() *AppServiceSpecTermination { + if a == nil { + return nil + } + return a.Termination +} + // GetFailureThreshold returns the FailureThreshold field. func (a *AppServiceSpecHealthCheck) GetFailureThreshold() int32 { if a == nil { @@ -1717,6 +1813,22 @@ func (a *AppServiceSpecHealthCheck) GetTimeoutSeconds() int32 { return a.TimeoutSeconds } +// GetDrainSeconds returns the DrainSeconds field. +func (a *AppServiceSpecTermination) GetDrainSeconds() int32 { + if a == nil { + return 0 + } + return a.DrainSeconds +} + +// GetGracePeriodSeconds returns the GracePeriodSeconds field. +func (a *AppServiceSpecTermination) GetGracePeriodSeconds() int32 { + if a == nil { + return 0 + } + return a.GracePeriodSeconds +} + // GetAlerts returns the Alerts field. func (a *AppSpec) GetAlerts() []*AppAlertSpec { if a == nil { @@ -2165,6 +2277,22 @@ func (a *AppWorkerSpec) GetSourceDir() string { return a.SourceDir } +// GetTermination returns the Termination field. +func (a *AppWorkerSpec) GetTermination() *AppWorkerSpecTermination { + if a == nil { + return nil + } + return a.Termination +} + +// GetGracePeriodSeconds returns the GracePeriodSeconds field. +func (a *AppWorkerSpecTermination) GetGracePeriodSeconds() int32 { + if a == nil { + return 0 + } + return a.GracePeriodSeconds +} + // GetDescription returns the Description field. func (b *Buildpack) GetDescription() []string { if b == nil { @@ -3117,6 +3245,158 @@ func (d *DetectResponseServerlessPackage) GetName() string { return d.Name } +// GetConnectionDetails returns the ConnectionDetails field. +func (g *GetAppDatabaseConnectionDetailsResponse) GetConnectionDetails() []*GetDatabaseConnectionDetailsResponse { + if g == nil { + return nil + } + return g.ConnectionDetails +} + +// GetComponentName returns the ComponentName field. +func (g *GetDatabaseConnectionDetailsResponse) GetComponentName() string { + if g == nil { + return "" + } + return g.ComponentName +} + +// GetDatabaseName returns the DatabaseName field. +func (g *GetDatabaseConnectionDetailsResponse) GetDatabaseName() string { + if g == nil { + return "" + } + return g.DatabaseName +} + +// GetDatabaseURL returns the DatabaseURL field. 
+func (g *GetDatabaseConnectionDetailsResponse) GetDatabaseURL() string { + if g == nil { + return "" + } + return g.DatabaseURL +} + +// GetHost returns the Host field. +func (g *GetDatabaseConnectionDetailsResponse) GetHost() string { + if g == nil { + return "" + } + return g.Host +} + +// GetPassword returns the Password field. +func (g *GetDatabaseConnectionDetailsResponse) GetPassword() string { + if g == nil { + return "" + } + return g.Password +} + +// GetPools returns the Pools field. +func (g *GetDatabaseConnectionDetailsResponse) GetPools() []*GetDatabaseConnectionDetailsResponsePool { + if g == nil { + return nil + } + return g.Pools +} + +// GetPort returns the Port field. +func (g *GetDatabaseConnectionDetailsResponse) GetPort() int64 { + if g == nil { + return 0 + } + return g.Port +} + +// GetSslMode returns the SslMode field. +func (g *GetDatabaseConnectionDetailsResponse) GetSslMode() string { + if g == nil { + return "" + } + return g.SslMode +} + +// GetUsername returns the Username field. +func (g *GetDatabaseConnectionDetailsResponse) GetUsername() string { + if g == nil { + return "" + } + return g.Username +} + +// GetDatabaseName returns the DatabaseName field. +func (g *GetDatabaseConnectionDetailsResponsePool) GetDatabaseName() string { + if g == nil { + return "" + } + return g.DatabaseName +} + +// GetDatabaseURL returns the DatabaseURL field. +func (g *GetDatabaseConnectionDetailsResponsePool) GetDatabaseURL() string { + if g == nil { + return "" + } + return g.DatabaseURL +} + +// GetHost returns the Host field. +func (g *GetDatabaseConnectionDetailsResponsePool) GetHost() string { + if g == nil { + return "" + } + return g.Host +} + +// GetPassword returns the Password field. +func (g *GetDatabaseConnectionDetailsResponsePool) GetPassword() string { + if g == nil { + return "" + } + return g.Password +} + +// GetPoolName returns the PoolName field. +func (g *GetDatabaseConnectionDetailsResponsePool) GetPoolName() string { + if g == nil { + return "" + } + return g.PoolName +} + +// GetPort returns the Port field. +func (g *GetDatabaseConnectionDetailsResponsePool) GetPort() int64 { + if g == nil { + return 0 + } + return g.Port +} + +// GetSslMode returns the SslMode field. +func (g *GetDatabaseConnectionDetailsResponsePool) GetSslMode() string { + if g == nil { + return "" + } + return g.SslMode +} + +// GetUsername returns the Username field. +func (g *GetDatabaseConnectionDetailsResponsePool) GetUsername() string { + if g == nil { + return "" + } + return g.Username +} + +// GetIsEnabled returns the IsEnabled field. +func (g *GetDatabaseTrustedSourceResponse) GetIsEnabled() bool { + if g == nil { + return false + } + return g.IsEnabled +} + // GetBranch returns the Branch field. func (g *GitHubSourceSpec) GetBranch() string { if g == nil { @@ -3253,6 +3533,62 @@ func (l *ListBuildpacksResponse) GetBuildpacks() []*Buildpack { return l.Buildpacks } +// GetAppID returns the AppID field. +func (r *ResetDatabasePasswordRequest) GetAppID() string { + if r == nil { + return "" + } + return r.AppID +} + +// GetComponentName returns the ComponentName field. +func (r *ResetDatabasePasswordRequest) GetComponentName() string { + if r == nil { + return "" + } + return r.ComponentName +} + +// GetDeployment returns the Deployment field. +func (r *ResetDatabasePasswordResponse) GetDeployment() *Deployment { + if r == nil { + return nil + } + return r.Deployment +} + +// GetAppID returns the AppID field. 
+func (t *ToggleDatabaseTrustedSourceRequest) GetAppID() string { + if t == nil { + return "" + } + return t.AppID +} + +// GetComponentName returns the ComponentName field. +func (t *ToggleDatabaseTrustedSourceRequest) GetComponentName() string { + if t == nil { + return "" + } + return t.ComponentName +} + +// GetEnable returns the Enable field. +func (t *ToggleDatabaseTrustedSourceRequest) GetEnable() bool { + if t == nil { + return false + } + return t.Enable +} + +// GetIsEnabled returns the IsEnabled field. +func (t *ToggleDatabaseTrustedSourceResponse) GetIsEnabled() bool { + if t == nil { + return false + } + return t.IsEnabled +} + // GetAffectedComponents returns the AffectedComponents field. func (u *UpgradeBuildpackResponse) GetAffectedComponents() []string { if u == nil { diff --git a/vendor/github.com/digitalocean/godo/certificates.go b/vendor/github.com/digitalocean/godo/certificates.go index faf26a3ee..7612acf0f 100644 --- a/vendor/github.com/digitalocean/godo/certificates.go +++ b/vendor/github.com/digitalocean/godo/certificates.go @@ -2,6 +2,7 @@ package godo import ( "context" + "fmt" "net/http" "path" ) @@ -13,6 +14,7 @@ const certificatesBasePath = "/v2/certificates" type CertificatesService interface { Get(context.Context, string) (*Certificate, *Response, error) List(context.Context, *ListOptions) ([]Certificate, *Response, error) + ListByName(context.Context, string, *ListOptions) ([]Certificate, *Response, error) Create(context.Context, *CertificateRequest) (*Certificate, *Response, error) Delete(context.Context, string) (*Response, error) } @@ -101,6 +103,39 @@ func (c *CertificatesServiceOp) List(ctx context.Context, opt *ListOptions) ([]C return root.Certificates, resp, nil } +func (c *CertificatesServiceOp) ListByName(ctx context.Context, name string, opt *ListOptions) ([]Certificate, *Response, error) { + + if len(name) < 1 { + return nil, nil, NewArgError("name", "cannot be an empty string") + } + + path := fmt.Sprintf("%s?name=%s", certificatesBasePath, name) + urlStr, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := c.client.NewRequest(ctx, http.MethodGet, urlStr, nil) + if err != nil { + return nil, nil, err + } + + root := new(certificatesRoot) + resp, err := c.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.Certificates, resp, err +} + // Create a new certificate with provided configuration. 
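
The new certificates ListByName filters server-side via a name query parameter and still honors pagination. A small helper sketch; the client, certificate name, and page size are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/digitalocean/godo"
)

// certificateIDsByName collects the IDs of all certificates matching a
// name, using the ListByName method added above.
func certificateIDsByName(ctx context.Context, client *godo.Client, name string) ([]string, error) {
	certs, _, err := client.Certificates.ListByName(ctx, name, &godo.ListOptions{PerPage: 50})
	if err != nil {
		return nil, err
	}
	ids := make([]string, 0, len(certs))
	for _, c := range certs {
		ids = append(ids, c.ID)
	}
	return ids, nil
}

func main() {
	client := godo.NewFromToken("token") // placeholder credentials
	ids, err := certificateIDsByName(context.Background(), client, "prod-cert")
	if err != nil {
		panic(err)
	}
	fmt.Println(ids)
}
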
func (c *CertificatesServiceOp) Create(ctx context.Context, cr *CertificateRequest) (*Certificate, *Response, error) { req, err := c.client.NewRequest(ctx, http.MethodPost, certificatesBasePath, cr) diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go index c0a4510a7..b915391c8 100644 --- a/vendor/github.com/digitalocean/godo/databases.go +++ b/vendor/github.com/digitalocean/godo/databases.go @@ -34,6 +34,8 @@ const ( databasePromoteReplicaToPrimaryPath = databaseReplicaPath + "/promote" databaseTopicPath = databaseBasePath + "/%s/topics/%s" databaseTopicsPath = databaseBasePath + "/%s/topics" + databaseMetricsCredentialsPath = databaseBasePath + "/metrics/credentials" + databaseEvents = databaseBasePath + "/%s/events" ) // SQL Mode constants allow for MySQL-specific SQL flavor configuration. @@ -154,6 +156,9 @@ type DatabasesService interface { GetTopic(context.Context, string, string) (*DatabaseTopic, *Response, error) DeleteTopic(context.Context, string, string) (*Response, error) UpdateTopic(context.Context, string, string, *DatabaseUpdateTopicRequest) (*Response, error) + GetMetricsCredentials(context.Context) (*DatabaseMetricsCredentials, *Response, error) + UpdateMetricsCredentials(context.Context, *DatabaseUpdateMetricsCredentialsRequest) (*Response, error) + ListDatabaseEvents(context.Context, string, *ListOptions) ([]DatabaseEvent, *Response, error) } // DatabasesServiceOp handles communication with the Databases related methods @@ -170,24 +175,28 @@ var _ DatabasesService = &DatabasesServiceOp{} // "pg", "mysql" or "redis". A Database also includes connection information and other // properties of the service like region, size and current status. 
type Database struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - EngineSlug string `json:"engine,omitempty"` - VersionSlug string `json:"version,omitempty"` - Connection *DatabaseConnection `json:"connection,omitempty"` - PrivateConnection *DatabaseConnection `json:"private_connection,omitempty"` - Users []DatabaseUser `json:"users,omitempty"` - NumNodes int `json:"num_nodes,omitempty"` - SizeSlug string `json:"size,omitempty"` - DBNames []string `json:"db_names,omitempty"` - RegionSlug string `json:"region,omitempty"` - Status string `json:"status,omitempty"` - MaintenanceWindow *DatabaseMaintenanceWindow `json:"maintenance_window,omitempty"` - CreatedAt time.Time `json:"created_at,omitempty"` - PrivateNetworkUUID string `json:"private_network_uuid,omitempty"` - Tags []string `json:"tags,omitempty"` - ProjectID string `json:"project_id,omitempty"` - StorageSizeMib uint64 `json:"storage_size_mib,omitempty"` + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + EngineSlug string `json:"engine,omitempty"` + VersionSlug string `json:"version,omitempty"` + Connection *DatabaseConnection `json:"connection,omitempty"` + UIConnection *DatabaseConnection `json:"ui_connection,omitempty"` + PrivateConnection *DatabaseConnection `json:"private_connection,omitempty"` + StandbyConnection *DatabaseConnection `json:"standby_connection,omitempty"` + StandbyPrivateConnection *DatabaseConnection `json:"standby_private_connection,omitempty"` + Users []DatabaseUser `json:"users,omitempty"` + NumNodes int `json:"num_nodes,omitempty"` + SizeSlug string `json:"size,omitempty"` + DBNames []string `json:"db_names,omitempty"` + RegionSlug string `json:"region,omitempty"` + Status string `json:"status,omitempty"` + MaintenanceWindow *DatabaseMaintenanceWindow `json:"maintenance_window,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` + PrivateNetworkUUID string `json:"private_network_uuid,omitempty"` + Tags []string `json:"tags,omitempty"` + ProjectID string `json:"project_id,omitempty"` + StorageSizeMib uint64 `json:"storage_size_mib,omitempty"` + MetricsEndpoints []*ServiceAddress `json:"metrics_endpoints,omitempty"` } // DatabaseCA represents a database ca. @@ -208,6 +217,12 @@ type DatabaseConnection struct { ApplicationPorts map[string]uint32 `json:"application_ports,omitempty"` } +// ServiceAddress represents a host:port for a generic service (e.g. 
metrics endpoint) +type ServiceAddress struct { + Host string `json:"host"` + Port int `json:"port"` +} + // DatabaseUser represents a user in the database type DatabaseUser struct { Name string `json:"name,omitempty"` @@ -381,13 +396,15 @@ type DatabaseReplica struct { // DatabasePool represents a database connection pool type DatabasePool struct { - User string `json:"user"` - Name string `json:"name"` - Size int `json:"size"` - Database string `json:"db"` - Mode string `json:"mode"` - Connection *DatabaseConnection `json:"connection"` - PrivateConnection *DatabaseConnection `json:"private_connection,omitempty"` + User string `json:"user"` + Name string `json:"name"` + Size int `json:"size"` + Database string `json:"db"` + Mode string `json:"mode"` + Connection *DatabaseConnection `json:"connection"` + PrivateConnection *DatabaseConnection `json:"private_connection,omitempty"` + StandbyConnection *DatabaseConnection `json:"standby_connection,omitempty"` + StandbyPrivateConnection *DatabaseConnection `json:"standby_private_connection,omitempty"` } // DatabaseCreatePoolRequest is used to create a new database connection pool @@ -662,6 +679,19 @@ type databaseTopicsRoot struct { Topics []DatabaseTopic `json:"topics"` } +type databaseMetricsCredentialsRoot struct { + Credentials *DatabaseMetricsCredentials `json:"credentials"` +} + +type DatabaseMetricsCredentials struct { + BasicAuthUsername string `json:"basic_auth_username"` + BasicAuthPassword string `json:"basic_auth_password"` +} + +type DatabaseUpdateMetricsCredentialsRequest struct { + Credentials *DatabaseMetricsCredentials `json:"credentials"` +} + // DatabaseOptions represents the available database engines type DatabaseOptions struct { MongoDBOptions DatabaseEngineOptions `json:"mongodb"` @@ -669,6 +699,7 @@ type DatabaseOptions struct { PostgresSQLOptions DatabaseEngineOptions `json:"pg"` RedisOptions DatabaseEngineOptions `json:"redis"` KafkaOptions DatabaseEngineOptions `json:"kafka"` + OpensearchOptions DatabaseEngineOptions `json:"opensearch"` } // DatabaseEngineOptions represents the configuration options that are available for a given database engine @@ -684,6 +715,23 @@ type DatabaseLayout struct { Sizes []string `json:"sizes"` } +// ListDatabaseEvents contains a list of database events. +type ListDatabaseEvents struct { + Events []DatabaseEvent `json:"events"` +} + +// DatabaseEvent contains the information about a database event.
+type DatabaseEvent struct { + ID string `json:"id"` + ServiceName string `json:"cluster_name"` + EventType string `json:"event_type"` + CreateTime string `json:"create_time"` +} + +type ListDatabaseEventsRoot struct { + Events []DatabaseEvent `json:"events"` +} + // URN returns a URN identifier for the database func (d Database) URN() string { return ToURN("dbaas", d.ID) @@ -1462,3 +1510,52 @@ func (svc *DatabasesServiceOp) DeleteTopic(ctx context.Context, databaseID, name } return resp, nil } + +// GetMetricsCredentials gets the credentials required to access a user's metrics endpoints +func (svc *DatabasesServiceOp) GetMetricsCredentials(ctx context.Context) (*DatabaseMetricsCredentials, *Response, error) { + req, err := svc.client.NewRequest(ctx, http.MethodGet, databaseMetricsCredentialsPath, nil) + if err != nil { + return nil, nil, err + } + + root := new(databaseMetricsCredentialsRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Credentials, resp, nil +} + +// UpdateMetricsCredentials updates the credentials required to access a user's metrics endpoints +func (svc *DatabasesServiceOp) UpdateMetricsCredentials(ctx context.Context, updateCreds *DatabaseUpdateMetricsCredentialsRequest) (*Response, error) { + req, err := svc.client.NewRequest(ctx, http.MethodPut, databaseMetricsCredentialsPath, updateCreds) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +// ListDatabaseEvents returns all the events for a given cluster +func (svc *DatabasesServiceOp) ListDatabaseEvents(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseEvent, *Response, error) { + path := fmt.Sprintf(databaseEvents, databaseID) + path, err := addOptions(path, opts) + if err != nil { + return nil, nil, err + } + root := new(ListDatabaseEventsRoot) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Events, resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index 95d203232..1ec4df208 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -21,7 +21,7 @@ import ( ) const ( - libraryVersion = "1.108.0" + libraryVersion = "1.116.0" defaultBaseURL = "https://api.digitalocean.com/" userAgent = "godo/" + libraryVersion mediaType = "application/json" diff --git a/vendor/github.com/digitalocean/godo/load_balancers.go b/vendor/github.com/digitalocean/godo/load_balancers.go index 7e1cfc164..396790530 100644 --- a/vendor/github.com/digitalocean/godo/load_balancers.go +++ b/vendor/github.com/digitalocean/godo/load_balancers.go @@ -7,6 +7,7 @@ import ( ) const ( + cachePath = "cache" dropletsPath = "droplets" forwardingRulesPath = "forwarding_rules" loadBalancersBasePath = "/v2/load_balancers" @@ -14,8 +15,9 @@ // Load Balancer types.
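
The metrics-credentials endpoints above are account-scoped (no cluster ID in the path), while events are listed per cluster. A usage sketch with placeholder token and cluster ID:

package main

import (
	"context"
	"fmt"

	"github.com/digitalocean/godo"
)

func main() {
	ctx := context.Background()
	client := godo.NewFromToken("token") // placeholder credentials

	// Fetch the shared basic-auth credentials for DBaaS metrics endpoints.
	creds, _, err := client.Databases.GetMetricsCredentials(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("scrape as:", creds.BasicAuthUsername)

	// List recent events for one cluster, paginated like other list calls.
	events, _, err := client.Databases.ListDatabaseEvents(ctx, "cluster-id", &godo.ListOptions{PerPage: 25})
	if err != nil {
		panic(err)
	}
	for _, e := range events {
		fmt.Println(e.CreateTime, e.ServiceName, e.EventType)
	}
}
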
const ( - LoadBalancerTypeGlobal = "GLOBAL" - LoadBalancerTypeRegional = "REGIONAL" + LoadBalancerTypeGlobal = "GLOBAL" + LoadBalancerTypeRegional = "REGIONAL" + LoadBalancerTypeRegionalNetwork = "REGIONAL_NETWORK" ) // LoadBalancersService is an interface for managing load balancers with the DigitalOcean API. @@ -30,6 +32,7 @@ type LoadBalancersService interface { RemoveDroplets(ctx context.Context, lbID string, dropletIDs ...int) (*Response, error) AddForwardingRules(ctx context.Context, lbID string, rules ...ForwardingRule) (*Response, error) RemoveForwardingRules(ctx context.Context, lbID string, rules ...ForwardingRule) (*Response, error) + PurgeCache(ctx context.Context, lbID string) (*Response, error) } // LoadBalancer represents a DigitalOcean load balancer configuration. @@ -62,6 +65,9 @@ type LoadBalancer struct { ProjectID string `json:"project_id,omitempty"` HTTPIdleTimeoutSeconds *uint64 `json:"http_idle_timeout_seconds,omitempty"` Firewall *LBFirewall `json:"firewall,omitempty"` + Domains []*LBDomain `json:"domains,omitempty"` + GLBSettings *GLBSettings `json:"glb_settings,omitempty"` + TargetLoadBalancerIDs []string `json:"target_load_balancer_ids,omitempty"` } // String creates a human-readable description of a LoadBalancer. @@ -89,12 +95,12 @@ func (l LoadBalancer) AsRequest() *LoadBalancerRequest { RedirectHttpToHttps: l.RedirectHttpToHttps, EnableProxyProtocol: l.EnableProxyProtocol, EnableBackendKeepalive: l.EnableBackendKeepalive, - HealthCheck: l.HealthCheck, VPCUUID: l.VPCUUID, DisableLetsEncryptDNSRecords: l.DisableLetsEncryptDNSRecords, ValidateOnly: l.ValidateOnly, ProjectID: l.ProjectID, HTTPIdleTimeoutSeconds: l.HTTPIdleTimeoutSeconds, + TargetLoadBalancerIDs: append([]string(nil), l.TargetLoadBalancerIDs...), } if l.DisableLetsEncryptDNSRecords != nil { @@ -105,10 +111,12 @@ func (l LoadBalancer) AsRequest() *LoadBalancerRequest { r.HealthCheck = &HealthCheck{} *r.HealthCheck = *l.HealthCheck } + if l.StickySessions != nil { r.StickySessions = &StickySessions{} *r.StickySessions = *l.StickySessions } + if l.Region != nil { r.Region = l.Region.Slug } @@ -117,6 +125,18 @@ func (l LoadBalancer) AsRequest() *LoadBalancerRequest { r.Firewall = l.Firewall.deepCopy() } + for _, domain := range l.Domains { + lbDomain := &LBDomain{} + *lbDomain = *domain + lbDomain.VerificationErrorReasons = append([]string(nil), domain.VerificationErrorReasons...) + lbDomain.SSLValidationErrorReasons = append([]string(nil), domain.SSLValidationErrorReasons...) + r.Domains = append(r.Domains, lbDomain) + } + + if l.GLBSettings != nil { + r.GLBSettings = l.GLBSettings.deepCopy() + } + return &r } @@ -215,6 +235,9 @@ type LoadBalancerRequest struct { ProjectID string `json:"project_id,omitempty"` HTTPIdleTimeoutSeconds *uint64 `json:"http_idle_timeout_seconds,omitempty"` Firewall *LBFirewall `json:"firewall,omitempty"` + Domains []*LBDomain `json:"domains,omitempty"` + GLBSettings *GLBSettings `json:"glb_settings,omitempty"` + TargetLoadBalancerIDs []string `json:"target_load_balancer_ids,omitempty"` } // String creates a human-readable description of a LoadBalancerRequest. 
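
A sketch of a GLOBAL load balancer request exercising the new Domains and GLBSettings fields, followed by the new cache purge. The Type field and Create call come from the existing godo API, and all values are placeholders; the GLB settings are noted above as closed beta:

package main

import (
	"context"

	"github.com/digitalocean/godo"
)

func main() {
	ctx := context.Background()
	client := godo.NewFromToken("token") // placeholder credentials

	req := &godo.LoadBalancerRequest{
		Name: "glb-example",
		Type: godo.LoadBalancerTypeGlobal,
		Domains: []*godo.LBDomain{
			{Name: "www.example.com", IsManaged: true},
		},
		GLBSettings: &godo.GLBSettings{
			TargetProtocol:    "http",
			TargetPort:        80,
			CDN:               &godo.CDNSettings{IsEnabled: true},
			RegionPriorities:  map[string]uint32{"nyc3": 1, "ams3": 2},
			FailoverThreshold: 50,
		},
	}
	lb, _, err := client.LoadBalancers.Create(ctx, req)
	if err != nil {
		panic(err)
	}

	// Invalidate the GLB's CDN cache after a content change.
	if _, err := client.LoadBalancers.PurgeCache(ctx, lb.ID); err != nil {
		panic(err)
	}
}
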
@@ -238,6 +261,70 @@ func (l dropletIDsRequest) String() string { return Stringify(l) } +// LBDomain defines domain names required to ingress traffic to a Global LB +type LBDomain struct { + // Name defines the domain fqdn + Name string `json:"name"` + // IsManaged indicates if the domain is DO-managed + IsManaged bool `json:"is_managed"` + // CertificateID indicates ID of a TLS certificate + CertificateID string `json:"certificate_id,omitempty"` + // Status indicates the domain validation status + Status string `json:"status,omitempty"` + // VerificationErrorReasons indicates any domain verification errors + VerificationErrorReasons []string `json:"verification_error_reasons,omitempty"` + // SSLValidationErrorReasons indicates any domain SSL validation errors + SSLValidationErrorReasons []string `json:"ssl_validation_error_reasons,omitempty"` +} + +// String creates a human-readable description of a LBDomain +func (d LBDomain) String() string { + return Stringify(d) +} + +// GLBSettings defines settings for configuring a Global LB +type GLBSettings struct { + // TargetProtocol is the outgoing traffic protocol. + TargetProtocol string `json:"target_protocol"` + // TargetPort is the outgoing traffic port. + TargetPort uint32 `json:"target_port"` + // CDN is the CDN configuration + CDN *CDNSettings `json:"cdn"` + // RegionPriorities embeds regional priority information for regional active-passive failover policy + RegionPriorities map[string]uint32 `json:"region_priorities,omitempty"` + // FailoverThreshold embeds failover threshold percentage for regional active-passive failover policy + FailoverThreshold uint32 `json:"failover_threshold,omitempty"` +} + +// String creates a human-readable description of a GLBSettings +func (s GLBSettings) String() string { + return Stringify(s) +} + +func (s GLBSettings) deepCopy() *GLBSettings { + settings := &GLBSettings{ + TargetProtocol: s.TargetProtocol, + TargetPort: s.TargetPort, + RegionPriorities: s.RegionPriorities, + FailoverThreshold: s.FailoverThreshold, + } + if s.CDN != nil { + settings.CDN = &CDNSettings{IsEnabled: s.CDN.IsEnabled} + } + return settings +} + +// CDNSettings defines CDN settings for a Global LB +type CDNSettings struct { + // IsEnabled is the caching enabled flag + IsEnabled bool `json:"is_enabled"` +} + +// String creates a human-readable description of a CDNSettings +func (c CDNSettings) String() string { + return Stringify(c) +} + type loadBalancersRoot struct { LoadBalancers []LoadBalancer `json:"load_balancers"` Links *Links `json:"links"` @@ -393,3 +480,15 @@ func (l *LoadBalancersServiceOp) RemoveForwardingRules(ctx context.Context, lbID return l.client.Do(ctx, req, nil) } + +// PurgeCache purges the CDN cache of a global load balancer by its identifier.
+func (l *LoadBalancersServiceOp) PurgeCache(ctx context.Context, lbID string) (*Response, error) { + path := fmt.Sprintf("%s/%s/%s", loadBalancersBasePath, lbID, cachePath) + + req, err := l.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + return l.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/digitalocean/godo/vpc_peerings.go b/vendor/github.com/digitalocean/godo/vpc_peerings.go new file mode 100644 index 000000000..e6dfc043a --- /dev/null +++ b/vendor/github.com/digitalocean/godo/vpc_peerings.go @@ -0,0 +1,199 @@ +package godo + +import ( + "context" + "net/http" + "time" +) + +const vpcPeeringsPath = "/v2/vpc_peerings" + +type vpcPeeringRoot struct { + VPCPeering *VPCPeering `json:"vpc_peering"` +} + +type vpcPeeringsRoot struct { + VPCPeerings []*VPCPeering `json:"vpc_peerings"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +// VPCPeering represents a DigitalOcean Virtual Private Cloud Peering configuration. +type VPCPeering struct { + // ID is the generated ID of the VPC Peering + ID string `json:"id"` + // Name is the name of the VPC Peering + Name string `json:"name"` + // VPCIDs are the IDs of the pair of VPCs between which a peering is created + VPCIDs []string `json:"vpc_ids"` + // CreatedAt is the time when this VPC Peering was first created + CreatedAt time.Time `json:"created_at"` + // Status is the status of the VPC Peering + Status string `json:"status"` +} + +// VPCPeeringCreateRequest represents a request to create a Virtual Private Cloud Peering +// for a list of associated VPC IDs. +type VPCPeeringCreateRequest struct { + // Name is the name of the VPC Peering + Name string `json:"name"` + // VPCIDs are the IDs of the pair of VPCs between which a peering is created + VPCIDs []string `json:"vpc_ids"` +} + +// VPCPeeringUpdateRequest represents a request to update a Virtual Private Cloud Peering. +type VPCPeeringUpdateRequest struct { + // Name is the name of the VPC Peering + Name string `json:"name"` +} + +// VPCPeeringCreateRequestByVPCID represents a request to create a Virtual Private Cloud Peering +// for an associated VPC ID. +type VPCPeeringCreateRequestByVPCID struct { + // Name is the name of the VPC Peering + Name string `json:"name"` + // VPCID is the ID of one of the VPCs with which the peering has to be created + VPCID string `json:"vpc_id"` +} + +// CreateVPCPeering creates a new Virtual Private Cloud Peering. +func (v *VPCsServiceOp) CreateVPCPeering(ctx context.Context, create *VPCPeeringCreateRequest) (*VPCPeering, *Response, error) { + path := vpcPeeringsPath + req, err := v.client.NewRequest(ctx, http.MethodPost, path, create) + if err != nil { + return nil, nil, err + } + + root := new(vpcPeeringRoot) + resp, err := v.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.VPCPeering, resp, nil +} + +// GetVPCPeering retrieves a Virtual Private Cloud Peering. +func (v *VPCsServiceOp) GetVPCPeering(ctx context.Context, id string) (*VPCPeering, *Response, error) { + path := vpcPeeringsPath + "/" + id + req, err := v.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(vpcPeeringRoot) + resp, err := v.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.VPCPeering, resp, nil +} + +// ListVPCPeerings lists all Virtual Private Cloud Peerings.
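PurgeCache above maps to DELETE /v2/load_balancers/{id}/cache. A sketch of how a caller might drive it together with the new global-LB fields; the request's Type field value and the protocol/port settings are assumptions, not taken from this diff:

```go
package example

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
)

func createGlobalLBAndPurge(ctx context.Context, client *godo.Client) {
	lb, _, err := client.LoadBalancers.Create(ctx, &godo.LoadBalancerRequest{
		Name: "example-glb",
		Type: godo.LoadBalancerTypeGlobal, // assumed: the request's Type field accepts the constants above
		Domains: []*godo.LBDomain{
			{Name: "www.example.com", IsManaged: true},
		},
		GLBSettings: &godo.GLBSettings{
			TargetProtocol: "http", // assumed protocol string
			TargetPort:     80,
			CDN:            &godo.CDNSettings{IsEnabled: true},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Drop any cached content after a deploy.
	if _, err := client.LoadBalancers.PurgeCache(ctx, lb.ID); err != nil {
		log.Fatal(err)
	}
}
```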
+func (v *VPCsServiceOp) ListVPCPeerings(ctx context.Context, opt *ListOptions) ([]*VPCPeering, *Response, error) { + path, err := addOptions(vpcPeeringsPath, opt) + if err != nil { + return nil, nil, err + } + req, err := v.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(vpcPeeringsRoot) + resp, err := v.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + return root.VPCPeerings, resp, nil +} + +// UpdateVPCPeering updates a Virtual Private Cloud Peering. +func (v *VPCsServiceOp) UpdateVPCPeering(ctx context.Context, id string, update *VPCPeeringUpdateRequest) (*VPCPeering, *Response, error) { + path := vpcPeeringsPath + "/" + id + req, err := v.client.NewRequest(ctx, http.MethodPatch, path, update) + if err != nil { + return nil, nil, err + } + + root := new(vpcPeeringRoot) + resp, err := v.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.VPCPeering, resp, nil +} + +// DeleteVPCPeering deletes a Virtual Private Cloud Peering. +func (v *VPCsServiceOp) DeleteVPCPeering(ctx context.Context, id string) (*Response, error) { + path := vpcPeeringsPath + "/" + id + req, err := v.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + resp, err := v.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +// CreateVPCPeeringByVPCID creates a new Virtual Private Cloud Peering for requested VPC ID. +func (v *VPCsServiceOp) CreateVPCPeeringByVPCID(ctx context.Context, id string, create *VPCPeeringCreateRequestByVPCID) (*VPCPeering, *Response, error) { + path := vpcsBasePath + "/" + id + "/peerings" + req, err := v.client.NewRequest(ctx, http.MethodPost, path, create) + if err != nil { + return nil, nil, err + } + + root := new(vpcPeeringRoot) + resp, err := v.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.VPCPeering, resp, nil +} + +// ListVPCPeeringsByVPCID lists all Virtual Private Cloud Peerings for requested VPC ID. +func (v *VPCsServiceOp) ListVPCPeeringsByVPCID(ctx context.Context, id string, opt *ListOptions) ([]*VPCPeering, *Response, error) { + path, err := addOptions(vpcsBasePath+"/"+id+"/peerings", opt) + if err != nil { + return nil, nil, err + } + req, err := v.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(vpcPeeringsRoot) + resp, err := v.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + return root.VPCPeerings, resp, nil +} + +// UpdateVPCPeeringByVPCID updates a Virtual Private Cloud Peering for requested VPC ID.
+func (v *VPCsServiceOp) UpdateVPCPeeringByVPCID(ctx context.Context, vpcID, peerID string, update *VPCPeeringUpdateRequest) (*VPCPeering, *Response, error) { + path := vpcsBasePath + "/" + vpcID + "/peerings" + "/" + peerID + req, err := v.client.NewRequest(ctx, http.MethodPatch, path, update) + if err != nil { + return nil, nil, err + } + + root := new(vpcPeeringRoot) + resp, err := v.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.VPCPeering, resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/vpcs.go b/vendor/github.com/digitalocean/godo/vpcs.go index f4f22e18e..67525190d 100644 --- a/vendor/github.com/digitalocean/godo/vpcs.go +++ b/vendor/github.com/digitalocean/godo/vpcs.go @@ -19,6 +19,14 @@ type VPCsService interface { Update(context.Context, string, *VPCUpdateRequest) (*VPC, *Response, error) Set(context.Context, string, ...VPCSetField) (*VPC, *Response, error) Delete(context.Context, string) (*Response, error) + CreateVPCPeering(context.Context, *VPCPeeringCreateRequest) (*VPCPeering, *Response, error) + GetVPCPeering(context.Context, string) (*VPCPeering, *Response, error) + ListVPCPeerings(context.Context, *ListOptions) ([]*VPCPeering, *Response, error) + UpdateVPCPeering(context.Context, string, *VPCPeeringUpdateRequest) (*VPCPeering, *Response, error) + DeleteVPCPeering(context.Context, string) (*Response, error) + CreateVPCPeeringByVPCID(context.Context, string, *VPCPeeringCreateRequestByVPCID) (*VPCPeering, *Response, error) + ListVPCPeeringsByVPCID(context.Context, string, *ListOptions) ([]*VPCPeering, *Response, error) + UpdateVPCPeeringByVPCID(context.Context, string, string, *VPCPeeringUpdateRequest) (*VPCPeering, *Response, error) } var _ VPCsService = &VPCsServiceOp{} diff --git a/vendor/github.com/distribution/reference/README.md b/vendor/github.com/distribution/reference/README.md index e2531e49c..172a02e0b 100644 --- a/vendor/github.com/distribution/reference/README.md +++ b/vendor/github.com/distribution/reference/README.md @@ -10,7 +10,7 @@ Go library to handle references to container images. [![codecov](https://codecov.io/gh/distribution/reference/branch/main/graph/badge.svg)](https://codecov.io/gh/distribution/reference) [![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference.svg?type=shield)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield) -This repository contains a library for handling refrences to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details. +This repository contains a library for handling references to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details. 
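Taken together, the new VPCs methods give a peering a full lifecycle. A sketch with hypothetical VPC IDs; note the update request only carries a Name, so the VPC pair itself is fixed at creation:

```go
package example

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
)

func peeringLifecycle(ctx context.Context, client *godo.Client, vpcA, vpcB string) {
	peering, _, err := client.VPCs.CreateVPCPeering(ctx, &godo.VPCPeeringCreateRequest{
		Name:   "prod-to-staging",
		VPCIDs: []string{vpcA, vpcB},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Rename the peering; only the Name field is mutable.
	if _, _, err := client.VPCs.UpdateVPCPeering(ctx, peering.ID, &godo.VPCPeeringUpdateRequest{
		Name: "prod-to-staging-renamed",
	}); err != nil {
		log.Fatal(err)
	}

	if _, err := client.VPCs.DeleteVPCPeering(ctx, peering.ID); err != nil {
		log.Fatal(err)
	}
}
```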
## Contribution diff --git a/vendor/github.com/distribution/reference/normalize.go b/vendor/github.com/distribution/reference/normalize.go index a30229d01..f4128314c 100644 --- a/vendor/github.com/distribution/reference/normalize.go +++ b/vendor/github.com/distribution/reference/normalize.go @@ -123,20 +123,51 @@ func ParseDockerRef(ref string) (Named, error) { // splitDockerDomain splits a repository name to domain and remote-name. // If no valid domain is found, the default domain is used. Repository name // needs to be already validated before. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != localhost && strings.ToLower(name[:i]) == name[:i]) { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoPrefix + remainder - } - return +func splitDockerDomain(name string) (domain, remoteName string) { + maybeDomain, maybeRemoteName, ok := strings.Cut(name, "/") + if !ok { + // Fast-path for single element ("familiar" names), such as "ubuntu" + // or "ubuntu:latest". Familiar names must be handled separately, to + // prevent them from being handled as "hostname:port". + // + // Canonicalize them as "docker.io/library/name[:tag]" + + // FIXME(thaJeztah): account for bare "localhost" or "example.com" names, which SHOULD be considered a domain. + return defaultDomain, officialRepoPrefix + name + } + + switch { + case maybeDomain == localhost: + // localhost is a reserved namespace and always considered a domain. + domain, remoteName = maybeDomain, maybeRemoteName + case maybeDomain == legacyDefaultDomain: + // canonicalize the Docker Hub and legacy "Docker Index" domains. + domain, remoteName = defaultDomain, maybeRemoteName + case strings.ContainsAny(maybeDomain, ".:"): + // Likely a domain or IP-address: + // + // - contains a "." (e.g., "example.com" or "127.0.0.1") + // - contains a ":" (e.g., "example:5000", "::1", or "[::1]:5000") + domain, remoteName = maybeDomain, maybeRemoteName + case strings.ToLower(maybeDomain) != maybeDomain: + // Uppercase namespaces are not allowed, so if the first element + // is not lowercase, we assume it to be a domain-name. + domain, remoteName = maybeDomain, maybeRemoteName + default: + // None of the above: it's not a domain, so use the default, and + // use the name input as the remote-name.
+ domain, remoteName = defaultDomain, name + } + + if domain == defaultDomain && !strings.ContainsRune(remoteName, '/') { + // Canonicalize "familiar" names, but only on Docker Hub, not + // on other domains: + // + // "docker.io/ubuntu[:tag]" => "docker.io/library/ubuntu[:tag]" + remoteName = officialRepoPrefix + remoteName + } + + return domain, remoteName } // familiarizeName returns a shortened version of the name familiar diff --git a/vendor/github.com/distribution/reference/reference.go b/vendor/github.com/distribution/reference/reference.go index e98c44daa..900398bde 100644 --- a/vendor/github.com/distribution/reference/reference.go +++ b/vendor/github.com/distribution/reference/reference.go @@ -35,8 +35,13 @@ import ( ) const ( + // RepositoryNameTotalLengthMax is the maximum total number of characters in a repository name. + RepositoryNameTotalLengthMax = 255 + // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 + // + // Deprecated: use [RepositoryNameTotalLengthMax] instead. + NameTotalLengthMax = RepositoryNameTotalLengthMax ) var ( @@ -55,8 +60,8 @@ var ( // ErrNameEmpty is returned for empty, invalid repository names. ErrNameEmpty = errors.New("repository name must have at least one component") - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + // ErrNameTooLong is returned when a repository name is longer than RepositoryNameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax) // ErrNameNotCanonical is returned when a name is not canonical. ErrNameNotCanonical = errors.New("repository name must be canonical") @@ -165,6 +170,9 @@ func Path(named Named) (name string) { return path } +// splitDomain splits a named reference into a hostname and path string. +// If no valid hostname is found, the hostname is empty and the full value +// is returned as name func splitDomain(name string) (string, string) { match := anchoredNameRegexp.FindStringSubmatch(name) if len(match) != 3 { @@ -173,19 +181,6 @@ func splitDomain(name string) (string, string) { return match[1], match[2] } -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// -// Deprecated: Use [Domain] or [Path]. -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - // Parse parses s and returns a syntactically valid Reference. // If an error was encountered it is returned, along with a nil Reference. 
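The rewritten splitDockerDomain is a pure restructuring: each switch arm preserves one branch of the old conditional. The behavior is easiest to see through the public entry point; the expected outputs in the comments below follow the canonicalization rules documented above:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	for _, in := range []string{
		"ubuntu",                 // familiar name -> docker.io/library/ubuntu:latest
		"index.docker.io/ubuntu", // legacy domain -> docker.io/library/ubuntu:latest
		"localhost/app",          // localhost is always treated as a domain
		"example.com:5000/app",   // "." or ":" marks a domain
		"Example/app",            // uppercase first element is treated as a domain
	} {
		ref, err := reference.ParseDockerRef(in)
		if err != nil {
			fmt.Println(in, "->", err)
			continue
		}
		fmt.Printf("%-24s -> %s\n", in, ref.String())
	}
}
```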
func Parse(s string) (Reference, error) { @@ -200,10 +195,6 @@ func Parse(s string) (Reference, error) { return nil, ErrReferenceInvalidFormat } - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - var repo repository nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) @@ -215,6 +206,10 @@ func Parse(s string) (Reference, error) { repo.path = matches[1] } + if len(repo.path) > RepositoryNameTotalLengthMax { + return nil, ErrNameTooLong + } + ref := reference{ namedRepository: repo, tag: matches[2], @@ -253,14 +248,15 @@ func ParseNamed(s string) (Named, error) { // WithName returns a named object representing the given string. If the input // is invalid ErrReferenceInvalidFormat will be returned. func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - match := anchoredNameRegexp.FindStringSubmatch(name) if match == nil || len(match) != 3 { return nil, ErrReferenceInvalidFormat } + + if len(match[2]) > RepositoryNameTotalLengthMax { + return nil, ErrNameTooLong + } + return repository{ domain: match[1], path: match[2], diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index 48d04f9a9..36315d429 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -669,6 +669,7 @@ Erik Hollensbe Erik Inge Bolsø Erik Kristensen Erik Sipsma +Erik Sjölund Erik St. Martin Erik Weathers Erno Hopearuoho @@ -731,6 +732,7 @@ Feroz Salam Ferran Rodenas Filipe Brandenburger Filipe Oliveira +Filipe Pina Flavio Castelli Flavio Crisciani Florian @@ -875,6 +877,8 @@ Hsing-Yu (David) Chen hsinko <21551195@zju.edu.cn> Hu Keping Hu Tao +Huajin Tong +huang-jl <1046678590@qq.com> HuanHuan Ye Huanzhong Zhang Huayi Zhang @@ -969,6 +973,7 @@ Jannick Fahlbusch Januar Wayong Jared Biel Jared Hocutt +Jaroslav Jindrak Jaroslaw Zabiello Jasmine Hegman Jason A. Donenfeld @@ -1012,6 +1017,7 @@ Jeffrey Bolle Jeffrey Morgan Jeffrey van Gogh Jenny Gebske +Jeongseok Kang Jeremy Chambers Jeremy Grosser Jeremy Huntwork @@ -1029,6 +1035,7 @@ Jezeniel Zapanta Jhon Honce Ji.Zhilong Jian Liao +Jian Zeng Jian Zhang Jiang Jinyang Jianyong Wu @@ -1967,6 +1974,7 @@ Sergey Evstifeev Sergii Kabashniuk Sergio Lopez Serhat Gülçiçek +Serhii Nakon SeungUkLee Sevki Hasirci Shane Canon @@ -2253,6 +2261,7 @@ VladimirAus Vladislav Kolesnikov Vlastimil Zeman Vojtech Vitek (V-Teq) +voloder <110066198+voloder@users.noreply.github.com> Walter Leibbrandt Walter Stanish Wang Chao diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index 37e553d41..b11c2fe02 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -2,8 +2,17 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( - // DefaultVersion of Current REST API - DefaultVersion = "1.44" + // DefaultVersion of the current REST API. + DefaultVersion = "1.45" + + // MinSupportedAPIVersion is the minimum API version that can be supported + // by the API server, specified as "major.minor". Note that the daemon + // may be configured with a different minimum API version, as returned + // in [github.com/docker/docker/api/types.Version.MinAPIVersion]. 
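The practical effect of moving the check in both Parse and WithName: the 255-character budget now applies to the repository path alone, so a long registry domain no longer counts against it. A small sketch:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/distribution/reference"
)

func main() {
	path := strings.Repeat("a", reference.RepositoryNameTotalLengthMax)

	// The domain is excluded from the limit, so this now parses.
	if _, err := reference.WithName("registry.example.com/" + path); err != nil {
		fmt.Println("unexpected:", err)
	}

	// One extra character in the path itself is still rejected.
	if _, err := reference.WithName("registry.example.com/" + path + "a"); err != nil {
		fmt.Println(err) // repository name must not be more than 255 characters
	}
}
```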
+ // + // API requests for API versions lower than the configured version produce + // an error. + MinSupportedAPIVersion = "1.24" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 201b54906..5677340db 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.44" +basePath: "/v1.45" info: title: "Docker Engine API" - version: "1.44" + version: "1.45" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,8 +55,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.44) is used. - For example, calling `/info` is the same as calling `/v1.44/info`. Using the + If you omit the version-prefix, the current version of the API (v1.45) is used. + For example, calling `/info` is the same as calling `/v1.45/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -427,6 +427,10 @@ definitions: type: "object" additionalProperties: type: "string" + Subpath: + description: "Source path inside the volume. Must be relative without any back traversals." + type: "string" + example: "dir-inside-volume/subdirectory" TmpfsOptions: description: "Optional configuration for the `tmpfs` type." type: "object" @@ -8770,8 +8774,7 @@ paths:


- > **Deprecated**: This field is deprecated and will always - > be "false" in future. + > **Deprecated**: This field is deprecated and will always be "false". type: "boolean" example: false name: @@ -8814,13 +8817,8 @@ paths: description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - - `is-automated=(true|false)` (deprecated, see below) - `is-official=(true|false)` - `stars=` Matches images that has at least 'number' stars. - - The `is-automated` filter is deprecated. The `is_automated` field has - been deprecated by Docker Hub's search API. Consequently, searching - for `is-automated=true` will yield no results. type: "string" tags: ["Image"] /images/prune: diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 24b00a275..882201f0e 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -157,42 +157,12 @@ type ImageBuildResponse struct { OSType string } -// ImageCreateOptions holds information to create images. -type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. - Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. -} - // ImageImportSource holds source information for ImageImport type ImageImportSource struct { Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. } -// ImageImportOptions holds information to import images from the client host. -type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image - Platform string // Platform is the target platform of the image -} - -// ImageListOptions holds parameters to list images with. -type ImageListOptions struct { - // All controls whether all images in the graph are filtered, or just - // the heads. - All bool - - // Filters is a JSON-encoded set of filter arguments. - Filters filters.Args - - // SharedSize indicates whether the shared size of images should be computed. - SharedSize bool - - // ContainerCount indicates whether container count should be computed. - ContainerCount bool -} - // ImageLoadResponse returns information to the client about a load process. type ImageLoadResponse struct { // Body must be closed to avoid a resource leak @@ -200,14 +170,6 @@ type ImageLoadResponse struct { JSON bool } -// ImagePullOptions holds information to pull images. -type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc - Platform string -} - // RequestPrivilegeFunc is a function interface that // clients can supply to retry operations after // getting an authorization error. @@ -216,15 +178,6 @@ type ImagePullOptions struct { // if the privilege request fails. type RequestPrivilegeFunc func() (string, error) -// ImagePushOptions holds information to push images. 
-type ImagePushOptions ImagePullOptions - -// ImageRemoveOptions holds parameters to remove images. -type ImageRemoveOptions struct { - Force bool - PruneChildren bool -} - // ImageSearchOptions holds parameters to search images with. type ImageSearchOptions struct { RegistryAuth string diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index be41d6315..86f46b74a 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -5,8 +5,8 @@ import ( "time" "github.com/docker/docker/api/types/strslice" - dockerspec "github.com/docker/docker/image/spec/specs-go/v1" "github.com/docker/go-connections/nat" + dockerspec "github.com/moby/docker-image-spec/specs-go/v1" ) // MinimumDuration puts a minimum on user configured duration. diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go index 3cefecb0d..c6b1f351b 100644 --- a/vendor/github.com/docker/docker/api/types/image/opts.go +++ b/vendor/github.com/docker/docker/api/types/image/opts.go @@ -1,9 +1,57 @@ package image -import ocispec "github.com/opencontainers/image-spec/specs-go/v1" +import "github.com/docker/docker/api/types/filters" -// GetImageOpts holds parameters to inspect an image. -type GetImageOpts struct { - Platform *ocispec.Platform - Details bool +// ImportOptions holds information to import images from the client host. +type ImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image + Platform string // Platform is the target platform of the image +} + +// CreateOptions holds information to create images. +type CreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. + Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. +} + +// PullOptions holds information to pull images. +type PullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc]. + PrivilegeFunc func() (string, error) + Platform string +} + +// PushOptions holds information to push images. +type PushOptions PullOptions + +// ListOptions holds parameters to list images with. +type ListOptions struct { + // All controls whether all images in the graph are filtered, or just + // the heads. + All bool + + // Filters is a JSON-encoded set of filter arguments. + Filters filters.Args + + // SharedSize indicates whether the shared size of images should be computed. + SharedSize bool + + // ContainerCount indicates whether container count should be computed. 
+ ContainerCount bool +} + +// RemoveOptions holds parameters to remove images. +type RemoveOptions struct { + Force bool + PruneChildren bool } diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index 57edf2ef1..6fe04da25 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -96,6 +96,7 @@ type BindOptions struct { type VolumeOptions struct { NoCopy bool `json:",omitempty"` Labels map[string]string `json:",omitempty"` + Subpath string `json:",omitempty"` DriverConfig *Driver `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 05cb31075..6bbae93ef 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -94,7 +94,7 @@ type SearchResult struct { Name string `json:"name"` // IsAutomated indicates whether the result is automated. // - // Deprecated: the "is_automated" field is deprecated and will always be "false" in the future. + // Deprecated: the "is_automated" field is deprecated and will always be "false". IsAutomated bool `json:"is_automated"` // Description is a textual description of the repository Description string `json:"description"` diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index 56a8b77d4..ca07162a2 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -82,7 +82,7 @@ type ImageInspect struct { // Depending on how the image was created, this field may be empty. // // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. - Container string + Container string `json:",omitempty"` // ContainerConfig is an optional field containing the configuration of the // container that was last committed when creating the image. @@ -91,7 +91,7 @@ type ImageInspect struct { // and it is not in active use anymore. // // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. - ContainerConfig *container.Config + ContainerConfig *container.Config `json:",omitempty"` // DockerVersion is the version of Docker that was used to build the image. // diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go index e332a7bb6..231a5cca4 100644 --- a/vendor/github.com/docker/docker/api/types/types_deprecated.go +++ b/vendor/github.com/docker/docker/api/types/types_deprecated.go @@ -1,138 +1,35 @@ package types import ( - "github.com/docker/docker/api/types/checkpoint" - "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/system" ) -// CheckpointCreateOptions holds parameters to create a checkpoint from a container. 
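The new Subpath field on mount.VolumeOptions pairs with the swagger addition above: it mounts a single directory from inside a named volume instead of the whole volume, and needs a daemon speaking API v1.45 or newer. A sketch with a hypothetical volume name:

```go
package example

import (
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
)

// subpathMount exposes only one subdirectory of the "data" volume.
func subpathMount() *container.HostConfig {
	return &container.HostConfig{
		Mounts: []mount.Mount{{
			Type:   mount.TypeVolume,
			Source: "data",
			Target: "/srv/data",
			VolumeOptions: &mount.VolumeOptions{
				// Relative path inside the volume; back-traversals are rejected.
				Subpath: "dir-inside-volume/subdirectory",
			},
		}},
	}
}
```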
+// ImageImportOptions holds information to import images from the client host. // -// Deprecated: use [checkpoint.CreateOptions]. -type CheckpointCreateOptions = checkpoint.CreateOptions +// Deprecated: use [image.ImportOptions]. +type ImageImportOptions = image.ImportOptions -// CheckpointListOptions holds parameters to list checkpoints for a container +// ImageCreateOptions holds information to create images. // -// Deprecated: use [checkpoint.ListOptions]. -type CheckpointListOptions = checkpoint.ListOptions +// Deprecated: use [image.CreateOptions]. +type ImageCreateOptions = image.CreateOptions -// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +// ImagePullOptions holds information to pull images. // -// Deprecated: use [checkpoint.DeleteOptions]. -type CheckpointDeleteOptions = checkpoint.DeleteOptions +// Deprecated: use [image.PullOptions]. +type ImagePullOptions = image.PullOptions -// Checkpoint represents the details of a checkpoint when listing endpoints. +// ImagePushOptions holds information to push images. // -// Deprecated: use [checkpoint.Summary]. -type Checkpoint = checkpoint.Summary +// Deprecated: use [image.PushOptions]. +type ImagePushOptions = image.PushOptions -// Info contains response of Engine API: -// GET "/info" +// ImageListOptions holds parameters to list images with. // -// Deprecated: use [system.Info]. -type Info = system.Info +// Deprecated: use [image.ListOptions]. +type ImageListOptions = image.ListOptions -// Commit holds the Git-commit (SHA1) that a binary was built from, as reported -// in the version-string of external tools, such as containerd, or runC. +// ImageRemoveOptions holds parameters to remove images. // -// Deprecated: use [system.Commit]. -type Commit = system.Commit - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. It is used by [system.Info] struct -// -// Deprecated: use [system.PluginsInfo]. -type PluginsInfo = system.PluginsInfo - -// NetworkAddressPool is a temp struct used by [system.Info] struct. -// -// Deprecated: use [system.NetworkAddressPool]. -type NetworkAddressPool = system.NetworkAddressPool - -// Runtime describes an OCI runtime. -// -// Deprecated: use [system.Runtime]. -type Runtime = system.Runtime - -// SecurityOpt contains the name and options of a security option. -// -// Deprecated: use [system.SecurityOpt]. -type SecurityOpt = system.SecurityOpt - -// KeyValue holds a key/value pair. -// -// Deprecated: use [system.KeyValue]. -type KeyValue = system.KeyValue - -// ImageDeleteResponseItem image delete response item. -// -// Deprecated: use [image.DeleteResponse]. -type ImageDeleteResponseItem = image.DeleteResponse - -// ImageSummary image summary. -// -// Deprecated: use [image.Summary]. -type ImageSummary = image.Summary - -// ImageMetadata contains engine-local data about the image. -// -// Deprecated: use [image.Metadata]. -type ImageMetadata = image.Metadata - -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -// -// Deprecated: use [swarm.ServiceCreateResponse]. -type ServiceCreateResponse = swarm.ServiceCreateResponse - -// ServiceUpdateResponse service update response. -// -// Deprecated: use [swarm.ServiceUpdateResponse]. -type ServiceUpdateResponse = swarm.ServiceUpdateResponse - -// ContainerStartOptions holds parameters to start containers. -// -// Deprecated: use [container.StartOptions]. 
-type ContainerStartOptions = container.StartOptions - -// ResizeOptions holds parameters to resize a TTY. -// It can be used to resize container TTYs and -// exec process TTYs too. -// -// Deprecated: use [container.ResizeOptions]. -type ResizeOptions = container.ResizeOptions - -// ContainerAttachOptions holds parameters to attach to a container. -// -// Deprecated: use [container.AttachOptions]. -type ContainerAttachOptions = container.AttachOptions - -// ContainerCommitOptions holds parameters to commit changes into a container. -// -// Deprecated: use [container.CommitOptions]. -type ContainerCommitOptions = container.CommitOptions - -// ContainerListOptions holds parameters to list containers with. -// -// Deprecated: use [container.ListOptions]. -type ContainerListOptions = container.ListOptions - -// ContainerLogsOptions holds parameters to filter logs with. -// -// Deprecated: use [container.LogsOptions]. -type ContainerLogsOptions = container.LogsOptions - -// ContainerRemoveOptions holds parameters to remove containers. -// -// Deprecated: use [container.RemoveOptions]. -type ContainerRemoveOptions = container.RemoveOptions - -// DecodeSecurityOptions decodes a security options string slice to a type safe -// [system.SecurityOpt]. -// -// Deprecated: use [system.DecodeSecurityOptions]. -func DecodeSecurityOptions(opts []string) ([]system.SecurityOpt, error) { - return system.DecodeSecurityOptions(opts) -} +// Deprecated: use [image.RemoveOptions]. +type ImageRemoveOptions = image.RemoveOptions diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md deleted file mode 100644 index 1ef911edb..000000000 --- a/vendor/github.com/docker/docker/api/types/versions/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Legacy API type versions - -This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -## Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. - -For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. 
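Because every alias above forwards to the image package, old code keeps compiling; migrating is a package swap at the call site. A before/after sketch against the updated client signatures:

```go
package example

import (
	"context"
	"io"

	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/client"
)

// Before: cli.ImageList(ctx, types.ImageListOptions{All: true})
func listImages(ctx context.Context, cli *client.Client) ([]image.Summary, error) {
	return cli.ImageList(ctx, image.ListOptions{All: true})
}

// Before: cli.ImagePull(ctx, ref, types.ImagePullOptions{RegistryAuth: auth})
func pull(ctx context.Context, cli *client.Client, ref, auth string) (io.ReadCloser, error) {
	return cli.ImagePull(ctx, ref, image.PullOptions{
		RegistryAuth: auth,
		// PrivilegeFunc is now declared inline on image.PullOptions.
		PrivilegeFunc: func() (string, error) { return auth, nil },
	})
}
```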
diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go index 55fc5d389..bbd9ff0b8 100644 --- a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go +++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go @@ -238,13 +238,13 @@ type TopologyRequirement struct { // If requisite is specified, all topologies in preferred list MUST // also be present in the list of requisite topologies. // - // If the SP is unable to to make the provisioned volume available + // If the SP is unable to make the provisioned volume available // from any of the preferred topologies, the SP MAY choose a topology // from the list of requisite topologies. // If the list of requisite topologies is not specified, then the SP // MAY choose from the list of all possible topologies. // If the list of requisite topologies is specified and the SP is - // unable to to make the provisioned volume available from any of the + // unable to make the provisioned volume available from any of the // requisite topologies it MUST fail the CreateVolume call. // // Example 1: @@ -254,7 +254,7 @@ type TopologyRequirement struct { // {"region": "R1", "zone": "Z3"} // preferred = // {"region": "R1", "zone": "Z3"} - // then the the SP SHOULD first attempt to make the provisioned volume + // then the SP SHOULD first attempt to make the provisioned volume // available from "zone" "Z3" in the "region" "R1" and fall back to // "zone" "Z2" in the "region" "R1" if that is not possible. // @@ -268,7 +268,7 @@ type TopologyRequirement struct { // preferred = // {"region": "R1", "zone": "Z4"}, // {"region": "R1", "zone": "Z2"} - // then the the SP SHOULD first attempt to make the provisioned volume + // then the SP SHOULD first attempt to make the provisioned volume // accessible from "zone" "Z4" in the "region" "R1" and fall back to // "zone" "Z2" in the "region" "R1" if that is not possible. If that // is not possible, the SP may choose between either the "zone" @@ -287,7 +287,7 @@ type TopologyRequirement struct { // preferred = // {"region": "R1", "zone": "Z5"}, // {"region": "R1", "zone": "Z3"} - // then the the SP SHOULD first attempt to make the provisioned volume + // then the SP SHOULD first attempt to make the provisioned volume // accessible from the combination of the two "zones" "Z5" and "Z3" in // the "region" "R1". If that's not possible, it should fall back to // a combination of "Z5" and other possibilities from the list of diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go index 68ef31b78..68e6ec5ed 100644 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -10,11 +10,11 @@ import ( ) // DistributionInspect returns the image digest with the full manifest. 
-func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) { +func (cli *Client) DistributionInspect(ctx context.Context, imageRef, encodedRegistryAuth string) (registry.DistributionInspect, error) { // Contact the registry to retrieve digest and platform information var distributionInspect registry.DistributionInspect - if image == "" { - return distributionInspect, objectNotFoundError{object: "distribution", id: image} + if imageRef == "" { + return distributionInspect, objectNotFoundError{object: "distribution", id: imageRef} } if err := cli.NewVersionError(ctx, "1.30", "distribution inspect"); err != nil { @@ -28,7 +28,7 @@ func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegist } } - resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + resp, err := cli.get(ctx, "/distribution/"+imageRef+"/json", url.Values{}, headers) defer ensureReaderClosed(resp) if err != nil { return distributionInspect, err diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go index 29cd0b437..7c7873dca 100644 --- a/vendor/github.com/docker/docker/client/image_create.go +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -8,13 +8,13 @@ import ( "strings" "github.com/distribution/reference" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/registry" ) // ImageCreate creates a new image based on the parent options. // It returns the JSON content in the response body. -func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { +func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) { ref, err := reference.ParseNormalizedNamed(parentReference) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index cd376a14e..5a890b0c5 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -8,11 +8,12 @@ import ( "github.com/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" ) // ImageImport creates a new image based on the source options. // It returns the JSON content in the response body. 
-func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { +func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) { if ref != "" { // Check if the given image name can be resolved if _, err := reference.ParseNormalizedNamed(ref); err != nil { diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index fa6aecfc6..a9cc1e21e 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -5,14 +5,13 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/versions" ) // ImageList returns a list of images in the docker host. -func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error) { +func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) { var images []image.Summary // Make sure we negotiated (if the client is configured to do so), diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go index d92049d58..6438cf6a9 100644 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -7,7 +7,7 @@ import ( "strings" "github.com/distribution/reference" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/errdefs" ) @@ -19,7 +19,7 @@ import ( // FIXME(vdemeester): there is currently used in a few way in docker/docker // - if not in trusted content, ref is used to pass the whole reference, and tag is empty // - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { +func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.PullOptions) (io.ReadCloser, error) { ref, err := reference.ParseNormalizedNamed(refStr) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index 6839a89e0..e6a6b11ee 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -8,7 +8,7 @@ import ( "net/url" "github.com/distribution/reference" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" ) @@ -17,7 +17,7 @@ import ( // It executes the privileged function if the operation is unauthorized // and it tries one more time. 
// It's up to the caller to handle the io.ReadCloser and close it properly. -func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { +func (cli *Client) ImagePush(ctx context.Context, image string, options image.PushOptions) (io.ReadCloser, error) { ref, err := reference.ParseNormalizedNamed(image) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go index b936d2083..652d1bfa3 100644 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -5,12 +5,11 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/image" ) // ImageRemove removes an image from the docker host. -func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]image.DeleteResponse, error) { +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options image.RemoveOptions) ([]image.DeleteResponse, error) { query := url.Values{} if options.Force { diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 302f5fb13..45d233f25 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -90,15 +90,15 @@ type ImageAPIClient interface { ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) BuildCancel(ctx context.Context, id string) error - ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) - ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error) + ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) - ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) - ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]image.DeleteResponse, error) + ImagePull(ctx context.Context, ref string, options image.PullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options image.PushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options image.RemoveOptions) ([]image.DeleteResponse, error) ImageSearch(ctx 
context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) ImageTag(ctx context.Context, image, ref string) error diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 5edd5a7ca..9e790390b 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,17 @@ # Change history of go-restful + +## [v3.12.0] - 2024-03-11 +- add Flush method #529 (#538) +- fix: Improper handling of empty POST requests (#543) + +## [v3.11.3] - 2024-01-09 +- better not have 2 tags on one commit + +## [v3.11.1, v3.11.2] - 2024-01-09 + +- fix by restoring custom JSON handler functions (Mike Beaumont #540) + ## [v3.11.0] - 2023-08-19 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index e3e30080e..7234604e4 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -2,7 +2,6 @@ go-restful ========== package for building REST-style Web Services using Google Go -[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) [![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) @@ -95,8 +94,7 @@ There are several hooks to customize the behavior of the go-restful package. - Trace logging - Compression - Encoders for other serializers -- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` -- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` +- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` ## Resources diff --git a/vendor/github.com/emicklei/go-restful/v3/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go index 1ff239f99..80adf55fd 100644 --- a/vendor/github.com/emicklei/go-restful/v3/compress.go +++ b/vendor/github.com/emicklei/go-restful/v3/compress.go @@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool { return c.writer.(http.CloseNotifier).CloseNotify() } +// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it. 
+func (c *CompressingResponseWriter) Flush() { + flusher, ok := c.writer.(http.Flusher) + if !ok { + // writer doesn't support http.Flusher interface + return + } + flusher.Flush() +} + // Close the underlying compressor func (c *CompressingResponseWriter) Close() error { if c.isCompressorClosed() { diff --git a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go index 66dfc824f..9808752ac 100644 --- a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go +++ b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go @@ -5,11 +5,18 @@ package restful // that can be found in the LICENSE file. import ( + "encoding/json" "encoding/xml" "strings" "sync" ) +var ( + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) + // EntityReaderWriter can read and write values using an encoding such as JSON,XML. type EntityReaderWriter interface { // Read a serialized version of the value from the request. diff --git a/vendor/github.com/emicklei/go-restful/v3/json.go b/vendor/github.com/emicklei/go-restful/v3/json.go deleted file mode 100644 index 871165166..000000000 --- a/vendor/github.com/emicklei/go-restful/v3/json.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !jsoniter - -package restful - -import "encoding/json" - -var ( - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go b/vendor/github.com/emicklei/go-restful/v3/jsoniter.go deleted file mode 100644 index 11b8f8ae7..000000000 --- a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build jsoniter - -package restful - -import "github.com/json-iterator/go" - -var ( - json = jsoniter.ConfigCompatibleWithStandardLibrary - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go index 07a0c91e9..a9b3faaa8 100644 --- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -155,7 +155,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") if (method == http.MethodPost || method == http.MethodPut || - method == http.MethodPatch) && length == "" { + method == http.MethodPatch) && (length == "" || length == "0") { return nil, NewError( http.StatusUnsupportedMediaType, fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), diff --git a/vendor/github.com/expr-lang/expr/README.md b/vendor/github.com/expr-lang/expr/README.md index bd34c7d24..5a587162c 100644 --- a/vendor/github.com/expr-lang/expr/README.md +++ b/vendor/github.com/expr-lang/expr/README.md @@ -162,6 +162,8 @@ func main() { * [Visually.io](https://visually.io) employs Expr as a business rule engine for its personalization targeting algorithm. 
* [Akvorado](https://github.com/akvorado/akvorado) utilizes Expr to classify exporters and interfaces in network flows. * [keda.sh](https://keda.sh) uses Expr to allow customization of its Kubernetes-based event-driven autoscaling. +* [Span Digital](https://spandigital.com/) uses Expr in its Knowledge Management products. +* [Xiaohongshu](https://www.xiaohongshu.com/) combines YAML with Expr for dynamic policy delivery. [Add your company too](https://github.com/expr-lang/expr/edit/master/README.md) diff --git a/vendor/github.com/expr-lang/expr/ast/print.go b/vendor/github.com/expr-lang/expr/ast/print.go index fa593ae28..6a7d698a9 100644 --- a/vendor/github.com/expr-lang/expr/ast/print.go +++ b/vendor/github.com/expr-lang/expr/ast/print.go @@ -65,8 +65,7 @@ func (n *BinaryNode) String() string { var lhs, rhs string var lwrap, rwrap bool - lb, ok := n.Left.(*BinaryNode) - if ok { + if lb, ok := n.Left.(*BinaryNode); ok { if operator.Less(lb.Operator, n.Operator) { lwrap = true } @@ -77,9 +76,7 @@ func (n *BinaryNode) String() string { lwrap = true } } - - rb, ok := n.Right.(*BinaryNode) - if ok { + if rb, ok := n.Right.(*BinaryNode); ok { if operator.Less(rb.Operator, n.Operator) { rwrap = true } @@ -88,6 +85,13 @@ func (n *BinaryNode) String() string { } } + if _, ok := n.Left.(*ConditionalNode); ok { + lwrap = true + } + if _, ok := n.Right.(*ConditionalNode); ok { + rwrap = true + } + if lwrap { lhs = fmt.Sprintf("(%s)", n.Left.String()) } else { @@ -108,20 +112,25 @@ func (n *ChainNode) String() string { } func (n *MemberNode) String() string { + node := n.Node.String() + if _, ok := n.Node.(*BinaryNode); ok { + node = fmt.Sprintf("(%s)", node) + } + if n.Optional { if str, ok := n.Property.(*StringNode); ok && utils.IsValidIdentifier(str.Value) { - return fmt.Sprintf("%s?.%s", n.Node.String(), str.Value) + return fmt.Sprintf("%s?.%s", node, str.Value) } else { - return fmt.Sprintf("%s?.[%s]", n.Node.String(), n.Property.String()) + return fmt.Sprintf("%s?.[%s]", node, n.Property.String()) } } if str, ok := n.Property.(*StringNode); ok && utils.IsValidIdentifier(str.Value) { if _, ok := n.Node.(*PointerNode); ok { return fmt.Sprintf(".%s", str.Value) } - return fmt.Sprintf("%s.%s", n.Node.String(), str.Value) + return fmt.Sprintf("%s.%s", node, str.Value) } - return fmt.Sprintf("%s[%s]", n.Node.String(), n.Property.String()) + return fmt.Sprintf("%s[%s]", node, n.Property.String()) } func (n *SliceNode) String() string { @@ -202,5 +211,11 @@ func (n *MapNode) String() string { } func (n *PairNode) String() string { - return fmt.Sprintf("%s: %s", n.Key.String(), n.Value.String()) + if str, ok := n.Key.(*StringNode); ok { + if utils.IsValidIdentifier(str.Value) { + return fmt.Sprintf("%s: %s", str.Value, n.Value.String()) + } + return fmt.Sprintf("%s: %s", str.String(), n.Value.String()) + } + return fmt.Sprintf("(%s): %s", n.Key.String(), n.Value.String()) } diff --git a/vendor/github.com/expr-lang/expr/builtin/builtin.go b/vendor/github.com/expr-lang/expr/builtin/builtin.go index 7bf377df2..cc6f197cd 100644 --- a/vendor/github.com/expr-lang/expr/builtin/builtin.go +++ b/vendor/github.com/expr-lang/expr/builtin/builtin.go @@ -83,6 +83,11 @@ var Builtins = []*Function{ Predicate: true, Types: types(new(func([]any, func(any) bool) int)), }, + { + Name: "sum", + Predicate: true, + Types:
types(new(func([]any, func(any) bool) int)), + }, { Name: "groupBy", Predicate: true, @@ -387,13 +392,6 @@ var Builtins = []*Function{ return validateAggregateFunc("min", args) }, }, - { - Name: "sum", - Func: sum, - Validate: func(args []reflect.Type) (reflect.Type, error) { - return validateAggregateFunc("sum", args) - }, - }, { Name: "mean", Func: func(args ...any) (any, error) { @@ -474,9 +472,27 @@ var Builtins = []*Function{ { Name: "now", Func: func(args ...any) (any, error) { - return time.Now(), nil + if len(args) == 0 { + return time.Now(), nil + } + if len(args) == 1 { + if tz, ok := args[0].(*time.Location); ok { + return time.Now().In(tz), nil + } + } + return nil, fmt.Errorf("invalid number of arguments (expected 0, got %d)", len(args)) + }, + Validate: func(args []reflect.Type) (reflect.Type, error) { + if len(args) == 0 { + return timeType, nil + } + if len(args) == 1 { + if args[0] != nil && args[0].AssignableTo(locationType) { + return timeType, nil + } + } + return anyType, fmt.Errorf("invalid number of arguments (expected 0, got %d)", len(args)) }, - Types: types(new(func() time.Time)), }, { Name: "duration", @@ -488,9 +504,17 @@ var Builtins = []*Function{ { Name: "date", Func: func(args ...any) (any, error) { + tz, ok := args[0].(*time.Location) + if ok { + args = args[1:] + } + date := args[0].(string) if len(args) == 2 { layout := args[1].(string) + if tz != nil { + return time.ParseInLocation(layout, date, tz) + } return time.Parse(layout, date) } if len(args) == 3 { @@ -517,18 +541,43 @@ var Builtins = []*Function{ time.RFC1123, } for _, layout := range layouts { - t, err := time.Parse(layout, date) - if err == nil { - return t, nil + if tz == nil { + t, err := time.Parse(layout, date) + if err == nil { + return t, nil + } + } else { + t, err := time.ParseInLocation(layout, date, tz) + if err == nil { + return t, nil + } } } return nil, fmt.Errorf("invalid date %s", date) }, - Types: types( - new(func(string) time.Time), - new(func(string, string) time.Time), - new(func(string, string, string) time.Time), - ), + Validate: func(args []reflect.Type) (reflect.Type, error) { + if len(args) < 1 { + return anyType, fmt.Errorf("invalid number of arguments (expected at least 1, got %d)", len(args)) + } + if args[0] != nil && args[0].AssignableTo(locationType) { + args = args[1:] + } + if len(args) > 3 { + return anyType, fmt.Errorf("invalid number of arguments (expected at most 3, got %d)", len(args)) + } + return timeType, nil + }, + }, + { + Name: "timezone", + Func: func(args ...any) (any, error) { + tz, err := time.LoadLocation(args[0].(string)) + if err != nil { + return nil, err + } + return tz, nil + }, + Types: types(time.LoadLocation), }, { Name: "first", diff --git a/vendor/github.com/expr-lang/expr/builtin/lib.go b/vendor/github.com/expr-lang/expr/builtin/lib.go index e3a6c0aef..e3cd61b96 100644 --- a/vendor/github.com/expr-lang/expr/builtin/lib.go +++ b/vendor/github.com/expr-lang/expr/builtin/lib.go @@ -258,45 +258,6 @@ func String(arg any) any { return fmt.Sprintf("%v", arg) } -func sum(args ...any) (any, error) { - var total int - var fTotal float64 - - for _, arg := range args { - rv := reflect.ValueOf(deref.Deref(arg)) - - switch rv.Kind() { - case reflect.Array, reflect.Slice: - size := rv.Len() - for i := 0; i < size; i++ { - elemSum, err := sum(rv.Index(i).Interface()) - if err != nil { - return nil, err - } - switch elemSum := elemSum.(type) { - case int: - total += 
elemSum - case float64: - fTotal += elemSum - } - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - total += int(rv.Int()) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - total += int(rv.Uint()) - case reflect.Float32, reflect.Float64: - fTotal += rv.Float() - default: - return nil, fmt.Errorf("invalid argument for sum (type %T)", arg) - } - } - - if fTotal != 0.0 { - return fTotal + float64(total), nil - } - return total, nil -} - func minMax(name string, fn func(any, any) bool, args ...any) (any, error) { var val any for _, arg := range args { diff --git a/vendor/github.com/expr-lang/expr/builtin/utils.go b/vendor/github.com/expr-lang/expr/builtin/utils.go index 7d3b6ee8e..29a95731a 100644 --- a/vendor/github.com/expr-lang/expr/builtin/utils.go +++ b/vendor/github.com/expr-lang/expr/builtin/utils.go @@ -3,14 +3,17 @@ package builtin import ( "fmt" "reflect" + "time" ) var ( - anyType = reflect.TypeOf(new(any)).Elem() - integerType = reflect.TypeOf(0) - floatType = reflect.TypeOf(float64(0)) - arrayType = reflect.TypeOf([]any{}) - mapType = reflect.TypeOf(map[any]any{}) + anyType = reflect.TypeOf(new(any)).Elem() + integerType = reflect.TypeOf(0) + floatType = reflect.TypeOf(float64(0)) + arrayType = reflect.TypeOf([]any{}) + mapType = reflect.TypeOf(map[any]any{}) + timeType = reflect.TypeOf(new(time.Time)).Elem() + locationType = reflect.TypeOf(new(time.Location)) ) func kind(t reflect.Type) reflect.Kind { diff --git a/vendor/github.com/expr-lang/expr/checker/checker.go b/vendor/github.com/expr-lang/expr/checker/checker.go index b46178d43..ecf7a04d6 100644 --- a/vendor/github.com/expr-lang/expr/checker/checker.go +++ b/vendor/github.com/expr-lang/expr/checker/checker.go @@ -653,6 +653,10 @@ func (v *checker) BuiltinNode(node *ast.BuiltinNode) (reflect.Type, info) { return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection) } + if len(node.Arguments) == 1 { + return integerType, info{} + } + v.begin(collection) closure, _ := v.visit(node.Arguments[1]) v.end() @@ -668,6 +672,29 @@ func (v *checker) BuiltinNode(node *ast.BuiltinNode) (reflect.Type, info) { } return v.error(node.Arguments[1], "predicate should has one input and one output param") + case "sum": + collection, _ := v.visit(node.Arguments[0]) + if !isArray(collection) && !isAny(collection) { + return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection) + } + + if len(node.Arguments) == 2 { + v.begin(collection) + closure, _ := v.visit(node.Arguments[1]) + v.end() + + if isFunc(closure) && + closure.NumOut() == 1 && + closure.NumIn() == 1 && isAny(closure.In(0)) { + return closure.Out(0), info{} + } + } else { + if isAny(collection) { + return anyType, info{} + } + return collection.Elem(), info{} + } + case "find", "findLast": collection, _ := v.visit(node.Arguments[0]) if !isArray(collection) && !isAny(collection) { diff --git a/vendor/github.com/expr-lang/expr/compiler/compiler.go b/vendor/github.com/expr-lang/expr/compiler/compiler.go index 808b53c9b..1aa5ce188 100644 --- a/vendor/github.com/expr-lang/expr/compiler/compiler.go +++ b/vendor/github.com/expr-lang/expr/compiler/compiler.go @@ -92,6 +92,13 @@ type scope struct { index int } +func (c 
*compiler) nodeParent() ast.Node { + if len(c.nodes) > 1 { + return c.nodes[len(c.nodes)-2] + } + return nil +} + func (c *compiler) emitLocation(loc file.Location, op Opcode, arg int) int { c.bytecode = append(c.bytecode, op) current := len(c.bytecode) @@ -395,34 +402,12 @@ func (c *compiler) UnaryNode(node *ast.UnaryNode) { } func (c *compiler) BinaryNode(node *ast.BinaryNode) { - l := kind(node.Left) - r := kind(node.Right) - - leftIsSimple := isSimpleType(node.Left) - rightIsSimple := isSimpleType(node.Right) - leftAndRightAreSimple := leftIsSimple && rightIsSimple - switch node.Operator { case "==": - c.compile(node.Left) - c.derefInNeeded(node.Left) - c.compile(node.Right) - c.derefInNeeded(node.Right) - - if l == r && l == reflect.Int && leftAndRightAreSimple { - c.emit(OpEqualInt) - } else if l == r && l == reflect.String && leftAndRightAreSimple { - c.emit(OpEqualString) - } else { - c.emit(OpEqual) - } + c.equalBinaryNode(node) case "!=": - c.compile(node.Left) - c.derefInNeeded(node.Left) - c.compile(node.Right) - c.derefInNeeded(node.Right) - c.emit(OpEqual) + c.equalBinaryNode(node) c.emit(OpNot) case "or", "||": @@ -580,6 +565,28 @@ func (c *compiler) BinaryNode(node *ast.BinaryNode) { } } +func (c *compiler) equalBinaryNode(node *ast.BinaryNode) { + l := kind(node.Left) + r := kind(node.Right) + + leftIsSimple := isSimpleType(node.Left) + rightIsSimple := isSimpleType(node.Right) + leftAndRightAreSimple := leftIsSimple && rightIsSimple + + c.compile(node.Left) + c.derefInNeeded(node.Left) + c.compile(node.Right) + c.derefInNeeded(node.Right) + + if l == r && l == reflect.Int && leftAndRightAreSimple { + c.emit(OpEqualInt) + } else if l == r && l == reflect.String && leftAndRightAreSimple { + c.emit(OpEqualString) + } else { + c.emit(OpEqual) + } +} + func isSimpleType(node ast.Node) bool { if node == nil { return false @@ -594,9 +601,21 @@ func isSimpleType(node ast.Node) bool { func (c *compiler) ChainNode(node *ast.ChainNode) { c.chains = append(c.chains, []int{}) c.compile(node.Node) - // Chain activate (got nit somewhere) for _, ph := range c.chains[len(c.chains)-1] { - c.patchJump(ph) + c.patchJump(ph) // If chain activated jump here (got nit somewhere). + } + parent := c.nodeParent() + if binary, ok := parent.(*ast.BinaryNode); ok && binary.Operator == "??" { + // If chain is used in nil coalescing operator, we can omit + // nil push at the end of the chain. The ?? operator will + // handle it. + } else { + // We need to put the nil on the stack, otherwise "typed" + // nil will be used as a result of the chain. 
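+ // For example, a bare `foo?.bar` must come out as untyped nil (so that `foo?.bar == nil` holds), while in `foo?.bar ?? 42` the ?? operator consumes the nil itself and the extra push can be skipped.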
+ j := c.emit(OpJumpIfNotNil, placeholder) + c.emit(OpPop) + c.emit(OpNil) + c.patchJump(j) } c.chains = c.chains[:len(c.chains)-1] } @@ -800,7 +819,11 @@ func (c *compiler) BuiltinNode(node *ast.BuiltinNode) { c.compile(node.Arguments[0]) c.emit(OpBegin) c.emitLoop(func() { - c.compile(node.Arguments[1]) + if len(node.Arguments) == 2 { + c.compile(node.Arguments[1]) + } else { + c.emit(OpPointer) + } c.emitCond(func() { c.emit(OpIncrementCount) }) @@ -809,6 +832,25 @@ func (c *compiler) BuiltinNode(node *ast.BuiltinNode) { c.emit(OpEnd) return + case "sum": + c.compile(node.Arguments[0]) + c.emit(OpBegin) + c.emit(OpInt, 0) + c.emit(OpSetAcc) + c.emitLoop(func() { + if len(node.Arguments) == 2 { + c.compile(node.Arguments[1]) + } else { + c.emit(OpPointer) + } + c.emit(OpGetAcc) + c.emit(OpAdd) + c.emit(OpSetAcc) + }) + c.emit(OpGetAcc) + c.emit(OpEnd) + return + case "find": c.compile(node.Arguments[0]) c.emit(OpBegin) diff --git a/vendor/github.com/expr-lang/expr/expr.go b/vendor/github.com/expr-lang/expr/expr.go index ba786c017..83e0a167e 100644 --- a/vendor/github.com/expr-lang/expr/expr.go +++ b/vendor/github.com/expr-lang/expr/expr.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "reflect" + "time" "github.com/expr-lang/expr/ast" "github.com/expr-lang/expr/builtin" @@ -183,6 +184,17 @@ func WithContext(name string) Option { }) } +// Timezone sets default timezone for date() and now() builtin functions. +func Timezone(name string) Option { + tz, err := time.LoadLocation(name) + if err != nil { + panic(err) + } + return Patch(patcher.WithTimezone{ + Location: tz, + }) +} + // Compile parses and compiles given input expression to bytecode program. func Compile(input string, ops ...Option) (*vm.Program, error) { config := conf.CreateNew() diff --git a/vendor/github.com/expr-lang/expr/optimizer/optimizer.go b/vendor/github.com/expr-lang/expr/optimizer/optimizer.go index 6d1fb0b54..4ceb3fa43 100644 --- a/vendor/github.com/expr-lang/expr/optimizer/optimizer.go +++ b/vendor/github.com/expr-lang/expr/optimizer/optimizer.go @@ -37,5 +37,7 @@ func Optimize(node *Node, config *conf.Config) error { Walk(node, &filterLast{}) Walk(node, &filterFirst{}) Walk(node, &predicateCombination{}) + Walk(node, &sumArray{}) + Walk(node, &sumMap{}) return nil } diff --git a/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go b/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go index 2733781df..6e8a7f7cf 100644 --- a/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go +++ b/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go @@ -5,6 +5,14 @@ import ( "github.com/expr-lang/expr/parser/operator" ) +/* +predicateCombination is a visitor that combines multiple predicate calls into a single call. 
+For example, the following expression: + + all(x, x > 1) && all(x, x < 10) -> all(x, x > 1 && x < 10) + any(x, x > 1) || any(x, x < 10) -> any(x, x > 1 || x < 10) + none(x, x > 1) && none(x, x < 10) -> none(x, x > 1 || x < 10) +*/ type predicateCombination struct{} func (v *predicateCombination) Visit(node *Node) { @@ -36,10 +44,12 @@ func (v *predicateCombination) Visit(node *Node) { } func combinedOperator(fn, op string) (string, bool) { - switch fn { - case "all", "any": + switch { + case fn == "all" && (op == "and" || op == "&&"): + return op, true + case fn == "any" && (op == "or" || op == "||"): return op, true - case "one", "none": + case fn == "none" && (op == "and" || op == "&&"): switch op { case "and": return "or", true diff --git a/vendor/github.com/expr-lang/expr/optimizer/sum_array.go b/vendor/github.com/expr-lang/expr/optimizer/sum_array.go new file mode 100644 index 000000000..0a05d1f2e --- /dev/null +++ b/vendor/github.com/expr-lang/expr/optimizer/sum_array.go @@ -0,0 +1,37 @@ +package optimizer + +import ( + "fmt" + + . "github.com/expr-lang/expr/ast" +) + +type sumArray struct{} + +func (*sumArray) Visit(node *Node) { + if sumBuiltin, ok := (*node).(*BuiltinNode); ok && + sumBuiltin.Name == "sum" && + len(sumBuiltin.Arguments) == 1 { + if array, ok := sumBuiltin.Arguments[0].(*ArrayNode); ok && + len(array.Nodes) >= 2 { + Patch(node, sumArrayFold(array)) + } + } +} + +func sumArrayFold(array *ArrayNode) *BinaryNode { + if len(array.Nodes) > 2 { + return &BinaryNode{ + Operator: "+", + Left: array.Nodes[0], + Right: sumArrayFold(&ArrayNode{Nodes: array.Nodes[1:]}), + } + } else if len(array.Nodes) == 2 { + return &BinaryNode{ + Operator: "+", + Left: array.Nodes[0], + Right: array.Nodes[1], + } + } + panic(fmt.Errorf("sumArrayFold: invalid array length %d", len(array.Nodes))) +} diff --git a/vendor/github.com/expr-lang/expr/optimizer/sum_map.go b/vendor/github.com/expr-lang/expr/optimizer/sum_map.go new file mode 100644 index 000000000..a41a53732 --- /dev/null +++ b/vendor/github.com/expr-lang/expr/optimizer/sum_map.go @@ -0,0 +1,25 @@ +package optimizer + +import ( + . 
"github.com/expr-lang/expr/ast" +) + +type sumMap struct{} + +func (*sumMap) Visit(node *Node) { + if sumBuiltin, ok := (*node).(*BuiltinNode); ok && + sumBuiltin.Name == "sum" && + len(sumBuiltin.Arguments) == 1 { + if mapBuiltin, ok := sumBuiltin.Arguments[0].(*BuiltinNode); ok && + mapBuiltin.Name == "map" && + len(mapBuiltin.Arguments) == 2 { + Patch(node, &BuiltinNode{ + Name: "sum", + Arguments: []Node{ + mapBuiltin.Arguments[0], + mapBuiltin.Arguments[1], + }, + }) + } + } +} diff --git a/vendor/github.com/expr-lang/expr/parser/parser.go b/vendor/github.com/expr-lang/expr/parser/parser.go index 9cb79cbbb..6d96561ad 100644 --- a/vendor/github.com/expr-lang/expr/parser/parser.go +++ b/vendor/github.com/expr-lang/expr/parser/parser.go @@ -33,7 +33,8 @@ var predicates = map[string]struct { "one": {[]arg{expr, closure}}, "filter": {[]arg{expr, closure}}, "map": {[]arg{expr, closure}}, - "count": {[]arg{expr, closure}}, + "count": {[]arg{expr, closure | optional}}, + "sum": {[]arg{expr, closure | optional}}, "find": {[]arg{expr, closure}}, "findIndex": {[]arg{expr, closure}}, "findLast": {[]arg{expr, closure}}, diff --git a/vendor/github.com/expr-lang/expr/patcher/with_context.go b/vendor/github.com/expr-lang/expr/patcher/with_context.go index 55b604261..f9861a2c2 100644 --- a/vendor/github.com/expr-lang/expr/patcher/with_context.go +++ b/vendor/github.com/expr-lang/expr/patcher/with_context.go @@ -22,11 +22,18 @@ func (w WithContext) Visit(node *ast.Node) { if fn.Kind() != reflect.Func { return } - if fn.NumIn() == 0 { - return - } - if fn.In(0).String() != "context.Context" { + switch fn.NumIn() { + case 0: return + case 1: + if fn.In(0).String() != "context.Context" { + return + } + default: + if fn.In(0).String() != "context.Context" && + fn.In(1).String() != "context.Context" { + return + } } ast.Patch(node, &ast.CallNode{ Callee: call.Callee, diff --git a/vendor/github.com/expr-lang/expr/patcher/with_timezone.go b/vendor/github.com/expr-lang/expr/patcher/with_timezone.go new file mode 100644 index 000000000..83eb28e95 --- /dev/null +++ b/vendor/github.com/expr-lang/expr/patcher/with_timezone.go @@ -0,0 +1,25 @@ +package patcher + +import ( + "time" + + "github.com/expr-lang/expr/ast" +) + +// WithTimezone passes Location to date() and now() functions. 
+type WithTimezone struct { + Location *time.Location +} + +func (t WithTimezone) Visit(node *ast.Node) { + if btin, ok := (*node).(*ast.BuiltinNode); ok { + switch btin.Name { + case "date", "now": + loc := &ast.ConstantNode{Value: t.Location} + ast.Patch(node, &ast.BuiltinNode{ + Name: btin.Name, + Arguments: append([]ast.Node{loc}, btin.Arguments...), + }) + } + } +} diff --git a/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go b/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go index 3529fdd58..d950f1111 100644 --- a/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go +++ b/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go @@ -334,6 +334,344 @@ func Equal(a, b interface{}) bool { case float64: return float64(x) == float64(y) } + case []any: + switch y := b.(type) { + case []string: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint8: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint16: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint32: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint64: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int8: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int16: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int32: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int64: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []float32: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []float64: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []any: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + } + case []string: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []string: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []uint: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []uint8: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint8: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return 
false + } + } + return true + } + case []uint16: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint16: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []uint32: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint32: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []uint64: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint64: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int8: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int8: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int16: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int16: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int32: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int32: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int64: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int64: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []float32: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []float32: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []float64: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []float64: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } case string: switch y := b.(type) { case string: diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go index 889f9e77b..81094e87c 100644 --- a/vendor/github.com/fatih/color/color.go +++ b/vendor/github.com/fatih/color/color.go @@ -65,6 +65,29 @@ const ( CrossedOut ) +const ( + ResetBold Attribute = iota + 22 + ResetItalic + ResetUnderline + ResetBlinking + _ + ResetReversed + ResetConcealed + ResetCrossedOut +) + +var mapResetAttributes map[Attribute]Attribute = map[Attribute]Attribute{ + Bold: ResetBold, + Faint: ResetBold, + Italic: ResetItalic, + Underline: ResetUnderline, + BlinkSlow: ResetBlinking, + BlinkRapid: ResetBlinking, + ReverseVideo: ResetReversed, + Concealed: ResetConcealed, + CrossedOut: ResetCrossedOut, +} + // Foreground text colors const ( FgBlack Attribute = iota + 30 @@ -246,10 +269,7 @@ func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File. func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - c.SetWriter(w) - defer c.UnsetWriter(w) - - return fmt.Fprintln(w, a...) 
+ return fmt.Fprintln(w, c.wrap(sprintln(a...))) } // Println formats using the default formats for its operands and writes to @@ -258,10 +278,7 @@ func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { // encountered. This is the standard fmt.Print() method wrapped with the given // color. func (c *Color) Println(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintln(Output, a...) + return fmt.Fprintln(Output, c.wrap(sprintln(a...))) } // Sprint is just like Print, but returns a string instead of printing it. @@ -271,7 +288,7 @@ func (c *Color) Sprint(a ...interface{}) string { // Sprintln is just like Println, but returns a string instead of printing it. func (c *Color) Sprintln(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) + return c.wrap(sprintln(a...)) + "\n" } // Sprintf is just like Printf, but returns a string instead of printing it. @@ -353,7 +370,7 @@ func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { // string. Windows users should use this in conjunction with color.Output. func (c *Color) SprintlnFunc() func(a ...interface{}) string { return func(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) + return c.wrap(sprintln(a...)) + "\n" } } @@ -383,7 +400,18 @@ func (c *Color) format() string { } func (c *Color) unformat() string { - return fmt.Sprintf("%s[%dm", escape, Reset) + //return fmt.Sprintf("%s[%dm", escape, Reset) + // for each element in sequence let's use the specific reset escape, or the generic one if not found + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(Reset)) + ra, ok := mapResetAttributes[v] + if ok { + format[i] = strconv.Itoa(int(ra)) + } + } + + return fmt.Sprintf("%s[%sm", escape, strings.Join(format, ";")) } // DisableColor disables the color output. Useful to not change any existing @@ -411,6 +439,12 @@ func (c *Color) isNoColorSet() bool { // Equals returns a boolean value indicating whether two colors are equal. func (c *Color) Equals(c2 *Color) bool { + if c == nil && c2 == nil { + return true + } + if c == nil || c2 == nil { + return false + } if len(c.params) != len(c2.params) { return false } @@ -614,3 +648,8 @@ func HiCyanString(format string, a ...interface{}) string { return colorString(f func HiWhiteString(format string, a ...interface{}) string { return colorString(format, FgHiWhite, a...) } + +// sprintln is a helper function to format a string with fmt.Sprintln and trim the trailing newline.
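Two behavior changes meet in this hunk: the print family now wraps each call through the `sprintln` helper documented above, and `unformat` emits per-attribute resets (22 for bold, 24 for underline, and so on) with the generic `0` only as a fallback. A small sketch of the visible effect:

```go
package main

import "github.com/fatih/color"

func main() {
	c := color.New(color.FgGreen, color.Bold)

	// The reset sequence now lands *before* the trailing newline, and it
	// clears only the attributes that were set, so styling no longer
	// bleeds into whatever is printed next.
	c.Println("hello", "world")

	s := c.Sprintln("captured") // wrapped text plus a trailing "\n"
	print(s)
}
```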
+func sprintln(a ...interface{}) string { + return strings.TrimSuffix(fmt.Sprintln(a...), "\n") +} diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index 8969526a6..7c7f0c69c 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index fb2f866f4..30568e768 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { // implementation. It should be constructed with NewFormatter. Some of // its methods directly implement logr.LogSink. type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - parentValuesStr string - depth int - opts *Options - group string // for slog groups - groupDepth int + outputFormat outputFormat + prefix string + values []any + valuesStr string + depth int + opts *Options + groupName string // for slog groups + groups []groupDef } // outputFormat indicates which outputFormat to use. @@ -257,6 +256,13 @@ const ( outputJSON ) +// groupDef represents a saved group. The values may be empty, but we don't +// know if we need to render the group until the final record is rendered. +type groupDef struct { + name string + values string +} + // PseudoStruct is a list of key-value pairs that gets logged as a struct. type PseudoStruct []any @@ -264,76 +270,102 @@ type PseudoStruct []any func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. 
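The rewrite of `render` below replaces the pre-rendered `parentValuesStr` with a stack of `groupDef` values rendered inside-out per record, which is what allows a group with no contents to be elided entirely. A sketch of the visible behavior through the slog bridge, assuming logr v1.4's `ToSlogHandler`:

```go
package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	sink := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})
	log := slog.New(logr.ToSlogHandler(sink))

	// An empty group is elided from the record rather than rendered
	// as a dangling `"req":{}`.
	log.WithGroup("req").Info("start")

	// A populated group renders as a nested JSON object.
	log.WithGroup("req").Info("done", "status", 200)
}
```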
buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { - buf.WriteByte('{') // for the whole line + buf.WriteByte('{') // for the whole record } + // Render builtins vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } - f.flatten(buf, vals, false, false) // keys are ours, no need to escape + f.flatten(buf, vals, false) // keys are ours, no need to escape continuing := len(builtins) > 0 - if f.parentValuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) + // Turn the inner-most group into a string + argsStr := func() string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) } - buf.WriteString(f.parentValuesStr) - continuing = true - } + f.flatten(buf, vals, true) // escape user-provided keys - groupDepth := f.groupDepth - if f.group != "" { - if f.valuesStr != "" || len(args) != 0 { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } else { - // The group was empty - groupDepth-- + return buf.String() + }() + + // Render the stack of groups from the inside out. + bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr) + for i := len(f.groups) - 1; i >= 0; i-- { + grp := &f.groups[i] + if grp.values == "" && bodyStr == "" { + // no contents, so we must elide the whole group + continue } + bodyStr = f.renderGroup(grp.name, grp.values, bodyStr) } - if f.valuesStr != "" { + if bodyStr != "" { if continuing { buf.WriteByte(f.comma()) } - buf.WriteString(f.valuesStr) - continuing = true + buf.WriteString(bodyStr) } - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) + if f.outputFormat == outputJSON { + buf.WriteByte('}') // for the whole record } - f.flatten(buf, vals, continuing, true) // escape user-provided keys - for i := 0; i < groupDepth; i++ { - buf.WriteByte('}') // for the groups + return buf.String() +} + +// renderGroup returns a string representation of the named group with rendered +// values and args. If the name is empty, this will return the values and args, +// joined. If the name is not empty, this will return a single key-value pair, +// where the value is a grouping of the values and args. If the values and +// args are both empty, this will return an empty string, even if the name was +// specified. +func (f Formatter) renderGroup(name string, values string, args string) string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + needClosingBrace := false + if name != "" && (values != "" || args != "") { + buf.WriteString(f.quoted(name, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') + needClosingBrace = true } - if f.outputFormat == outputJSON { - buf.WriteByte('}') // for the whole line + continuing := false + if values != "" { + buf.WriteString(values) + continuing = true + } + + if args != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(args) + } + + if needClosingBrace { + buf.WriteByte('}') } return buf.String() } -// flatten renders a list of key-value pairs into a buffer. If continuing is -// true, it assumes that the buffer has previous values and will emit a -// separator (which depends on the output format) before the first pair it -// writes. 
If escapeKeys is true, the keys are assumed to have -// non-JSON-compatible characters in them and must be evaluated for escapes. +// flatten renders a list of key-value pairs into a buffer. If escapeKeys is +// true, the keys are assumed to have non-JSON-compatible characters in them +// and must be evaluated for escapes. // // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { @@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc } v := kvList[i+1] - if i > 0 || continuing { + if i > 0 { if f.outputFormat == outputJSON { buf.WriteByte(f.comma()) } else { @@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any { // startGroup opens a new group scope (basically a sub-struct), which locks all // the current saved values and starts them anew. This is needed to satisfy // slog. -func (f *Formatter) startGroup(group string) { +func (f *Formatter) startGroup(name string) { // Unnamed groups are just inlined. - if group == "" { + if name == "" { return } - // Any saved values can no longer be changed. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - continuing := false - - if f.parentValuesStr != "" { - buf.WriteString(f.parentValuesStr) - continuing = true - } - - if f.group != "" && f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } - - if f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.valuesStr) - } - - // NOTE: We don't close the scope here - that's done later, when a log line - // is actually rendered (because we have N scopes to close). - - f.parentValuesStr = buf.String() + n := len(f.groups) + f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr}) // Start collecting new values. - f.group = group - f.groupDepth++ + f.groupName = name f.valuesStr = "" f.values = nil } @@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) { // Pre-render values, so we don't have to do it on each Info/Error call. buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, false, true) // escape user-provided keys + f.flatten(buf, vals, true) // escape user-provided keys f.valuesStr = buf.String() } diff --git a/vendor/github.com/go-ole/go-ole/SECURITY.md b/vendor/github.com/go-ole/go-ole/SECURITY.md new file mode 100644 index 000000000..dac281523 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. 
+ +Please disclose it at [security advisory](https://github.com/go-ole/go-ole/security/advisories/new). + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure. diff --git a/vendor/github.com/go-ole/go-ole/appveyor.yml b/vendor/github.com/go-ole/go-ole/appveyor.yml index 0d557ac2f..8df7fa26e 100644 --- a/vendor/github.com/go-ole/go-ole/appveyor.yml +++ b/vendor/github.com/go-ole/go-ole/appveyor.yml @@ -6,14 +6,9 @@ version: "1.3.0.{build}-alpha-{branch}" -os: Windows Server 2012 R2 +os: Visual Studio 2019 -branches: - only: - - master - - v1.2 - - v1.1 - - v1.0 +build: off skip_tags: true @@ -21,20 +16,40 @@ clone_folder: c:\gopath\src\github.com\go-ole\go-ole environment: GOPATH: c:\gopath - matrix: - - GOARCH: amd64 - GOVERSION: 1.5 - GOROOT: c:\go - DOWNLOADPLATFORM: "x64" + GOROOT: c:\go + DOWNLOADPLATFORM: "x64" -install: - - choco install mingw - - SET PATH=c:\tools\mingw64\bin;%PATH% +before_test: # - Download COM Server - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip" - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat - # - set + +test_script: + - go test -v -cover ./... + # go vet has false positives on unsafe.Pointer with windows/sys. Disabling since it is recommended to use go test instead. + # - go vet ./... + +branches: + only: + - master + - v1.2 + - v1.1 + - v1.0 + +matrix: + allow_failures: + - environment: + GOROOT: C:\go-x86 + DOWNLOADPLATFORM: "x86" + - environment: + GOROOT: C:\go118 + DOWNLOADPLATFORM: "x64" + - environment: + GOROOT: C:\go118-x86 + DOWNLOADPLATFORM: "x86" + +install: - go version - go env - go get -u golang.org/x/tools/cmd/cover @@ -45,10 +60,9 @@ build_script: - cd c:\gopath\src\github.com\go-ole\go-ole - go get -v -t ./... - go build - - go test -v -cover ./... # disable automatic tests -test: off +test: on # disable deployment deploy: off diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go index a9bef150a..cabbac012 100644 --- a/vendor/github.com/go-ole/go-ole/com.go +++ b/vendor/github.com/go-ole/go-ole/com.go @@ -11,6 +11,7 @@ import ( var ( procCoInitialize = modole32.NewProc("CoInitialize") procCoInitializeEx = modole32.NewProc("CoInitializeEx") + procCoInitializeSecurity = modole32.NewProc("CoInitializeSecurity") procCoUninitialize = modole32.NewProc("CoUninitialize") procCoCreateInstance = modole32.NewProc("CoCreateInstance") procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") @@ -37,6 +38,9 @@ var ( procDispatchMessageW = moduser32.NewProc("DispatchMessageW") ) +// This is to enable calling COM Security initialization multiple times +var bSecurityInit bool = false + // coInitialize initializes COM library on current thread. // // MSDN documentation suggests that this function should not be called. Call @@ -68,6 +72,35 @@ func coInitializeEx(coinit uint32) (err error) { return } +// coInitializeSecurity: Registers security and sets the default security values +// for the process. 
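The exported `CoInitializeSecurity` that follows is intended to be called once per process after COM itself is initialized; the `bSecurityInit` flag turns repeated calls into no-ops. A hedged sketch of a caller; the RPC_C_* levels are Windows SDK values quoted numerically here, since go-ole does not export named constants for them:

```go
//go:build windows

package main

import ole "github.com/go-ole/go-ole"

func main() {
	if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
		panic(err)
	}
	defer ole.CoUninitialize()

	const (
		rpcAuthnLevelDefault   = 0 // RPC_C_AUTHN_LEVEL_DEFAULT
		rpcImpLevelImpersonate = 3 // RPC_C_IMP_LEVEL_IMPERSONATE
	)
	// cAuthSvc = -1 lets COM pick the authentication services.
	if err := ole.CoInitializeSecurity(-1, rpcAuthnLevelDefault, rpcImpLevelImpersonate, 0); err != nil {
		panic(err)
	}
}
```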
+func coInitializeSecurity(cAuthSvc int32, + dwAuthnLevel uint32, + dwImpLevel uint32, + dwCapabilities uint32) (err error) { + // Check whether COM Security initialization has been done previously + if !bSecurityInit { + // https://learn.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-coinitializesecurity + hr, _, _ := procCoInitializeSecurity.Call( + uintptr(0), // Allow *all* VSS writers to communicate back! + uintptr(cAuthSvc), // Default COM authentication service + uintptr(0), // Default COM authorization service + uintptr(0), // Reserved parameter + uintptr(dwAuthnLevel), // Strongest COM authentication level + uintptr(dwImpLevel), // Minimal impersonation abilities + uintptr(0), // Default COM authentication settings + uintptr(dwCapabilities), // Cloaking + uintptr(0)) // Reserved parameter + if hr != 0 { + err = NewError(hr) + } else { + // COM Security initialization done; set the global flag to true. + bSecurityInit = true + } + } + return +} + // CoInitialize initializes COM library on current thread. // // MSDN documentation suggests that this function should not be called. Call @@ -96,6 +129,15 @@ func CoUninitialize() { procCoUninitialize.Call() } +// CoInitializeSecurity: Registers security and sets the default security values +// for the process. +func CoInitializeSecurity(cAuthSvc int32, + dwAuthnLevel uint32, + dwImpLevel uint32, + dwCapabilities uint32) (err error) { + return coInitializeSecurity(cAuthSvc, dwAuthnLevel, dwImpLevel, dwCapabilities) +} + // CoTaskMemFree frees memory pointer. func CoTaskMemFree(memptr uintptr) { procCoTaskMemFree.Call(memptr) diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go index b399f0479..649c0734f 100644 --- a/vendor/github.com/go-ole/go-ole/idispatch_windows.go +++ b/vendor/github.com/go-ole/go-ole/idispatch_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ole @@ -92,7 +93,7 @@ func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{} case int8: vargs[n] = NewVariant(VT_I1, int64(v.(int8))) case *int8: - vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) + vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int8))))) case int16: vargs[n] = NewVariant(VT_I2, int64(v.(int16))) case *int16: diff --git a/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/go-ole/go-ole/variant.go index 967a23fea..a2c8402f7 100644 --- a/vendor/github.com/go-ole/go-ole/variant.go +++ b/vendor/github.com/go-ole/go-ole/variant.go @@ -99,7 +99,7 @@ func (v *VARIANT) Value() interface{} { case VT_DISPATCH: return v.ToIDispatch() case VT_BOOL: - return v.Val != 0 + return (v.Val & 0xffff) != 0 } return nil } diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml new file mode 100644 index 000000000..22f8d21cc --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + -
gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md index 813788aff..0108f1d57 100644 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -1,6 +1,10 @@ -# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# gojsonpointer [![Build Status](https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer.svg)](https://pkg.go.dev/github.com/go-openapi/jsonpointer) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonpointer)](https://goreportcard.com/report/github.com/go-openapi/jsonpointer) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) An implementation of JSON Pointer - Go language ## Status diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index de60dc7dd..d970c7cf4 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -110,19 +110,39 @@ func SetForToken(document any, decodedToken string, value any) (any, error) { return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) } +func isNil(input any) bool { + if input == nil { + return true + } + + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, 
error) { rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() + if isNil(node) { + return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken) + } - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) + switch typed := node.(type) { + case JSONPointable: + r, err := typed.JSONLookup(decodedToken) if err != nil { return nil, kind, err } return r, kind, nil + case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect + return getSingleImpl(*typed, decodedToken, nameProvider) } - switch kind { + switch kind { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -170,7 +190,7 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP return node.(JSONSetable).JSONSet(decodedToken, data) } - switch rValue.Kind() { + switch rValue.Kind() { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -231,8 +251,7 @@ func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.K if err != nil { return nil, knd, err } - node, kind = r, knd - + node = r } rValue := reflect.ValueOf(node) @@ -245,7 +264,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { - return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") + return errors.New("only structs, pointers, maps and slices are supported for setting values") } if nameProvider == nil { @@ -284,7 +303,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { continue } - switch kind { + switch kind { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -405,11 +424,11 @@ func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { case json.Delim: switch tk { case '{': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return 0, err } case '[': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return 0, err } } @@ -435,20 +454,21 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { if err != nil { return 0, err } - switch tk := tk.(type) { - case json.Delim: - switch tk { + + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { case '{': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return 0, err } case '[': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return 0, err } } } } + if !dec.More() { return 0, fmt.Errorf("token reference %q not found", decodedToken) } @@ -456,27 +476,27 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { } // drainSingle drains a single level of object or array. -// The decoder has to guarantee the begining delim (i.e. '{' or '[') has been consumed. +// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed. 
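The `isNil` guard added above changes what a caller sees when a pointer walks into a nil value: an explicit error rather than a reflect panic deeper in the traversal. A quick sketch with the public API:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	doc := map[string]any{"a": map[string]any{"b": []any{"x", "y"}}}

	p, err := jsonpointer.New("/a/b/1")
	if err != nil {
		panic(err)
	}
	v, _, err := p.Get(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // y

	// Pointing into a nil document now fails cleanly.
	var nilDoc map[string]any
	if _, _, err := p.Get(nilDoc); err != nil {
		fmt.Println("error:", err)
	}
}
```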
func drainSingle(dec *json.Decoder) error { for dec.More() { tk, err := dec.Token() if err != nil { return err } - switch tk := tk.(type) { - case json.Delim: - switch tk { + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { case '{': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return err } case '[': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return err } } } } + // Consumes the ending delim if _, err := dec.Token(); err != nil { return err @@ -498,14 +518,14 @@ const ( // Unescape unescapes a json pointer reference token string to the original representation func Unescape(token string) string { - step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) - step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) + step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1) + step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0) return step2 } // Escape escapes a pointer reference token string func Escape(token string) string { - step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) - step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) + step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0) + step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1) return step2 } diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index 013fc1943..22f8d21cc 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -1,50 +1,61 @@ linters-settings: govet: check-shadowing: true + golint: + min-confidence: 0 gocyclo: - min-complexity: 30 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 - paralleltest: - ignore-missing: true + min-occurrences: 3 + linters: enable-all: true disable: - maligned + - unparam - lll + - gochecknoinits - gochecknoglobals + - funlen - godox - gocognit - whitespace - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - testpackage - - gci - - gofumpt - - goerr113 + - nlreturn - gomnd - - tparallel + - exhaustivestruct + - goerr113 + - errorlint - nestif - godot - - errorlint - - varcheck - - interfacer - - deadcode - - golint + - gofumpt + - paralleltest + - tparallel + - thelper - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck - structcheck + - golint - nosnakecase - - varnamelen - - exhaustruct diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md index b94753aa5..c7fc2049c 100644 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -1,15 +1,19 @@ -# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack 
Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# gojsonreference [![Build Status](https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonreference.svg)](https://pkg.go.dev/github.com/go-openapi/jsonreference) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonreference)](https://goreportcard.com/report/github.com/go-openapi/jsonreference) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) An implementation of JSON Reference - Go language ## Status Feature complete. Stable API ## Dependencies -https://github.com/go-openapi/jsonpointer +* https://github.com/go-openapi/jsonpointer ## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 +* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 +* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore index d69b53acc..c4b1b64f0 100644 --- a/vendor/github.com/go-openapi/swag/.gitignore +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -2,3 +2,4 @@ secrets.yml vendor Godeps .idea +*.out diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index bf503e400..80e2be004 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -4,14 +4,14 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 25 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 3 - min-occurrences: 2 + min-occurrences: 3 linters: enable-all: true @@ -20,35 +20,41 @@ linters: - lll - gochecknoinits - gochecknoglobals - - nlreturn - - testpackage + - funlen + - godox + - gocognit + - whitespace + - wsl - wrapcheck + - testpackage + - nlreturn - gomnd - - exhaustive - exhaustivestruct - goerr113 - - wsl - - whitespace - - gofumpt - - godot + - errorlint - nestif - - godox - - funlen - - gci - - gocognit + - godot + - gofumpt - paralleltest + - tparallel - thelper - ifshort - - gomoddirectives - - cyclop - - forcetypeassert - - ireturn - - tagliatelle - - varnamelen - - goimports - - tenv - - golint - exhaustruct - - nilnil + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam - nonamedreturns + - musttag + - 
ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint - nosnakecase diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md new file mode 100644 index 000000000..e7f28ed6b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md @@ -0,0 +1,52 @@ +# Benchmarks + +## Name mangling utilities + +```bash +go test -bench XXX -run XXX -benchtime 30s +``` + +### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op +BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op +BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op +``` + +### Benchmarks after PR #79 + +~ x10 performance improvement and ~ /100 memory allocations. + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op +``` + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op +``` diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index 217f6fa50..a72922299 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -1,7 +1,8 @@ -# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) +[![Slack 
Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) Contains a bunch of helper functions for go-openapi and go-swagger projects. @@ -18,4 +19,5 @@ You may also use it standalone for your projects. This repo has only few dependencies outside of the standard library: -* YAML utilities depend on gopkg.in/yaml.v2 +* YAML utilities depend on `gopkg.in/yaml.v3` +* `github.com/mailru/easyjson v0.7.7` diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go new file mode 100644 index 000000000..20a359bb6 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/initialism_index.go @@ -0,0 +1,202 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "sort" + "strings" + "sync" +) + +var ( + // commonInitialisms are common acronyms that are kept as whole uppercased words. 
+ commonInitialisms *indexOfInitialisms + + // initialisms is a slice of sorted initialisms + initialisms []string + + // a copy of initialisms pre-baked as []rune + initialismsRunes [][]rune + initialismsUpperCased [][]rune + + isInitialism func(string) bool + + maxAllocMatches int +) + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + configuredInitialisms := map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) + maxAllocMatches = maxAllocHeuristic(initialismsRunes) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +func maxAllocHeuristic(in [][]rune) int { + heuristic := make(map[rune]int) + for _, initialism := range in { + heuristic[initialism[0]]++ + } + + var maxAlloc int + for _, val := range heuristic { + if val > maxAlloc { + maxAlloc = val + } + } + + return maxAlloc +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) +} + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Since go1.9, this may be implemented with sync.Map. 
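As a usage note for the table and the AddInitialisms hook above, a small sketch; outputs in the comments are what the matching rules in this file should produce, and "GRPC" is just an example word not present in the built-in table.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// "grpc" is not in the built-in table, so it is camelized
	// like any ordinary word.
	fmt.Println(swag.ToGoName("grpc_server")) // expected: GrpcServer

	// Once registered, it is kept as a whole upper-cased initialism,
	// and the derived rune / upper-cased caches are rebuilt.
	swag.AddInitialisms("GRPC")
	fmt.Println(swag.ToGoName("grpc_server")) // expected: GRPCServer
}
```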
+type indexOfInitialisms struct { + sortMutex *sync.Mutex + index *sync.Map +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + sortMutex: new(sync.Mutex), + index: new(sync.Map), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + for k, v := range initial { + m.index.Store(k, v) + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + _, ok := m.index.Load(key) + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.index.Store(key, true) + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + m.index.Range(func(key, _ interface{}) bool { + k := key.(string) + result = append(result, k) + return true + }) + sort.Sort(sort.Reverse(byInitialism(result))) + return +} + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 00038c377..783442fdd 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -21,6 +21,7 @@ import ( "net/http" "net/url" "os" + "path" "path/filepath" "runtime" "strings" @@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = "" var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in -func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) +func LoadFromFileOrHTTP(pth string) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout -func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) +func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth) } -// LoadStrategy returns a loader function for a given path or uri -func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { - if strings.HasPrefix(path, "http") { +// LoadStrategy returns a loader function for a given path or URI. +// +// The load strategy returns the remote load for any path starting with `http`. +// So this works for any URI with a scheme `http` or `https`. +// +// The fallback strategy is to call the local loader. +// +// The local loader takes a local file system path (absolute or relative) as argument, +// or alternatively a `file://...` URI, **without host** (see also below for windows). +// +// There are a few liberalities, initially intended to be tolerant regarding the URI syntax, +// especially on windows. 
+// +// Before the local loader is called, the given path is transformed: +// - percent-encoded characters are unescaped +// - simple paths (e.g. `./folder/file`) are passed as-is +// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such a `folder/file` works too. +// +// For paths provided as URIs with the "file" scheme, please note that: +// - `file://` is simply stripped. +// This means that the host part of the URI is not parsed at all. +// For example, `file:///folder/file" becomes "/folder/file`, +// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems. +// Similarly, `file://./folder/file` yields `./folder/file`. +// - on windows, `file://...` can take a host so as to specify an UNC share location. +// +// Reminder about windows-specifics: +// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported) +// - `file:///c:/folder/file` becomes `C:\folder\file` +// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file` +func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { + if strings.HasPrefix(pth, "http") { return remote } - return func(pth string) ([]byte, error) { - upth, err := pathUnescape(pth) + + return func(p string) ([]byte, error) { + upth, err := url.PathUnescape(p) if err != nil { return nil, err } - if strings.HasPrefix(pth, `file://`) { - if runtime.GOOS == "windows" { - // support for canonical file URIs on windows. - // Zero tolerance here for dodgy URIs. - u, _ := url.Parse(upth) - if u.Host != "" { - // assume UNC name (volume share) - // file://host/share/folder\... ==> \\host\share\path\folder - // NOTE: UNC port not yet supported - upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`) - } else { - // file:///c:/folder/... ==> just remove the leading slash - upth = strings.TrimPrefix(upth, `file:///`) - } - } else { - upth = strings.TrimPrefix(upth, `file://`) + if !strings.HasPrefix(p, `file://`) { + // regular file path provided: just normalize slashes + return local(filepath.FromSlash(upth)) + } + + if runtime.GOOS != "windows" { + // crude processing: this leaves full URIs with a host with a (mostly) unexpected result + upth = strings.TrimPrefix(upth, `file://`) + + return local(filepath.FromSlash(upth)) + } + + // windows-only pre-processing of file://... URIs + + // support for canonical file URIs on windows. + u, err := url.Parse(filepath.ToSlash(upth)) + if err != nil { + return nil, err + } + + if u.Host != "" { + // assume UNC name (volume share) + // NOTE: UNC port not yet supported + + // when the "host" segment is a drive letter: + // file://C:/folder/... => C:\folder + upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`)) + if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' { + // tolerance: if we have a leading dot, this can't be a host + // file://host/share/folder\... ==> \\host\share\path\folder + upth = "//" + upth + } + } else { + // no host, let's figure out if this is a drive letter + upth = strings.TrimPrefix(upth, `file://`) + first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/") + if strings.HasSuffix(first, ":") { + // drive letter in the first segment: + // file:///c:/folder/... 
==> strip the leading slash + upth = strings.TrimPrefix(upth, `/`) } } diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go index aa7f6a9bb..8bb64ac32 100644 --- a/vendor/github.com/go-openapi/swag/name_lexem.go +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -14,74 +14,80 @@ package swag -import "unicode" +import ( + "unicode" + "unicode/utf8" +) type ( - nameLexem interface { - GetUnsafeGoName() string - GetOriginal() string - IsInitialism() bool - } + lexemKind uint8 - initialismNameLexem struct { + nameLexem struct { original string matchedInitialism string + kind lexemKind } +) - casualNameLexem struct { - original string - } +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName ) -func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { - return &initialismNameLexem{ +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, original: original, matchedInitialism: matchedInitialism, } } -func newCasualNameLexem(original string) *casualNameLexem { - return &casualNameLexem{ +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, original: original, } } -func (l *initialismNameLexem) GetUnsafeGoName() string { - return l.matchedInitialism -} +func (l nameLexem) GetUnsafeGoName() string { + if l.kind == lexemKindInitialismName { + return l.matchedInitialism + } + + var ( + first rune + rest string + ) -func (l *casualNameLexem) GetUnsafeGoName() string { - var first rune - var rest string for i, orig := range l.original { if i == 0 { first = orig continue } + if i > 0 { rest = l.original[i:] break } } + if len(l.original) > 1 { - return string(unicode.ToUpper(first)) + lower(rest) + b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) + defer func() { + poolOfBuffers.RedeemBuffer(b) + }() + b.WriteRune(unicode.ToUpper(first)) + b.WriteString(lower(rest)) + return b.String() } return l.original } -func (l *initialismNameLexem) GetOriginal() string { +func (l nameLexem) GetOriginal() string { return l.original } -func (l *casualNameLexem) GetOriginal() string { - return l.original -} - -func (l *initialismNameLexem) IsInitialism() bool { - return true -} - -func (l *casualNameLexem) IsInitialism() bool { - return false +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName } diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go deleted file mode 100644 index f5228b82c..000000000 --- a/vendor/github.com/go-openapi/swag/post_go18.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build go1.8 -// +build go1.8 - -package swag - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.PathUnescape(path) -} diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go deleted file mode 100644 index 7c7da9c08..000000000 --- a/vendor/github.com/go-openapi/swag/post_go19.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.9 -// +build go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Since go1.9, this may be implemented with sync.Map. -type indexOfInitialisms struct { - sortMutex *sync.Mutex - index *sync.Map -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - sortMutex: new(sync.Mutex), - index: new(sync.Map), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - for k, v := range initial { - m.index.Store(k, v) - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - _, ok := m.index.Load(key) - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.index.Store(key, true) - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - m.index.Range(func(key, value interface{}) bool { - k := key.(string) - result = append(result, k) - return true - }) - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go deleted file mode 100644 index 2757d9b95..000000000 --- a/vendor/github.com/go-openapi/swag/pre_go18.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !go1.8 -// +build !go1.8 - -package swag - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.QueryUnescape(path) -} diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go deleted file mode 100644 index 0565db377..000000000 --- a/vendor/github.com/go-openapi/swag/pre_go19.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.9 -// +build !go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Before go1.9, this may be implemented with a mutex on the map. -type indexOfInitialisms struct { - getMutex *sync.Mutex - index map[string]bool -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - getMutex: new(sync.Mutex), - index: make(map[string]bool, 50), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k, v := range initial { - m.index[k] = v - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - m.getMutex.Lock() - defer m.getMutex.Unlock() - _, ok := m.index[key] - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - m.index[key] = true - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k := range m.index { - result = append(result, k) - } - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go index a1825fb7d..274727a86 100644 --- a/vendor/github.com/go-openapi/swag/split.go +++ b/vendor/github.com/go-openapi/swag/split.go @@ -15,124 +15,269 @@ package swag import ( + "bytes" + "sync" "unicode" + "unicode/utf8" ) -var nameReplaceTable = map[rune]string{ - '@': "At ", - '&': "And ", - '|': "Pipe ", - '$': "Dollar ", - '!': "Bang ", - '-': "", - '_': "", -} - type ( splitter struct { - postSplitInitialismCheck bool initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version + postSplitInitialismCheck bool + } + + splitterOption func(*splitter) + + initialismMatch struct { + body []rune + start, end int + complete bool + } + initialismMatches []initialismMatch +) + +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. 
+ + matchesPool struct { + *sync.Pool } - splitterOption func(*splitter) *splitter + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + splittersPool struct { + *sync.Pool + } ) -// split calls the splitter; splitter provides more control and post options +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfSplitters = splittersPool{ + Pool: &sync.Pool{ + New: func() any { + s := newSplitter() + + return &s + }, + }, + } +) + +// nameReplaceTable finds a word representation for special characters. +func nameReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +// split calls the splitter. +// +// Use newSplitter for more control and options func split(str string) []string { - lexems := newSplitter().split(str) - result := make([]string, 0, len(lexems)) + s := poolOfSplitters.BorrowSplitter() + lexems := s.split(str) + result := make([]string, 0, len(*lexems)) - for _, lexem := range lexems { + for _, lexem := range *lexems { result = append(result, lexem.GetOriginal()) } + poolOfLexems.RedeemLexems(lexems) + poolOfSplitters.RedeemSplitter(s) return result } -func (s *splitter) split(str string) []nameLexem { - return s.toNameLexems(str) -} - -func newSplitter(options ...splitterOption) *splitter { - splitter := &splitter{ +func newSplitter(options ...splitterOption) splitter { + s := splitter{ postSplitInitialismCheck: false, initialisms: initialisms, + initialismsRunes: initialismsRunes, + initialismsUpperCased: initialismsUpperCased, } for _, option := range options { - splitter = option(splitter) + option(&s) } - return splitter + return s } // withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) *splitter { +func withPostSplitInitialismCheck(s *splitter) { s.postSplitInitialismCheck = true +} + +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + return s } -type ( - initialismMatch struct { - start, end int - body []rune - complete bool +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) } - initialismMatches []*initialismMatch -) -func (s *splitter) toNameLexems(name string) []nameLexem { + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { + s := p.Get().(*splitter) + s.postSplitInitialismCheck = false // reset options + for _, apply := range options { + apply(s) + } + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p 
buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p splittersPool) RedeemSplitter(s *splitter) { + p.Put(s) +} + +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 +} + +func (s splitter) split(name string) *[]nameLexem { nameRunes := []rune(name) matches := s.gatherInitialismMatches(nameRunes) + if matches == nil { + return poolOfLexems.BorrowLexems() + } + return s.mapMatchesToNameLexems(nameRunes, matches) } -func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { - matches := make(initialismMatches, 0) +func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { + var matches *initialismMatches for currentRunePosition, currentRune := range nameRunes { - newMatches := make(initialismMatches, 0, len(matches)) + // recycle these allocations as we loop over runes + // with such recycling, only 2 slices should be allocated per call + // instead of o(n). + newMatches := poolOfMatches.BorrowMatches() // check current initialism matches - for _, match := range matches { - if keepCompleteMatch := match.complete; keepCompleteMatch { - newMatches = append(newMatches, match) - continue - } + if matches != nil { // skip first iteration + for _, match := range *matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + *newMatches = append(*newMatches, match) + continue + } - // drop failed match - currentMatchRune := match.body[currentRunePosition-match.start] - if !s.initialismRuneEqual(currentMatchRune, currentRune) { - continue - } + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if currentMatchRune != currentRune { + continue + } - // try to complete ongoing match - if currentRunePosition-match.start == len(match.body)-1 { - // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word - - if currentRunePosition < len(nameRunes)-1 { - nextRune := nameRunes[currentRunePosition+1] - if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word - continue + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } } + + match.complete = true + match.end = currentRunePosition } - match.complete = true - match.end = currentRunePosition + *newMatches = append(*newMatches, match) } - - newMatches = append(newMatches, match) } // check for new initialism matches - for _, initialism := range s.initialisms { - initialismRunes := []rune(initialism) - if s.initialismRuneEqual(initialismRunes[0], currentRune) { - newMatches = append(newMatches, &initialismMatch{ + for i := range s.initialisms { + initialismRunes := s.initialismsRunes[i] + if initialismRunes[0] == currentRune { + *newMatches = append(*newMatches, initialismMatch{ start: currentRunePosition, body: initialismRunes, complete: false, @@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { } } + if matches != nil { + poolOfMatches.RedeemMatches(matches) + } matches = 
newMatches } + // up to the caller to redeem this last slice return matches } -func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { - nameLexems := make([]nameLexem, 0) +func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { + nameLexems := poolOfLexems.BorrowLexems() - var lastAcceptedMatch *initialismMatch - for _, match := range matches { + var lastAcceptedMatch initialismMatch + for _, match := range *matches { if !match.complete { continue } - if firstMatch := lastAcceptedMatch == nil; firstMatch { - nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + if firstMatch := lastAcceptedMatch.isZero(); firstMatch { + s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match @@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa } middle := nameRunes[lastAcceptedMatch.end+1 : match.start] - nameLexems = append(nameLexems, s.breakCasualString(middle)...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + s.appendBrokenDownCasualString(nameLexems, middle) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match } // we have not found any accepted matches - if lastAcceptedMatch == nil { - return s.breakCasualString(nameRunes) - } - - if lastAcceptedMatch.end+1 != len(nameRunes) { + if lastAcceptedMatch.isZero() { + *nameLexems = (*nameLexems)[:0] + s.appendBrokenDownCasualString(nameLexems, nameRunes) + } else if lastAcceptedMatch.end+1 != len(nameRunes) { rest := nameRunes[lastAcceptedMatch.end+1:] - nameLexems = append(nameLexems, s.breakCasualString(rest)...) 
+ s.appendBrokenDownCasualString(nameLexems, rest) } - return nameLexems -} + poolOfMatches.RedeemMatches(matches) -func (s *splitter) initialismRuneEqual(a, b rune) bool { - return a == b + return nameLexems } -func (s *splitter) breakInitialism(original string) nameLexem { +func (s splitter) breakInitialism(original string) nameLexem { return newInitialismNameLexem(original, original) } -func (s *splitter) breakCasualString(str []rune) []nameLexem { - segments := make([]nameLexem, 0) - currentSegment := "" +func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) { + currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused + defer func() { + poolOfBuffers.RedeemBuffer(currentSegment) + }() addCasualNameLexem := func(original string) { - segments = append(segments, newCasualNameLexem(original)) + *segments = append(*segments, newCasualNameLexem(original)) } addInitialismNameLexem := func(original, match string) { - segments = append(segments, newInitialismNameLexem(original, match)) + *segments = append(*segments, newInitialismNameLexem(original, match)) } - addNameLexem := func(original string) { - if s.postSplitInitialismCheck { - for _, initialism := range s.initialisms { - if upper(initialism) == upper(original) { - addInitialismNameLexem(original, initialism) + var addNameLexem func(string) + if s.postSplitInitialismCheck { + addNameLexem = func(original string) { + for i := range s.initialisms { + if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) { + addInitialismNameLexem(original, s.initialisms[i]) + return } } - } - addCasualNameLexem(original) + addCasualNameLexem(original) + } + } else { + addNameLexem = addCasualNameLexem } - for _, rn := range string(str) { - if replace, found := nameReplaceTable[rn]; found { - if currentSegment != "" { - addNameLexem(currentSegment) - currentSegment = "" + for _, rn := range str { + if replace, found := nameReplaceTable(rn); found { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } if replace != "" { @@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem { } if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { - if currentSegment != "" { - addNameLexem(currentSegment) - currentSegment = "" + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } continue } if unicode.IsUpper(rn) { - if currentSegment != "" { - addNameLexem(currentSegment) + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } - currentSegment = "" + currentSegment.Reset() } - currentSegment += string(rn) + currentSegment.WriteRune(rn) + } + + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } +} + +// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but +// it ignores leading and trailing blank spaces in the compared +// string. +// +// base is assumed to be composed of upper-cased runes, and be already +// trimmed. +// +// This code is heavily inspired from strings.EqualFold. 
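Stated as an allocating two-liner, the intended semantics are roughly the following. The helper name is hypothetical and this is only a behavioural approximation: the real zero-copy version compares via ToUpper, so exotic Unicode case folding may differ.

```go
package swagx // hypothetical package, for illustration only

import "strings"

// equalFoldIgnoreSpaceSlow: trim blanks around str, then compare
// case-insensitively against base (assumed upper-cased and trimmed).
// Allocates where the real implementation does not.
func equalFoldIgnoreSpaceSlow(base []rune, str string) bool {
	return strings.EqualFold(string(base), strings.TrimSpace(str))
}
```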
+func isEqualFoldIgnoreSpace(base []rune, str string) bool { + var i, baseIndex int + // equivalent to b := []byte(str), but without data copy + b := hackStringBytes(str) + + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + break + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + break + } + i += size + } + + if i >= len(b) { + return len(base) == 0 + } + + for _, baseRune := range base { + if i >= len(b) { + break + } + + if c := b[i]; c < utf8.RuneSelf { + // single byte rune case (ASCII) + if baseRune >= utf8.RuneSelf { + return false + } + + baseChar := byte(baseRune) + if c != baseChar && + !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { + return false + } + + baseIndex++ + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if unicode.ToUpper(r) != baseRune { + return false + } + baseIndex++ + i += size + } + + if baseIndex != len(base) { + return false + } + + // all passed: now we should only have blanks + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + return false + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + return false + } - if currentSegment != "" { - addNameLexem(currentSegment) + i += size } - return segments + return true } diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go new file mode 100644 index 000000000..90745d5ca --- /dev/null +++ b/vendor/github.com/go-openapi/swag/string_bytes.go @@ -0,0 +1,8 @@ +package swag + +import "unsafe" + +// hackStringBytes returns the (unsafe) underlying bytes slice of a string. +func hackStringBytes(str string) []byte { + return unsafe.Slice(unsafe.StringData(str), len(str)) +} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index d971fbe34..5051401c4 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -18,76 +18,25 @@ import ( "reflect" "strings" "unicode" + "unicode/utf8" ) -// commonInitialisms are common acronyms that are kept as whole uppercased words. -var commonInitialisms *indexOfInitialisms - -// initialisms is a slice of sorted initialisms -var initialisms []string - -var isInitialism func(string) bool - // GoNamePrefixFunc sets an optional rule to prefix go names // which do not start with a letter. // +// The prefix function is assumed to return a string that starts with an upper case letter. +// // e.g. 
to help convert "123" into "{prefix}123" // // The default is to prefix with "X" var GoNamePrefixFunc func(string) string -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - var configuredInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "IPv4": true, - "IPv6": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, +func prefixFunc(name, in string) string { + if GoNamePrefixFunc == nil { + return "X" + in } - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - initialisms = commonInitialisms.sorted() - - // a test function - isInitialism = commonInitialisms.isInitialism + return GoNamePrefixFunc(name) + in } const ( @@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string { return result } -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} - // Removes leading whitespaces func trim(str string) string { - return strings.Trim(str, " ") + return strings.TrimSpace(str) } // Shortcut to strings.ToUpper() @@ -188,15 +121,20 @@ func lower(str string) string { } // Camelize an uppercased word -func Camelize(word string) (camelized string) { +func Camelize(word string) string { + camelized := poolOfBuffers.BorrowBuffer(len(word)) + defer func() { + poolOfBuffers.RedeemBuffer(camelized) + }() + for pos, ru := range []rune(word) { if pos > 0 { - camelized += string(unicode.ToLower(ru)) + camelized.WriteRune(unicode.ToLower(ru)) } else { - camelized += string(unicode.ToUpper(ru)) + camelized.WriteRune(unicode.ToUpper(ru)) } } - return + return camelized.String() } // ToFileName lowercases and underscores a go type name @@ -224,33 +162,40 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) - out := make([]string, 0, len(in)) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) + out := make([]string, 0, len(*in)) - for _, w := range in { + for _, w := range *in { if !w.IsInitialism() { out = append(out, lower(w.GetOriginal())) } else { - out = append(out, w.GetOriginal()) + out = append(out, trim(w.GetOriginal())) } } + poolOfLexems.RedeemLexems(in) return strings.Join(out, " ") } // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) - out := 
make([]string, 0, len(in)) - for _, w := range in { - original := w.GetOriginal() + out := make([]string, 0, len(*in)) + for _, w := range *in { + original := trim(w.GetOriginal()) if !w.IsInitialism() { out = append(out, Camelize(original)) } else { out = append(out, original) } } + poolOfLexems.RedeemLexems(in) + return strings.Join(out, " ") } @@ -264,7 +209,7 @@ func ToJSONName(name string) string { out = append(out, lower(w)) continue } - out = append(out, Camelize(w)) + out = append(out, Camelize(trim(w))) } return strings.Join(out, "") } @@ -283,35 +228,70 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - lexems := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + lexems := s.split(name) + poolOfSplitters.RedeemSplitter(s) + defer func() { + poolOfLexems.RedeemLexems(lexems) + }() + lexemes := *lexems + + if len(lexemes) == 0 { + return "" + } + + result := poolOfBuffers.BorrowBuffer(len(name)) + defer func() { + poolOfBuffers.RedeemBuffer(result) + }() + + // check if not starting with a letter, upper case + firstPart := lexemes[0].GetUnsafeGoName() + if lexemes[0].IsInitialism() { + firstPart = upper(firstPart) + } + + if c := firstPart[0]; c < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= c && c <= 'Z': + result.WriteString(firstPart) + case 'a' <= c && c <= 'z': + result.WriteByte(c - 'a' + 'A') + result.WriteString(firstPart[1:]) + default: + result.WriteString(prefixFunc(name, firstPart)) + // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: + // assume this is always the case + } + } else { + // unicode + firstRune, _ := utf8.DecodeRuneInString(firstPart) + switch { + case !unicode.IsLetter(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + case !unicode.IsUpper(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + /* + result.WriteRune(unicode.ToUpper(firstRune)) + result.WriteString(firstPart[offset:]) + */ + default: + result.WriteString(firstPart) + } + } - result := "" - for _, lexem := range lexems { + for _, lexem := range lexemes[1:] { goName := lexem.GetUnsafeGoName() // to support old behavior if lexem.IsInitialism() { goName = upper(goName) } - result += goName + result.WriteString(goName) } - if len(result) > 0 { - // Only prefix with X when the first character isn't an ascii letter - first := []rune(result)[0] - if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { - if GoNamePrefixFunc == nil { - return "X" + result - } - result = GoNamePrefixFunc(name) + result - } - first = []rune(result)[0] - if unicode.IsLetter(first) && !unicode.IsUpper(first) { - result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) - } - } - - return result + return result.String() } // ContainsStrings searches a slice of strings for a case-sensitive match @@ -343,7 +323,7 @@ type zeroable interface { func IsZero(data interface{}) bool { v := reflect.ValueOf(data) // check for nil data - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: if v.IsNil() { return true @@ -356,7 +336,7 @@ func IsZero(data interface{}) bool { } // continue with slightly more complex reflection - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.String: return v.Len() == 0 case reflect.Bool: @@ 
-376,16 +356,6 @@ func IsZero(data interface{}) bool { } } -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - // commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() -} - // CommandLineOptionsGroup represents a group of user-defined command line options type CommandLineOptionsGroup struct { ShortDescription string diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index f09ee609f..f59e02593 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -16,8 +16,11 @@ package swag import ( "encoding/json" + "errors" "fmt" "path/filepath" + "reflect" + "sort" "strconv" "github.com/mailru/easyjson/jlexer" @@ -48,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) { return nil, err } if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { - return nil, fmt.Errorf("only YAML documents that are objects are supported") + return nil, errors.New("only YAML documents that are objects are supported") } return &document, nil } @@ -147,7 +150,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) { case yamlTimestamp: return node.Value, nil case yamlNull: - return nil, nil + return nil, nil //nolint:nilnil default: return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) } @@ -245,7 +248,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) { return yaml.Marshal(&n) } +func isNil(input interface{}) bool { + if input == nil { + return true + } + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + func json2yaml(item interface{}) (*yaml.Node, error) { + if isNil(item) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Value: "null", + }, nil + } + switch val := item.(type) { case JSONMapSlice: var n yaml.Node @@ -265,7 +288,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) { case map[string]interface{}: var n yaml.Node n.Kind = yaml.MappingNode - for k, v := range val { + keys := make([]string, 0, len(val)) + for k := range val { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := val[k] childNode, err := json2yaml(v) if err != nil { return nil, err @@ -318,8 +348,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) { Tag: yamlBoolScalar, Value: strconv.FormatBool(val), }, nil + default: + return nil, fmt.Errorf("unhandled type: %T", val) } - return nil, nil } // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice diff --git a/vendor/github.com/go-resty/resty/v2/README.md b/vendor/github.com/go-resty/resty/v2/README.md index d6d501ef8..eccca8bce 100644 --- a/vendor/github.com/go-resty/resty/v2/README.md +++ b/vendor/github.com/go-resty/resty/v2/README.md @@ -4,7 +4,7 @@

Features section describes in detail about Resty capabilities

-[badge row: Build Status, Code Coverage, Go Report Card, Release Version, GoDoc, License, Mentioned in Awesome Go]
+[badge row: Build Status, Code Coverage, Go Report Card, Release Version, GoDoc, License, Mentioned in Awesome Go (updated badge targets)]

Resty Communication Channels

@@ -13,7 +13,7 @@ ## News - * v2.11.0 [released](https://github.com/go-resty/resty/releases/tag/v2.11.0) and tagged on Dec 27, 2023. + * v2.13.1 [released](https://github.com/go-resty/resty/releases/tag/v2.13.1) and tagged on May 10, 2024. * v2.0.0 [released](https://github.com/go-resty/resty/releases/tag/v2.0.0) and tagged on Jul 16, 2019. * v1.12.0 [released](https://github.com/go-resty/resty/releases/tag/v1.12.0) and tagged on Feb 27, 2019. * v1.0 released and tagged on Sep 25, 2017. - Resty's first version was released on Sep 15, 2015 then it grew gradually as a very handy and helpful library. Its been a two years since first release. I'm very thankful to Resty users and its [contributors](https://github.com/go-resty/resty/graphs/contributors). @@ -702,8 +702,8 @@ client. }) ``` -By default, resty will retry requests that return a non-nil error during execution. -Therefore, the above setup will result in resty retrying requests with non-nil errors up to 3 times, +By default, resty will retry requests that return a non-nil error during execution. +Therefore, the above setup will result in resty retrying requests with non-nil errors up to 3 times, with the delay increasing after each attempt. You can optionally provide client with [custom retry conditions](https://pkg.go.dev/github.com/go-resty/resty/v2#RetryConditionFunc): @@ -739,7 +739,7 @@ client.AddRetryCondition( ) ``` -Multiple retry conditions can be added. +Multiple retry conditions can be added. Note that if multiple conditions are specified, a retry will occur if any of the conditions are met. It is also possible to use `resty.Backoff(...)` to get arbitrary retry scenarios @@ -797,7 +797,7 @@ client.SetTimeout(1 * time.Minute) // You can override all below settings and options at request level if you want to //-------------------------------------------------------------------------------- // Host URL for all request. So you can use relative URL in the request -client.SetHostURL("http://httpbin.org") +client.SetBaseURL("http://httpbin.org") // Headers for all request client.SetHeader("Accept", "application/json") @@ -861,7 +861,7 @@ client := resty.New() // Set the previous transport that we created, set the scheme of the communication to the // socket and set the unixSocket as the HostURL. -client.SetTransport(&transport).SetScheme("http").SetHostURL(unixSocket) +client.SetTransport(&transport).SetScheme("http").SetBaseURL(unixSocket) // No need to write the host's URL on the request, just the path. 
client.R().Get("http://localhost/index.html") diff --git a/vendor/github.com/go-resty/resty/v2/client.go b/vendor/github.com/go-resty/resty/v2/client.go index 446ba8517..1bcafba81 100644 --- a/vendor/github.com/go-resty/resty/v2/client.go +++ b/vendor/github.com/go-resty/resty/v2/client.go @@ -142,11 +142,11 @@ type Client struct { proxyURL *url.URL beforeRequest []RequestMiddleware udBeforeRequest []RequestMiddleware - udBeforeRequestLock sync.RWMutex + udBeforeRequestLock *sync.RWMutex preReqHook PreRequestHook successHooks []SuccessHook afterResponse []ResponseMiddleware - afterResponseLock sync.RWMutex + afterResponseLock *sync.RWMutex requestLog RequestLogCallback responseLog ResponseLogCallback errorHooks []ErrorHook @@ -1125,6 +1125,25 @@ func (c *Client) GetClient() *http.Client { return c.httpClient } +// Clone returns a clone of the original client. +// +// Be careful when using this function: +// - Interface values are not deeply cloned. Thus, both the original and the clone will use the +// same value. +// - This function is not safe for concurrent use. You should only use this when you are sure that +// the client is not being used by any other goroutine. +// +// Since v2.12.0 +func (c *Client) Clone() *Client { + // dereference the pointer and copy the value + cc := *c + + // lock values should not be copied - thus new values are used. + cc.afterResponseLock = &sync.RWMutex{} + cc.udBeforeRequestLock = &sync.RWMutex{} + return &cc +} + //‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾ // Client Unexported methods //_______________________________________________________________________ @@ -1360,9 +1379,11 @@ func createClient(hc *http.Client) *Client { XMLUnmarshal: xml.Unmarshal, HeaderAuthorizationKey: http.CanonicalHeaderKey("Authorization"), - jsonEscapeHTML: true, - httpClient: hc, - debugBodySizeLimit: math.MaxInt32, + jsonEscapeHTML: true, + httpClient: hc, + debugBodySizeLimit: math.MaxInt32, + udBeforeRequestLock: &sync.RWMutex{}, + afterResponseLock: &sync.RWMutex{}, } // Logger diff --git a/vendor/github.com/go-resty/resty/v2/digest.go b/vendor/github.com/go-resty/resty/v2/digest.go index 9dd3a13b5..3cd19637f 100644 --- a/vendor/github.com/go-resty/resty/v2/digest.go +++ b/vendor/github.com/go-resty/resty/v2/digest.go @@ -1,5 +1,6 @@ // Copyright (c) 2015-2023 Jeevanandam M (jeeva@myjeeva.com) // 2023 Segev Dagan (https://github.com/segevda) +// 2024 Philipp Wolfer (https://github.com/phw) // All rights reserved. // resty source code and usage is governed by a MIT style // license that can be found in the LICENSE file. 
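A hedged usage sketch of the Client.Clone added above, illustrating the shallow-copy caveat its doc comment warns about; the URLs and header name are placeholders.

```go
package main

import (
	"fmt"

	"github.com/go-resty/resty/v2"
)

func main() {
	base := resty.New().SetBaseURL("https://api.example.com")

	// The clone is a shallow copy: only the two mutexes are re-created.
	// Reference-typed state such as the Header map is NOT deep-copied,
	// so mutating it through the clone is visible on the original.
	clone := base.Clone()
	clone.SetHeader("X-Team", "reporting")
	fmt.Println(base.Header.Get("X-Team")) // "reporting" (shared map)

	// Scalar fields copied by value diverge safely after the clone.
	clone.SetBaseURL("https://staging.example.com")
	fmt.Println(base.BaseURL)  // https://api.example.com
	fmt.Println(clone.BaseURL) // https://staging.example.com
}
```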
@@ -125,48 +126,78 @@ type challenge struct { userhash string } +func (c *challenge) setValue(k, v string) error { + switch k { + case "realm": + c.realm = v + case "domain": + c.domain = v + case "nonce": + c.nonce = v + case "opaque": + c.opaque = v + case "stale": + c.stale = v + case "algorithm": + c.algorithm = v + case "qop": + c.qop = v + case "charset": + if strings.ToUpper(v) != "UTF-8" { + return ErrDigestCharset + } + case "userhash": + c.userhash = v + default: + return ErrDigestBadChallenge + } + return nil +} + func parseChallenge(input string) (*challenge, error) { const ws = " \n\r\t" - const qs = `"` s := strings.Trim(input, ws) if !strings.HasPrefix(s, "Digest ") { return nil, ErrDigestBadChallenge } s = strings.Trim(s[7:], ws) - sl := strings.Split(s, ",") c := &challenge{} - var r []string - for i := range sl { - sl[i] = strings.TrimSpace(sl[i]) - r = strings.SplitN(sl[i], "=", 2) - if len(r) != 2 { - return nil, ErrDigestBadChallenge - } - r[0] = strings.TrimSpace(r[0]) - r[1] = strings.TrimSpace(r[1]) - switch r[0] { - case "realm": - c.realm = strings.Trim(r[1], qs) - case "domain": - c.domain = strings.Trim(r[1], qs) - case "nonce": - c.nonce = strings.Trim(r[1], qs) - case "opaque": - c.opaque = strings.Trim(r[1], qs) - case "stale": - c.stale = strings.Trim(r[1], qs) - case "algorithm": - c.algorithm = strings.Trim(r[1], qs) - case "qop": - c.qop = strings.Trim(r[1], qs) - case "charset": - if strings.ToUpper(strings.Trim(r[1], qs)) != "UTF-8" { - return nil, ErrDigestCharset + b := strings.Builder{} + key := "" + quoted := false + for _, r := range s { + switch r { + case '"': + quoted = !quoted + case ',': + if quoted { + b.WriteRune(r) + } else { + val := strings.Trim(b.String(), ws) + b.Reset() + if err := c.setValue(key, val); err != nil { + return nil, err + } + key = "" + } + case '=': + if quoted { + b.WriteRune(r) + } else { + key = strings.Trim(b.String(), ws) + b.Reset() } - case "userhash": - c.userhash = strings.Trim(r[1], qs) default: - return nil, ErrDigestBadChallenge + b.WriteRune(r) + } + } + if quoted || (key == "" && b.Len() > 0) { + return nil, ErrDigestBadChallenge + } + if key != "" { + val := strings.Trim(b.String(), ws) + if err := c.setValue(key, val); err != nil { + return nil, err } } return c, nil @@ -233,9 +264,10 @@ func (c *credentials) validateQop() error { if c.messageQop == "" { return ErrDigestNoQop } - possibleQops := strings.Split(c.messageQop, ", ") + possibleQops := strings.Split(c.messageQop, ",") var authSupport bool for _, qop := range possibleQops { + qop = strings.TrimSpace(qop) if qop == "auth" { authSupport = true break diff --git a/vendor/github.com/go-resty/resty/v2/middleware.go b/vendor/github.com/go-resty/resty/v2/middleware.go index ac2bbc9e8..603448dfb 100644 --- a/vendor/github.com/go-resty/resty/v2/middleware.go +++ b/vendor/github.com/go-resty/resty/v2/middleware.go @@ -57,8 +57,8 @@ func parseRequestURL(c *Client, r *Request) error { buf := acquireBuffer() defer releaseBuffer(buf) // search for the next or first opened curly bracket - for curr := strings.Index(r.URL, "{"); curr > prev; curr = prev + strings.Index(r.URL[prev:], "{") { - // write everything form the previous position up to the current + for curr := strings.Index(r.URL, "{"); curr == 0 || curr > prev; curr = prev + strings.Index(r.URL[prev:], "{") { + // write everything from the previous position up to the current if curr > prev { buf.WriteString(r.URL[prev:curr]) 
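
The rewritten `parseChallenge` above replaces the old `strings.Split` approach with a rune-by-rune scan, so commas and equals signs inside quoted parameter values (for example `qop="auth,auth-int"`) no longer break parsing. Below is a standalone sketch of that quote-aware scan, assuming a simplified `key=value` grammar; `parseParams` and `errBadChallenge` are hypothetical names, not the vendored API:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

var errBadChallenge = errors.New("bad challenge")

// parseParams walks the input one rune at a time, toggling a quoted
// flag so that ',' and '=' inside quotes are treated as data rather
// than as pair or key/value separators.
func parseParams(s string) (map[string]string, error) {
	out := map[string]string{}
	var b strings.Builder
	key := ""
	quoted := false
	flush := func() error {
		if key == "" {
			return errBadChallenge // value with no key
		}
		out[key] = strings.TrimSpace(b.String())
		b.Reset()
		key = ""
		return nil
	}
	for _, r := range s {
		switch r {
		case '"':
			quoted = !quoted
		case ',':
			if quoted {
				b.WriteRune(r)
			} else if err := flush(); err != nil {
				return nil, err
			}
		case '=':
			if quoted {
				b.WriteRune(r)
			} else {
				key = strings.TrimSpace(b.String())
				b.Reset()
			}
		default:
			b.WriteRune(r)
		}
	}
	if quoted {
		return nil, errBadChallenge // unterminated quote
	}
	if key != "" {
		if err := flush(); err != nil {
			return nil, err
		}
	}
	return out, nil
}

func main() {
	m, err := parseParams(`realm="x", qop="auth,auth-int", nonce="a=b"`)
	fmt.Println(m, err) // map[nonce:a=b qop:auth,auth-int realm:x] <nil>
}
```

The trailing checks mirror the hunk's: an unterminated quote or a dangling value without a key is rejected as a bad challenge.
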
} diff --git a/vendor/github.com/go-resty/resty/v2/request.go b/vendor/github.com/go-resty/resty/v2/request.go index fec097638..4e13ff094 100644 --- a/vendor/github.com/go-resty/resty/v2/request.go +++ b/vendor/github.com/go-resty/resty/v2/request.go @@ -1014,7 +1014,12 @@ func (r *Request) fmtBodyString(sl int64) (body string) { contentType := r.Header.Get(hdrContentTypeKey) kind := kindOf(r.Body) if canJSONMarshal(contentType, kind) { - prtBodyBytes, err = noescapeJSONMarshalIndent(&r.Body) + var bodyBuf *bytes.Buffer + bodyBuf, err = noescapeJSONMarshalIndent(&r.Body) + if err == nil { + prtBodyBytes = bodyBuf.Bytes() + defer releaseBuffer(bodyBuf) + } } else if IsXMLType(contentType) && (kind == reflect.Struct) { prtBodyBytes, err = xml.MarshalIndent(&r.Body, "", " ") } else if b, ok := r.Body.(string); ok { @@ -1077,17 +1082,16 @@ var noescapeJSONMarshal = func(v interface{}) (*bytes.Buffer, error) { return buf, nil } -var noescapeJSONMarshalIndent = func(v interface{}) ([]byte, error) { +var noescapeJSONMarshalIndent = func(v interface{}) (*bytes.Buffer, error) { buf := acquireBuffer() - defer releaseBuffer(buf) - encoder := json.NewEncoder(buf) encoder.SetEscapeHTML(false) encoder.SetIndent("", " ") if err := encoder.Encode(v); err != nil { + releaseBuffer(buf) return nil, err } - return buf.Bytes(), nil + return buf, nil } diff --git a/vendor/github.com/go-resty/resty/v2/resty.go b/vendor/github.com/go-resty/resty/v2/resty.go index 21dcd5655..f8becec6c 100644 --- a/vendor/github.com/go-resty/resty/v2/resty.go +++ b/vendor/github.com/go-resty/resty/v2/resty.go @@ -14,7 +14,7 @@ import ( ) // Version # of resty -const Version = "2.10.0" +const Version = "2.13.1" // New method creates a new Resty client. 
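
The `fmtBodyString`/`noescapeJSONMarshalIndent` change above fixes a use-after-release: the old code `defer`-released the pooled buffer and then returned `buf.Bytes()`, a slice whose backing array the pool could hand to the next caller. Returning the `*bytes.Buffer` moves the release to the caller, after the bytes have actually been consumed. A minimal sketch of the corrected ownership pattern, with a hypothetical `sync.Pool`-backed `acquireBuffer`/`releaseBuffer` pair standing in for resty's internal helpers:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{New: func() any { return new(bytes.Buffer) }}

func acquireBuffer() *bytes.Buffer  { return bufPool.Get().(*bytes.Buffer) }
func releaseBuffer(b *bytes.Buffer) { b.Reset(); bufPool.Put(b) }

// marshalIndent returns the pooled buffer itself rather than
// buf.Bytes(): releasing via defer and then returning the backing
// slice would let a later acquireBuffer overwrite the caller's data.
func marshalIndent(v any) (*bytes.Buffer, error) {
	buf := acquireBuffer()
	enc := json.NewEncoder(buf)
	enc.SetEscapeHTML(false)
	enc.SetIndent("", "  ")
	if err := enc.Encode(v); err != nil {
		releaseBuffer(buf) // only release on the error path
		return nil, err
	}
	return buf, nil
}

func main() {
	buf, err := marshalIndent(map[string]string{"job": "otel-collector"})
	if err != nil {
		panic(err)
	}
	defer releaseBuffer(buf) // caller releases once the bytes are consumed
	fmt.Print(buf.String())
}
```
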
func New() *Client { diff --git a/vendor/github.com/go-resty/resty/v2/util.go b/vendor/github.com/go-resty/resty/v2/util.go index 27b466dc1..5a69e4fcf 100644 --- a/vendor/github.com/go-resty/resty/v2/util.go +++ b/vendor/github.com/go-resty/resty/v2/util.go @@ -216,7 +216,7 @@ func writeMultipartFormFile(w *multipart.Writer, fieldName, fileName string, r i return err } - partWriter, err := w.CreatePart(createMultipartHeader(fieldName, fileName, http.DetectContentType(cbuf))) + partWriter, err := w.CreatePart(createMultipartHeader(fieldName, fileName, http.DetectContentType(cbuf[:size]))) if err != nil { return err } diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go index ca85659ba..c929e4a02 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go +++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go @@ -62,7 +62,7 @@ func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interf case *ecdsa.PublicKey: ecdsaKey = k default: - return newError("ECDSA verify expects *ecsda.PublicKey", ErrInvalidKeyType) + return newError("ECDSA verify expects *ecdsa.PublicKey", ErrInvalidKeyType) } if len(sig) != 2*m.KeySize { @@ -96,7 +96,7 @@ func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte case *ecdsa.PrivateKey: ecdsaKey = k default: - return nil, newError("ECDSA sign expects *ecsda.PrivateKey", ErrInvalidKeyType) + return nil, newError("ECDSA sign expects *ecdsa.PrivateKey", ErrInvalidKeyType) } // Create the hasher diff --git a/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/vendor/github.com/golang-jwt/jwt/v5/hmac.go index 96c62722d..aca600ce1 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/hmac.go +++ b/vendor/github.com/golang-jwt/jwt/v5/hmac.go @@ -91,7 +91,7 @@ func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interfa func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) { if keyBytes, ok := key.([]byte); ok { if !m.Hash.Available() { - return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType) + return nil, ErrHashUnavailable } hasher := hmac.New(m.Hash.New, keyBytes) @@ -100,5 +100,5 @@ func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, return hasher.Sum(nil), nil } - return nil, ErrInvalidKeyType + return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType) } diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der deleted file mode 100644 index 958f3cfad..000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der deleted file mode 100644 index d2817641b..000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der and /dev/null differ diff --git 
a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der deleted file mode 100644 index d8c3710c8..000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der deleted file mode 100644 index dae619c09..000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der deleted file mode 100644 index ce7f8d31d..000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der deleted file mode 100644 index 04b0d7360..000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der deleted file mode 100644 index d8c3710c8..000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem deleted file mode 100644 index 493a5a264..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl 
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem deleted file mode 100644 index 55a7f10c7..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der deleted file mode 100644 index 04b0d7360..000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem deleted file mode 100644 index 0f98322c7..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem deleted file mode 100644 index 81afea783..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl -jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem 
b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem deleted file mode 100644 index 493a5a264..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem deleted file mode 100644 index 55a7f10c7..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm 
-HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem deleted file mode 100644 index 0f98322c7..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem deleted file mode 100644 index 81afea783..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl -jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm 
-eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem deleted file mode 100644 index 493a5a264..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem deleted file mode 100644 index 55a7f10c7..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA 
-VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem deleted file mode 100644 index 0f98322c7..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem deleted file mode 100644 index 81afea783..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs 
-8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl -jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/testdata/client_cert.pem deleted file mode 100644 index 493a5a264..000000000 --- a/vendor/github.com/google/s2a-go/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/client_key.pem 
b/vendor/github.com/google/s2a-go/testdata/client_key.pem deleted file mode 100644 index 55a7f10c7..000000000 --- a/vendor/github.com/google/s2a-go/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem deleted file mode 100644 index 60c4cf069..000000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDCDCCAfACFFlYsYCFit01ZpYmfjxpo7/6wMEbMA0GCSqGSIb3DQEBCwUAMEgx -CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UECgwGR29vZ2xlMRswGQYD -VQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwHhcNMjMwODIyMTY0NTE4WhcNNDMwODIy -MTY0NTE4WjA5MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExHTAbBgNVBAMMFHRl -c3QtczJhLW10bHMtY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEAqrQQMyxNtmdCB+uY3szgRsfPrKC+TV9Fusnd8PfaCVuGTGcSBKM018nV2TDn -3IYFQ1HgLpGwGwOFDBb3y0o9i2/l2VJySriX1GSNX6nDmVasQlO1wuOLCP7/LRmO -7b6Kise5W0IFhYaptKyWnekn2pS0tAjimqpfn2w0U6FDGtQUqg/trQQmGtTSJHjb -A+OFd0EFC18KGP8Q+jOMaMkJRmpeEiAPyHPDoMhqQNT26RApv9j2Uzo4SuXzHH6T -cAdm1+zG+EXY/UZKX9oDkSbwIJvN+gCmNyORLalJ12gsGYOCjMd8K0mlXBqrmmbO -VHVbUm9062lhE7x59AA8DK4DoQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCPOvtL -dq2hxFHlIy0YUK8jp/DtwJZPwzx1id5FtWwd0CxBS1StIgmkHMxtkJGz1iyQLplI -je+Msd4sTsb5zZi/8kGKehi8Wj4lghp4oP30cpob41OvM68M9RC/wSOVk9igSww+ -l3zof6wKRIswsi5VHrL16ruIVVoDlyFbKr8yk+cp9OPOV8hNNN7ewY9xC8OgnTt8 -YtdaLe6uTplKBLW+j3GtshigRhyfkGJyPFYL4LAeDJCHlC1qmBnkyP0ijMp6vneM -E8TLavnMTMcpihWTWpyKeRkO6HDRsP4AofQAp7VAiAdSOplga+w2qgrVICV+m8MK -BTq2PBvc59T6OFLq ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem 
b/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem deleted file mode 100644 index 9d112d1e9..000000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCqtBAzLE22Z0IH -65jezOBGx8+soL5NX0W6yd3w99oJW4ZMZxIEozTXydXZMOfchgVDUeAukbAbA4UM -FvfLSj2Lb+XZUnJKuJfUZI1fqcOZVqxCU7XC44sI/v8tGY7tvoqKx7lbQgWFhqm0 -rJad6SfalLS0COKaql+fbDRToUMa1BSqD+2tBCYa1NIkeNsD44V3QQULXwoY/xD6 -M4xoyQlGal4SIA/Ic8OgyGpA1PbpECm/2PZTOjhK5fMcfpNwB2bX7Mb4Rdj9Rkpf -2gORJvAgm836AKY3I5EtqUnXaCwZg4KMx3wrSaVcGquaZs5UdVtSb3TraWETvHn0 -ADwMrgOhAgMBAAECggEAUccupZ1ZY4OHTi0PkNk8rpwFwTFGyeFVEf2ofkr24RnA -NnUAXEllxOUUNlcoFOz9s3kTeavg3qgqgpa0QmdAIb9LMXg+ec6CKkW7trMpGho8 -LxBUWNfSoU4sKEqAvyPT0lWJVo9D/up6/avbAi6TIbOw+Djzel4ZrlHTpabxc3WT -EilXzn4q54b3MzxCQeQjcnzTieW4Q5semG2kLiXFToHIY2di01P/O8awUjgrD+uW -/Cb6H49MnHm9VPkqea1iwZeMQd6Gh5FrC7RezsBjdB1JBcfsv6PFt2ySInjB8SF+ -XR5Gr3Cc5sh9s0LfprZ9Dq0rlSWmwasPMI1COK6SswKBgQDczgeWd3erQ1JX9LEI -wollawqC9y7uJhEsw1hrPqA3uqZYiLUc7Nmi4laZ12mcGoXNDS3R3XmD58qGmGaU -lxEVTb8KDVWBgw450VoBKzSMQnCP6zn4nZxTYxeqMKjDGf6TRB6TZc843qsG3eRC -k91yxrCQ/0HV6PT48C+lieDzLwKBgQDF6aNKiyrswr457undBnM1H8q/Y6xC5ZlK -UtiQdhuyBnicvz0U8WPxBY/8gha0OXWuSnBqq/z77iFVNv/zT6p9K7kM7nBGd8cB -8KO6FNbyaHWFrhCI5zNzRTH4oha0hfvUOoti09vqavCtWD4L+D/63ba1wNLKPO9o -4gWbCnUCLwKBgQC/vus372csgrnvR761LLrEJ8BpGt7WUJh5luoht7DKtHvgRleB -Vu1oVcV+s2Iy/ZVUDC3OIdZ0hcWKPK5YOxfKuEk+IXYvke+4peTTPwHTC59UW6Fs -FPK8N0FFuhvT0a8RlAY5WiAp8rPysp6WcnHMSl7qi8BQUozp4Sp/RsziYQKBgBXv -r4mzoy5a53rEYGd/L4XT4EUWZyGDEVqLlDVu4eL5lKTLDZokp08vrqXuRVX0iHap -CYzJQ2EpI8iuL/BoBB2bmwcz5n3pCMXORld5t9lmeqA2it6hwbIlGUTVsm6P6zm6 -w3hQwy9YaxTLkxUAjxbfPEEo/jQsTNzzMGve3NlBAoGAbgJExpDyMDnaD2Vi5eyr -63b54BsqeLHqxJmADifyRCj7G1SJMm3zMKkNNOS0vsXgoiId973STFf1XQiojiv8 -Slbxyv5rczcY0n3LOuQYcM5OzsjzpNFZsT2dDnMfNRUF3rx3Geu/FuJ9scF1b00r -fVMrcL3jSf/W1Xh4TgtyoU8= ------END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem deleted file mode 100644 index 44e436f6e..000000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDcTCCAlmgAwIBAgIUDUkgI+2FZtuUHyUUi0ZBH7JvN00wDQYJKoZIhvcNAQEL -BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx -GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjEyMTI5MTVaFw00 -MzA4MjEyMTI5MTVaMEgxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UE -CgwGR29vZ2xlMRswGQYDVQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCbFEQfpvla27bATedrN4BAWsI9GSwSnJLW -QWzXcnAk6cKxQBAhnaKHRxHY8ttLhNTtxQeub894CLzJvHE/0xDhuMzjtCCCZ7i2 -r08tKZ1KcEzPJCPNlxlzAXPA45XU3LRlbGvju/PBPhm6n1hCEKTNI/KETJ5DEaYg -Cf2LcXVsl/zW20MwDZ+e2w/9a2a6n6DdpW1ekOR550hXAUOIxvmXRBeYeGLFvp1n -rQgZBhRaxP03UB+PQD2oMi/4mfsS96uGCXdzzX8qV46O8m132HUbnA/wagIwboEe -d7Bx237dERDyHw5GFnll7orgA0FOtoEufXdeQxWVvTjO0+PVPgsvAgMBAAGjUzBR -MB0GA1UdDgQWBBRyMtg/yutV8hw8vOq0i8x0eBQi7DAfBgNVHSMEGDAWgBRyMtg/ -yutV8hw8vOq0i8x0eBQi7DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA -A4IBAQArN/gdqWMxd5Rvq2eJMTp6I4RepJOT7Go4sMsRsy1caJqqcoS2EvREDZMN -XNEBcyQBB5kYd6TCcZGoLnEtWYXQ4jjEiXG1g7/+rWxyqw0ZYuP7FWzuHg3Uor/x -fApbEKwptP5ywVc+33h4qreGcqXkVCCn+sAcstGgrqubdGZW2T5gazUMyammOOuN -9IWL1PbvXmgEKD+80NUIrk09zanYyrElGdU/zw/kUbZ3Jf6WUBtJGhTzRQ1qZeKa -VnpCbLoG3vObEB8mxDUAlIzwAtfvw4U32BVIZA8xrocz6OOoAnSW1bTlo3EOIo/G 
-MTV7jmY9TBPtfhRuO/cG650+F+cw ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem deleted file mode 100644 index 68c606134..000000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDbjCCAlagAwIBAgIUbexZ5sZl86Al9dsI2PkOgtqKnkgwDQYJKoZIhvcNAQEL -BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx -GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjIwMDMyMDRaFw00 -MzA4MjIwMDMyMDRaMDkxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEdMBsGA1UE -AwwUdGVzdC1zMmEtbXRscy1zZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw -ggEKAoIBAQCMEzybsGPqfh92GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvh -HkJVnTz9gwNBF3n5nUalqRzactlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5Qn -H76QlqD15oJreh7nSM8R4qj5KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAK -GYtFrB6buDn3Eg3Hsw6z7uj7CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJA -Ob66AjTmMbD16RGYZR4JsPx6CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFe -LoohtC8K7uTyjm/dROx6nHXdDt5TQYXHAgMBAAGjXzBdMBsGA1UdEQQUMBKHEAAA -AAAAAAAAAAAAAAAAAAAwHQYDVR0OBBYEFI3i2+tIk6YYn0MIxC0q93jk1VsUMB8G -A1UdIwQYMBaAFHIy2D/K61XyHDy86rSLzHR4FCLsMA0GCSqGSIb3DQEBCwUAA4IB -AQAUhk+s/lrIAULBbU7E22C8f93AzTxE1mhyHGNlfPPJP3t1Dl+h4X4WkFpkz5gT -EcNXB//Vvoq99HbEK5/92sxsIPexKdJBdcggeHXIgLDkOrEZEb0Nnh9eaAuU2QDn -JW44hMB+aF6mEaJvOHE6DRkQw3hwFYFisFKKHtlQ3TyOhw5CHGzSExPZusdSFNIe -2E7V/0QzGPJEFnEFUNe9N8nTH2P385Paoi+5+Iizlp/nztVXfzv0Cj/i+qGgtDUs -HB+gBU2wxMw8eYyuNzACH70wqGR1Parj8/JoyYhx0S4+Gjzy3JH3CcAMaxyfH/dI -4Wcvfz/isxgmH1UqIt3oc6ad ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem deleted file mode 100644 index b14ad0f72..000000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCMEzybsGPqfh92 -GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvhHkJVnTz9gwNBF3n5nUalqRza -ctlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5QnH76QlqD15oJreh7nSM8R4qj5 -KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAKGYtFrB6buDn3Eg3Hsw6z7uj7 -CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJAOb66AjTmMbD16RGYZR4JsPx6 -CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFeLoohtC8K7uTyjm/dROx6nHXd -Dt5TQYXHAgMBAAECggEAIB5zGdIG/yh/Z1GBqfuOFaxFGx5iJ5BVlLAVH9P9IXFz -yPnVRXEjbinFlSMSbqEBeIX9EpcVMXxHIPIP1RIGEy2IYr3kiqXyT771ahDDZh6/ -Spqz0UQatSPqyvW3H9uE0Uc12dvQm23JSCUmPRX5m7gbhDQBIChXzdzdcU4Yi59V -4xmJUvbsAcLw5CBM6kwV+1NGVH9+3mUdhrr9M6B6+sVB/xnaqMGEDfQGiwL8U7EY -QOuc46KXu3Pd/qCdVLn60IrdjSzDJKeC5UZZ+ejNAo+DfbtOovBj3qu3OCUg4XVy -0CDBJ1sTdLvUfF4Gb+crjPsd+qBbXcjVfqdadwhsoQKBgQDBF1Pys/NitW8okJwp -2fiDIASP3TiI+MthWHGyuoZGPvmXQ3H6iuLSm8c/iYI2WPTf53Xff1VcFm1GmQms -GCsYM8Ax94zCeO6Ei1sYYxwcBloEZfOeV37MPA4pjJF4Lt+n5nveNxP+lrsjksJz -wToSEgWPDT1b/xcdt4/5j9J85wKBgQC5tiLx+33mwH4DoaFRmSl0+VuSNYFw6DTQ -SQ+kWqWGH4NENc9wf4Dj2VUZQhpXNhXVSxj+aP2d/ck1NrTJAWqYEXCDtFQOGSa2 -cGPRr+Fhy5NIEaEvR7IXcMBZzx3koYmWVBHricyrXs5FvHrT3N14mGDUG8n24U3f -R799bau0IQKBgQC97UM+lHCPJCWNggiJRgSifcje9VtZp1btjoBvq/bNe74nYkjn -htsrC91Fiu1Qpdlfr50K1IXSyaB886VG6JLjAGxI+dUzqJ38M9LLvxj0G+9JKjsi -AbAQFfZcOg8QZxLJZPVsE0MQhZTXndC06VhEVAOxvPUg214Sde8hK61/+wKBgCRw -O10VhnePT2pw/VEgZ0T/ZFtEylgYB7zSiRIrgwzVBBGPKVueePC8BPmGwdpYz2Hh -cU8B1Ll6QU+Co2hJMdwSl+wPpup5PuJPHRbYlrV0lzpt0x2OyL/WrLcyb2Ab3f40 
-EqwPhqwdVwXR3JvTW1U9OMqFhVQ+kuP7lPQMX8NhAoGBAJOgZ7Tokipc4Mi68Olw -SCaOPvjjy4sW2rTRuKyjc1wTAzy7SJ3vXHfGkkN99nTLJFwAyJhWUpnRdwAXGi+x -gyOa95ImsEfRSwEjbluWfF8/P0IU8GR+ZTqT4NnNCOsi8T/xst4Szd1ECJNnnZDe -1ChfPP1AH+/75MJCvu6wQBQv ------END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem deleted file mode 100644 index ad1bad598..000000000 --- a/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDITCCAgkCFBS8mLoytMpMWBwpAtnRaq3eIKnsMA0GCSqGSIb3DQEBCwUAME0x -CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UECgwEVGVzdDEiMCAGA1UE -AwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDAeFw0yMzA4MjIyMTE2MDFaFw00 -MzA4MjIyMTE2MDFaME0xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UE -CgwEVGVzdDEiMCAGA1UEAwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDCCASIw -DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKFFPsYasKZeCFLEXl3RpE/ZOXFe -2lhutIalSpZvCmso+mQGoZ4cHK7At+kDjBi5CrnXkYcw7quQAhHgU0frhWdj7tsW -HUUtq7T8eaGWKBnVD9fl+MjtAl1BmhXwV9qRBbj4EesSKGDSGpKf66dOtzw83JbB -cU7XlPAH1c1zo2GXC1himcZ+SVGHVrOjn4NmeFs8g94/Dke8dWkHwv5YTMVugFK4 -5KxKgSOKkr4ka7PCBzgxCnW4wYSZNRHcxrqkiArO2HAQq0ACr7u+fVDYH//9mP2Z -ADo/zch7O5yhkiNbjXJIRrptDWEuVYMRloYDhT773h7bV/Q0Wo0NQGtasJ8CAwEA -ATANBgkqhkiG9w0BAQsFAAOCAQEAPjbH0TMyegF/MDvglkc0sXr6DqlmTxDCZZmG -lYPZ5Xy062+rxIHghMARbvO4BxepiG37KsP2agvOldm4TtU8nQ8LyswmSIFm4BQ+ -XQWwdsWyYyd8l0d5sXAdaN6AXwy50fvqCepmEqyreMY6dtLzlwo9gVCBFB7QuAPt -Nc14phpEUZt/KPNuY6cUlB7bz3tmnFbwxUrWj1p0KBEYsr7+KEVZxR+z0wtlU7S9 -ZBrmUvx0fq5Ef7JWtHW0w4ofg1op742sdYl+53C26GZ76ts4MmqVz2/94DScgRaU -gT0GLVuuCZXRDVeTXqTb4mditRCfzFPe9cCegYhGhSqBs8yh5A== ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem deleted file mode 100644 index bcf08e4f1..000000000 --- a/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQChRT7GGrCmXghS -xF5d0aRP2TlxXtpYbrSGpUqWbwprKPpkBqGeHByuwLfpA4wYuQq515GHMO6rkAIR -4FNH64VnY+7bFh1FLau0/HmhligZ1Q/X5fjI7QJdQZoV8FfakQW4+BHrEihg0hqS -n+unTrc8PNyWwXFO15TwB9XNc6NhlwtYYpnGfklRh1azo5+DZnhbPIPePw5HvHVp -B8L+WEzFboBSuOSsSoEjipK+JGuzwgc4MQp1uMGEmTUR3Ma6pIgKzthwEKtAAq+7 -vn1Q2B///Zj9mQA6P83IezucoZIjW41ySEa6bQ1hLlWDEZaGA4U++94e21f0NFqN -DUBrWrCfAgMBAAECggEAR8e8YwyqJ8KezcgdgIC5M9kp2i4v3UCZFX0or8CI0J2S -pUbWVLuKgLXCpfIwPyjNf15Vpei/spkMcsx4BQDthdFTFSzIpmvni0z9DlD5VFYj -ESOJElV7wepbHPy2/c+izmuL/ic81aturGiFyRgeMq+cN3WuaztFTXkPTrzzsZGF -p/Mx3gqm7Hoc3d2xlv+8L5GjCtEJPlQgZJV+s3ennBjOAd8CC7d9qJetE3Er46pn -r5jedV3bQRZYBzmooYNHjbAs26++wYac/jTE0/U6nKS17eWq4BQZUtlMXUw5N81B -7LKn7C03rj2KCn+Nf5uin9ALmoy888LXCDdvL/NZkQKBgQDduv1Heu+tOZuNYUdQ -Hswmd8sVNAAWGZxdxixHMv58zrgbLFXSX6K89X2l5Sj9XON8TH46MuSFdjSwwWw5 -fBrhVEhA5srcqpvVWIBE05yqPpt0s1NQktMWJKELWlG8jOhVKwM5OYDpdxtwehpz -1g70XJz+nF/LTV8RdTK+OWDDpQKBgQC6MhdbGHUz/56dY3gZpE5TXnN2hkNbZCgk -emr6z85VHhQflZbedhCzB9PUnZnCKWOGQHQdxRTtRfd46LVboZqCdYO1ZNQv6toP -ysS7dTpZZFy7CpQaW0Y6/jS65jW6xIDKR1W40vgltZ3sfpG37JaowpzWdw2WuOnw -Bg0rcJAf8wKBgQCqE+p/z97UwuF8eufWnyj9QNo382E1koOMspv4KTdnyLETtthF -vDH6O1wbykG8xmmASLRyM+NyNA+KnXNETNvZh2q8zctBpGRQK8iIAsGjHM7ln0AD -B/x+ea5GJQuZU4RK/+lDFca6TjBwAFkWDVX/PqL18kDQkxKfM4SuwRhmOQKBgDGh -eoJIsa0LnP787Z2AI3Srf4F/ZmLs/ppCm1OBotEjdF+64v0nYWonUvqgi8SqfaHi 
-elEZIGvis4ViGj1zhRjzNAlc+AZRxpBhDzGcnNIJI4Kj3jhsTfsZmXqcNIQ1LtM8 -Uogyi/yZPaA1WKg7Aym2vlGYaGHdplXZdxc2KOSrAoGABRkD9l2OVcwK7RyNgFxo -mjxx0tfUdDBhHIi2igih1FiHpeP9E+4/kE/K7PnU9DoDrL1jW1MTpXaYV4seOylk -k9z/9QfcRa9ePD2N4FqbHWSYp5n3aLoIcGq/9jyjTwayZbbIhWO+vNuHE9wIvecZ -8x3gNkxJRb4NaLIoNzAhCoo= ------END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/testdata/server_cert.pem deleted file mode 100644 index 0f98322c7..000000000 --- a/vendor/github.com/google/s2a-go/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/server_key.pem b/vendor/github.com/google/s2a-go/testdata/server_key.pem deleted file mode 100644 index 81afea783..000000000 --- a/vendor/github.com/google/s2a-go/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl -jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD 
-uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index ef508417b..d51736e7e 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.12.0" + "v2": "2.12.4" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index ae7114947..7e36eb48f 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,33 @@ # Changelog +## [2.12.4](https://github.com/googleapis/gax-go/compare/v2.12.3...v2.12.4) (2024-05-03) + + +### Bug Fixes + +* provide unmarshal options for streams ([#343](https://github.com/googleapis/gax-go/issues/343)) ([ddf9a90](https://github.com/googleapis/gax-go/commit/ddf9a90bf180295d49875e15cb80b2136a49dbaf)) + +## [2.12.3](https://github.com/googleapis/gax-go/compare/v2.12.2...v2.12.3) (2024-03-14) + + +### Bug Fixes + +* bump protobuf dep to v1.33 ([#333](https://github.com/googleapis/gax-go/issues/333)) ([2892b22](https://github.com/googleapis/gax-go/commit/2892b22c1ae8a70dec3448d82e634643fe6c1be2)) + +## [2.12.2](https://github.com/googleapis/gax-go/compare/v2.12.1...v2.12.2) (2024-02-23) + + +### Bug Fixes + +* **v2/callctx:** fix SetHeader race by cloning header map ([#326](https://github.com/googleapis/gax-go/issues/326)) ([534311f](https://github.com/googleapis/gax-go/commit/534311f0f163d101f30657736c0e6f860e9c39dc)) + +## [2.12.1](https://github.com/googleapis/gax-go/compare/v2.12.0...v2.12.1) (2024-02-13) + + +### Bug Fixes + +* add XGoogFieldMaskHeader constant ([#321](https://github.com/googleapis/gax-go/issues/321)) ([666ee08](https://github.com/googleapis/gax-go/commit/666ee08931041b7fed56bed7132649785b2d3dfe)) + ## [2.12.0](https://github.com/googleapis/gax-go/compare/v2.11.0...v2.12.0) (2023-06-26) diff --git a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go index af15fb582..f5af5c990 100644 --- a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go +++ b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go @@ -38,6 +38,14 @@ import ( ) const ( + // XGoogFieldMaskHeader is the canonical header key for the [System Parameter] + // that 
specifies the response read mask. The value(s) for this header + // must adhere to format described in [fieldmaskpb]. + // + // [System Parameter]: https://cloud.google.com/apis/docs/system-parameters + // [fieldmaskpb]: https://google.golang.org/protobuf/types/known/fieldmaskpb + XGoogFieldMaskHeader = "x-goog-fieldmask" + headerKey = contextKey("header") ) @@ -66,9 +74,27 @@ func SetHeaders(ctx context.Context, keyvals ...string) context.Context { h, ok := ctx.Value(headerKey).(map[string][]string) if !ok { h = make(map[string][]string) + } else { + h = cloneHeaders(h) } + for i := 0; i < len(keyvals); i = i + 2 { h[keyvals[i]] = append(h[keyvals[i]], keyvals[i+1]) } return context.WithValue(ctx, headerKey, h) } + +// cloneHeaders makes a new key-value map while reusing the value slices. +// As such, new values should be appended to the value slice, and modifying +// indexed values is not thread safe. +// +// TODO: Replace this with maps.Clone when Go 1.21 is the minimum version. +func cloneHeaders(h map[string][]string) map[string][]string { + c := make(map[string][]string, len(h)) + for k, v := range h { + vc := make([]string, len(v)) + copy(vc, v) + c[k] = vc + } + return c +} diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go index 453fab7ec..3e53729e5 100644 --- a/vendor/github.com/googleapis/gax-go/v2/header.go +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -103,7 +103,9 @@ func goVersion() string { return "UNKNOWN" } -// XGoogHeader is for use by the Google Cloud Libraries only. +// XGoogHeader is for use by the Google Cloud Libraries only. See package +// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving +// request/response headers. // // XGoogHeader formats key-value pairs. // The resulting string is suitable for x-goog-api-client header. @@ -125,7 +127,8 @@ func XGoogHeader(keyval ...string) string { } // InsertMetadataIntoOutgoingContext is for use by the Google Cloud Libraries -// only. +// only. See package [github.com/googleapis/gax-go/v2/callctx] for help +// setting/retrieving request/response headers. // // InsertMetadataIntoOutgoingContext returns a new context that merges the // provided keyvals metadata pairs with any existing metadata/headers in the @@ -137,7 +140,9 @@ func InsertMetadataIntoOutgoingContext(ctx context.Context, keyvals ...string) c return metadata.NewOutgoingContext(ctx, insertMetadata(ctx, keyvals...)) } -// BuildHeaders is for use by the Google Cloud Libraries only. +// BuildHeaders is for use by the Google Cloud Libraries only. See package +// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving +// request/response headers. // // BuildHeaders returns a new http.Header that merges the provided // keyvals header pairs with any existing metadata/headers in the provided diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 7425b5ffb..3006ad7bd 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. 
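// Illustrative sketch, not from the vendored sources: how the callctx change
// above is meant to be used. SetHeaders previously appended into a header map
// shared with the parent context; as of gax-go v2.12.2 it clones the map
// first, fixing the SetHeader race noted in the changelog. Only the values
// passed in below are placeholders; SetHeaders, HeadersFromContext, and
// XGoogFieldMaskHeader are the package's documented API.
package main

import (
	"context"
	"fmt"

	"github.com/googleapis/gax-go/v2/callctx"
)

func main() {
	parent := callctx.SetHeaders(context.Background(), "x-goog-request-params", "name=projects/demo")
	// Deriving a child context clones the map, so the parent is not mutated.
	child := callctx.SetHeaders(parent, callctx.XGoogFieldMaskHeader, "name,create_time")
	fmt.Println(callctx.HeadersFromContext(parent)) // no x-goog-fieldmask entry
	fmt.Println(callctx.HeadersFromContext(child))  // includes x-goog-fieldmask
}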
-const Version = "2.12.0" +const Version = "2.12.4" diff --git a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go index cc4486eb9..9b690d40c 100644 --- a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go +++ b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go @@ -111,7 +111,8 @@ func (s *ProtoJSONStream) Recv() (proto.Message, error) { // Initialize a new instance of the protobuf message to unmarshal the // raw data into. m := s.typ.New().Interface() - err := protojson.Unmarshal(raw, m) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + err := unm.Unmarshal(raw, m) return m, err } diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md index e13f9e17e..0eec3e6dd 100644 --- a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md +++ b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md @@ -1,3 +1,34 @@ +## v1.11.0 (2024-03-07) + +This version reverts the inclusion of Context in the v1 branch. This inclusion +didn't add much value because no packages were using it; on the other hand, it +introduced a bug when using the Context property of the Provider client. + +## v1.10.0 (2024-02-27) **RETRACTED**: see https://github.com/gophercloud/gophercloud/issues/2969 + +* [GH-2893](https://github.com/gophercloud/gophercloud/pull/2893) [v1] authentication: Add WithContext functions +* [GH-2894](https://github.com/gophercloud/gophercloud/pull/2894) [v1] pager: Add WithContext functions +* [GH-2899](https://github.com/gophercloud/gophercloud/pull/2899) [v1] Authenticate with a clouds.yaml +* [GH-2917](https://github.com/gophercloud/gophercloud/pull/2917) [v1] Add ParseOption type to made clouds.Parse() more usable for optional With* funcs +* [GH-2924](https://github.com/gophercloud/gophercloud/pull/2924) [v1] build(deps): bump EmilienM/devstack-action from 0.11 to 0.14 +* [GH-2933](https://github.com/gophercloud/gophercloud/pull/2933) [v1] Fix AllowReauth reauthentication +* [GH-2950](https://github.com/gophercloud/gophercloud/pull/2950) [v1] compute: Use volumeID, not attachmentID for volume attachments + +## v1.9.0 (2024-02-02) **RETRACTED**: see https://github.com/gophercloud/gophercloud/issues/2969 + +New features and improvements: + +* [GH-2884](https://github.com/gophercloud/gophercloud/pull/2884) [v1] Context-aware methods to ProviderClient and ServiceClient +* [GH-2887](https://github.com/gophercloud/gophercloud/pull/2887) [v1] Add support of Flavors and FlavorProfiles for Octavia +* [GH-2875](https://github.com/gophercloud/gophercloud/pull/2875) [v1] [db/v1/instance]: adding support for availability_zone for a db instance + +CI changes: + +* [GH-2856](https://github.com/gophercloud/gophercloud/pull/2856) [v1] Fix devstack install on EOL magnum branches +* [GH-2857](https://github.com/gophercloud/gophercloud/pull/2857) [v1] Fix networking acceptance tests +* [GH-2858](https://github.com/gophercloud/gophercloud/pull/2858) [v1] build(deps): bump 
actions/upload-artifact from 3 to 4 +* [GH-2859](https://github.com/gophercloud/gophercloud/pull/2859) [v1] build(deps): bump github/codeql-action from 2 to 3 + ## v1.8.0 (2023-11-30) New features and improvements: diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go index ce291a3ab..1ff54b819 100644 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go @@ -14,7 +14,7 @@ import ( // DefaultUserAgent is the default User-Agent string set in the request header. const ( - DefaultUserAgent = "gophercloud/v1.8.0" + DefaultUserAgent = "gophercloud/v1.11.0" DefaultMaxBackoffRetries = 60 ) diff --git a/vendor/github.com/gorilla/websocket/.editorconfig b/vendor/github.com/gorilla/websocket/.editorconfig new file mode 100644 index 000000000..2940ec92a --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore index cd3fcd1ef..84039fec6 100644 --- a/vendor/github.com/gorilla/websocket/.gitignore +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -1,25 +1 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -.idea/ -*.iml +coverage.coverprofile diff --git a/vendor/github.com/gorilla/websocket/.golangci.yml b/vendor/github.com/gorilla/websocket/.golangci.yml new file mode 100644 index 000000000..34882139e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.golangci.yml @@ -0,0 +1,3 @@ +run: + skip-dirs: + - examples/*.go diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS deleted file mode 100644 index 1931f4006..000000000 --- a/vendor/github.com/gorilla/websocket/AUTHORS +++ /dev/null @@ -1,9 +0,0 @@ -# This is the official list of Gorilla WebSocket authors for copyright -# purposes. -# -# Please keep the list sorted. - -Gary Burd -Google LLC (https://opensource.google.com/) -Joachim Bauch - diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE index 9171c9722..bb9d80bc9 100644 --- a/vendor/github.com/gorilla/websocket/LICENSE +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -1,22 +1,27 @@ -Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. +Copyright (c) 2023 The Gorilla Authors. All rights reserved. 
Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +modification, are permitted provided that the following conditions are +met: - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/Makefile b/vendor/github.com/gorilla/websocket/Makefile new file mode 100644 index 000000000..603a63f50 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec -exclude-dir examples ./... 
+ +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md index 2517a2871..1fd5e9c4e 100644 --- a/vendor/github.com/gorilla/websocket/README.md +++ b/vendor/github.com/gorilla/websocket/README.md @@ -1,17 +1,14 @@ -# Gorilla WebSocket +# gorilla/websocket -[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) -[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) +![testing](https://github.com/gorilla/websocket/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/websocket/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/websocket) +[![godoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/websocket/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/websocket?badge) -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the -[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) ---- - -⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)** - ---- ### Documentation @@ -20,6 +17,7 @@ Gorilla WebSocket is a [Go](http://golang.org/) implementation of the * [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) * [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) * [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) +* [Write buffer pool example](https://github.com/gorilla/websocket/tree/master/examples/bufferpool) ### Status @@ -36,4 +34,3 @@ package API is stable. The Gorilla WebSocket package passes the server tests in the [Autobahn Test Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). 
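// Illustrative sketch, not from the vendored sources: a minimal client for the
// package described in the README above, ahead of the client.go changes that
// follow. The echo URL is a placeholder; DefaultDialer.Dial, WriteMessage,
// and ReadMessage are the package's real API.
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/echo", nil)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatalf("write: %v", err)
	}
	_, msg, err := conn.ReadMessage()
	if err != nil {
		log.Fatalf("read: %v", err)
	}
	log.Printf("echo: %s", msg)
}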
- diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go index 2efd83555..815b0ca5c 100644 --- a/vendor/github.com/gorilla/websocket/client.go +++ b/vendor/github.com/gorilla/websocket/client.go @@ -9,14 +9,18 @@ import ( "context" "crypto/tls" "errors" + "fmt" "io" - "io/ioutil" + "log" + "net" "net/http" "net/http/httptrace" "net/url" "strings" "time" + + "golang.org/x/net/proxy" ) // ErrBadHandshake is returned when the server response to opening handshake is @@ -224,6 +228,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h k == "Connection" || k == "Sec-Websocket-Key" || k == "Sec-Websocket-Version" || + //#nosec G101 (CWE-798): Potential HTTP request smuggling via parameter pollution k == "Sec-Websocket-Extensions" || (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) @@ -289,7 +294,9 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h } err = c.SetDeadline(deadline) if err != nil { - c.Close() + if err := c.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } return nil, err } return c, nil @@ -303,7 +310,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h return nil, nil, err } if proxyURL != nil { - dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + dialer, err := proxy.FromURL(proxyURL, netDialerFunc(netDial)) if err != nil { return nil, nil, err } @@ -318,18 +325,20 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h } netConn, err := netDial("tcp", hostPort) + if err != nil { + return nil, nil, err + } if trace != nil && trace.GotConn != nil { trace.GotConn(httptrace.GotConnInfo{ Conn: netConn, }) } - if err != nil { - return nil, nil, err - } defer func() { if netConn != nil { - netConn.Close() + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } } }() @@ -370,6 +379,17 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h resp, err := http.ReadResponse(conn.br, req) if err != nil { + if d.TLSClientConfig != nil { + for _, proto := range d.TLSClientConfig.NextProtos { + if proto != "http/1.1" { + return nil, nil, fmt.Errorf( + "websocket: protocol %q was given but is not supported;"+ + "sharing tls.Config with net/http Transport can cause this error: %w", + proto, err, + ) + } + } + } return nil, nil, err } @@ -388,7 +408,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h // debugging. buf := make([]byte, 1024) n, _ := io.ReadFull(resp.Body, buf) - resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + resp.Body = io.NopCloser(bytes.NewReader(buf[:n])) return nil, resp, ErrBadHandshake } @@ -406,17 +426,19 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h break } - resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + resp.Body = io.NopCloser(bytes.NewReader([]byte{})) conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - netConn.SetDeadline(time.Time{}) + if err := netConn.SetDeadline(time.Time{}); err != nil { + return nil, nil, err + } netConn = nil // to avoid close in defer. 
return conn, resp, nil } func cloneTLSConfig(cfg *tls.Config) *tls.Config { if cfg == nil { - return &tls.Config{} + return &tls.Config{MinVersion: tls.VersionTLS12} } return cfg.Clone() } diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go index 813ffb1e8..9fed0ef52 100644 --- a/vendor/github.com/gorilla/websocket/compression.go +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -8,6 +8,7 @@ import ( "compress/flate" "errors" "io" + "log" "strings" "sync" ) @@ -33,7 +34,9 @@ func decompressNoContextTakeover(r io.Reader) io.ReadCloser { "\x01\x00\x00\xff\xff" fr, _ := flateReaderPool.Get().(io.ReadCloser) - fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + if err := fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil); err != nil { + panic(err) + } return &flateReadWrapper{fr} } @@ -132,7 +135,9 @@ func (r *flateReadWrapper) Read(p []byte) (int, error) { // Preemptively place the reader back in the pool. This helps with // scenarios where the application does not call NextReader() soon after // this final read. - r.Close() + if err := r.Close(); err != nil { + log.Printf("websocket: flateReadWrapper.Close() returned error: %v", err) + } } return n, err } diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go index 331eebc85..221e6cf79 100644 --- a/vendor/github.com/gorilla/websocket/conn.go +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -6,11 +6,11 @@ package websocket import ( "bufio" + "crypto/rand" "encoding/binary" "errors" "io" - "io/ioutil" - "math/rand" + "log" "net" "strconv" "strings" @@ -181,13 +181,20 @@ var ( errInvalidControlFrame = errors.New("websocket: invalid control frame") ) +// maskRand is an io.Reader for generating mask bytes. The reader is initialized +// to crypto/rand Reader. Tests swap the reader to a math/rand reader for +// reproducible results. +var maskRand = rand.Reader + +// newMaskKey returns a new 32 bit value for masking client frames. 
func newMaskKey() [4]byte { - n := rand.Uint32() - return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} + var k [4]byte + _, _ = io.ReadFull(maskRand, k[:]) + return k } func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok && e.Temporary() { + if e, ok := err.(net.Error); ok { err = &netError{msg: e.Error(), timeout: e.Timeout()} } return err @@ -372,7 +379,9 @@ func (c *Conn) read(n int) ([]byte, error) { if err == io.EOF { err = errUnexpectedEOF } - c.br.Discard(len(p)) + if _, err := c.br.Discard(len(p)); err != nil { + return p, err + } return p, err } @@ -387,7 +396,9 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return err } - c.conn.SetWriteDeadline(deadline) + if err := c.conn.SetWriteDeadline(deadline); err != nil { + return c.writeFatal(err) + } if len(buf1) == 0 { _, err = c.conn.Write(buf0) } else { @@ -397,7 +408,7 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return c.writeFatal(err) } if frameType == CloseMessage { - c.writeFatal(ErrCloseSent) + _ = c.writeFatal(ErrCloseSent) } return nil } @@ -438,7 +449,7 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er d := 1000 * time.Hour if !deadline.IsZero() { - d = deadline.Sub(time.Now()) + d = time.Until(deadline) if d < 0 { return errWriteTimeout } @@ -460,13 +471,15 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er return err } - c.conn.SetWriteDeadline(deadline) + if err := c.conn.SetWriteDeadline(deadline); err != nil { + return c.writeFatal(err) + } _, err = c.conn.Write(buf) if err != nil { return c.writeFatal(err) } if messageType == CloseMessage { - c.writeFatal(ErrCloseSent) + _ = c.writeFatal(ErrCloseSent) } return err } @@ -477,7 +490,9 @@ func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { // probably better to return an error in this situation, but we cannot // change this without breaking existing applications. if c.writer != nil { - c.writer.Close() + if err := c.writer.Close(); err != nil { + log.Printf("websocket: discarding writer close error: %v", err) + } c.writer = nil } @@ -630,7 +645,7 @@ func (w *messageWriter) flushFrame(final bool, extra []byte) error { } if final { - w.endMessage(errWriteClosed) + _ = w.endMessage(errWriteClosed) return nil } @@ -795,7 +810,7 @@ func (c *Conn) advanceFrame() (int, error) { // 1. Skip remainder of previous frame. 
if c.readRemaining > 0 { - if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + if _, err := io.CopyN(io.Discard, c.br, c.readRemaining); err != nil { return noFrame, err } } @@ -817,7 +832,9 @@ func (c *Conn) advanceFrame() (int, error) { rsv2 := p[0]&rsv2Bit != 0 rsv3 := p[0]&rsv3Bit != 0 mask := p[1]&maskBit != 0 - c.setReadRemaining(int64(p[1] & 0x7f)) + if err := c.setReadRemaining(int64(p[1] & 0x7f)); err != nil { + return noFrame, err + } c.readDecompress = false if rsv1 { @@ -922,7 +939,9 @@ func (c *Conn) advanceFrame() (int, error) { } if c.readLimit > 0 && c.readLength > c.readLimit { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + if err := c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)); err != nil { + return noFrame, err + } return noFrame, ErrReadLimit } @@ -934,7 +953,9 @@ func (c *Conn) advanceFrame() (int, error) { var payload []byte if c.readRemaining > 0 { payload, err = c.read(int(c.readRemaining)) - c.setReadRemaining(0) + if err := c.setReadRemaining(0); err != nil { + return noFrame, err + } if err != nil { return noFrame, err } @@ -981,7 +1002,9 @@ func (c *Conn) handleProtocolError(message string) error { if len(data) > maxControlFramePayloadSize { data = data[:maxControlFramePayloadSize] } - c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) + if err := c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)); err != nil { + return err + } return errors.New("websocket: " + message) } @@ -998,7 +1021,9 @@ func (c *Conn) handleProtocolError(message string) error { func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { // Close previous reader, only relevant for decompression. if c.reader != nil { - c.reader.Close() + if err := c.reader.Close(); err != nil { + log.Printf("websocket: discarding reader close error: %v", err) + } c.reader = nil } @@ -1054,7 +1079,9 @@ func (r *messageReader) Read(b []byte) (int, error) { } rem := c.readRemaining rem -= int64(n) - c.setReadRemaining(rem) + if err := c.setReadRemaining(rem); err != nil { + return 0, err + } if c.readRemaining > 0 && c.readErr == io.EOF { c.readErr = errUnexpectedEOF } @@ -1094,7 +1121,7 @@ func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { if err != nil { return messageType, nil, err } - p, err = ioutil.ReadAll(r) + p, err = io.ReadAll(r) return messageType, p, err } @@ -1136,7 +1163,9 @@ func (c *Conn) SetCloseHandler(h func(code int, text string) error) { if h == nil { h = func(code int, text string) error { message := FormatCloseMessage(code, "") - c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + if err := c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)); err != nil { + return err + } return nil } } @@ -1161,7 +1190,7 @@ func (c *Conn) SetPingHandler(h func(appData string) error) { err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) if err == ErrCloseSent { return nil - } else if e, ok := err.(net.Error); ok && e.Temporary() { + } else if _, ok := err.(net.Error); ok { return nil } return err @@ -1189,8 +1218,16 @@ func (c *Conn) SetPongHandler(h func(appData string) error) { c.handlePong = h } +// NetConn returns the underlying connection that is wrapped by c. +// Note that writing to or reading from this connection directly will corrupt the +// WebSocket connection. 
+func (c *Conn) NetConn() net.Conn { + return c.conn +} + // UnderlyingConn returns the internal net.Conn. This can be used to further // modifications to connection specific flags. +// Deprecated: Use the NetConn method. func (c *Conn) UnderlyingConn() net.Conn { return c.conn } diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go index d0742bf2a..67d0968be 100644 --- a/vendor/github.com/gorilla/websocket/mask.go +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -9,6 +9,7 @@ package websocket import "unsafe" +// #nosec G103 -- (CWE-242) Has been audited const wordSize = int(unsafe.Sizeof(uintptr(0))) func maskBytes(key [4]byte, pos int, b []byte) int { @@ -22,6 +23,7 @@ func maskBytes(key [4]byte, pos int, b []byte) int { } // Mask one byte at a time to word boundary. + //#nosec G103 -- (CWE-242) Has been audited if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { n = wordSize - n for i := range b[:n] { @@ -36,11 +38,13 @@ func maskBytes(key [4]byte, pos int, b []byte) int { for i := range k { k[i] = key[(pos+i)&3] } + //#nosec G103 -- (CWE-242) Has been audited kw := *(*uintptr)(unsafe.Pointer(&k)) // Mask one word at a time. n := (len(b) / wordSize) * wordSize for i := 0; i < n; i += wordSize { + //#nosec G103 -- (CWE-242) Has been audited *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go index e0f466b72..80f55d1ea 100644 --- a/vendor/github.com/gorilla/websocket/proxy.go +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -8,10 +8,13 @@ import ( "bufio" "encoding/base64" "errors" + "log" "net" "net/http" "net/url" "strings" + + "golang.org/x/net/proxy" ) type netDialerFunc func(network, addr string) (net.Conn, error) @@ -21,7 +24,7 @@ func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { } func init() { - proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { + proxy.RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy.Dialer) (proxy.Dialer, error) { return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil }) } @@ -55,7 +58,9 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) } if err := connectReq.Write(conn); err != nil { - conn.Close() + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } return nil, err } @@ -64,12 +69,16 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) br := bufio.NewReader(conn) resp, err := http.ReadResponse(br, connectReq) if err != nil { - conn.Close() + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } return nil, err } if resp.StatusCode != 200 { - conn.Close() + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } f := strings.SplitN(resp.Status, " ", 2) return nil, errors.New(f[1]) } diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go index 24d53b38a..1e720e1da 100644 --- a/vendor/github.com/gorilla/websocket/server.go +++ 
b/vendor/github.com/gorilla/websocket/server.go @@ -8,6 +8,7 @@ import ( "bufio" "errors" "io" + "log" "net/http" "net/url" "strings" @@ -154,8 +155,8 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade } challengeKey := r.Header.Get("Sec-Websocket-Key") - if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank") + if !isValidChallengeKey(challengeKey) { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header must be Base64 encoded value of 16-byte in length") } subprotocol := u.selectSubprotocol(r, responseHeader) @@ -183,7 +184,9 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade } if brw.Reader.Buffered() > 0 { - netConn.Close() + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } return nil, errors.New("websocket: client sent data before handshake is complete") } @@ -248,17 +251,34 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade p = append(p, "\r\n"...) // Clear deadlines set by HTTP server. - netConn.SetDeadline(time.Time{}) + if err := netConn.SetDeadline(time.Time{}); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + if err := netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } } if _, err = netConn.Write(p); err != nil { - netConn.Close() + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } return nil, err } if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Time{}) + if err := netConn.SetWriteDeadline(time.Time{}); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } } return c, nil @@ -356,8 +376,12 @@ func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { // bufio.Writer's underlying writer. 
var wh writeHook bw.Reset(&wh) - bw.WriteByte(0) - bw.Flush() + if err := bw.WriteByte(0); err != nil { + panic(err) + } + if err := bw.Flush(); err != nil { + log.Printf("websocket: bufioWriterBuffer: Flush: %v", err) + } bw.Reset(originalWriter) diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go index a62b68ccb..7f3864534 100644 --- a/vendor/github.com/gorilla/websocket/tls_handshake.go +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -1,6 +1,3 @@ -//go:build go1.17 -// +build go1.17 - package websocket import ( diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go deleted file mode 100644 index e1b2b44f6..000000000 --- a/vendor/github.com/gorilla/websocket/tls_handshake_116.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -package websocket - -import ( - "context" - "crypto/tls" -) - -func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { - if err := tlsConn.Handshake(); err != nil { - return err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go index 7bf2f66c6..9b1a629bf 100644 --- a/vendor/github.com/gorilla/websocket/util.go +++ b/vendor/github.com/gorilla/websocket/util.go @@ -6,7 +6,7 @@ package websocket import ( "crypto/rand" - "crypto/sha1" + "crypto/sha1" //#nosec G505 -- (CWE-327) https://datatracker.ietf.org/doc/html/rfc6455#page-54 "encoding/base64" "io" "net/http" @@ -17,7 +17,7 @@ import ( var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") func computeAcceptKey(challengeKey string) string { - h := sha1.New() + h := sha1.New() //#nosec G401 -- (CWE-326) https://datatracker.ietf.org/doc/html/rfc6455#page-54 h.Write([]byte(challengeKey)) h.Write(keyGUID) return base64.StdEncoding.EncodeToString(h.Sum(nil)) @@ -281,3 +281,18 @@ headers: } return result } + +// isValidChallengeKey checks if the argument meets RFC6455 specification. +func isValidChallengeKey(s string) bool { + // From RFC6455: + // + // A |Sec-WebSocket-Key| header field with a base64-encoded (see + // Section 4 of [RFC4648]) value that, when decoded, is 16 bytes in + // length. + + if s == "" { + return false + } + decoded, err := base64.StdEncoding.DecodeString(s) + return err == nil && len(decoded) == 16 +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go deleted file mode 100644 index 2e668f6b8..000000000 --- a/vendor/github.com/gorilla/websocket/x_net_proxy.go +++ /dev/null @@ -1,473 +0,0 @@ -// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. -//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy - -// Package proxy provides support for a variety of protocols to proxy network -// data. -// - -package websocket - -import ( - "errors" - "io" - "net" - "net/url" - "os" - "strconv" - "strings" - "sync" -) - -type proxy_direct struct{} - -// Direct is a direct proxy: one that makes network connections directly. 
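// Illustrative sketch, not from the vendored sources: the bundled proxy code
// being deleted here is replaced by a direct dependency on
// golang.org/x/net/proxy, already wired up in proxy.go and client.go above.
// Only the proxy address and target host are placeholders; FromURL and
// Direct are the real x/net/proxy API.
package main

import (
	"log"
	"net/url"

	"golang.org/x/net/proxy"
)

func main() {
	u, err := url.Parse("socks5://127.0.0.1:1080") // placeholder proxy address
	if err != nil {
		log.Fatal(err)
	}
	dialer, err := proxy.FromURL(u, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}
	conn, err := dialer.Dial("tcp", "example.com:80") // placeholder target
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Printf("connected via proxy to %s", conn.RemoteAddr())
}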
-var proxy_Direct = proxy_direct{} - -func (proxy_direct) Dial(network, addr string) (net.Conn, error) { - return net.Dial(network, addr) -} - -// A PerHost directs connections to a default Dialer unless the host name -// requested matches one of a number of exceptions. -type proxy_PerHost struct { - def, bypass proxy_Dialer - - bypassNetworks []*net.IPNet - bypassIPs []net.IP - bypassZones []string - bypassHosts []string -} - -// NewPerHost returns a PerHost Dialer that directs connections to either -// defaultDialer or bypass, depending on whether the connection matches one of -// the configured rules. -func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { - return &proxy_PerHost{ - def: defaultDialer, - bypass: bypass, - } -} - -// Dial connects to the address addr on the given network through either -// defaultDialer or bypass. -func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - - return p.dialerForRequest(host).Dial(network, addr) -} - -func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { - if ip := net.ParseIP(host); ip != nil { - for _, net := range p.bypassNetworks { - if net.Contains(ip) { - return p.bypass - } - } - for _, bypassIP := range p.bypassIPs { - if bypassIP.Equal(ip) { - return p.bypass - } - } - return p.def - } - - for _, zone := range p.bypassZones { - if strings.HasSuffix(host, zone) { - return p.bypass - } - if host == zone[1:] { - // For a zone ".example.com", we match "example.com" - // too. - return p.bypass - } - } - for _, bypassHost := range p.bypassHosts { - if bypassHost == host { - return p.bypass - } - } - return p.def -} - -// AddFromString parses a string that contains comma-separated values -// specifying hosts that should use the bypass proxy. Each value is either an -// IP address, a CIDR range, a zone (*.example.com) or a host name -// (localhost). A best effort is made to parse the string and errors are -// ignored. -func (p *proxy_PerHost) AddFromString(s string) { - hosts := strings.Split(s, ",") - for _, host := range hosts { - host = strings.TrimSpace(host) - if len(host) == 0 { - continue - } - if strings.Contains(host, "/") { - // We assume that it's a CIDR address like 127.0.0.0/8 - if _, net, err := net.ParseCIDR(host); err == nil { - p.AddNetwork(net) - } - continue - } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) - continue - } - if strings.HasPrefix(host, "*.") { - p.AddZone(host[1:]) - continue - } - p.AddHost(host) - } -} - -// AddIP specifies an IP address that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match an IP. -func (p *proxy_PerHost) AddIP(ip net.IP) { - p.bypassIPs = append(p.bypassIPs, ip) -} - -// AddNetwork specifies an IP range that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match. -func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { - p.bypassNetworks = append(p.bypassNetworks, net) -} - -// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of -// "example.com" matches "example.com" and all of its subdomains. -func (p *proxy_PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } - if !strings.HasPrefix(zone, ".") { - zone = "." 
+ zone - } - p.bypassZones = append(p.bypassZones, zone) -} - -// AddHost specifies a host name that will use the bypass proxy. -func (p *proxy_PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } - p.bypassHosts = append(p.bypassHosts, host) -} - -// A Dialer is a means to establish a connection. -type proxy_Dialer interface { - // Dial connects to the given address via the proxy. - Dial(network, addr string) (c net.Conn, err error) -} - -// Auth contains authentication parameters that specific Dialers may require. -type proxy_Auth struct { - User, Password string -} - -// FromEnvironment returns the dialer specified by the proxy related variables in -// the environment. -func proxy_FromEnvironment() proxy_Dialer { - allProxy := proxy_allProxyEnv.Get() - if len(allProxy) == 0 { - return proxy_Direct - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return proxy_Direct - } - proxy, err := proxy_FromURL(proxyURL, proxy_Direct) - if err != nil { - return proxy_Direct - } - - noProxy := proxy_noProxyEnv.Get() - if len(noProxy) == 0 { - return proxy - } - - perHost := proxy_NewPerHost(proxy, proxy_Direct) - perHost.AddFromString(noProxy) - return perHost -} - -// proxySchemes is a map from URL schemes to a function that creates a Dialer -// from a URL with such a scheme. -var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) - -// RegisterDialerType takes a URL scheme and a function to generate Dialers from -// a URL with that scheme and a forwarding Dialer. Registered schemes are used -// by FromURL. -func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { - if proxy_proxySchemes == nil { - proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) - } - proxy_proxySchemes[scheme] = f -} - -// FromURL returns a Dialer given a URL specification and an underlying -// Dialer for it to make network requests. -func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { - var auth *proxy_Auth - if u.User != nil { - auth = new(proxy_Auth) - auth.User = u.User.Username() - if p, ok := u.User.Password(); ok { - auth.Password = p - } - } - - switch u.Scheme { - case "socks5": - return proxy_SOCKS5("tcp", u.Host, auth, forward) - } - - // If the scheme doesn't match any of the built-in schemes, see if it - // was registered by another package. - if proxy_proxySchemes != nil { - if f, ok := proxy_proxySchemes[u.Scheme]; ok { - return f(u, forward) - } - } - - return nil, errors.New("proxy: unknown scheme: " + u.Scheme) -} - -var ( - proxy_allProxyEnv = &proxy_envOnce{ - names: []string{"ALL_PROXY", "all_proxy"}, - } - proxy_noProxyEnv = &proxy_envOnce{ - names: []string{"NO_PROXY", "no_proxy"}, - } -) - -// envOnce looks up an environment variable (optionally by multiple -// names) once. It mitigates expensive lookups on some platforms -// (e.g. Windows). -// (Borrowed from net/http/transport.go) -type proxy_envOnce struct { - names []string - once sync.Once - val string -} - -func (e *proxy_envOnce) Get() string { - e.once.Do(e.init) - return e.val -} - -func (e *proxy_envOnce) init() { - for _, n := range e.names { - e.val = os.Getenv(n) - if e.val != "" { - return - } - } -} - -// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address -// with an optional username and password. See RFC 1928 and RFC 1929. 
-func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { - s := &proxy_socks5{ - network: network, - addr: addr, - forward: forward, - } - if auth != nil { - s.user = auth.User - s.password = auth.Password - } - - return s, nil -} - -type proxy_socks5 struct { - user, password string - network, addr string - forward proxy_Dialer -} - -const proxy_socks5Version = 5 - -const ( - proxy_socks5AuthNone = 0 - proxy_socks5AuthPassword = 2 -) - -const proxy_socks5Connect = 1 - -const ( - proxy_socks5IP4 = 1 - proxy_socks5Domain = 3 - proxy_socks5IP6 = 4 -) - -var proxy_socks5Errors = []string{ - "", - "general failure", - "connection forbidden", - "network unreachable", - "host unreachable", - "connection refused", - "TTL expired", - "command not supported", - "address type not supported", -} - -// Dial connects to the address addr on the given network via the SOCKS5 proxy. -func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { - switch network { - case "tcp", "tcp6", "tcp4": - default: - return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) - } - - conn, err := s.forward.Dial(s.network, s.addr) - if err != nil { - return nil, err - } - if err := s.connect(conn, addr); err != nil { - conn.Close() - return nil, err - } - return conn, nil -} - -// connect takes an existing connection to a socks5 proxy server, -// and commands the server to extend that connection to target, -// which must be a canonical address with a host and port. -func (s *proxy_socks5) connect(conn net.Conn, target string) error { - host, portStr, err := net.SplitHostPort(target) - if err != nil { - return err - } - - port, err := strconv.Atoi(portStr) - if err != nil { - return errors.New("proxy: failed to parse port number: " + portStr) - } - if port < 1 || port > 0xffff { - return errors.New("proxy: port number out of range: " + portStr) - } - - // the size here is just an estimate - buf := make([]byte, 0, 6+len(host)) - - buf = append(buf, proxy_socks5Version) - if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { - buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) - } else { - buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) - } - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - if buf[0] != 5 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) - } - if buf[1] == 0xff { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") - } - - // See RFC 1929 - if buf[1] == proxy_socks5AuthPassword { - buf = buf[:0] - buf = append(buf, 1 /* password protocol version */) - buf = append(buf, uint8(len(s.user))) - buf = append(buf, s.user...) - buf = append(buf, uint8(len(s.password))) - buf = append(buf, s.password...) 
- - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if buf[1] != 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") - } - } - - buf = buf[:0] - buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) - - if ip := net.ParseIP(host); ip != nil { - if ip4 := ip.To4(); ip4 != nil { - buf = append(buf, proxy_socks5IP4) - ip = ip4 - } else { - buf = append(buf, proxy_socks5IP6) - } - buf = append(buf, ip...) - } else { - if len(host) > 255 { - return errors.New("proxy: destination host name too long: " + host) - } - buf = append(buf, proxy_socks5Domain) - buf = append(buf, byte(len(host))) - buf = append(buf, host...) - } - buf = append(buf, byte(port>>8), byte(port)) - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:4]); err != nil { - return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - failure := "unknown error" - if int(buf[1]) < len(proxy_socks5Errors) { - failure = proxy_socks5Errors[buf[1]] - } - - if len(failure) > 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) - } - - bytesToDiscard := 0 - switch buf[3] { - case proxy_socks5IP4: - bytesToDiscard = net.IPv4len - case proxy_socks5IP6: - bytesToDiscard = net.IPv6len - case proxy_socks5Domain: - _, err := io.ReadFull(conn, buf[:1]) - if err != nil { - return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - bytesToDiscard = int(buf[0]) - default: - return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) - } - - if cap(buf) < bytesToDiscard { - buf = make([]byte, bytesToDiscard) - } else { - buf = buf[:bytesToDiscard] - } - if _, err := io.ReadFull(conn, buf); err != nil { - return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - // Also need to discard the port number - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - return nil -} diff --git a/vendor/github.com/grafana/regexp/backtrack.go b/vendor/github.com/grafana/regexp/backtrack.go index 0739f5ff5..7c37c66a8 100644 --- a/vendor/github.com/grafana/regexp/backtrack.go +++ b/vendor/github.com/grafana/regexp/backtrack.go @@ -91,9 +91,7 @@ func (b *bitState) reset(prog *syntax.Prog, end int, ncap int) { b.visited = make([]uint32, visitedSize, maxBacktrackVector/visitedBits) } else { b.visited = b.visited[:visitedSize] - for i := range b.visited { - b.visited[i] = 0 - } + clear(b.visited) // set to 0 } if cap(b.cap) < ncap { diff --git a/vendor/github.com/grafana/regexp/onepass.go b/vendor/github.com/grafana/regexp/onepass.go index bc47f4c4a..53cbd9583 100644 --- a/vendor/github.com/grafana/regexp/onepass.go +++ 
b/vendor/github.com/grafana/regexp/onepass.go @@ -6,7 +6,7 @@ package regexp import ( "regexp/syntax" - "sort" + "slices" "strings" "unicode" "unicode/utf8" @@ -33,11 +33,11 @@ type onePassInst struct { Next []uint32 } -// OnePassPrefix returns a literal string that all matches for the +// onePassPrefix returns a literal string that all matches for the // regexp must start with. Complete is true if the prefix // is the entire match. Pc is the index of the last rune instruction -// in the string. The OnePassPrefix skips over the mandatory -// EmptyBeginText +// in the string. The onePassPrefix skips over the mandatory +// EmptyBeginText. func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { i := &p.Inst[p.Start] if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 { @@ -68,7 +68,7 @@ func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { return buf.String(), complete, pc } -// OnePassNext selects the next actionable state of the prog, based on the input character. +// onePassNext selects the next actionable state of the prog, based on the input character. // It should only be called when i.Op == InstAlt or InstAltMatch, and from the one-pass machine. // One of the alternates may ultimately lead without input to end of line. If the instruction // is InstAltMatch the path to the InstMatch is in i.Out, the normal node in i.Next. @@ -218,7 +218,7 @@ func cleanupOnePass(prog *onePassProg, original *syntax.Prog) { } } -// onePassCopy creates a copy of the original Prog, as we'll be modifying it +// onePassCopy creates a copy of the original Prog, as we'll be modifying it. func onePassCopy(prog *syntax.Prog) *onePassProg { p := &onePassProg{ Start: prog.Start, @@ -282,13 +282,6 @@ func onePassCopy(prog *syntax.Prog) *onePassProg { return p } -// runeSlice exists to permit sorting the case-folded rune sets. -type runeSlice []rune - -func (p runeSlice) Len() int { return len(p) } -func (p runeSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p runeSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune} var anyRune = []rune{0, unicode.MaxRune} @@ -383,7 +376,7 @@ func makeOnePass(p *onePassProg) *onePassProg { for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { runes = append(runes, r1, r1) } - sort.Sort(runeSlice(runes)) + slices.Sort(runes) } else { runes = append(runes, inst.Rune...) } @@ -407,7 +400,7 @@ func makeOnePass(p *onePassProg) *onePassProg { for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { runes = append(runes, r1, r1) } - sort.Sort(runeSlice(runes)) + slices.Sort(runes) } else { runes = append(runes, inst.Rune[0], inst.Rune[0]) } diff --git a/vendor/github.com/grafana/regexp/regexp.go b/vendor/github.com/grafana/regexp/regexp.go index 7958a3972..d1218ad0e 100644 --- a/vendor/github.com/grafana/regexp/regexp.go +++ b/vendor/github.com/grafana/regexp/regexp.go @@ -8,9 +8,7 @@ // general syntax used by Perl, Python, and other languages. // More precisely, it is the syntax accepted by RE2 and described at // https://golang.org/s/re2syntax, except for \C. -// For an overview of the syntax, run -// -// go doc regexp/syntax +// For an overview of the syntax, see the [regexp/syntax] package. 
// // The regexp implementation provided by this package is // guaranteed to run in time linear in the size of the input. @@ -23,10 +21,10 @@ // or any book about automata theory. // // All characters are UTF-8-encoded code points. -// Following utf8.DecodeRune, each byte of an invalid UTF-8 sequence +// Following [utf8.DecodeRune], each byte of an invalid UTF-8 sequence // is treated as if it encoded utf8.RuneError (U+FFFD). // -// There are 16 methods of Regexp that match a regular expression and identify +// There are 16 methods of [Regexp] that match a regular expression and identify // the matched text. Their names are matched by this regular expression: // // Find(All)?(String)?(Submatch)?(Index)? @@ -82,7 +80,7 @@ import ( // Regexp is the representation of a compiled regular expression. // A Regexp is safe for concurrent use by multiple goroutines, -// except for configuration methods, such as Longest. +// except for configuration methods, such as [Regexp.Longest]. type Regexp struct { expr string // as passed to Compile prog *syntax.Prog // compiled program @@ -110,21 +108,21 @@ func (re *Regexp) String() string { return re.expr } -// Copy returns a new Regexp object copied from re. -// Calling Longest on one copy does not affect another. +// Copy returns a new [Regexp] object copied from re. +// Calling [Regexp.Longest] on one copy does not affect another. // -// Deprecated: In earlier releases, when using a Regexp in multiple goroutines, +// Deprecated: In earlier releases, when using a [Regexp] in multiple goroutines, // giving each goroutine its own copy helped to avoid lock contention. // As of Go 1.12, using Copy is no longer necessary to avoid lock contention. // Copy may still be appropriate if the reason for its use is to make -// two copies with different Longest settings. +// two copies with different [Regexp.Longest] settings. func (re *Regexp) Copy() *Regexp { re2 := *re return &re2 } // Compile parses a regular expression and returns, if successful, -// a Regexp object that can be used to match against text. +// a [Regexp] object that can be used to match against text. // // When matching against text, the regexp returns a match that // begins as early as possible in the input (leftmost), and among those @@ -132,12 +130,12 @@ func (re *Regexp) Copy() *Regexp { // This so-called leftmost-first matching is the same semantics // that Perl, Python, and other implementations use, although this // package implements it without the expense of backtracking. -// For POSIX leftmost-longest matching, see CompilePOSIX. +// For POSIX leftmost-longest matching, see [CompilePOSIX]. func Compile(expr string) (*Regexp, error) { return compile(expr, syntax.Perl, false) } -// CompilePOSIX is like Compile but restricts the regular expression +// CompilePOSIX is like [Compile] but restricts the regular expression // to POSIX ERE (egrep) syntax and changes the match semantics to // leftmost-longest. // @@ -164,7 +162,7 @@ func CompilePOSIX(expr string) (*Regexp, error) { // That is, when matching against text, the regexp returns a match that // begins as early as possible in the input (leftmost), and among those // it chooses a match that is as long as possible. -// This method modifies the Regexp and may not be called concurrently +// This method modifies the [Regexp] and may not be called concurrently // with any other methods. 
func (re *Regexp) Longest() { re.longest = true @@ -270,7 +268,7 @@ func (re *Regexp) put(m *machine) { matchPool[re.mpool].Put(m) } -// minInputLen walks the regexp to find the minimum length of any matchable input +// minInputLen walks the regexp to find the minimum length of any matchable input. func minInputLen(re *syntax.Regexp) int { switch re.Op { default: @@ -310,7 +308,7 @@ func minInputLen(re *syntax.Regexp) int { } } -// MustCompile is like Compile but panics if the expression cannot be parsed. +// MustCompile is like [Compile] but panics if the expression cannot be parsed. // It simplifies safe initialization of global variables holding compiled regular // expressions. func MustCompile(str string) *Regexp { @@ -321,7 +319,7 @@ func MustCompile(str string) *Regexp { return regexp } -// MustCompilePOSIX is like CompilePOSIX but panics if the expression cannot be parsed. +// MustCompilePOSIX is like [CompilePOSIX] but panics if the expression cannot be parsed. // It simplifies safe initialization of global variables holding compiled regular // expressions. func MustCompilePOSIX(str string) *Regexp { @@ -339,13 +337,13 @@ func quote(s string) string { return strconv.Quote(s) } -// NumSubexp returns the number of parenthesized subexpressions in this Regexp. +// NumSubexp returns the number of parenthesized subexpressions in this [Regexp]. func (re *Regexp) NumSubexp() int { return re.numSubexp } // SubexpNames returns the names of the parenthesized subexpressions -// in this Regexp. The name for the first sub-expression is names[1], +// in this [Regexp]. The name for the first sub-expression is names[1], // so that if m is a match slice, the name for m[i] is SubexpNames()[i]. // Since the Regexp as a whole cannot be named, names[0] is always // the empty string. The slice should not be modified. @@ -521,7 +519,7 @@ func (re *Regexp) LiteralPrefix() (prefix string, complete bool) { return re.prefix, re.prefixComplete } -// MatchReader reports whether the text returned by the RuneReader +// MatchReader reports whether the text returned by the [io.RuneReader] // contains any match of the regular expression re. func (re *Regexp) MatchReader(r io.RuneReader) bool { return re.doMatch(r, nil, "") @@ -541,7 +539,7 @@ func (re *Regexp) Match(b []byte) bool { // MatchReader reports whether the text returned by the RuneReader // contains any match of the regular expression pattern. -// More complicated queries need to use Compile and the full Regexp interface. +// More complicated queries need to use [Compile] and the full [Regexp] interface. func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) { re, err := Compile(pattern) if err != nil { @@ -552,7 +550,7 @@ func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) { // MatchString reports whether the string s // contains any match of the regular expression pattern. -// More complicated queries need to use Compile and the full Regexp interface. +// More complicated queries need to use [Compile] and the full [Regexp] interface. func MatchString(pattern string, s string) (matched bool, err error) { re, err := Compile(pattern) if err != nil { @@ -563,7 +561,7 @@ func MatchString(pattern string, s string) (matched bool, err error) { // Match reports whether the byte slice b // contains any match of the regular expression pattern. -// More complicated queries need to use Compile and the full Regexp interface. +// More complicated queries need to use [Compile] and the full [Regexp] interface. 
func Match(pattern string, b []byte) (matched bool, err error) { re, err := Compile(pattern) if err != nil { @@ -572,9 +570,9 @@ func Match(pattern string, b []byte) (matched bool, err error) { return re.Match(b), nil } -// ReplaceAllString returns a copy of src, replacing matches of the Regexp -// with the replacement string repl. Inside repl, $ signs are interpreted as -// in Expand, so for instance $1 represents the text of the first submatch. +// ReplaceAllString returns a copy of src, replacing matches of the [Regexp] +// with the replacement string repl. +// Inside repl, $ signs are interpreted as in [Regexp.Expand]. func (re *Regexp) ReplaceAllString(src, repl string) string { n := 2 if strings.Contains(repl, "$") { @@ -586,9 +584,9 @@ func (re *Regexp) ReplaceAllString(src, repl string) string { return string(b) } -// ReplaceAllLiteralString returns a copy of src, replacing matches of the Regexp +// ReplaceAllLiteralString returns a copy of src, replacing matches of the [Regexp] // with the replacement string repl. The replacement repl is substituted directly, -// without using Expand. +// without using [Regexp.Expand]. func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { return string(re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte { return append(dst, repl...) @@ -596,9 +594,9 @@ func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { } // ReplaceAllStringFunc returns a copy of src in which all matches of the -// Regexp have been replaced by the return value of function repl applied +// [Regexp] have been replaced by the return value of function repl applied // to the matched substring. The replacement returned by repl is substituted -// directly, without using Expand. +// directly, without using [Regexp.Expand]. func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { b := re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte { return append(dst, repl(src[match[0]:match[1]])...) @@ -671,9 +669,9 @@ func (re *Regexp) replaceAll(bsrc []byte, src string, nmatch int, repl func(dst return buf } -// ReplaceAll returns a copy of src, replacing matches of the Regexp -// with the replacement text repl. Inside repl, $ signs are interpreted as -// in Expand, so for instance $1 represents the text of the first submatch. +// ReplaceAll returns a copy of src, replacing matches of the [Regexp] +// with the replacement text repl. +// Inside repl, $ signs are interpreted as in [Regexp.Expand]. func (re *Regexp) ReplaceAll(src, repl []byte) []byte { n := 2 if bytes.IndexByte(repl, '$') >= 0 { @@ -689,9 +687,9 @@ func (re *Regexp) ReplaceAll(src, repl []byte) []byte { return b } -// ReplaceAllLiteral returns a copy of src, replacing matches of the Regexp +// ReplaceAllLiteral returns a copy of src, replacing matches of the [Regexp] // with the replacement bytes repl. The replacement repl is substituted directly, -// without using Expand. +// without using [Regexp.Expand]. func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte { return append(dst, repl...) @@ -699,9 +697,9 @@ func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { } // ReplaceAllFunc returns a copy of src in which all matches of the -// Regexp have been replaced by the return value of function repl applied +// [Regexp] have been replaced by the return value of function repl applied // to the matched byte slice. 
The replacement returned by repl is substituted -// directly, without using Expand. +// directly, without using [Regexp.Expand]. func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte { return append(dst, repl(src[match[0]:match[1]])...) @@ -845,7 +843,7 @@ func (re *Regexp) FindIndex(b []byte) (loc []int) { // FindString returns a string holding the text of the leftmost match in s of the regular // expression. If there is no match, the return value is an empty string, // but it will also be empty if the regular expression successfully matches -// an empty string. Use FindStringIndex or FindStringSubmatch if it is +// an empty string. Use [Regexp.FindStringIndex] or [Regexp.FindStringSubmatch] if it is // necessary to distinguish these cases. func (re *Regexp) FindString(s string) string { var dstCap [2]int @@ -870,7 +868,7 @@ func (re *Regexp) FindStringIndex(s string) (loc []int) { // FindReaderIndex returns a two-element slice of integers defining the // location of the leftmost match of the regular expression in text read from -// the RuneReader. The match text was found in the input stream at +// the [io.RuneReader]. The match text was found in the input stream at // byte offset loc[0] through loc[1]-1. // A return value of nil indicates no match. func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) { @@ -904,7 +902,7 @@ func (re *Regexp) FindSubmatch(b []byte) [][]byte { // Expand appends template to dst and returns the result; during the // append, Expand replaces variables in the template with corresponding // matches drawn from src. The match slice should have been returned by -// FindSubmatchIndex. +// [Regexp.FindSubmatchIndex]. // // In the template, a variable is denoted by a substring of the form // $name or ${name}, where name is a non-empty sequence of letters, @@ -922,7 +920,7 @@ func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) [ return re.expand(dst, string(template), src, "", match) } -// ExpandString is like Expand but the template and source are strings. +// ExpandString is like [Regexp.Expand] but the template and source are strings. // It appends to and returns a byte slice in order to give the calling // code control over allocation. func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte { @@ -1067,7 +1065,7 @@ func (re *Regexp) FindStringSubmatchIndex(s string) []int { // FindReaderSubmatchIndex returns a slice holding the index pairs // identifying the leftmost match of the regular expression of text read by -// the RuneReader, and the matches, if any, of its subexpressions, as defined +// the [io.RuneReader], and the matches, if any, of its subexpressions, as defined // by the 'Submatch' and 'Index' descriptions in the package comment. A // return value of nil indicates no match. func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { @@ -1076,7 +1074,7 @@ func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { const startSize = 10 // The size at which to start a slice in the 'All' routines. -// FindAll is the 'All' version of Find; it returns a slice of all successive +// FindAll is the 'All' version of [Regexp.Find]; it returns a slice of all successive // matches of the expression, as defined by the 'All' description in the // package comment. // A return value of nil indicates no match. 
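The regexp.go hunks above and below only modernize doc comments, turning plain identifiers like Expand, FindStringIndex, and FindSubmatchIndex into [Regexp.Expand]-style doc links; there are no behavior changes. For reviewers skimming the vendored API, a minimal sketch of the documented Find/ReplaceAll semantics (the pattern and inputs here are illustrative; grafana/regexp is a drop-in for the standard library API):

```go
package main

import (
	"fmt"

	"github.com/grafana/regexp" // drop-in replacement for the standard library regexp
)

func main() {
	re := regexp.MustCompile(`(?P<user>\w+)@(?P<host>\w+)`)

	// ReplaceAllString interprets $ signs as in Regexp.Expand:
	// ${user} and ${host} (or $1 and $2) name the capturing groups.
	fmt.Println(re.ReplaceAllString("alice@example", "${host}/${user}")) // example/alice

	// FindAllStringSubmatch is the 'All' version of FindStringSubmatch;
	// n = -1 returns all successive matches.
	for _, m := range re.FindAllStringSubmatch("a@x b@y", -1) {
		fmt.Println(m[1], m[2]) // "a x", then "b y"
	}
}
```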
@@ -1094,7 +1092,7 @@ func (re *Regexp) FindAll(b []byte, n int) [][]byte { return result } -// FindAllIndex is the 'All' version of FindIndex; it returns a slice of all +// FindAllIndex is the 'All' version of [Regexp.FindIndex]; it returns a slice of all // successive matches of the expression, as defined by the 'All' description // in the package comment. // A return value of nil indicates no match. @@ -1112,7 +1110,7 @@ func (re *Regexp) FindAllIndex(b []byte, n int) [][]int { return result } -// FindAllString is the 'All' version of FindString; it returns a slice of all +// FindAllString is the 'All' version of [Regexp.FindString]; it returns a slice of all // successive matches of the expression, as defined by the 'All' description // in the package comment. // A return value of nil indicates no match. @@ -1130,7 +1128,7 @@ func (re *Regexp) FindAllString(s string, n int) []string { return result } -// FindAllStringIndex is the 'All' version of FindStringIndex; it returns a +// FindAllStringIndex is the 'All' version of [Regexp.FindStringIndex]; it returns a // slice of all successive matches of the expression, as defined by the 'All' // description in the package comment. // A return value of nil indicates no match. @@ -1148,7 +1146,7 @@ func (re *Regexp) FindAllStringIndex(s string, n int) [][]int { return result } -// FindAllSubmatch is the 'All' version of FindSubmatch; it returns a slice +// FindAllSubmatch is the 'All' version of [Regexp.FindSubmatch]; it returns a slice // of all successive matches of the expression, as defined by the 'All' // description in the package comment. // A return value of nil indicates no match. @@ -1172,7 +1170,7 @@ func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte { return result } -// FindAllSubmatchIndex is the 'All' version of FindSubmatchIndex; it returns +// FindAllSubmatchIndex is the 'All' version of [Regexp.FindSubmatchIndex]; it returns // a slice of all successive matches of the expression, as defined by the // 'All' description in the package comment. // A return value of nil indicates no match. @@ -1190,7 +1188,7 @@ func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int { return result } -// FindAllStringSubmatch is the 'All' version of FindStringSubmatch; it +// FindAllStringSubmatch is the 'All' version of [Regexp.FindStringSubmatch]; it // returns a slice of all successive matches of the expression, as defined by // the 'All' description in the package comment. // A return value of nil indicates no match. @@ -1215,7 +1213,7 @@ func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string { } // FindAllStringSubmatchIndex is the 'All' version of -// FindStringSubmatchIndex; it returns a slice of all successive matches of +// [Regexp.FindStringSubmatchIndex]; it returns a slice of all successive matches of // the expression, as defined by the 'All' description in the package // comment. // A return value of nil indicates no match. @@ -1237,8 +1235,8 @@ func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int { // the substrings between those expression matches. // // The slice returned by this method consists of all the substrings of s -// not contained in the slice returned by FindAllString. When called on an expression -// that contains no metacharacters, it is equivalent to strings.SplitN. +// not contained in the slice returned by [Regexp.FindAllString]. When called on an expression +// that contains no metacharacters, it is equivalent to [strings.SplitN]. 
// // Example: // @@ -1283,3 +1281,24 @@ func (re *Regexp) Split(s string, n int) []string { return strings } + +// MarshalText implements [encoding.TextMarshaler]. The output +// matches that of calling the [Regexp.String] method. +// +// Note that the output is lossy in some cases: This method does not indicate +// POSIX regular expressions (i.e. those compiled by calling [CompilePOSIX]), or +// those for which the [Regexp.Longest] method has been called. +func (re *Regexp) MarshalText() ([]byte, error) { + return []byte(re.String()), nil +} + +// UnmarshalText implements [encoding.TextUnmarshaler] by calling +// [Compile] on the encoded value. +func (re *Regexp) UnmarshalText(text []byte) error { + newRE, err := Compile(string(text)) + if err != nil { + return err + } + *re = *newRE + return nil +} diff --git a/vendor/github.com/grafana/regexp/syntax/doc.go b/vendor/github.com/grafana/regexp/syntax/doc.go index f6a4b43f7..877f1043d 100644 --- a/vendor/github.com/grafana/regexp/syntax/doc.go +++ b/vendor/github.com/grafana/regexp/syntax/doc.go @@ -2,17 +2,17 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// DO NOT EDIT. This file is generated by mksyntaxgo from the RE2 distribution. +// Code generated by mksyntaxgo from the RE2 distribution. DO NOT EDIT. /* Package syntax parses regular expressions into parse trees and compiles parse trees into programs. Most clients of regular expressions will use the -facilities of package regexp (such as Compile and Match) instead of this package. +facilities of package [regexp] (such as [regexp.Compile] and [regexp.Match]) instead of this package. # Syntax -The regular expression syntax understood by this package when parsing with the Perl flag is as follows. -Parts of the syntax can be disabled by passing alternate flags to Parse. +The regular expression syntax understood by this package when parsing with the [Perl] flag is as follows. +Parts of the syntax can be disabled by passing alternate flags to [Parse]. Single characters: @@ -56,6 +56,7 @@ Grouping: (re) numbered capturing group (submatch) (?P<name>re) named & numbered capturing group (submatch) + (?<name>re) named & numbered capturing group (submatch) (?:re) non-capturing group (?flags) set flags within current group; non-capturing (?flags:re) set flags during re; non-capturing @@ -136,6 +137,6 @@ ASCII character classes: [[:word:]] word characters (== [0-9A-Za-z_]) [[:xdigit:]] hex digit (== [0-9A-Fa-f]) -Unicode character classes are those in unicode.Categories and unicode.Scripts. +Unicode character classes are those in [unicode.Categories] and [unicode.Scripts]. */ package syntax diff --git a/vendor/github.com/grafana/regexp/syntax/op_string.go b/vendor/github.com/grafana/regexp/syntax/op_string.go index 3952b2bdd..1368f5b7e 100644 --- a/vendor/github.com/grafana/regexp/syntax/op_string.go +++ b/vendor/github.com/grafana/regexp/syntax/op_string.go @@ -4,6 +4,32 @@ package syntax import "strconv" +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[OpNoMatch-1] + _ = x[OpEmptyMatch-2] + _ = x[OpLiteral-3] + _ = x[OpCharClass-4] + _ = x[OpAnyCharNotNL-5] + _ = x[OpAnyChar-6] + _ = x[OpBeginLine-7] + _ = x[OpEndLine-8] + _ = x[OpBeginText-9] + _ = x[OpEndText-10] + _ = x[OpWordBoundary-11] + _ = x[OpNoWordBoundary-12] + _ = x[OpCapture-13] + _ = x[OpStar-14] + _ = x[OpPlus-15] + _ = x[OpQuest-16] + _ = x[OpRepeat-17] + _ = x[OpConcat-18] + _ = x[OpAlternate-19] + _ = x[opPseudo-128] +} + const ( _Op_name_0 = "NoMatchEmptyMatchLiteralCharClassAnyCharNotNLAnyCharBeginLineEndLineBeginTextEndTextWordBoundaryNoWordBoundaryCaptureStarPlusQuestRepeatConcatAlternate" _Op_name_1 = "opPseudo" diff --git a/vendor/github.com/grafana/regexp/syntax/parse.go b/vendor/github.com/grafana/regexp/syntax/parse.go index b6d348d00..6ed6491c8 100644 --- a/vendor/github.com/grafana/regexp/syntax/parse.go +++ b/vendor/github.com/grafana/regexp/syntax/parse.go @@ -44,6 +44,7 @@ const ( ErrTrailingBackslash ErrorCode = "trailing backslash at end of expression" ErrUnexpectedParen ErrorCode = "unexpected )" ErrNestingDepth ErrorCode = "expression nests too deeply" + ErrLarge ErrorCode = "expression too large" ) func (e ErrorCode) String() string { @@ -159,7 +160,7 @@ func (p *parser) reuse(re *Regexp) { func (p *parser) checkLimits(re *Regexp) { if p.numRunes > maxRunes { - panic(ErrInternalError) + panic(ErrLarge) } p.checkSize(re) p.checkHeight(re) @@ -203,7 +204,7 @@ func (p *parser) checkSize(re *Regexp) { } if p.calcSize(re, true) > maxSize { - panic(ErrInternalError) + panic(ErrLarge) } } @@ -248,9 +249,7 @@ func (p *parser) calcSize(re *Regexp, force bool) int64 { size = int64(re.Max)*sub + int64(re.Max-re.Min) } - if size < 1 { - size = 1 - } + size = max(1, size) p.size[re] = size return size } @@ -381,14 +380,12 @@ func minFoldRune(r rune) rune { if r < minFold || r > maxFold { return r } - min := r + m := r r0 := r for r = unicode.SimpleFold(r); r != r0; r = unicode.SimpleFold(r) { - if min > r { - min = r - } + m = min(m, r) } - return min + return m } // op pushes a regexp with the given op onto the stack @@ -897,8 +894,8 @@ func parse(s string, flags Flags) (_ *Regexp, err error) { panic(r) case nil: // ok - case ErrInternalError: // too big - err = &Error{Code: ErrInternalError, Expr: s} + case ErrLarge: // too big + err = &Error{Code: ErrLarge, Expr: s} case ErrNestingDepth: err = &Error{Code: ErrNestingDepth, Expr: s} } @@ -1158,9 +1155,18 @@ func (p *parser) parsePerlFlags(s string) (rest string, err error) { // support all three as well. EcmaScript 4 uses only the Python form. // // In both the open source world (via Code Search) and the - // Google source tree, (?P<expr>name) is the dominant form, - // so that's the one we implement. One is enough. - if len(t) > 4 && t[2] == 'P' && t[3] == '<' { + // Google source tree, (?P<expr>name) and (?<expr>name) are the + // dominant forms of named captures and both are supported. + startsWithP := len(t) > 4 && t[2] == 'P' && t[3] == '<' + startsWithName := len(t) > 3 && t[2] == '<' + + if startsWithP || startsWithName { + // position of expr start + exprStartPos := 4 + if startsWithName { + exprStartPos = 3 + } + // Pull out name. end := strings.IndexRune(t, '>') if end < 0 { @@ -1170,8 +1176,8 @@ } return "", &Error{ErrInvalidNamedCapture, s} } - capture := t[:end+1] // "(?P<name>" - name := t[4:end] // "name" + capture := t[:end+1] // "(?P<name>" or "(?<name>"
+ name := t[exprStartPos:end] // "name" if err = checkUTF8(name); err != nil { return "", err } @@ -1853,6 +1859,22 @@ func cleanClass(rp *[]rune) []rune { return r[:w] } +// inCharClass reports whether r is in the class. +// It assumes the class has been cleaned by cleanClass. +func inCharClass(r rune, class []rune) bool { + _, ok := sort.Find(len(class)/2, func(i int) int { + lo, hi := class[2*i], class[2*i+1] + if r > hi { + return +1 + } + if r < lo { + return -1 + } + return 0 + }) + return ok +} + // appendLiteral returns the result of appending the literal x to the class r. func appendLiteral(r []rune, x rune, flags Flags) []rune { if flags&FoldCase != 0 { @@ -1937,7 +1959,7 @@ func appendClass(r []rune, x []rune) []rune { return r } -// appendFolded returns the result of appending the case folding of the class x to the class r. +// appendFoldedClass returns the result of appending the case folding of the class x to the class r. func appendFoldedClass(r []rune, x []rune) []rune { for i := 0; i < len(x); i += 2 { r = appendFoldedRange(r, x[i], x[i+1]) diff --git a/vendor/github.com/grafana/regexp/syntax/prog.go b/vendor/github.com/grafana/regexp/syntax/prog.go index 896cdc42c..6a3705ec8 100644 --- a/vendor/github.com/grafana/regexp/syntax/prog.go +++ b/vendor/github.com/grafana/regexp/syntax/prog.go @@ -106,7 +106,9 @@ func EmptyOpContext(r1, r2 rune) EmptyOp { // during the evaluation of the \b and \B zero-width assertions. // These assertions are ASCII-only: the word characters are [A-Za-z0-9_]. func IsWordChar(r rune) bool { - return 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' || '0' <= r && r <= '9' || r == '_' + // Test for lowercase letters first, as these occur more + // frequently than uppercase letters in common cases. + return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || '0' <= r && r <= '9' || r == '_' } // An Inst is a single instruction in a regular expression program. @@ -189,7 +191,7 @@ Loop: const noMatch = -1 // MatchRune reports whether the instruction matches (and consumes) r. -// It should only be called when i.Op == InstRune. +// It should only be called when i.Op == [InstRune]. func (i *Inst) MatchRune(r rune) bool { return i.MatchRunePos(r) != noMatch } @@ -198,7 +200,7 @@ func (i *Inst) MatchRune(r rune) bool { // If so, MatchRunePos returns the index of the matching rune pair // (or, when len(i.Rune) == 1, rune singleton). // If not, MatchRunePos returns -1. -// MatchRunePos should only be called when i.Op == InstRune. +// MatchRunePos should only be called when i.Op == [InstRune]. func (i *Inst) MatchRunePos(r rune) int { rune := i.Rune @@ -245,7 +247,7 @@ func (i *Inst) MatchRunePos(r rune) int { lo := 0 hi := len(rune) / 2 for lo < hi { - m := lo + (hi-lo)/2 + m := int(uint(lo+hi) >> 1) if c := rune[2*m]; c <= r { if r <= rune[2*m+1] { return m @@ -260,7 +262,7 @@ func (i *Inst) MatchRunePos(r rune) int { // MatchEmptyWidth reports whether the instruction matches // an empty string between the runes before and after. -// It should only be called when i.Op == InstEmptyWidth. +// It should only be called when i.Op == [InstEmptyWidth]. 
func (i *Inst) MatchEmptyWidth(before rune, after rune) bool { switch EmptyOp(i.Arg) { case EmptyBeginLine: diff --git a/vendor/github.com/grafana/regexp/syntax/regexp.go b/vendor/github.com/grafana/regexp/syntax/regexp.go index 3a4d2d201..8ad3653ab 100644 --- a/vendor/github.com/grafana/regexp/syntax/regexp.go +++ b/vendor/github.com/grafana/regexp/syntax/regexp.go @@ -8,6 +8,7 @@ package syntax // In this package, re is always a *Regexp and r is always a rune. import ( + "slices" "strconv" "strings" "unicode" @@ -75,24 +76,10 @@ func (x *Regexp) Equal(y *Regexp) bool { } case OpLiteral, OpCharClass: - if len(x.Rune) != len(y.Rune) { - return false - } - for i, r := range x.Rune { - if r != y.Rune[i] { - return false - } - } + return slices.Equal(x.Rune, y.Rune) case OpAlternate, OpConcat: - if len(x.Sub) != len(y.Sub) { - return false - } - for i, sub := range x.Sub { - if !sub.Equal(y.Sub[i]) { - return false - } - } + return slices.EqualFunc(x.Sub, y.Sub, func(a, b *Regexp) bool { return a.Equal(b) }) case OpStar, OpPlus, OpQuest: if x.Flags&NonGreedy != y.Flags&NonGreedy || !x.Sub[0].Equal(y.Sub[0]) { @@ -112,8 +99,165 @@ func (x *Regexp) Equal(y *Regexp) bool { return true } +// printFlags is a bit set indicating which flags (including non-capturing parens) to print around a regexp. +type printFlags uint8 + +const ( + flagI printFlags = 1 << iota // (?i: + flagM // (?m: + flagS // (?s: + flagOff // ) + flagPrec // (?: ) + negShift = 5 // flagI<") @@ -122,15 +266,9 @@ func writeRegexp(b *strings.Builder, re *Regexp) { case OpEmptyMatch: b.WriteString(`(?:)`) case OpLiteral: - if re.Flags&FoldCase != 0 { - b.WriteString(`(?i:`) - } for _, r := range re.Rune { escape(b, r, false) } - if re.Flags&FoldCase != 0 { - b.WriteString(`)`) - } case OpCharClass: if len(re.Rune)%2 != 0 { b.WriteString(`[invalid char class]`) @@ -147,7 +285,9 @@ func writeRegexp(b *strings.Builder, re *Regexp) { lo, hi := re.Rune[i]+1, re.Rune[i+1]-1 escape(b, lo, lo == '-') if lo != hi { - b.WriteRune('-') + if hi != lo+1 { + b.WriteRune('-') + } escape(b, hi, hi == '-') } } @@ -156,25 +296,25 @@ func writeRegexp(b *strings.Builder, re *Regexp) { lo, hi := re.Rune[i], re.Rune[i+1] escape(b, lo, lo == '-') if lo != hi { - b.WriteRune('-') + if hi != lo+1 { + b.WriteRune('-') + } escape(b, hi, hi == '-') } } } b.WriteRune(']') - case OpAnyCharNotNL: - b.WriteString(`(?-s:.)`) - case OpAnyChar: - b.WriteString(`(?s:.)`) + case OpAnyCharNotNL, OpAnyChar: + b.WriteString(`.`) case OpBeginLine: - b.WriteString(`(?m:^)`) + b.WriteString(`^`) case OpEndLine: - b.WriteString(`(?m:$)`) + b.WriteString(`$`) case OpBeginText: b.WriteString(`\A`) case OpEndText: if re.Flags&WasDollar != 0 { - b.WriteString(`(?-m:$)`) + b.WriteString(`$`) } else { b.WriteString(`\z`) } @@ -191,17 +331,17 @@ func writeRegexp(b *strings.Builder, re *Regexp) { b.WriteRune('(') } if re.Sub[0].Op != OpEmptyMatch { - writeRegexp(b, re.Sub[0]) + writeRegexp(b, re.Sub[0], flags[re.Sub[0]], flags) } b.WriteRune(')') case OpStar, OpPlus, OpQuest, OpRepeat: - if sub := re.Sub[0]; sub.Op > OpCapture || sub.Op == OpLiteral && len(sub.Rune) > 1 { - b.WriteString(`(?:`) - writeRegexp(b, sub) - b.WriteString(`)`) - } else { - writeRegexp(b, sub) + p := printFlags(0) + sub := re.Sub[0] + if sub.Op > OpCapture || sub.Op == OpLiteral && len(sub.Rune) > 1 { + p = flagPrec } + writeRegexp(b, sub, p, flags) + switch re.Op { case OpStar: b.WriteRune('*') @@ -225,27 +365,31 @@ func 
writeRegexp(b *strings.Builder, re *Regexp) { } case OpConcat: for _, sub := range re.Sub { + p := printFlags(0) if sub.Op == OpAlternate { - b.WriteString(`(?:`) - writeRegexp(b, sub) - b.WriteString(`)`) - } else { - writeRegexp(b, sub) + p = flagPrec } + writeRegexp(b, sub, p, flags) } case OpAlternate: for i, sub := range re.Sub { if i > 0 { b.WriteRune('|') } - writeRegexp(b, sub) + writeRegexp(b, sub, 0, flags) } } } func (re *Regexp) String() string { var b strings.Builder - writeRegexp(&b, re) + var flags map[*Regexp]printFlags + must, cant := calcFlags(re, &flags) + must |= (cant &^ flagI) << negShift + if must != 0 { + must |= flagOff + } + writeRegexp(&b, re, must, flags) return b.String() } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index 31553e784..5dd4e4478 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -148,6 +148,12 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM var pairs []string for key, vals := range req.Header { key = textproto.CanonicalMIMEHeaderKey(key) + switch key { + case xForwardedFor, xForwardedHost: + // Handled separately below + continue + } + for _, val := range vals { // For backwards-compatibility, pass through 'authorization' header with no prefix. if key == "Authorization" { @@ -181,18 +187,17 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) } + xff := req.Header.Values(xForwardedFor) if addr := req.RemoteAddr; addr != "" { if remoteIP, _, err := net.SplitHostPort(addr); err == nil { - if fwd := req.Header.Get(xForwardedFor); fwd == "" { - pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) - } else { - pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) - } + xff = append(xff, remoteIP) } } + if len(xff) > 0 { + pairs = append(pairs, strings.ToLower(xForwardedFor), strings.Join(xff, ", ")) + } if timeout != 0 { - //nolint:govet // The context outlives this function ctx, _ = context.WithTimeout(ctx, timeout) } if len(pairs) == 0 { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 230cac7b8..568299869 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -71,7 +71,7 @@ func HTTPStatusFromCode(code codes.Code) int { case codes.DataLoss: return http.StatusInternalServerError default: - grpclog.Infof("Unknown gRPC error code: %v", code) + grpclog.Warningf("Unknown gRPC error code: %v", code) return http.StatusInternalServerError } } @@ -114,17 +114,17 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh buf, merr := marshaler.Marshal(pb) if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", s, merr) + grpclog.Errorf("Failed to marshal error message %q: %v", s, merr) w.WriteHeader(http.StatusInternalServerError) if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) + grpclog.Errorf("Failed to 
write response: %v", err) } return } md, ok := ServerMetadataFromContext(ctx) if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") + grpclog.Error("Failed to extract ServerMetadata from context") } handleForwardResponseServerMetadata(w, mux, md) @@ -148,7 +148,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh w.WriteHeader(st) if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) + grpclog.Errorf("Failed to write response: %v", err) } if doForwardTrailers { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go index 19d9d37ff..9005d6a0b 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -41,7 +41,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field m, ok := item.node.(map[string]interface{}) switch { - case ok: + case ok && len(m) > 0: // if the item is an object, then enqueue all of its children for k, v := range m { if item.msg == nil { @@ -96,6 +96,8 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field queue = append(queue, child) } } + case ok && len(m) == 0: + fallthrough case len(item.path) > 0: // otherwise, it's a leaf node so print its path fm.Paths = append(fm.Paths, item.path) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index 5e14cf8b0..de1eef1f4 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -6,6 +6,7 @@ import ( "io" "net/http" "net/textproto" + "strconv" "strings" "google.golang.org/genproto/googleapis/api/httpbody" @@ -17,16 +18,10 @@ import ( // ForwardResponseStream forwards the stream from gRPC server to REST client. 
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { - f, ok := w.(http.Flusher) - if !ok { - grpclog.Infof("Flush not supported in %T", w) - http.Error(w, "unexpected type of web server", http.StatusInternalServerError) - return - } - + rc := http.NewResponseController(w) md, ok := ServerMetadataFromContext(ctx) if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") + grpclog.Error("Failed to extract ServerMetadata from context") http.Error(w, "unexpected error", http.StatusInternalServerError) return } @@ -81,20 +76,29 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal } if err != nil { - grpclog.Infof("Failed to marshal response chunk: %v", err) + grpclog.Errorf("Failed to marshal response chunk: %v", err) handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) return } if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to send response chunk: %v", err) + grpclog.Errorf("Failed to send response chunk: %v", err) return } wroteHeader = true if _, err := w.Write(delimiter); err != nil { - grpclog.Infof("Failed to send delimiter chunk: %v", err) + grpclog.Errorf("Failed to send delimiter chunk: %v", err) + return + } + err = rc.Flush() + if err != nil { + if errors.Is(err, http.ErrNotSupported) { + grpclog.Errorf("Flush not supported in %T", w) + http.Error(w, "unexpected type of web server", http.StatusInternalServerError) + return + } + grpclog.Errorf("Failed to flush response to client: %v", err) return } - f.Flush() } } @@ -136,7 +140,7 @@ type responseBody interface { func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { md, ok := ServerMetadataFromContext(ctx) if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") + grpclog.Error("Failed to extract ServerMetadata from context") } handleForwardResponseServerMetadata(w, mux, md) @@ -168,13 +172,17 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha buf, err = marshaler.Marshal(resp) } if err != nil { - grpclog.Infof("Marshal error: %v", err) + grpclog.Errorf("Marshal error: %v", err) HTTPError(ctx, mux, marshaler, w, req, err) return } + if !doForwardTrailers { + w.Header().Set("Content-Length", strconv.Itoa(len(buf))) + } + if _, err = w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) + grpclog.Errorf("Failed to write response: %v", err) } if doForwardTrailers { @@ -193,7 +201,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re } for _, opt := range opts { if err := opt(ctx, w, resp); err != nil { - grpclog.Infof("Error handling ForwardResponseOptions: %v", err) + grpclog.Errorf("Error handling ForwardResponseOptions: %v", err) return err } } @@ -209,15 +217,15 @@ func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, mar } buf, err := marshaler.Marshal(msg) if err != nil { - grpclog.Infof("Failed to marshal an error: %v", err) + grpclog.Errorf("Failed to marshal an error: %v", err) return } if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to notify error to client: %v", err) + grpclog.Errorf("Failed to notify error to client: %v", err) return 
} if _, err := w.Write(delimiter); err != nil { - grpclog.Infof("Failed to send delimiter chunk: %v", err) + grpclog.Errorf("Failed to send delimiter chunk: %v", err) return } } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go index d6aa82578..fe52081ab 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go @@ -24,6 +24,11 @@ func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { return json.Marshal(v) } +// MarshalIndent is like Marshal but applies Indent to format the output +func (j *JSONBuiltin) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return json.MarshalIndent(v, prefix, indent) +} + // Unmarshal unmarshals JSON data into "v". func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go index 51b8247da..8376d1e0e 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -30,10 +30,6 @@ func (*JSONPb) ContentType(_ interface{}) string { // Marshal marshals "v" into JSON. func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { - if _, ok := v.(proto.Message); !ok { - return j.marshalNonProtoField(v) - } - var buf bytes.Buffer if err := j.marshalTo(&buf, v); err != nil { return nil, err @@ -48,9 +44,17 @@ func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { if err != nil { return err } + if j.Indent != "" { + b := &bytes.Buffer{} + if err := json.Indent(b, buf, "", j.Indent); err != nil { + return err + } + buf = b.Bytes() + } _, err = w.Write(buf) return err } + b, err := j.MarshalOptions.Marshal(p) if err != nil { return err @@ -150,9 +154,6 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { } m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) } - if j.Indent != "" { - return json.MarshalIndent(m, "", j.Indent) - } return json.Marshal(m) } if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index a714de024..0b051e6e8 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -46,7 +46,7 @@ func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, out for _, contentTypeVal := range r.Header[contentTypeHeader] { contentType, _, err := mime.ParseMediaType(contentTypeVal) if err != nil { - grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err) + grpclog.Errorf("Failed to parse Content-Type %s: %v", contentTypeVal, err) continue } if m, ok := mux.marshalers.mimeMap[contentType]; ok { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go 
b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index 628e1fde1..ed9a7e438 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -341,13 +341,13 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { } if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { - r.Method = strings.ToUpper(override) if err := r.ParseForm(); err != nil { _, outboundMarshaler := MarshalerForRequest(s, r) sterr := status.Error(codes.InvalidArgument, err.Error()) s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) return } + r.Method = strings.ToUpper(override) } var pathComponents []string diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go index 8f90d15a5..e54507145 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go @@ -52,13 +52,13 @@ type Pattern struct { // It returns an error if the given definition is invalid. func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) { if version != 1 { - grpclog.Infof("unsupported version: %d", version) + grpclog.Errorf("unsupported version: %d", version) return Pattern{}, ErrInvalidPattern } l := len(ops) if l%2 != 0 { - grpclog.Infof("odd number of ops codes: %d", l) + grpclog.Errorf("odd number of ops codes: %d", l) return Pattern{}, ErrInvalidPattern } @@ -81,14 +81,14 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er stack++ case utilities.OpPushM: if pushMSeen { - grpclog.Infof("pushM appears twice") + grpclog.Error("pushM appears twice") return Pattern{}, ErrInvalidPattern } pushMSeen = true stack++ case utilities.OpLitPush: if op.operand < 0 || len(pool) <= op.operand { - grpclog.Infof("negative literal index: %d", op.operand) + grpclog.Errorf("negative literal index: %d", op.operand) return Pattern{}, ErrInvalidPattern } if pushMSeen { @@ -97,18 +97,18 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er stack++ case utilities.OpConcatN: if op.operand <= 0 { - grpclog.Infof("negative concat size: %d", op.operand) + grpclog.Errorf("negative concat size: %d", op.operand) return Pattern{}, ErrInvalidPattern } stack -= op.operand if stack < 0 { - grpclog.Info("stack underflow") + grpclog.Error("stack underflow") return Pattern{}, ErrInvalidPattern } stack++ case utilities.OpCapture: if op.operand < 0 || len(pool) <= op.operand { - grpclog.Infof("variable name index out of bound: %d", op.operand) + grpclog.Errorf("variable name index out of bound: %d", op.operand) return Pattern{}, ErrInvalidPattern } v := pool[op.operand] @@ -116,11 +116,11 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er vars = append(vars, v) stack-- if stack < 0 { - grpclog.Infof("stack underflow") + grpclog.Error("stack underflow") return Pattern{}, ErrInvalidPattern } default: - grpclog.Infof("invalid opcode: %d", op.code) + grpclog.Errorf("invalid opcode: %d", op.code) return Pattern{}, ErrInvalidPattern } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go 
b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index d01933c4f..fe634174b 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -51,11 +51,13 @@ func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *u key = match[1] values = append([]string{match[2]}, values...) } - fieldPath := strings.Split(key, ".") + + msgValue := msg.ProtoReflect() + fieldPath := normalizeFieldPath(msgValue, strings.Split(key, ".")) if filter.HasCommonPrefix(fieldPath) { continue } - if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil { + if err := populateFieldValueFromPath(msgValue, fieldPath, values); err != nil { return err } } @@ -68,6 +70,38 @@ func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value stri return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value}) } +func normalizeFieldPath(msgValue protoreflect.Message, fieldPath []string) []string { + newFieldPath := make([]string, 0, len(fieldPath)) + for i, fieldName := range fieldPath { + fields := msgValue.Descriptor().Fields() + fieldDesc := fields.ByTextName(fieldName) + if fieldDesc == nil { + fieldDesc = fields.ByJSONName(fieldName) + } + if fieldDesc == nil { + // return initial field path values if no matching message field was found + return fieldPath + } + + newFieldPath = append(newFieldPath, string(fieldDesc.Name())) + + // If this is the last element, we're done + if i == len(fieldPath)-1 { + break + } + + // Only singular message fields are allowed + if fieldDesc.Message() == nil || fieldDesc.Cardinality() == protoreflect.Repeated { + return fieldPath + } + + // Get the nested message + msgValue = msgValue.Get(fieldDesc).Message() + } + + return newFieldPath +} + func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error { if len(fieldPath) < 1 { return errors.New("no field path") diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index f62c0c5a1..5c23ae8b7 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -1129,6 +1129,23 @@ func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (* return wm, nil } +// delete is used to do a DELETE request against an endpoint +func (c *Client) delete(endpoint string, q *QueryOptions) (*WriteMeta, error) { + r := c.newRequest("DELETE", endpoint) + r.setQueryOptions(q) + rtt, resp, err := c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err = requireHttpCodes(resp, 204, 200); err != nil { + return nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + // parseQueryMeta is used to help parse query meta-data // // TODO(rb): bug? 
the error from this function is never handled diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go index baf274e2d..ba2bac19e 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go @@ -195,6 +195,9 @@ type TerminatingGatewayConfigEntry struct { type LinkedService struct { // Referencing other partitions is not supported. // DisableAutoHostRewrite disables the terminating gateway's auto host rewrite feature when set to true. + DisableAutoHostRewrite bool `json:",omitempty"` + // Namespace is where the service is registered. Namespace string `json:",omitempty"` diff --git a/vendor/github.com/hashicorp/consul/api/partition.go b/vendor/github.com/hashicorp/consul/api/partition.go index 8467c3118..8a9bfb482 100644 --- a/vendor/github.com/hashicorp/consul/api/partition.go +++ b/vendor/github.com/hashicorp/consul/api/partition.go @@ -27,6 +27,9 @@ type Partition struct { // ModifyIndex is the latest Raft index at which the Partition was modified. ModifyIndex uint64 `json:"ModifyIndex,omitempty"` + + // DisableGossip, when set to true, disables the gossip pool for the partition + DisableGossip bool `json:"DisableGossip,omitempty"` } // PartitionDefaultName is the default partition value. diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go index 639513d29..7fb9c390c 100644 --- a/vendor/github.com/hashicorp/consul/api/raw.go +++ b/vendor/github.com/hashicorp/consul/api/raw.go @@ -25,3 +25,8 @@ func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*Query func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { return raw.c.write(endpoint, in, out, q) } + +// Delete is used to do a DELETE request against an endpoint +func (raw *Raw) Delete(endpoint string, q *QueryOptions) (*WriteMeta, error) { + return raw.c.delete(endpoint, q) +} diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md index 21a17c5af..983d44c7d 100644 --- a/vendor/github.com/hashicorp/go-hclog/README.md +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -140,9 +140,10 @@ log.Printf("[DEBUG] %d", 42) ... [DEBUG] my-app: 42 ``` -Notice that if `appLogger` is initialized with the `INFO` log level _and_ you +Notice that if `appLogger` is initialized with the `INFO` log level, _and_ you specify `InferLevels: true`, you will not see any output here. You must change `appLogger` to `DEBUG` to see output. See the docs for more information. If the log lines start with a timestamp you can use the -`InferLevelsWithTimestamp` option to try and ignore them. +`InferLevelsWithTimestamp` option to try and ignore them. Please note that in order +for `InferLevelsWithTimestamp` to be relevant, `InferLevels` must be set to `true`.
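The README addition above is easy to miss in review: `InferLevelsWithTimestamp` is a no-op unless `InferLevels` is also enabled. A minimal sketch of the wiring the README describes, using the hclog options that appear in this diff (logger name and log line are illustrative):

```go
package main

import "github.com/hashicorp/go-hclog"

func main() {
	appLogger := hclog.New(&hclog.LoggerOptions{
		Name:  "my-app",
		Level: hclog.Debug, // with hclog.Info, the inferred [DEBUG] line below is suppressed
	})

	// InferLevelsWithTimestamp only takes effect because InferLevels is true.
	stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
		InferLevels:              true,
		InferLevelsWithTimestamp: true,
	})

	// The leading timestamp is stripped before the [DEBUG] tag is inferred.
	stdLogger.Println("2024/05/21 12:00:00 [DEBUG] starting worker")
}
```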
diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index b45064acf..272a710c0 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -55,23 +55,38 @@ var ( faintBoldColor = color.New(color.Faint, color.Bold) faintColor = color.New(color.Faint) - faintMultiLinePrefix = faintColor.Sprint(" | ") - faintFieldSeparator = faintColor.Sprint("=") - faintFieldSeparatorWithNewLine = faintColor.Sprint("=\n") + faintMultiLinePrefix string + faintFieldSeparator string + faintFieldSeparatorWithNewLine string ) +func init() { + // Force all the colors to be enabled because we do our own detection of color usage. + for _, c := range _levelToColor { + c.EnableColor() + } + + faintBoldColor.EnableColor() + faintColor.EnableColor() + + faintMultiLinePrefix = faintColor.Sprint(" | ") + faintFieldSeparator = faintColor.Sprint("=") + faintFieldSeparatorWithNewLine = faintColor.Sprint("=\n") +} + // Make sure that intLogger is a Logger var _ Logger = &intLogger{} // intLogger is an internal logger implementation. Internal in that it is // defined entirely by this package. type intLogger struct { - json bool - callerOffset int - name string - timeFormat string - timeFn TimeFunction - disableTime bool + json bool + jsonEscapeEnabled bool + callerOffset int + name string + timeFormat string + timeFn TimeFunction + disableTime bool // This is an interface so that it's shared by any derived loggers, since // those derived loggers share the bufio.Writer as well. @@ -79,6 +94,19 @@ type intLogger struct { writer *writer level *int32 + // The value of curEpoch when our level was set + setEpoch uint64 + + // The value of curEpoch the last time we performed the level sync process + ownEpoch uint64 + + // Shared amongst all the loggers created in this hierarchy, used to determine + // if the level sync process should be run by comparing it with ownEpoch + curEpoch *uint64 + + // The logger this one was created from. Only set when syncParentLevel is set + parent *intLogger + headerColor ColorOption fieldColor ColorOption @@ -88,6 +116,7 @@ type intLogger struct { // create subloggers with their own level setting independentLevels bool + syncParentLevel bool subloggerHook func(sub Logger) Logger } @@ -129,9 +158,9 @@ func newLogger(opts *LoggerOptions) *intLogger { } var ( - primaryColor ColorOption = ColorOff - headerColor ColorOption = ColorOff - fieldColor ColorOption = ColorOff + primaryColor = ColorOff + headerColor = ColorOff + fieldColor = ColorOff ) switch { case opts.ColorHeaderOnly: @@ -145,6 +174,7 @@ func newLogger(opts *LoggerOptions) *intLogger { l := &intLogger{ json: opts.JSONFormat, + jsonEscapeEnabled: !opts.JSONEscapeDisabled, name: opts.Name, timeFormat: TimeFormat, timeFn: time.Now, @@ -152,8 +182,10 @@ func newLogger(opts *LoggerOptions) *intLogger { mutex: mutex, writer: newWriter(output, primaryColor), level: new(int32), + curEpoch: new(uint64), exclude: opts.Exclude, independentLevels: opts.IndependentLevels, + syncParentLevel: opts.SyncParentLevel, headerColor: headerColor, fieldColor: fieldColor, subloggerHook: opts.SubloggerHook, @@ -194,7 +226,7 @@ const offsetIntLogger = 3 // Log a message and a set of key/value pairs if the given level is at // or more severe than the threshold configured in the Logger.
func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) { - if level < Level(atomic.LoadInt32(l.level)) { + if level < l.GetLevel() { return } @@ -597,7 +629,7 @@ func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, a vals := l.jsonMapEntry(t, name, level, msg) args = append(l.implied, args...) - if args != nil && len(args) > 0 { + if len(args) > 0 { if len(args)%2 != 0 { cs, ok := args[len(args)-1].(CapturedStacktrace) if ok { @@ -637,13 +669,17 @@ func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, a } } - err := json.NewEncoder(l.writer).Encode(vals) + encoder := json.NewEncoder(l.writer) + encoder.SetEscapeHTML(l.jsonEscapeEnabled) + err := encoder.Encode(vals) if err != nil { if _, ok := err.(*json.UnsupportedTypeError); ok { plainVal := l.jsonMapEntry(t, name, level, msg) plainVal["@warn"] = errJsonUnsupportedTypeMsg - json.NewEncoder(l.writer).Encode(plainVal) + errEncoder := json.NewEncoder(l.writer) + errEncoder.SetEscapeHTML(l.jsonEscapeEnabled) + errEncoder.Encode(plainVal) } } } @@ -718,27 +754,27 @@ func (l *intLogger) Error(msg string, args ...interface{}) { // Indicate that the logger would emit TRACE level logs func (l *intLogger) IsTrace() bool { - return Level(atomic.LoadInt32(l.level)) == Trace + return l.GetLevel() == Trace } // Indicate that the logger would emit DEBUG level logs func (l *intLogger) IsDebug() bool { - return Level(atomic.LoadInt32(l.level)) <= Debug + return l.GetLevel() <= Debug } // Indicate that the logger would emit INFO level logs func (l *intLogger) IsInfo() bool { - return Level(atomic.LoadInt32(l.level)) <= Info + return l.GetLevel() <= Info } // Indicate that the logger would emit WARN level logs func (l *intLogger) IsWarn() bool { - return Level(atomic.LoadInt32(l.level)) <= Warn + return l.GetLevel() <= Warn } // Indicate that the logger would emit ERROR level logs func (l *intLogger) IsError() bool { - return Level(atomic.LoadInt32(l.level)) <= Error + return l.GetLevel() <= Error } const MissingKey = "EXTRA_VALUE_AT_END" @@ -854,12 +890,63 @@ func (l *intLogger) resetOutput(opts *LoggerOptions) error { // Update the logging level on-the-fly. This will affect all subloggers as // well. func (l *intLogger) SetLevel(level Level) { - atomic.StoreInt32(l.level, int32(level)) + if !l.syncParentLevel { + atomic.StoreInt32(l.level, int32(level)) + return + } + + nsl := new(int32) + *nsl = int32(level) + + l.level = nsl + + l.ownEpoch = atomic.AddUint64(l.curEpoch, 1) + l.setEpoch = l.ownEpoch +} + +func (l *intLogger) searchLevelPtr() *int32 { + p := l.parent + + ptr := l.level + + max := l.setEpoch + + for p != nil { + if p.setEpoch > max { + max = p.setEpoch + ptr = p.level + } + + p = p.parent + } + + return ptr } // Returns the current level func (l *intLogger) GetLevel() Level { - return Level(atomic.LoadInt32(l.level)) + // We perform the loads immediately to keep the CPU pipeline busy, which + // effectively makes the second load cost nothing. Once loaded into registers + // the comparison returns the already loaded value. The comparison is almost + // always true, so the branch predictor should hit consistently with it. + var ( + curEpoch = atomic.LoadUint64(l.curEpoch) + level = Level(atomic.LoadInt32(l.level)) + own = l.ownEpoch + ) + + if curEpoch == own { + return level + } + + // Perform the level sync process. We'll avoid doing this next time by seeing the + // epoch as current. 
+
+	ptr := l.searchLevelPtr()
+	l.level = ptr
+	l.ownEpoch = curEpoch
+
+	return Level(atomic.LoadInt32(ptr))
 }
 
 // Create a *log.Logger that will send its data through this Logger. This
@@ -912,6 +999,8 @@ func (l *intLogger) copy() *intLogger {
 	if l.independentLevels {
 		sl.level = new(int32)
 		*sl.level = *l.level
+	} else if l.syncParentLevel {
+		sl.parent = l
 	}
 
 	return &sl
diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go
index 947ac0c9a..ad17544f5 100644
--- a/vendor/github.com/hashicorp/go-hclog/logger.go
+++ b/vendor/github.com/hashicorp/go-hclog/logger.go
@@ -233,6 +233,7 @@ type StandardLoggerOptions struct {
 	// [DEBUG] and strip it off before reapplying it.
 	// The timestamp detection may result in false positives and incomplete
 	// string outputs.
+	// InferLevelsWithTimestamp is only relevant if InferLevels is true.
 	InferLevelsWithTimestamp bool
 
 	// ForceLevel is used to force all output from the standard logger to be at
@@ -263,6 +264,9 @@ type LoggerOptions struct {
 	// Control if the output should be in JSON.
 	JSONFormat bool
 
+	// Control the escape switch of json.Encoder
+	JSONEscapeDisabled bool
+
 	// Include file and line information in each log line
 	IncludeLocation bool
 
@@ -303,6 +307,24 @@ type LoggerOptions struct {
 	// will not affect the parent or sibling loggers.
 	IndependentLevels bool
 
+	// When set, changing the level of a logger affects only its direct sub-loggers
+	// rather than all sub-loggers. For example:
+	// a := logger.Named("a")
+	// a.SetLevel(Error)
+	// b := a.Named("b")
+	// c := a.Named("c")
+	// b.GetLevel() => Error
+	// c.GetLevel() => Error
+	// b.SetLevel(Info)
+	// a.GetLevel() => Error
+	// b.GetLevel() => Info
+	// c.GetLevel() => Error
+	// a.SetLevel(Warn)
+	// a.GetLevel() => Warn
+	// b.GetLevel() => Warn
+	// c.GetLevel() => Warn
+	SyncParentLevel bool
+
 	// SubloggerHook registers a function that is called when a sublogger via
 	// Named, With, or ResetNamed is created.
 	// If defined, the function is passed
 	// the newly created Logger and the returned Logger is returned from the
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.go-version b/vendor/github.com/hashicorp/go-retryablehttp/.go-version
new file mode 100644
index 000000000..6fee2fedb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/.go-version
@@ -0,0 +1 @@
+1.22.2
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md
index 33686e4da..0c4c7a2bb 100644
--- a/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md
+++ b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md
@@ -1,8 +1,26 @@
+## 0.7.6 (May 9, 2024)
+
+ENHANCEMENTS:
+
+- client: support a `PrepareRetry` function for modifying the request before retrying (#216)
+- client: support HTTP-date values for `Retry-After` header value (#138)
+- client: avoid reading entire body when the body is a `*bytes.Reader` (#197)
+
+BUG FIXES:
+
+- client: fix a broken check for invalid server certificate in go 1.20+ (#210)
+
+## 0.7.5 (Nov 8, 2023)
+
+BUG FIXES:
+
+- client: fixes an issue where the request body is not preserved on temporary redirects or re-established HTTP/2 connections (#207)
+
 ## 0.7.4 (Jun 6, 2023)
 
-BUG FIXES
+BUG FIXES:
 
-- client: fixing an issue where the Content-Type header wouldn't be sent with an empty payload when using HTTP/2 [GH-194]
+- client: fixing an issue where the Content-Type header wouldn't be sent with an empty payload when using HTTP/2 (#194)
 
 ## 0.7.3 (May 15, 2023)
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS
index f8389c995..d6dd78a2d 100644
--- a/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS
+++ b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS
@@ -1 +1 @@
-* @hashicorp/release-engineering
\ No newline at end of file
+* @hashicorp/go-retryablehttp-maintainers
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile
index da17640e6..525524196 100644
--- a/vendor/github.com/hashicorp/go-retryablehttp/Makefile
+++ b/vendor/github.com/hashicorp/go-retryablehttp/Makefile
@@ -2,7 +2,7 @@ default: test
 
 test:
 	go vet ./...
-	go test -race ./...
+	go test -v -race ./...
 
 updatedeps:
 	go get -f -t -u ./...
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md
index 8943becf1..145a62f21 100644
--- a/vendor/github.com/hashicorp/go-retryablehttp/README.md
+++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md
@@ -59,4 +59,4 @@ standardClient := retryClient.StandardClient() // *http.Client
 ```
 
 For more usage and examples see the
-[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp).
+[pkg.go.dev](https://pkg.go.dev/github.com/hashicorp/go-retryablehttp).
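// Annotation, not part of the vendored diff: a minimal usage sketch of the new
// PrepareRetry hook shipped in 0.7.6; the header name and the refreshToken
// helper are hypothetical stand-ins.
//
//	client := retryablehttp.NewClient()
//	client.RetryMax = 3
//	client.PrepareRetry = func(req *http.Request) error {
//		// Re-sign the request before every retry attempt.
//		req.Header.Set("Authorization", "Bearer "+refreshToken())
//		return nil
//	}
//	resp, err := client.Get("https://example.com/health")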
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go b/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go new file mode 100644 index 000000000..b2b27e872 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !go1.20 +// +build !go1.20 + +package retryablehttp + +import "crypto/x509" + +func isCertError(err error) bool { + _, ok := err.(x509.UnknownAuthorityError) + return ok +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go b/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go new file mode 100644 index 000000000..a3cd315a2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build go1.20 +// +build go1.20 + +package retryablehttp + +import "crypto/tls" + +func isCertError(err error) bool { + _, ok := err.(*tls.CertificateVerificationError) + return ok +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go index cad96bd97..12ac50bcc 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -27,10 +27,8 @@ package retryablehttp import ( "bytes" "context" - "crypto/x509" "fmt" "io" - "io/ioutil" "log" "math" "math/rand" @@ -63,6 +61,10 @@ var ( // limit the size we consume to respReadLimit. respReadLimit = int64(4096) + // timeNow sets the function that returns the current time. + // This defaults to time.Now. Changes to this should only be done in tests. + timeNow = time.Now + // A regular expression to match the error returned by net/http when the // configured number of redirects is exhausted. This error isn't typed // specifically so we resort to matching on the error string. @@ -73,6 +75,11 @@ var ( // specifically so we resort to matching on the error string. schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`) + // A regular expression to match the error returned by net/http when a + // request header or value is invalid. This error isn't typed + // specifically so we resort to matching on the error string. + invalidHeaderErrorRe = regexp.MustCompile(`invalid header`) + // A regular expression to match the error returned by net/http when the // TLS certificate is not trusted. This error isn't typed // specifically so we resort to matching on the error string. @@ -160,6 +167,20 @@ func (r *Request) SetBody(rawBody interface{}) error { } r.body = bodyReader r.ContentLength = contentLength + if bodyReader != nil { + r.GetBody = func() (io.ReadCloser, error) { + body, err := bodyReader() + if err != nil { + return nil, err + } + if rc, ok := body.(io.ReadCloser); ok { + return rc, nil + } + return io.NopCloser(body), nil + } + } else { + r.GetBody = func() (io.ReadCloser, error) { return http.NoBody, nil } + } return nil } @@ -234,21 +255,19 @@ func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, erro // deal with it seeking so want it to match here instead of the // io.ReadSeeker case. 
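		// (Annotation, not upstream: the rewritten branch below copies the
		// bytes.Reader value itself instead of draining it with io.ReadAll,
		// so each retry reads from a fresh copy and the full body is never
		// buffered a second time.)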
case *bytes.Reader: - buf, err := ioutil.ReadAll(body) - if err != nil { - return nil, 0, err - } + snapshot := *body bodyReader = func() (io.Reader, error) { - return bytes.NewReader(buf), nil + r := snapshot + return &r, nil } - contentLength = int64(len(buf)) + contentLength = int64(body.Len()) // Compat case case io.ReadSeeker: raw := body bodyReader = func() (io.Reader, error) { _, err := raw.Seek(0, 0) - return ioutil.NopCloser(raw), err + return io.NopCloser(raw), err } if lr, ok := raw.(LenReader); ok { contentLength = int64(lr.Len()) @@ -256,7 +275,7 @@ func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, erro // Read all in so we can reset case io.Reader: - buf, err := ioutil.ReadAll(body) + buf, err := io.ReadAll(body) if err != nil { return nil, 0, err } @@ -302,18 +321,19 @@ func NewRequest(method, url string, rawBody interface{}) (*Request, error) { // The context controls the entire lifetime of a request and its response: // obtaining a connection, sending the request, and reading the response headers and body. func NewRequestWithContext(ctx context.Context, method, url string, rawBody interface{}) (*Request, error) { - bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + httpReq, err := http.NewRequestWithContext(ctx, method, url, nil) if err != nil { return nil, err } - httpReq, err := http.NewRequestWithContext(ctx, method, url, nil) - if err != nil { + req := &Request{ + Request: httpReq, + } + if err := req.SetBody(rawBody); err != nil { return nil, err } - httpReq.ContentLength = contentLength - return &Request{body: bodyReader, Request: httpReq}, nil + return req, nil } // Logger interface allows to use other loggers than @@ -378,6 +398,9 @@ type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) t // attempted. If overriding this, be sure to close the body if needed. type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error) +// PrepareRetry is called before retry operation. It can be used for example to re-sign the request +type PrepareRetry func(req *http.Request) error + // Client is used to make HTTP requests. It adds additional functionality // like automatic retries to tolerate minor outages. type Client struct { @@ -406,6 +429,9 @@ type Client struct { // ErrorHandler specifies the custom error handler to use, if any ErrorHandler ErrorHandler + // PrepareRetry can prepare the request for retry operation, for example re-sign it + PrepareRetry PrepareRetry + loggerInit sync.Once clientInit sync.Once } @@ -479,11 +505,16 @@ func baseRetryPolicy(resp *http.Response, err error) (bool, error) { return false, v } + // Don't retry if the error was due to an invalid header. + if invalidHeaderErrorRe.MatchString(v.Error()) { + return false, v + } + // Don't retry if the error was due to TLS cert verification failure. 
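		// (Annotation, not upstream: isCertError is chosen by build tag via the
		// new cert_error_go119.go / cert_error_go120.go files above, so on
		// go1.20+ it matches *tls.CertificateVerificationError rather than the
		// x509.UnknownAuthorityError assertion being removed below.)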
if notTrustedErrorRe.MatchString(v.Error()) { return false, v } - if _, ok := v.Err.(x509.UnknownAuthorityError); ok { + if isCertError(v.Err) { return false, v } } @@ -520,10 +551,8 @@ func baseRetryPolicy(resp *http.Response, err error) (bool, error) { func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { if resp != nil { if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable { - if s, ok := resp.Header["Retry-After"]; ok { - if sleep, err := strconv.ParseInt(s[0], 10, 64); err == nil { - return time.Second * time.Duration(sleep) - } + if sleep, ok := parseRetryAfterHeader(resp.Header["Retry-After"]); ok { + return sleep } } } @@ -536,6 +565,41 @@ func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) return sleep } +// parseRetryAfterHeader parses the Retry-After header and returns the +// delay duration according to the spec: https://httpwg.org/specs/rfc7231.html#header.retry-after +// The bool returned will be true if the header was successfully parsed. +// Otherwise, the header was either not present, or was not parseable according to the spec. +// +// Retry-After headers come in two flavors: Seconds or HTTP-Date +// +// Examples: +// * Retry-After: Fri, 31 Dec 1999 23:59:59 GMT +// * Retry-After: 120 +func parseRetryAfterHeader(headers []string) (time.Duration, bool) { + if len(headers) == 0 || headers[0] == "" { + return 0, false + } + header := headers[0] + // Retry-After: 120 + if sleep, err := strconv.ParseInt(header, 10, 64); err == nil { + if sleep < 0 { // a negative sleep doesn't make sense + return 0, false + } + return time.Second * time.Duration(sleep), true + } + + // Retry-After: Fri, 31 Dec 1999 23:59:59 GMT + retryTime, err := time.Parse(time.RFC1123, header) + if err != nil { + return 0, false + } + if until := retryTime.Sub(timeNow()); until > 0 { + return until, true + } + // date is in the past + return 0, true +} + // LinearJitterBackoff provides a callback for Client.Backoff which will // perform linear backoff based on the attempt number and with jitter to // prevent a thundering herd. @@ -563,13 +627,13 @@ func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Resp } // Seed rand; doing this every time is fine - rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + source := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) // Pick a random number that lies somewhere between the min and max and // multiply by the attemptNum. attemptNum starts at zero so we always // increment here. We first get a random percentage, then apply that to the // difference between min and max, and add to min. - jitter := rand.Float64() * float64(max-min) + jitter := source.Float64() * float64(max-min) jitterMin := int64(jitter) + int64(min) return time.Duration(jitterMin * int64(attemptNum)) } @@ -603,10 +667,10 @@ func (c *Client) Do(req *Request) (*http.Response, error) { var resp *http.Response var attempt int var shouldRetry bool - var doErr, respErr, checkErr error + var doErr, respErr, checkErr, prepareErr error for i := 0; ; i++ { - doErr, respErr = nil, nil + doErr, respErr, prepareErr = nil, nil, nil attempt++ // Always rewind the request body when non-nil. 
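// Annotation, not upstream code: expected results from the parseRetryAfterHeader
// helper above, with illustrative inputs.
//
//	parseRetryAfterHeader([]string{"120"}) // 2m0s, true
//	parseRetryAfterHeader([]string{"-5"})  // 0, false (negative seconds are rejected)
//	parseRetryAfterHeader(nil)             // 0, false (header absent)
//	parseRetryAfterHeader([]string{"Fri, 31 Dec 1999 23:59:59 GMT"})
//	// time remaining until that HTTP-date, true; or 0, true if the date is past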
@@ -619,7 +683,7 @@ func (c *Client) Do(req *Request) (*http.Response, error) { if c, ok := body.(io.ReadCloser); ok { req.Body = c } else { - req.Body = ioutil.NopCloser(body) + req.Body = io.NopCloser(body) } } @@ -713,17 +777,26 @@ func (c *Client) Do(req *Request) (*http.Response, error) { // without racing against the closeBody call in persistConn.writeLoop. httpreq := *req.Request req.Request = &httpreq + + if c.PrepareRetry != nil { + if err := c.PrepareRetry(req.Request); err != nil { + prepareErr = err + break + } + } } // this is the closest we have to success criteria - if doErr == nil && respErr == nil && checkErr == nil && !shouldRetry { + if doErr == nil && respErr == nil && checkErr == nil && prepareErr == nil && !shouldRetry { return resp, nil } defer c.HTTPClient.CloseIdleConnections() var err error - if checkErr != nil { + if prepareErr != nil { + err = prepareErr + } else if checkErr != nil { err = checkErr } else if respErr != nil { err = respErr @@ -755,7 +828,7 @@ func (c *Client) Do(req *Request) (*http.Response, error) { // Try to read the response body so we can reuse this connection. func (c *Client) drainBody(body io.ReadCloser) { defer body.Close() - _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) + _, err := io.Copy(io.Discard, io.LimitReader(body, respReadLimit)) if err != nil { if c.logger() != nil { switch v := c.logger().(type) { diff --git a/vendor/github.com/hashicorp/nomad/api/.copywrite.hcl b/vendor/github.com/hashicorp/nomad/api/.copywrite.hcl new file mode 100644 index 000000000..61b20a2c8 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/.copywrite.hcl @@ -0,0 +1,12 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2024 + + header_ignore = [ + // Enterprise files do not fall under the open source licensing. CE-ENT + // merge conflicts might happen here, please be sure to put new CE + // exceptions above this comment. + ] +} diff --git a/vendor/github.com/hashicorp/nomad/api/acl.go b/vendor/github.com/hashicorp/nomad/api/acl.go index 78d47895e..64d1a22af 100644 --- a/vendor/github.com/hashicorp/nomad/api/acl.go +++ b/vendor/github.com/hashicorp/nomad/api/acl.go @@ -77,8 +77,9 @@ func (c *Client) ACLTokens() *ACLTokens { return &ACLTokens{client: c} } -// DEPRECATED: will be removed in Nomad 1.5.0 // Bootstrap is used to get the initial bootstrap token +// +// See BootstrapOpts to set ACL bootstrapping options. func (a *ACLTokens) Bootstrap(q *WriteOptions) (*ACLToken, *WriteMeta, error) { var resp ACLToken wm, err := a.client.put("/v1/acl/bootstrap", nil, &resp, q) @@ -753,6 +754,9 @@ type ACLAuthMethod struct { // ACLAuthMethodTokenLocalityGlobal for convenience. TokenLocality string + // TokenNameFormat defines the HIL template to use when building the token name + TokenNameFormat string + // MaxTokenTTL is the maximum life of a token created by this method. 
MaxTokenTTL time.Duration @@ -822,6 +826,8 @@ type ACLAuthMethodConfig struct { OIDCClientID string // The OAuth Client Secret configured with the OIDC provider OIDCClientSecret string + // Disable claims from the OIDC UserInfo endpoint + OIDCDisableUserInfo bool // List of OIDC scopes OIDCScopes []string // List of auth claims that are valid for login diff --git a/vendor/github.com/hashicorp/nomad/api/agent.go b/vendor/github.com/hashicorp/nomad/api/agent.go index 521215803..459a62d8d 100644 --- a/vendor/github.com/hashicorp/nomad/api/agent.go +++ b/vendor/github.com/hashicorp/nomad/api/agent.go @@ -35,6 +35,12 @@ type KeyringRequest struct { Key string } +// ForceLeaveOpts are used to configure the ForceLeave method. +type ForceLeaveOpts struct { + // Prune indicates whether to remove a node from the list of members + Prune bool +} + // Agent returns a new agent which can be used to query // the agent-specific endpoints. func (c *Client) Agent() *Agent { @@ -163,7 +169,21 @@ func (a *Agent) MembersOpts(opts *QueryOptions) (*ServerMembers, error) { // ForceLeave is used to eject an existing node from the cluster. func (a *Agent) ForceLeave(node string) error { - _, err := a.client.put("/v1/agent/force-leave?node="+node, nil, nil, nil) + v := url.Values{} + v.Add("node", node) + _, err := a.client.put("/v1/agent/force-leave?"+v.Encode(), nil, nil, nil) + return err +} + +// ForceLeaveWithOptions is used to eject an existing node from the cluster +// with additional options such as prune. +func (a *Agent) ForceLeaveWithOptions(node string, opts ForceLeaveOpts) error { + v := url.Values{} + v.Add("node", node) + if opts.Prune { + v.Add("prune", "1") + } + _, err := a.client.put("/v1/agent/force-leave?"+v.Encode(), nil, nil, nil) return err } @@ -290,7 +310,7 @@ func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (<-chan *Stream } r.setQueryOptions(q) - _, resp, err := requireOK(a.client.doRequest(r)) + _, resp, err := requireOK(a.client.doRequest(r)) //nolint:bodyclose if err != nil { errCh <- err return nil, errCh diff --git a/vendor/github.com/hashicorp/nomad/api/allocations.go b/vendor/github.com/hashicorp/nomad/api/allocations.go index 121a75934..6907a1db1 100644 --- a/vendor/github.com/hashicorp/nomad/api/allocations.go +++ b/vendor/github.com/hashicorp/nomad/api/allocations.go @@ -235,6 +235,27 @@ func (a *Allocations) Signal(alloc *Allocation, q *QueryOptions, task, signal st return err } +// SetPauseState sets the schedule behavior of one task in the allocation. +func (a *Allocations) SetPauseState(alloc *Allocation, q *QueryOptions, task, state string) error { + req := AllocPauseRequest{ + ScheduleState: state, + Task: task, + } + var resp GenericResponse + _, err := a.client.putQuery("/v1/client/allocation/"+alloc.ID+"/pause", &req, &resp, q) + return err +} + +// GetPauseState gets the schedule behavior of one task in the allocation. +// +// The ?task= query parameter must be set. +func (a *Allocations) GetPauseState(alloc *Allocation, q *QueryOptions, task string) (string, *QueryMeta, error) { + var resp AllocGetPauseResponse + qm, err := a.client.query("/v1/client/allocation/"+alloc.ID+"/pause", &resp, q) + state := resp.ScheduleState + return state, qm, err +} + // Services is used to return a list of service registrations associated to the // specified allocID. 
func (a *Allocations) Services(allocID string, q *QueryOptions) ([]*ServiceRegistration, *QueryMeta, error) { @@ -311,7 +332,7 @@ type NodeScoreMeta struct { // Stub returns a list stub for the allocation func (a *Allocation) Stub() *AllocationListStub { - return &AllocationListStub{ + stub := &AllocationListStub{ ID: a.ID, EvalID: a.EvalID, Name: a.Name, @@ -319,8 +340,6 @@ func (a *Allocation) Stub() *AllocationListStub { NodeID: a.NodeID, NodeName: a.NodeName, JobID: a.JobID, - JobType: *a.Job.Type, - JobVersion: *a.Job.Version, TaskGroup: a.TaskGroup, DesiredStatus: a.DesiredStatus, DesiredDescription: a.DesiredDescription, @@ -338,6 +357,13 @@ func (a *Allocation) Stub() *AllocationListStub { CreateTime: a.CreateTime, ModifyTime: a.ModifyTime, } + + if a.Job != nil { + stub.JobType = *a.Job.Type + stub.JobVersion = *a.Job.Version + } + + return stub } // ServerTerminalStatus returns true if the desired state of the allocation is @@ -512,6 +538,18 @@ type AllocSignalRequest struct { Signal string } +type AllocPauseRequest struct { + Task string + + // ScheduleState must be one of "pause", "run", "scheduled". + ScheduleState string +} + +type AllocGetPauseResponse struct { + // ScheduleState will be one of "pause", "run", "scheduled". + ScheduleState string +} + // GenericResponse is used to respond to a request where no // specific response information is needed. type GenericResponse struct { diff --git a/vendor/github.com/hashicorp/nomad/api/allocations_exec.go b/vendor/github.com/hashicorp/nomad/api/allocations_exec.go index 5300f5f60..44c8e1788 100644 --- a/vendor/github.com/hashicorp/nomad/api/allocations_exec.go +++ b/vendor/github.com/hashicorp/nomad/api/allocations_exec.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "net/url" "strconv" "sync" "time" @@ -25,9 +26,11 @@ const ( type execSession struct { client *Client alloc *Allocation + job string task string tty bool command []string + action string stdin io.Reader stdout io.Writer @@ -94,17 +97,23 @@ func (s *execSession) startConnection() (*websocket.Conn, error) { q.Params["tty"] = strconv.FormatBool(s.tty) q.Params["task"] = s.task q.Params["command"] = string(commandBytes) - reqPath := fmt.Sprintf("/v1/client/allocation/%s/exec", s.alloc.ID) + if s.action != "" { + q.Params["action"] = s.action + q.Params["allocID"] = s.alloc.ID + q.Params["group"] = s.alloc.TaskGroup + reqPath = fmt.Sprintf("/v1/job/%s/action", url.PathEscape(s.job)) + } + var conn *websocket.Conn if nodeClient != nil { - conn, _, _ = nodeClient.websocket(reqPath, q) + conn, _, _ = nodeClient.websocket(reqPath, q) //nolint:bodyclose // gorilla/websocket Dialer.DialContext() does not require the body to be closed. } if conn == nil { - conn, _, err = s.client.websocket(reqPath, q) + conn, _, err = s.client.websocket(reqPath, q) //nolint:bodyclose // gorilla/websocket Dialer.DialContext() does not require the body to be closed. 
if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/nomad/api/api.go b/vendor/github.com/hashicorp/nomad/api/api.go index ac755e254..7b42b32de 100644 --- a/vendor/github.com/hashicorp/nomad/api/api.go +++ b/vendor/github.com/hashicorp/nomad/api/api.go @@ -12,6 +12,7 @@ import ( "errors" "fmt" "io" + "math" "net" "net/http" "net/url" @@ -200,6 +201,20 @@ type Config struct { TLSConfig *TLSConfig Headers http.Header + + // retryOptions holds the configuration necessary to perform retries + // on put calls. + retryOptions *retryOptions + + // url is populated with the initial parsed address and is not modified in the + // case of a unix:// URL, as opposed to Address. + url *url.URL +} + +// URL returns a copy of the initial parsed address and is not modified in the +// case of a `unix://` URL, as opposed to Address. +func (c *Config) URL() *url.URL { + return c.url } // ClientConfig copies the configuration with a new client address, region, and @@ -209,6 +224,7 @@ func (c *Config) ClientConfig(region, address string, tlsEnabled bool) *Config { if tlsEnabled { scheme = "https" } + config := &Config{ Address: fmt.Sprintf("%s://%s", scheme, address), Region: region, @@ -218,6 +234,7 @@ func (c *Config) ClientConfig(region, address string, tlsEnabled bool) *Config { HttpAuth: c.HttpAuth, WaitTime: c.WaitTime, TLSConfig: c.TLSConfig.Copy(), + url: copyURL(c.url), } // Update the tls server name for connecting to a client @@ -273,9 +290,30 @@ func (t *TLSConfig) Copy() *TLSConfig { return nt } +// defaultUDSClient creates a unix domain socket client. Errors return a nil +// http.Client, which is tested for in ConfigureTLS. This function expects that +// the Address has already been parsed into the config.url value. +func defaultUDSClient(config *Config) *http.Client { + + config.Address = "http://127.0.0.1" + + httpClient := &http.Client{ + Transport: &http.Transport{ + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", config.url.EscapedPath()) + }, + }, + } + return defaultClient(httpClient) +} + func defaultHttpClient() *http.Client { httpClient := cleanhttp.DefaultPooledClient() - transport := httpClient.Transport.(*http.Transport) + return defaultClient(httpClient) +} + +func defaultClient(c *http.Client) *http.Client { + transport := c.Transport.(*http.Transport) transport.TLSHandshakeTimeout = 10 * time.Second transport.TLSClientConfig = &tls.Config{ MinVersion: tls.VersionTLS12, @@ -285,7 +323,7 @@ func defaultHttpClient() *http.Client { // well yet: https://github.com/gorilla/websocket/issues/417 transport.ForceAttemptHTTP2 = false - return httpClient + return c } // DefaultConfig returns a default configuration for the client @@ -397,7 +435,7 @@ func cloneWithTimeout(httpClient *http.Client, t time.Duration) (*http.Client, e return &nc, nil } -// ConfigureTLS applies a set of TLS configurations to the the HTTP client. +// ConfigureTLS applies a set of TLS configurations to the HTTP client. 
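// (Annotation, not part of the diff: with the defaultUDSClient plumbing above,
// a client can now be pointed at a unix domain socket; the socket path below is
// illustrative.)
//
//	cfg := api.DefaultConfig()
//	cfg.Address = "unix:///var/run/nomad/nomad.sock"
//	client, err := api.NewClient(cfg)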
func ConfigureTLS(httpClient *http.Client, tlsConfig *TLSConfig) error { if tlsConfig == nil { return nil @@ -462,18 +500,29 @@ type Client struct { // NewClient returns a new client func NewClient(config *Config) (*Client, error) { + var err error // bootstrap the config defConfig := DefaultConfig() if config.Address == "" { config.Address = defConfig.Address - } else if _, err := url.Parse(config.Address); err != nil { + } + + // we have to test the address that comes from DefaultConfig, because it + // could be the value of NOMAD_ADDR which is applied without testing + if config.url, err = url.Parse(config.Address); err != nil { return nil, fmt.Errorf("invalid address '%s': %v", config.Address, err) } httpClient := config.HttpClient if httpClient == nil { - httpClient = defaultHttpClient() + switch { + case config.url.Scheme == "unix": + httpClient = defaultUDSClient(config) // mutates config + default: + httpClient = defaultHttpClient() + } + if err := ConfigureTLS(httpClient, config.TLSConfig); err != nil { return nil, err } @@ -578,6 +627,40 @@ func (c *Client) SetSecretID(secretID string) { c.config.SecretID = secretID } +func (c *Client) configureRetries(ro *retryOptions) { + + c.config.retryOptions = &retryOptions{ + maxRetries: defaultNumberOfRetries, + maxBackoffDelay: defaultMaxBackoffDelay, + delayBase: defaultDelayTimeBase, + } + + if ro.delayBase != 0 { + c.config.retryOptions.delayBase = ro.delayBase + } + + if ro.maxRetries != defaultNumberOfRetries { + c.config.retryOptions.maxRetries = ro.maxRetries + } + + if ro.maxBackoffDelay != 0 { + c.config.retryOptions.maxBackoffDelay = ro.maxBackoffDelay + } + + if ro.maxToLastCall != 0 { + c.config.retryOptions.maxToLastCall = ro.maxToLastCall + } + + if ro.fixedDelay != 0 { + c.config.retryOptions.fixedDelay = ro.fixedDelay + } + + // Ensure that a big attempt number or a big delayBase number will not cause + // a negative delay by overflowing the delay increase. + c.config.retryOptions.maxValidAttempt = int64(math.Log2(float64(math.MaxInt64 / + c.config.retryOptions.delayBase.Nanoseconds()))) +} + // request is used to help build up a request type request struct { config *Config @@ -721,24 +804,32 @@ func (r *request) toHTTP() (*http.Request, error) { // newRequest is used to create a new request func (c *Client) newRequest(method, path string) (*request, error) { - base, _ := url.Parse(c.config.Address) + u, err := url.Parse(path) if err != nil { return nil, err } + r := &request{ config: &c.config, method: method, url: &url.URL{ - Scheme: base.Scheme, - User: base.User, - Host: base.Host, + Scheme: c.config.url.Scheme, + User: c.config.url.User, + Host: c.config.url.Host, Path: u.Path, RawPath: u.RawPath, }, header: make(http.Header), params: make(map[string][]string), } + + // fixup socket paths + if r.url.Scheme == "unix" { + r.url.Scheme = "http" + r.url.Host = "127.0.0.1" + } + if c.config.Region != "" { r.params.Set("region", c.config.Region) } @@ -841,7 +932,7 @@ func (c *Client) rawQuery(endpoint string, q *QueryOptions) (io.ReadCloser, erro return nil, err } r.setQueryOptions(q) - _, resp, err := requireOK(c.doRequest(r)) + _, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility. 
if err != nil { return nil, err } @@ -895,28 +986,42 @@ func (c *Client) websocket(endpoint string, q *QueryOptions) (*websocket.Conn, * conn, resp, err := dialer.Dial(rhttp.URL.String(), rhttp.Header) // check resp status code, as it's more informative than handshake error we get from ws library - if resp != nil && resp.StatusCode != http.StatusSwitchingProtocols { - var buf bytes.Buffer - - if resp.Header.Get("Content-Encoding") == "gzip" { - greader, err := gzip.NewReader(resp.Body) + if resp != nil { + switch resp.StatusCode { + case http.StatusSwitchingProtocols: + // Connection upgrade was successful. + + case http.StatusPermanentRedirect, http.StatusTemporaryRedirect, http.StatusMovedPermanently: + loc := resp.Header.Get("Location") + u, err := url.Parse(loc) if err != nil { - return nil, nil, newUnexpectedResponseError( - fromStatusCode(resp.StatusCode), - withExpectedStatuses([]int{http.StatusSwitchingProtocols}), - withError(err)) + return nil, nil, fmt.Errorf("invalid redirect location %q: %w", loc, err) } - io.Copy(&buf, greader) - } else { - io.Copy(&buf, resp.Body) - } - resp.Body.Close() + return c.websocket(u.Path, q) + + default: + var buf bytes.Buffer + + if resp.Header.Get("Content-Encoding") == "gzip" { + greader, err := gzip.NewReader(resp.Body) + if err != nil { + return nil, nil, newUnexpectedResponseError( + fromStatusCode(resp.StatusCode), + withExpectedStatuses([]int{http.StatusSwitchingProtocols}), + withError(err)) + } + _, _ = io.Copy(&buf, greader) + } else { + _, _ = io.Copy(&buf, resp.Body) + } + _ = resp.Body.Close() - return nil, nil, newUnexpectedResponseError( - fromStatusCode(resp.StatusCode), - withExpectedStatuses([]int{http.StatusSwitchingProtocols}), - withBody(fmt.Sprint(buf.Bytes())), - ) + return nil, nil, newUnexpectedResponseError( + fromStatusCode(resp.StatusCode), + withExpectedStatuses([]int{http.StatusSwitchingProtocols}), + withBody(buf.String()), + ) + } } return conn, resp, err @@ -931,7 +1036,7 @@ func (c *Client) query(endpoint string, out any, q *QueryOptions) (*QueryMeta, e return nil, err } r.setQueryOptions(q) - rtt, resp, err := requireOK(c.doRequest(r)) + rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility. if err != nil { return nil, err } @@ -956,7 +1061,7 @@ func (c *Client) putQuery(endpoint string, in, out any, q *QueryOptions) (*Query } r.setQueryOptions(q) r.obj = in - rtt, resp, err := requireOK(c.doRequest(r)) + rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility. if err != nil { return nil, err } @@ -987,7 +1092,7 @@ func (c *Client) postQuery(endpoint string, in, out any, q *QueryOptions) (*Quer } r.setQueryOptions(q) r.obj = in - rtt, resp, err := requireOK(c.doRequest(r)) + rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility. if err != nil { return nil, err } @@ -1020,7 +1125,7 @@ func (c *Client) write(verb, endpoint string, in, out any, q *WriteOptions) (*Wr } r.setWriteOptions(q) r.obj = in - rtt, resp, err := requireOK(c.doRequest(r)) + rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility. 
if err != nil { return nil, err } @@ -1046,7 +1151,7 @@ func (c *Client) delete(endpoint string, in, out any, q *WriteOptions) (*WriteMe } r.setWriteOptions(q) r.obj = in - rtt, resp, err := requireOK(c.doRequest(r)) + rtt, resp, err := requireOK(c.doRequest(r)) //nolint:bodyclose // Closing the body is the caller's responsibility. if err != nil { return nil, err } @@ -1079,6 +1184,9 @@ func parseQueryMeta(resp *http.Response, q *QueryMeta) error { if err != nil { return fmt.Errorf("Failed to parse X-Nomad-LastContact: %v", err) } + if last > math.MaxInt64 { + return fmt.Errorf("Last contact duration is out of range: %d", last) + } q.LastContact = time.Duration(last) * time.Millisecond q.NextToken = header.Get("X-Nomad-NextToken") @@ -1171,3 +1279,16 @@ func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions { o2.ctx = ctx return o2 } + +// copyURL makes a deep copy of a net/url.URL +func copyURL(u1 *url.URL) *url.URL { + if u1 == nil { + return nil + } + o := *u1 + if o.User != nil { + ou := *u1.User + o.User = &ou + } + return &o +} diff --git a/vendor/github.com/hashicorp/nomad/api/consul.go b/vendor/github.com/hashicorp/nomad/api/consul.go index 23451e7e0..b20738b4b 100644 --- a/vendor/github.com/hashicorp/nomad/api/consul.go +++ b/vendor/github.com/hashicorp/nomad/api/consul.go @@ -4,32 +4,46 @@ package api import ( + "maps" + "slices" "time" - - "golang.org/x/exp/maps" ) // Consul represents configuration related to consul. type Consul struct { // (Enterprise-only) Namespace represents a Consul namespace. Namespace string `mapstructure:"namespace" hcl:"namespace,optional"` + + // (Enterprise-only) Cluster represents a specific Consul cluster. + Cluster string `mapstructure:"cluster" hcl:"cluster,optional"` + + // Partition is the Consul admin partition where the workload should + // run. This is available in Nomad CE but only works with Consul ENT + Partition string `mapstructure:"partition" hcl:"partition,optional"` } // Canonicalize Consul into a canonical form. The Canonicalize structs containing // a Consul should ensure it is not nil. func (c *Consul) Canonicalize() { - // Nothing to do here. - // + if c.Cluster == "" { + c.Cluster = "default" + } + // If Namespace is nil, that is a choice of the job submitter that // we should inherit from higher up (i.e. job<-group). Likewise, if // Namespace is set but empty, that is a choice to use the default consul // namespace. + + // Partition should never be defaulted to "default" because non-ENT Consul + // clusters don't have admin partitions } // Copy creates a deep copy of c. func (c *Consul) Copy() *Consul { return &Consul{ Namespace: c.Namespace, + Cluster: c.Cluster, + Partition: c.Partition, } } @@ -101,6 +115,7 @@ type SidecarTask struct { LogConfig *LogConfig `mapstructure:"logs" hcl:"logs,block"` ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"` KillSignal string `mapstructure:"kill_signal" hcl:"kill_signal,optional"` + VolumeMounts []*VolumeMount `hcl:"volume_mount,block"` } func (st *SidecarTask) Canonicalize() { @@ -139,16 +154,25 @@ func (st *SidecarTask) Canonicalize() { if st.ShutdownDelay == nil { st.ShutdownDelay = pointerOf(time.Duration(0)) } + + for _, vm := range st.VolumeMounts { + vm.Canonicalize() + } } // ConsulProxy represents a Consul Connect sidecar proxy jobspec block. 
type ConsulProxy struct { - LocalServiceAddress string `mapstructure:"local_service_address" hcl:"local_service_address,optional"` - LocalServicePort int `mapstructure:"local_service_port" hcl:"local_service_port,optional"` - Expose *ConsulExposeConfig `mapstructure:"expose" hcl:"expose,block"` - ExposeConfig *ConsulExposeConfig // Deprecated: only to maintain backwards compatibility. Use Expose instead. - Upstreams []*ConsulUpstream `hcl:"upstreams,block"` - Config map[string]interface{} `hcl:"config,block"` + LocalServiceAddress string `mapstructure:"local_service_address" hcl:"local_service_address,optional"` + LocalServicePort int `mapstructure:"local_service_port" hcl:"local_service_port,optional"` + Expose *ConsulExposeConfig `mapstructure:"expose" hcl:"expose,block"` + ExposeConfig *ConsulExposeConfig // Deprecated: only to maintain backwards compatibility. Use Expose instead. + Upstreams []*ConsulUpstream `hcl:"upstreams,block"` + + // TransparentProxy configures the Envoy sidecar to use "transparent + // proxying", which creates IP tables rules inside the network namespace to + // ensure traffic flows thru the Envoy proxy + TransparentProxy *ConsulTransparentProxy `mapstructure:"transparent_proxy" hcl:"transparent_proxy,block"` + Config map[string]interface{} `hcl:"config,block"` } func (cp *ConsulProxy) Canonicalize() { @@ -162,6 +186,8 @@ func (cp *ConsulProxy) Canonicalize() { cp.Upstreams = nil } + cp.TransparentProxy.Canonicalize() + for _, upstream := range cp.Upstreams { upstream.Canonicalize() } @@ -193,7 +219,6 @@ type ConsulMeshGateway struct { func (c *ConsulMeshGateway) Canonicalize() { // Mode may be empty string, indicating behavior will defer to Consul // service-defaults config entry. - return } func (c *ConsulMeshGateway) Copy() *ConsulMeshGateway { @@ -210,9 +235,14 @@ func (c *ConsulMeshGateway) Copy() *ConsulMeshGateway { type ConsulUpstream struct { DestinationName string `mapstructure:"destination_name" hcl:"destination_name,optional"` DestinationNamespace string `mapstructure:"destination_namespace" hcl:"destination_namespace,optional"` + DestinationPeer string `mapstructure:"destination_peer" hcl:"destination_peer,optional"` + DestinationPartition string `mapstructure:"destination_partition" hcl:"destination_partition,optional"` + DestinationType string `mapstructure:"destination_type" hcl:"destination_type,optional"` LocalBindPort int `mapstructure:"local_bind_port" hcl:"local_bind_port,optional"` Datacenter string `mapstructure:"datacenter" hcl:"datacenter,optional"` LocalBindAddress string `mapstructure:"local_bind_address" hcl:"local_bind_address,optional"` + LocalBindSocketPath string `mapstructure:"local_bind_socket_path" hcl:"local_bind_socket_path,optional"` + LocalBindSocketMode string `mapstructure:"local_bind_socket_mode" hcl:"local_bind_socket_mode,optional"` MeshGateway *ConsulMeshGateway `mapstructure:"mesh_gateway" hcl:"mesh_gateway,block"` Config map[string]any `mapstructure:"config" hcl:"config,block"` } @@ -221,15 +251,11 @@ func (cu *ConsulUpstream) Copy() *ConsulUpstream { if cu == nil { return nil } - return &ConsulUpstream{ - DestinationName: cu.DestinationName, - DestinationNamespace: cu.DestinationNamespace, - LocalBindPort: cu.LocalBindPort, - Datacenter: cu.Datacenter, - LocalBindAddress: cu.LocalBindAddress, - MeshGateway: cu.MeshGateway.Copy(), - Config: maps.Clone(cu.Config), - } + up := new(ConsulUpstream) + *up = *cu + up.MeshGateway = cu.MeshGateway.Copy() + up.Config = maps.Clone(cu.Config) + return up } func (cu 
*ConsulUpstream) Canonicalize() { @@ -242,6 +268,61 @@ func (cu *ConsulUpstream) Canonicalize() { } } +// ConsulTransparentProxy is used to configure the Envoy sidecar for +// "transparent proxying", which creates IP tables rules inside the network +// namespace to ensure traffic flows thru the Envoy proxy +type ConsulTransparentProxy struct { + // UID of the Envoy proxy. Defaults to the default Envoy proxy container + // image user. + UID string `mapstructure:"uid" hcl:"uid,optional"` + + // OutboundPort is the Envoy proxy's outbound listener port. Inbound TCP + // traffic hitting the PROXY_IN_REDIRECT chain will be redirected here. + // Defaults to 15001. + OutboundPort uint16 `mapstructure:"outbound_port" hcl:"outbound_port,optional"` + + // ExcludeInboundPorts is an additional set of ports will be excluded from + // redirection to the Envoy proxy. Can be Port.Label or Port.Value. This set + // will be added to the ports automatically excluded for the Expose.Port and + // Check.Expose fields. + ExcludeInboundPorts []string `mapstructure:"exclude_inbound_ports" hcl:"exclude_inbound_ports,optional"` + + // ExcludeOutboundPorts is a set of outbound ports that will not be + // redirected to the Envoy proxy, specified as port numbers. + ExcludeOutboundPorts []uint16 `mapstructure:"exclude_outbound_ports" hcl:"exclude_outbound_ports,optional"` + + // ExcludeOutboundCIDRs is a set of outbound CIDR blocks that will not be + // redirected to the Envoy proxy. + ExcludeOutboundCIDRs []string `mapstructure:"exclude_outbound_cidrs" hcl:"exclude_outbound_cidrs,optional"` + + // ExcludeUIDs is a set of user IDs whose network traffic will not be + // redirected through the Envoy proxy. + ExcludeUIDs []string `mapstructure:"exclude_uids" hcl:"exclude_uids,optional"` + + // NoDNS disables redirection of DNS traffic to Consul DNS. By default NoDNS + // is false and transparent proxy will direct DNS traffic to Consul DNS if + // available on the client. + NoDNS bool `mapstructure:"no_dns" hcl:"no_dns,optional"` +} + +func (tp *ConsulTransparentProxy) Canonicalize() { + if tp == nil { + return + } + if len(tp.ExcludeInboundPorts) == 0 { + tp.ExcludeInboundPorts = nil + } + if len(tp.ExcludeOutboundCIDRs) == 0 { + tp.ExcludeOutboundCIDRs = nil + } + if len(tp.ExcludeOutboundPorts) == 0 { + tp.ExcludeOutboundPorts = nil + } + if len(tp.ExcludeUIDs) == 0 { + tp.ExcludeUIDs = nil + } +} + type ConsulExposeConfig struct { Paths []*ConsulExposePath `mapstructure:"path" hcl:"path,block"` Path []*ConsulExposePath // Deprecated: only to maintain backwards compatibility. Use Paths instead. @@ -379,12 +460,52 @@ func (p *ConsulGatewayProxy) Copy() *ConsulGatewayProxy { } } -// ConsulGatewayTLSConfig is used to configure TLS for a gateway. +// ConsulGatewayTLSSDSConfig is used to configure the gateway's TLS listener to +// load certificates from an external Secret Discovery Service (SDS) +type ConsulGatewayTLSSDSConfig struct { + // ClusterName specifies the name of the SDS cluster where Consul should + // retrieve certificates. + ClusterName string `hcl:"cluster_name,optional" mapstructure:"cluster_name"` + + // CertResource specifies an SDS resource name + CertResource string `hcl:"cert_resource,optional" mapstructure:"cert_resource"` +} + +func (c *ConsulGatewayTLSSDSConfig) Copy() *ConsulGatewayTLSSDSConfig { + if c == nil { + return nil + } + + return &ConsulGatewayTLSSDSConfig{ + ClusterName: c.ClusterName, + CertResource: c.CertResource, + } +} + +// ConsulGatewayTLSConfig is used to configure TLS for a gateway. 
Both +// ConsulIngressConfigEntry and ConsulIngressService use this struct. For more +// details, consult the Consul documentation: +// https://developer.hashicorp.com/consul/docs/connect/config-entries/ingress-gateway#listeners-services-tls type ConsulGatewayTLSConfig struct { - Enabled bool `hcl:"enabled,optional"` - TLSMinVersion string `hcl:"tls_min_version,optional" mapstructure:"tls_min_version"` - TLSMaxVersion string `hcl:"tls_max_version,optional" mapstructure:"tls_max_version"` - CipherSuites []string `hcl:"cipher_suites,optional" mapstructure:"cipher_suites"` + + // Enabled indicates whether TLS is enabled for the configuration entry + Enabled bool `hcl:"enabled,optional"` + + // TLSMinVersion specifies the minimum TLS version supported for gateway + // listeners. + TLSMinVersion string `hcl:"tls_min_version,optional" mapstructure:"tls_min_version"` + + // TLSMaxVersion specifies the maxmimum TLS version supported for gateway + // listeners. + TLSMaxVersion string `hcl:"tls_max_version,optional" mapstructure:"tls_max_version"` + + // CipherSuites specifies a list of cipher suites that gateway listeners + // support when negotiating connections using TLS 1.2 or older. + CipherSuites []string `hcl:"cipher_suites,optional" mapstructure:"cipher_suites"` + + // SDS specifies parameters that configure the listener to load TLS + // certificates from an external Secrets Discovery Service (SDS). + SDS *ConsulGatewayTLSSDSConfig `hcl:"sds,block" mapstructure:"sds"` } func (tc *ConsulGatewayTLSConfig) Canonicalize() { @@ -399,6 +520,7 @@ func (tc *ConsulGatewayTLSConfig) Copy() *ConsulGatewayTLSConfig { Enabled: tc.Enabled, TLSMinVersion: tc.TLSMinVersion, TLSMaxVersion: tc.TLSMaxVersion, + SDS: tc.SDS.Copy(), } if len(tc.CipherSuites) != 0 { cipherSuites := make([]string, len(tc.CipherSuites)) @@ -409,13 +531,90 @@ func (tc *ConsulGatewayTLSConfig) Copy() *ConsulGatewayTLSConfig { return result } -// ConsulIngressService is used to configure a service fronted by the ingress gateway. +// ConsulHTTPHeaderModifiers is a set of rules for HTTP header modification that +// should be performed by proxies as the request passes through them. It can +// operate on either request or response headers depending on the context in +// which it is used. +type ConsulHTTPHeaderModifiers struct { + // Add is a set of name -> value pairs that should be appended to the + // request or response (i.e. allowing duplicates if the same header already + // exists). + Add map[string]string `hcl:"add,block" mapstructure:"add"` + + // Set is a set of name -> value pairs that should be added to the request + // or response, overwriting any existing header values of the same name. + Set map[string]string `hcl:"set,block" mapstructure:"set"` + + // Remove is the set of header names that should be stripped from the + // request or response. + Remove []string `hcl:"remove,optional" mapstructure:"remove"` +} + +func (h *ConsulHTTPHeaderModifiers) Copy() *ConsulHTTPHeaderModifiers { + if h == nil { + return nil + } + + return &ConsulHTTPHeaderModifiers{ + Add: maps.Clone(h.Add), + Set: maps.Clone(h.Set), + Remove: slices.Clone(h.Remove), + } +} + +func (h *ConsulHTTPHeaderModifiers) Canonicalize() { + if h == nil { + return + } + + if len(h.Add) == 0 { + h.Add = nil + } + if len(h.Set) == 0 { + h.Set = nil + } + if len(h.Remove) == 0 { + h.Remove = nil + } +} + +// ConsulIngressService is used to configure a service fronted by the ingress +// gateway. 
For more details, consult the Consul documentation: +// https://developer.hashicorp.com/consul/docs/connect/config-entries/ingress-gateway type ConsulIngressService struct { // Namespace is not yet supported. // Namespace string + + // Name of the service exposed through this listener. Name string `hcl:"name,optional"` + // Hosts specifies one or more hosts that the listening services can receive + // requests on. Hosts []string `hcl:"hosts,optional"` + + // TLS specifies a TLS configuration override for a specific service. If + // unset this will fallback to the ConsulIngressConfigEntry's own TLS field. + TLS *ConsulGatewayTLSConfig `hcl:"tls,block" mapstructure:"tls"` + + // RequestHeaders specifies a set of HTTP-specific header modification rules + // applied to requests routed through the gateway + RequestHeaders *ConsulHTTPHeaderModifiers `hcl:"request_headers,block" mapstructure:"request_headers"` + + // ResponseHeader specifies a set of HTTP-specific header modification rules + // applied to responses routed through the gateway + ResponseHeaders *ConsulHTTPHeaderModifiers `hcl:"response_headers,block" mapstructure:"response_headers"` + + // MaxConnections specifies the maximum number of HTTP/1.1 connections a + // service instance is allowed to establish against the upstream + MaxConnections *uint32 `hcl:"max_connections,optional" mapstructure:"max_connections"` + + // MaxPendingRequests specifies the maximum number of requests that are + // allowed to queue while waiting to establish a connection + MaxPendingRequests *uint32 `hcl:"max_pending_requests,optional" mapstructure:"max_pending_requests"` + + // MaxConcurrentRequests specifies the maximum number of concurrent HTTP/2 + // traffic requests that are allowed at a single point in time + MaxConcurrentRequests *uint32 `hcl:"max_concurrent_requests,optional" mapstructure:"max_concurrent_requests"` } func (s *ConsulIngressService) Canonicalize() { @@ -426,6 +625,9 @@ func (s *ConsulIngressService) Canonicalize() { if len(s.Hosts) == 0 { s.Hosts = nil } + + s.RequestHeaders.Canonicalize() + s.ResponseHeaders.Canonicalize() } func (s *ConsulIngressService) Copy() *ConsulIngressService { @@ -433,16 +635,19 @@ func (s *ConsulIngressService) Copy() *ConsulIngressService { return nil } - var hosts []string = nil - if n := len(s.Hosts); n > 0 { - hosts = make([]string, n) - copy(hosts, s.Hosts) - } + ns := new(ConsulIngressService) + *ns = *s - return &ConsulIngressService{ - Name: s.Name, - Hosts: hosts, - } + ns.Hosts = slices.Clone(s.Hosts) + ns.RequestHeaders = s.RequestHeaders.Copy() + ns.ResponseHeaders = s.ResponseHeaders.Copy() + ns.TLS = s.TLS.Copy() + + ns.MaxConnections = pointerCopy(s.MaxConnections) + ns.MaxPendingRequests = pointerCopy(s.MaxPendingRequests) + ns.MaxConcurrentRequests = pointerCopy(s.MaxConcurrentRequests) + + return ns } const ( @@ -500,7 +705,11 @@ type ConsulIngressConfigEntry struct { // Namespace is not yet supported. // Namespace string - TLS *ConsulGatewayTLSConfig `hcl:"tls,block"` + // TLS specifies a TLS configuration for the gateway. + TLS *ConsulGatewayTLSConfig `hcl:"tls,block"` + + // Listeners specifies a list of listeners in the mesh for the + // gateway. Listeners are uniquely identified by their port number. 
Listeners []*ConsulIngressListener `hcl:"listener,block"` } @@ -616,9 +825,7 @@ type ConsulMeshConfigEntry struct { // nothing in here } -func (e *ConsulMeshConfigEntry) Canonicalize() { - return -} +func (e *ConsulMeshConfigEntry) Canonicalize() {} func (e *ConsulMeshConfigEntry) Copy() *ConsulMeshConfigEntry { if e == nil { diff --git a/vendor/github.com/hashicorp/nomad/api/csi.go b/vendor/github.com/hashicorp/nomad/api/csi.go index 8a7a63dca..65e1ca569 100644 --- a/vendor/github.com/hashicorp/nomad/api/csi.go +++ b/vendor/github.com/hashicorp/nomad/api/csi.go @@ -507,7 +507,7 @@ type CSISnapshotCreateResponse struct { } // CSISnapshotListRequest is a request to a controller plugin to list all the -// snapshot known to the the storage provider. This request is paginated by +// snapshot known to the storage provider. This request is paginated by // the plugin and accepts the QueryOptions.PerPage and QueryOptions.NextToken // fields type CSISnapshotListRequest struct { diff --git a/vendor/github.com/hashicorp/nomad/api/deployments.go b/vendor/github.com/hashicorp/nomad/api/deployments.go index 665d38834..6785b8f6d 100644 --- a/vendor/github.com/hashicorp/nomad/api/deployments.go +++ b/vendor/github.com/hashicorp/nomad/api/deployments.go @@ -18,7 +18,7 @@ func (c *Client) Deployments() *Deployments { return &Deployments{client: c} } -// List is used to dump all of the deployments. +// List is used to dump all the deployments. func (d *Deployments) List(q *QueryOptions) ([]*Deployment, *QueryMeta, error) { var resp []*Deployment qm, err := d.client.query("/v1/deployments", &resp, q) diff --git a/vendor/github.com/hashicorp/nomad/api/error_unexpected_response.go b/vendor/github.com/hashicorp/nomad/api/error_unexpected_response.go index b843fc7ab..611ca33a4 100644 --- a/vendor/github.com/hashicorp/nomad/api/error_unexpected_response.go +++ b/vendor/github.com/hashicorp/nomad/api/error_unexpected_response.go @@ -8,10 +8,9 @@ import ( "fmt" "io" "net/http" + "slices" "strings" "time" - - "golang.org/x/exp/slices" ) // UnexpectedResponseError tracks the components for API errors encountered when diff --git a/vendor/github.com/hashicorp/nomad/api/event_stream.go b/vendor/github.com/hashicorp/nomad/api/event_stream.go index 7721d15cc..47523b125 100644 --- a/vendor/github.com/hashicorp/nomad/api/event_stream.go +++ b/vendor/github.com/hashicorp/nomad/api/event_stream.go @@ -186,7 +186,7 @@ func (e *EventStream) Stream(ctx context.Context, topics map[Topic][]string, ind } } - _, resp, err := requireOK(e.client.doRequest(r)) + _, resp, err := requireOK(e.client.doRequest(r)) //nolint:bodyclose if err != nil { return nil, err diff --git a/vendor/github.com/hashicorp/nomad/api/jobs.go b/vendor/github.com/hashicorp/nomad/api/jobs.go index 3b60f695b..8ac8555f5 100644 --- a/vendor/github.com/hashicorp/nomad/api/jobs.go +++ b/vendor/github.com/hashicorp/nomad/api/jobs.go @@ -4,15 +4,17 @@ package api import ( + "context" "errors" "fmt" + "io" + "maps" "net/url" "sort" "strconv" "time" "github.com/hashicorp/cronexpr" - "golang.org/x/exp/maps" ) const ( @@ -821,8 +823,9 @@ type MultiregionRegion 
struct { // PeriodicConfig is for serializing periodic config for a job. type PeriodicConfig struct { - Enabled *bool `hcl:"enabled,optional"` - Spec *string `hcl:"cron,optional"` + Enabled *bool `hcl:"enabled,optional"` + Spec *string `hcl:"cron,optional"` + Specs []string `hcl:"crons,optional"` SpecType *string ProhibitOverlap *bool `mapstructure:"prohibit_overlap" hcl:"prohibit_overlap,optional"` TimeZone *string `mapstructure:"time_zone" hcl:"time_zone,optional"` @@ -835,6 +838,9 @@ func (p *PeriodicConfig) Canonicalize() { if p.Spec == nil { p.Spec = pointerOf("") } + if p.Specs == nil { + p.Specs = []string{} + } if p.SpecType == nil { p.SpecType = pointerOf(PeriodicSpecCron) } @@ -851,30 +857,43 @@ func (p *PeriodicConfig) Canonicalize() { // returned. The `time.Location` of the returned value matches that of the // passed time. func (p *PeriodicConfig) Next(fromTime time.Time) (time.Time, error) { + // Single spec parsing if p != nil && *p.SpecType == PeriodicSpecCron { - e, err := cronexpr.Parse(*p.Spec) - if err != nil { - return time.Time{}, fmt.Errorf("failed parsing cron expression %q: %v", *p.Spec, err) + if p.Spec != nil && *p.Spec != "" { + return cronParseNext(fromTime, *p.Spec) } - return cronParseNext(e, fromTime, *p.Spec) } - return time.Time{}, nil + // multiple specs parsing + var nextTime time.Time + for _, spec := range p.Specs { + t, err := cronParseNext(fromTime, spec) + if err != nil { + return time.Time{}, fmt.Errorf("failed parsing cron expression %s: %v", spec, err) + } + if nextTime.IsZero() || t.Before(nextTime) { + nextTime = t + } + } + return nextTime, nil } // cronParseNext is a helper that parses the next time for the given expression // but captures any panic that may occur in the underlying library. // --- THIS FUNCTION IS REPLICATED IN nomad/structs/structs.go // and should be kept in sync. 
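// (Annotation, not upstream: with this rework, Next above parses each entry of
// the new Specs list through cronParseNext below and keeps the earliest result;
// e.g. a hypothetical crons = ["0 8 * * *", "0 20 * * *"] next fires at
// whichever of 08:00 or 20:00 comes first.)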
-func cronParseNext(e *cronexpr.Expression, fromTime time.Time, spec string) (t time.Time, err error) { +func cronParseNext(fromTime time.Time, spec string) (t time.Time, err error) { defer func() { if recover() != nil { t = time.Time{} err = fmt.Errorf("failed parsing cron expression: %q", spec) } }() - - return e.Next(fromTime), nil + exp, err := cronexpr.Parse(spec) + if err != nil { + return time.Time{}, fmt.Errorf("failed parsing cron expression: %s: %v", spec, err) + } + return exp.Next(fromTime), nil } func (p *PeriodicConfig) GetLocation() (*time.Location, error) { @@ -914,6 +933,52 @@ type JobSubmission struct { Variables string } +type JobUIConfig struct { + Description string `hcl:"description,optional"` + Links []*JobUILink `hcl:"link,block"` +} + +type JobUILink struct { + Label string `hcl:"label,optional"` + URL string `hcl:"url,optional"` +} + +func (j *JobUIConfig) Canonicalize() { + if j == nil { + return + } + + if len(j.Links) == 0 { + j.Links = nil + } +} + +func (j *JobUIConfig) Copy() *JobUIConfig { + if j == nil { + return nil + } + + copy := new(JobUIConfig) + copy.Description = j.Description + + for _, link := range j.Links { + copy.Links = append(copy.Links, link.Copy()) + } + + return copy +} + +func (j *JobUILink) Copy() *JobUILink { + if j == nil { + return nil + } + + return &JobUILink{ + Label: j.Label, + URL: j.URL, + } +} + func (js *JobSubmission) Canonicalize() { if js == nil { return @@ -949,7 +1014,7 @@ type Job struct { Priority *int `hcl:"priority,optional"` AllAtOnce *bool `mapstructure:"all_at_once" hcl:"all_at_once,optional"` Datacenters []string `hcl:"datacenters,optional"` - NodePool *string `hcl:"node_pool,optional"` + NodePool *string `mapstructure:"node_pool" hcl:"node_pool,optional"` Constraints []*Constraint `hcl:"constraint,block"` Affinities []*Affinity `hcl:"affinity,block"` TaskGroups []*TaskGroup `hcl:"group,block"` @@ -963,6 +1028,7 @@ type Job struct { Meta map[string]string `hcl:"meta,block"` ConsulToken *string `mapstructure:"consul_token" hcl:"consul_token,optional"` VaultToken *string `mapstructure:"vault_token" hcl:"vault_token,optional"` + UI *JobUIConfig `hcl:"ui,block"` /* Fields set by server, not sourced from job config file */ @@ -1088,6 +1154,10 @@ func (j *Job) Canonicalize() { for _, a := range j.Affinities { a.Canonicalize() } + + if j.UI != nil { + j.UI.Canonicalize() + } } // LookupTaskGroup finds a task group by name @@ -1497,3 +1567,40 @@ type JobEvaluateRequest struct { type EvalOptions struct { ForceReschedule bool } + +// ActionExec is used to run a pre-defined command inside a running task. +// The call blocks until command terminates (or an error occurs), and returns the exit code. +func (j *Jobs) ActionExec(ctx context.Context, + alloc *Allocation, job string, task string, tty bool, command []string, + action string, + stdin io.Reader, stdout, stderr io.Writer, + terminalSizeCh <-chan TerminalSize, q *QueryOptions) (exitCode int, err error) { + + s := &execSession{ + client: j.client, + alloc: alloc, + job: job, + task: task, + tty: tty, + command: command, + action: action, + + stdin: stdin, + stdout: stdout, + stderr: stderr, + + terminalSizeCh: terminalSizeCh, + q: q, + } + + return s.run(ctx) +} + +// JobStatusesRequest is used to get statuses for jobs, +// their allocations and deployments. +type JobStatusesRequest struct { + // Jobs may be optionally provided to request a subset of specific jobs. + Jobs []NamespacedID + // IncludeChildren will include child (batch) jobs in the response. 
+	IncludeChildren bool
+}
diff --git a/vendor/github.com/hashicorp/nomad/api/locks.go b/vendor/github.com/hashicorp/nomad/api/locks.go
new file mode 100644
index 000000000..e26e68a9c
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/api/locks.go
@@ -0,0 +1,377 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math/rand"
+	"net/http"
+	"time"
+
+	"github.com/hashicorp/go-multierror"
+)
+
+const (
+	lockLeaseRenewalFactor = 0.7
+	lockRetryBackoffFactor = 1.1
+
+	// DefaultLockTTL is the default value used to maintain a lock before it needs to
+	// be renewed. The actual value comes from the experience with Consul.
+	DefaultLockTTL = 15 * time.Second
+
+	// DefaultLockDelay is the default time a lock will be blocked after the TTL
+	// went by without any renews. It is intended to prevent split-brain situations.
+	// The actual value comes from the experience with Consul.
+	DefaultLockDelay = 15 * time.Second
+)
+
+var (
+	// ErrLockConflict is returned in case a lock operation can't be performed
+	// because the caller is not the current holder of the lock.
+	ErrLockConflict = errors.New("conflicting operation over lock")
+
+	// LockNoPathErr is returned when no path is provided in the variable to be
+	// used for the lease mechanism.
+	LockNoPathErr = errors.New("variable's path can't be empty")
+)
+
+// Locks returns a new handle on a lock for the given variable.
+func (c *Client) Locks(wo WriteOptions, v Variable, opts ...LocksOption) (*Locks, error) {
+
+	if v.Path == "" {
+		return nil, LockNoPathErr
+	}
+
+	ttl, err := time.ParseDuration(v.Lock.TTL)
+	if err != nil {
+		return nil, err
+	}
+
+	l := &Locks{
+		c:            c,
+		WriteOptions: wo,
+		variable:     v,
+		ttl:          ttl,
+		ro: retryOptions{
+			maxToLastCall: ttl,
+			maxRetries:    defaultNumberOfRetries,
+		},
+	}
+
+	for _, opt := range opts {
+		opt(l)
+	}
+
+	l.c.configureRetries(&l.ro)
+
+	return l, nil
+}
+
+// Locks is used to maintain all the resources necessary to operate over a lock.
+// It makes the calls to the HTTP API using an exponential retry mechanism that
+// will try until it either reaches 5 attempts or the TTL of the lock expires.
+// The variable doesn't need to exist; one will be created internally, but a
+// path must be provided.
+//
+// Important: it is up to the user to remove the variable created for the lock.
+type Locks struct {
+	c        *Client
+	variable Variable
+	ttl      time.Duration
+	ro       retryOptions
+
+	WriteOptions
+}
+
+type LocksOption = func(l *Locks)
+
+// LocksOptionWithMaxRetries configures the maximum number of retries the lock
+// handler will perform in case of an unexpected response while interacting
+// with the locks endpoint.
+func LocksOptionWithMaxRetries(maxRetries int64) LocksOption {
+	return func(l *Locks) {
+		l.ro.maxRetries = maxRetries
+	}
+}
+
+// Acquire makes the actual call to acquire the lock over the variable, using
+// the TTL in the Locks to create the VariableLock.
+//
+// Acquire returns the path to the variable holding the lock.
+func (l *Locks) Acquire(ctx context.Context) (string, error) {
+
+	var out Variable
+
+	_, err := l.c.retryPut(ctx, "/v1/var/"+l.variable.Path+"?lock-acquire", l.variable, &out, &l.WriteOptions)
+	if err != nil {
+		callErr, ok := err.(UnexpectedResponseError)
+
+		// http.StatusConflict means the lock is already held. This will happen
+		// under the normal execution if multiple instances are fighting for the same lock and
+		// doesn't disrupt the flow.
+		if ok && callErr.statusCode == http.StatusConflict {
+			return "", fmt.Errorf("acquire conflict %w", ErrLockConflict)
+		}
+
+		return "", err
+	}
+
+	l.variable.Lock = out.Lock
+
+	return l.variable.Path, nil
+}
+
+// Release makes the call to release the lock over a variable, even if the TTL
+// has not yet passed.
+// In case of a call to release a non-held lock, Release returns ErrLockConflict.
+func (l *Locks) Release(ctx context.Context) error {
+	var out Variable
+
+	rv := &Variable{
+		Lock: &VariableLock{
+			ID: l.variable.LockID(),
+		},
+	}
+
+	_, err := l.c.retryPut(ctx, "/v1/var/"+l.variable.Path+"?lock-release", rv,
+		&out, &l.WriteOptions)
+	if err != nil {
+		callErr, ok := err.(UnexpectedResponseError)
+
+		if ok && callErr.statusCode == http.StatusConflict {
+			return fmt.Errorf("release conflict %w", ErrLockConflict)
+		}
+		return err
+	}
+
+	return nil
+}
+
+// Renew is used to extend the TTL of a lock. It can be used as a heartbeat or a
+// lease to maintain the hold over the lock for longer periods or as a sync
+// mechanism among multiple instances looking to acquire the same lock.
+// Renew returns nil if the renewal was successful.
+//
+// In case of a call to renew a non-held lock, Renew returns ErrLockConflict.
+func (l *Locks) Renew(ctx context.Context) error {
+	var out VariableMetadata
+
+	_, err := l.c.retryPut(ctx, "/v1/var/"+l.variable.Path+"?lock-renew", l.variable, &out, &l.WriteOptions)
+	if err != nil {
+		callErr, ok := err.(UnexpectedResponseError)
+
+		if ok && callErr.statusCode == http.StatusConflict {
+			return fmt.Errorf("renew conflict %w", ErrLockConflict)
+		}
+
+		return err
+	}
+	return nil
+}
+
+func (l *Locks) LockTTL() time.Duration {
+	return l.ttl
+}
+
+// Locker is the interface that wraps the lock handler. It is used by the lock
+// leaser to handle all lock operations.
+type Locker interface {
+	// Acquire makes the actual call to acquire the lock over the variable,
+	// using the TTL in the Locks to create the VariableLock.
+	//
+	// Acquire returns the path to the variable holding the lock.
+	Acquire(ctx context.Context) (string, error)
+	// Release makes the call to release the lock over a variable, even if the TTL
+	// has not yet passed.
+	Release(ctx context.Context) error
+	// Renew is used to extend the TTL of a lock. It can be used as a heartbeat or a
+	// lease to maintain the hold over the lock for longer periods or as a sync
+	// mechanism among multiple instances looking to acquire the same lock.
+	Renew(ctx context.Context) error
+
+	// LockTTL returns the expiration time of the underlying lock.
+	LockTTL() time.Duration
+}
+
+// LockLeaser is a helper used to run a protected function that should only be
+// active if the instance that runs it is currently holding the lock.
+// It can be used to provide synchronization among multiple independent instances.
+//
+// It includes the lease renewal mechanism and tracking in case the protected
+// function returns an error. Internally it uses an exponential retry mechanism
+// for the API calls.
+type LockLeaser struct {
+	Name          string
+	renewalPeriod time.Duration
+	waitPeriod    time.Duration
+	randomDelay   time.Duration
+	earlyReturn   bool
+	locked        bool
+
+	locker Locker
+}
+
+type LockLeaserOption = func(l *LockLeaser)
+
+// LockLeaserOptionWithEarlyReturn informs the leaser to return once the lock
+// acquire fails rather than waiting to attempt again.
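// A usage sketch for the lock handle and leaser above (not part of this diff;
// ctx, an existing *Client, and the "app/leader" path are assumptions). The
// leaser acquires the lock, keeps renewing it, and runs the protected function
// only while the lock is held:
//
//	func runLeaderOnly(ctx context.Context, c *Client) error {
//		v := Variable{Path: "app/leader", Lock: &VariableLock{TTL: "15s"}}
//		handle, err := c.Locks(WriteOptions{}, v)
//		if err != nil {
//			return err
//		}
//		return c.NewLockLeaser(handle).Start(ctx, func(ctx context.Context) error {
//			return nil // leader-only work goes here
//		})
//	}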
+func LockLeaserOptionWithEarlyReturn(er bool) LockLeaserOption {
+	return func(l *LockLeaser) {
+		l.earlyReturn = er
+	}
+}
+
+// LockLeaserOptionWithWaitPeriod is used to set a backoff period between
+// calls to attempt to acquire the lock. By default, it is set to 1.1 * TTL.
+func LockLeaserOptionWithWaitPeriod(wp time.Duration) LockLeaserOption {
+	return func(l *LockLeaser) {
+		l.waitPeriod = wp
+	}
+}
+
+// NewLockLeaser returns an instance of LockLeaser for the given Locker,
+// applying any provided options.
+func (c *Client) NewLockLeaser(l Locker, opts ...LockLeaserOption) *LockLeaser {
+
+	rn := rand.New(rand.NewSource(time.Now().Unix())).Intn(100)
+
+	ll := &LockLeaser{
+		renewalPeriod: time.Duration(float64(l.LockTTL()) * lockLeaseRenewalFactor),
+		waitPeriod:    time.Duration(float64(l.LockTTL()) * lockRetryBackoffFactor),
+		randomDelay:   time.Duration(rn) * time.Millisecond,
+		locker:        l,
+		earlyReturn:   false,
+	}
+
+	for _, opt := range opts {
+		opt(ll)
+	}
+
+	return ll
+}
+
+// Start wraps the start function, which executes the protected function and
+// maintains the lease, and releases the lock before exiting. It is a blocking
+// function.
+func (ll *LockLeaser) Start(ctx context.Context, protectedFuncs ...func(ctx context.Context) error) error {
+	var mErr multierror.Error
+
+	err := ll.start(ctx, protectedFuncs...)
+	if err != nil {
+		mErr.Errors = append(mErr.Errors, err)
+	}
+
+	if ll.locked {
+		err = ll.locker.Release(ctx)
+		if err != nil {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("lock release: %w", err))
+		}
+	}
+
+	return mErr.ErrorOrNil()
+}
+
+// start starts the process of maintaining the lease and executes the protected
+// function on an independent goroutine. It is a blocking function; it will
+// return once the protected function is done or an execution error arises.
+func (ll *LockLeaser) start(ctx context.Context, protectedFuncs ...func(ctx context.Context) error) error {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// errChannel is used to track execution errors
+	errChannel := make(chan error, 1)
+	defer close(errChannel)
+
+	// To avoid collisions if all the instances start at the same time, wait
+	// a random time before making the first call.
+	waitWithContext(ctx, ll.randomDelay)
+
+	waitTicker := time.NewTicker(ll.waitPeriod)
+	defer waitTicker.Stop()
+
+	for {
+		lockID, err := ll.locker.Acquire(ctx)
+		if err != nil {
+
+			if errors.Is(err, ErrLockConflict) && ll.earlyReturn {
+
+				return nil
+			}
+
+			if !errors.Is(err, ErrLockConflict) {
+				errChannel <- err
+			}
+		}
+
+		if lockID != "" {
+			ll.locked = true
+
+			funcCtx, funcCancel := context.WithCancel(ctx)
+			defer funcCancel()
+
+			// Execute the lock protected function.
+			go func() {
+				defer funcCancel()
+				for _, f := range protectedFuncs {
+					err := f(funcCtx)
+					if err != nil {
+						errChannel <- fmt.Errorf("error executing protected function %w", err)
+						return
+					}
+					cancel()
+				}
+			}()
+
+			// maintainLease is a blocking function; it will return if there is
+			// an error maintaining the lease or once the protected function returns.
+ err = ll.maintainLease(funcCtx) + if err != nil && !errors.Is(err, ErrLockConflict) { + errChannel <- fmt.Errorf("error renewing the lease: %w", err) + } + } + + waitTicker.Stop() + waitTicker = time.NewTicker(ll.waitPeriod) + select { + case <-ctx.Done(): + return nil + + case err := <-errChannel: + return fmt.Errorf("locks: %w", err) + + case <-waitTicker.C: + } + } +} + +func (ll *LockLeaser) maintainLease(ctx context.Context) error { + renewTicker := time.NewTicker(ll.renewalPeriod) + defer renewTicker.Stop() + for { + select { + case <-ctx.Done(): + return nil + + case <-renewTicker.C: + err := ll.locker.Renew(ctx) + if err != nil { + return err + } + } + } +} + +func waitWithContext(ctx context.Context, d time.Duration) { + t := time.NewTimer(d) + defer t.Stop() + + select { + case <-ctx.Done(): + case <-t.C: + } +} diff --git a/vendor/github.com/hashicorp/nomad/api/namespace.go b/vendor/github.com/hashicorp/nomad/api/namespace.go index d1b4fbbee..bc12ec77d 100644 --- a/vendor/github.com/hashicorp/nomad/api/namespace.go +++ b/vendor/github.com/hashicorp/nomad/api/namespace.go @@ -75,6 +75,8 @@ type Namespace struct { Quota string Capabilities *NamespaceCapabilities `hcl:"capabilities,block"` NodePoolConfiguration *NamespaceNodePoolConfiguration `hcl:"node_pool_config,block"` + VaultConfiguration *NamespaceVaultConfiguration `hcl:"vault,block"` + ConsulConfiguration *NamespaceConsulConfiguration `hcl:"consul,block"` Meta map[string]string CreateIndex uint64 ModifyIndex uint64 @@ -95,6 +97,50 @@ type NamespaceNodePoolConfiguration struct { Denied []string } +// NamespaceVaultConfiguration stores configuration about permissions to Vault +// clusters for a namespace, for use with Nomad Enterprise. +type NamespaceVaultConfiguration struct { + // Default is the Vault cluster used by jobs in this namespace that don't + // specify a cluster of their own. + Default string + + // Allowed specifies the Vault clusters that are allowed to be used by jobs + // in this namespace. By default, all clusters are allowed. If an empty list + // is provided only the namespace's default cluster is allowed. This field + // supports wildcard globbing through the use of `*` for multi-character + // matching. This field cannot be used with Denied. + Allowed []string + + // Denied specifies the Vault clusters that are not allowed to be used by + // jobs in this namespace. This field supports wildcard globbing through the + // use of `*` for multi-character matching. If specified, any cluster is + // allowed to be used, except for those that match any of these patterns. + // This field cannot be used with Allowed. + Denied []string +} + +// NamespaceConsulConfiguration stores configuration about permissions to Consul +// clusters for a namespace, for use with Nomad Enterprise. +type NamespaceConsulConfiguration struct { + // Default is the Consul cluster used by jobs in this namespace that don't + // specify a cluster of their own. + Default string + + // Allowed specifies the Consul clusters that are allowed to be used by jobs + // in this namespace. By default, all clusters are allowed. If an empty list + // is provided only the namespace's default cluster is allowed. This field + // supports wildcard globbing through the use of `*` for multi-character + // matching. This field cannot be used with Denied. + Allowed []string + + // Denied specifies the Consul clusters that are not allowed to be used by + // jobs in this namespace. 
This field supports wildcard globbing through the + // use of `*` for multi-character matching. If specified, any cluster is + // allowed to be used, except for those that match any of these patterns. + // This field cannot be used with Allowed. + Denied []string +} + // NamespaceIndexSort is a wrapper to sort Namespaces by CreateIndex. We // reverse the test so that we get the highest index first. type NamespaceIndexSort []*Namespace @@ -110,3 +156,12 @@ func (n NamespaceIndexSort) Less(i, j int) bool { func (n NamespaceIndexSort) Swap(i, j int) { n[i], n[j] = n[j], n[i] } + +// NamespacedID is used for things that are unique only per-namespace, +// such as jobs. +type NamespacedID struct { + // Namespace is the Name of the Namespace + Namespace string + // ID is the ID of the namespaced object (e.g. Job ID) + ID string +} diff --git a/vendor/github.com/hashicorp/nomad/api/nodes.go b/vendor/github.com/hashicorp/nomad/api/nodes.go index 697c7d731..809382bf7 100644 --- a/vendor/github.com/hashicorp/nomad/api/nodes.go +++ b/vendor/github.com/hashicorp/nomad/api/nodes.go @@ -783,6 +783,7 @@ type HostStats struct { Memory *HostMemoryStats CPU []*HostCPUStats DiskStats []*HostDiskStats + AllocDirStats *HostDiskStats DeviceStats []*DeviceGroupStats Uptime uint64 CPUTicksConsumed float64 diff --git a/vendor/github.com/hashicorp/nomad/api/operator.go b/vendor/github.com/hashicorp/nomad/api/operator.go index 32faf3546..a6f11f45e 100644 --- a/vendor/github.com/hashicorp/nomad/api/operator.go +++ b/vendor/github.com/hashicorp/nomad/api/operator.go @@ -66,7 +66,7 @@ func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, e return nil, err } r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) + _, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose if err != nil { return nil, err } @@ -91,7 +91,7 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err r.params.Set("address", address) - _, resp, err := requireOK(op.c.doRequest(r)) + _, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose if err != nil { return err } @@ -111,7 +111,47 @@ func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error { r.params.Set("id", id) - _, resp, err := requireOK(op.c.doRequest(r)) + _, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose + if err != nil { + return err + } + + resp.Body.Close() + return nil +} + +// RaftTransferLeadershipByAddress is used to transfer leadership to a +// different peer using its address in the form of "IP:port". +func (op *Operator) RaftTransferLeadershipByAddress(address string, q *WriteOptions) error { + r, err := op.c.newRequest("PUT", "/v1/operator/raft/transfer-leadership") + if err != nil { + return err + } + r.setWriteOptions(q) + + r.params.Set("address", address) + + _, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose + if err != nil { + return err + } + + resp.Body.Close() + return nil +} + +// RaftTransferLeadershipByID is used to transfer leadership to a +// different peer using its Raft ID. 
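// An illustrative call for the transfer-leadership endpoints above (the
// address and client are assumptions; nil write options for brevity):
//
//	op := client.Operator()
//	err := op.RaftTransferLeadershipByAddress("10.0.0.2:4647", nil)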
+func (op *Operator) RaftTransferLeadershipByID(id string, q *WriteOptions) error { + r, err := op.c.newRequest("PUT", "/v1/operator/raft/transfer-leadership") + if err != nil { + return err + } + r.setWriteOptions(q) + + r.params.Set("id", id) + + _, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose if err != nil { return err } @@ -222,7 +262,7 @@ func (op *Operator) Snapshot(q *QueryOptions) (io.ReadCloser, error) { return nil, err } r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) + _, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose if err != nil { return nil, err } @@ -233,7 +273,6 @@ func (op *Operator) Snapshot(q *QueryOptions) (io.ReadCloser, error) { if err != nil { io.Copy(io.Discard, resp.Body) resp.Body.Close() - return nil, err } @@ -315,7 +354,7 @@ func (op *Operator) ApplyLicense(license string, opts *ApplyLicenseOptions, q *W r.setWriteOptions(q) r.body = strings.NewReader(license) - rtt, resp, err := requireOK(op.c.doRequest(r)) + rtt, resp, err := requireOK(op.c.doRequest(r)) //nolint:bodyclose if err != nil { return nil, err } @@ -335,7 +374,7 @@ func (op *Operator) LicenseGet(q *QueryOptions) (*LicenseReply, *QueryMeta, erro req.setQueryOptions(q) var reply LicenseReply - rtt, resp, err := op.c.doRequest(req) + rtt, resp, err := op.c.doRequest(req) //nolint:bodyclose if err != nil { return nil, nil, err } @@ -363,3 +402,72 @@ func (op *Operator) LicenseGet(q *QueryOptions) (*LicenseReply, *QueryMeta, erro return &reply, qm, nil } + +type LeadershipTransferResponse struct { + From RaftServer + To RaftServer + Noop bool + Err error + + WriteMeta +} + +// VaultWorkloadIdentityUpgradeCheck is the result of verifying if the cluster +// is ready to switch to workload identities for Vault. +type VaultWorkloadIdentityUpgradeCheck struct { + // JobsWithoutVaultIdentity is the list of jobs that have a `vault` block + // but do not have an `identity` for Vault. + JobsWithoutVaultIdentity []*JobListStub + + // OutdatedNodes is the list of nodes running a version of Nomad that does + // not support workload identities for Vault. + OutdatedNodes []*NodeListStub + + // VaultTokens is the list of Vault ACL token accessors that Nomad created + // and will no longer manage after the cluster is migrated to workload + // identities. + VaultTokens []*VaultAccessor +} + +// Ready returns true if the cluster is ready to migrate to workload identities +// with Vault. +func (v *VaultWorkloadIdentityUpgradeCheck) Ready() bool { + return v != nil && + len(v.VaultTokens) == 0 && + len(v.OutdatedNodes) == 0 && + len(v.JobsWithoutVaultIdentity) == 0 +} + +// VaultAccessor is a Vault ACL token created by Nomad for a task to access +// Vault using the legacy authentication flow. +type VaultAccessor struct { + // AllocID is the ID of the allocation that requested this token. + AllocID string + + // Task is the name of the task that requested this token. + Task string + + // NodeID is the ID of the node running the allocation that requested this + // token. + NodeID string + + // Accessor is the Vault ACL token accessor ID. + Accessor string + + // CreationTTL is the TTL set when the token was created. + CreationTTL int + + // CreateIndex is the Raft index when the token was created. + CreateIndex uint64 +} + +// UpgradeCheckVaultWorkloadIdentity retrieves the cluster status for migrating +// to workload identities with Vault. 
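// A sketch of driving the check below before a migration (client is an
// assumed *Client):
//
//	check, _, err := client.Operator().UpgradeCheckVaultWorkloadIdentity(nil)
//	if err == nil && check.Ready() {
//		// no legacy tokens, outdated nodes, or jobs without identities remain
//	}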
+func (op *Operator) UpgradeCheckVaultWorkloadIdentity(q *QueryOptions) (*VaultWorkloadIdentityUpgradeCheck, *QueryMeta, error) { + var resp VaultWorkloadIdentityUpgradeCheck + qm, err := op.c.query("/v1/operator/upgrade-check/vault-workload-identity", &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} diff --git a/vendor/github.com/hashicorp/nomad/api/operator_autopilot.go b/vendor/github.com/hashicorp/nomad/api/operator_autopilot.go index ddc5de74e..05eaac1eb 100644 --- a/vendor/github.com/hashicorp/nomad/api/operator_autopilot.go +++ b/vendor/github.com/hashicorp/nomad/api/operator_autopilot.go @@ -178,6 +178,86 @@ type OperatorHealthReply struct { // Servers holds the health of each server. Servers []ServerHealth + + // The ID of the current leader. + Leader string + + // List of servers that are voters in the Raft configuration. + Voters []string + + // ReadReplicas holds the list of servers that are + // read replicas in the Raft configuration. (Enterprise only) + ReadReplicas []string `json:",omitempty"` + + // RedundancyZones holds the list of servers in each redundancy zone. + // (Enterprise only) + RedundancyZones map[string]AutopilotZone `json:",omitempty"` + + // Upgrade holds the current upgrade status. + Upgrade *AutopilotUpgrade `json:",omitempty"` + + // The number of servers that could be lost without an outage + // occurring if all the voters don't fail at once. (Enterprise only) + OptimisticFailureTolerance int `json:",omitempty"` +} + +// AutopilotZone holds the list of servers in a redundancy zone. (Enterprise only) +type AutopilotZone struct { + // Servers holds the list of servers in the redundancy zone. + Servers []string + + // Voters holds the list of servers that are voters in the redundancy zone. + Voters []string + + // FailureTolerance is the number of servers that could be lost without an + // outage occurring. + FailureTolerance int +} + +// AutopilotUpgrade holds the current upgrade status. (Enterprise only) +type AutopilotUpgrade struct { + // Status of the upgrade. + Status string + + // TargetVersion is the version that the cluster is upgrading to. + TargetVersion string + + // TargetVersionVoters holds the list of servers that are voters in the Raft + // configuration of the TargetVersion. + TargetVersionVoters []string + + // TargetVersionNonVoters holds the list of servers that are non-voters in + // the Raft configuration of the TargetVersion. + TargetVersionNonVoters []string + + // TargetVersionReadReplicas holds the list of servers that are read + // replicas in the Raft configuration of the TargetVersion. + TargetVersionReadReplicas []string + + // OtherVersionVoters holds the list of servers that are voters in the Raft + // configuration of a version other than the TargetVersion. + OtherVersionVoters []string + + // OtherVersionNonVoters holds the list of servers that are non-voters in + // the Raft configuration of a version other than the TargetVersion. + OtherVersionNonVoters []string + + // OtherVersionReadReplicas holds the list of servers that are read replicas + // in the Raft configuration of a version other than the TargetVersion. + OtherVersionReadReplicas []string + + // RedundancyZones holds the list of servers in each redundancy zone for the + // TargetVersion. 
+ RedundancyZones map[string]AutopilotZoneUpgradeVersions +} + +// AutopilotZoneUpgradeVersions holds the list of servers +// in a redundancy zone for a specific version. (Enterprise only) +type AutopilotZoneUpgradeVersions struct { + TargetVersionVoters []string + TargetVersionNonVoters []string + OtherVersionVoters []string + OtherVersionNonVoters []string } // AutopilotGetConfiguration is used to query the current Autopilot configuration. diff --git a/vendor/github.com/hashicorp/nomad/api/raw.go b/vendor/github.com/hashicorp/nomad/api/raw.go index 87f8a9c5e..73e2a5299 100644 --- a/vendor/github.com/hashicorp/nomad/api/raw.go +++ b/vendor/github.com/hashicorp/nomad/api/raw.go @@ -3,7 +3,10 @@ package api -import "io" +import ( + "io" + "net/http" +) // Raw can be used to do raw queries against custom endpoints type Raw struct { @@ -39,3 +42,8 @@ func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*W func (raw *Raw) Delete(endpoint string, out interface{}, q *WriteOptions) (*WriteMeta, error) { return raw.c.delete(endpoint, nil, out, q) } + +// Do uses the raw client's internal httpClient to process the request +func (raw *Raw) Do(req *http.Request) (*http.Response, error) { + return raw.c.httpClient.Do(req) +} diff --git a/vendor/github.com/hashicorp/nomad/api/resources.go b/vendor/github.com/hashicorp/nomad/api/resources.go index d41a46293..a6fe60d20 100644 --- a/vendor/github.com/hashicorp/nomad/api/resources.go +++ b/vendor/github.com/hashicorp/nomad/api/resources.go @@ -17,6 +17,7 @@ type Resources struct { DiskMB *int `mapstructure:"disk" hcl:"disk,optional"` Networks []*NetworkResource `hcl:"network,block"` Devices []*RequestedDevice `hcl:"device,block"` + NUMA *NUMAResource `hcl:"numa,block"` // COMPAT(0.10) // XXX Deprecated. Please do not use. The field will be removed in Nomad @@ -50,6 +51,8 @@ func (r *Resources) Canonicalize() { for _, d := range r.Devices { d.Canonicalize() } + + r.NUMA.Canonicalize() } // DefaultResources is a small resources object that contains the @@ -97,6 +100,35 @@ func (r *Resources) Merge(other *Resources) { if len(other.Devices) != 0 { r.Devices = other.Devices } + if other.NUMA != nil { + r.NUMA = other.NUMA.Copy() + } +} + +// NUMAResource contains the NUMA affinity request for scheduling purposes. +// +// Applies only to Nomad Enterprise. +type NUMAResource struct { + // Affinity must be one of "none", "prefer", "require". + Affinity string `hcl:"affinity,optional"` +} + +func (n *NUMAResource) Copy() *NUMAResource { + if n == nil { + return nil + } + return &NUMAResource{ + Affinity: n.Affinity, + } +} + +func (n *NUMAResource) Canonicalize() { + if n == nil { + return + } + if n.Affinity == "" { + n.Affinity = "none" + } } type Port struct { diff --git a/vendor/github.com/hashicorp/nomad/api/retry.go b/vendor/github.com/hashicorp/nomad/api/retry.go new file mode 100644 index 000000000..c32dbff5e --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/retry.go @@ -0,0 +1,124 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"time"
+)
+
+const (
+	defaultNumberOfRetries = 5
+	defaultDelayTimeBase   = time.Second
+	defaultMaxBackoffDelay = 5 * time.Minute
+)
+
+type retryOptions struct {
+	maxRetries int64 // Optional, defaults to 5
+	// maxBackoffDelay sets a capping value for the delay between calls, to avoid it growing infinitely
+	maxBackoffDelay time.Duration // Optional, defaults to 5 min
+	// maxToLastCall sets a capping value for the whole retry process, in case there is a deadline to make the call.
+	maxToLastCall time.Duration // Optional, defaults to 0, meaning no time cap
+	// fixedDelay is used in case a uniform distribution of the calls is preferred.
+	fixedDelay time.Duration // Optional, defaults to 0, meaning the delay is exponential, starting at 1 sec
+	// delayBase is used to calculate the starting value at which the delay starts to grow.
+	// When left empty, a value of 1 sec will be used as base and then the delays will
+	// grow exponentially with every attempt: starting at 1s, then 2s, 4s, 8s...
+	delayBase time.Duration // Optional, defaults to 1 sec
+
+	// maxValidAttempt is used to ensure that a large attempt count or a large delayBase will not cause
+	// a negative delay by overflowing the delay increase. Every attempt after
+	// maxValidAttempt will use the maxBackoffDelay if configured, or the defaultMaxBackoffDelay if not.
+	maxValidAttempt int64
+}
+
+func (c *Client) retryPut(ctx context.Context, endpoint string, in, out any, q *WriteOptions) (*WriteMeta, error) {
+	var err error
+	var wm *WriteMeta
+
+	attemptDelay := 100 * time.Second // Avoid a tick before starting
+	startTime := time.Now()
+
+	t := time.NewTimer(attemptDelay)
+	defer t.Stop()
+
+	for attempt := int64(0); attempt < c.config.retryOptions.maxRetries+1; attempt++ {
+		attemptDelay = c.calculateDelay(attempt)
+
+		t.Reset(attemptDelay)
+
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		case <-t.C:
+
+		}
+
+		wm, err = c.put(endpoint, in, out, q)
+
+		// Maximum retry period is up, don't retry
+		if c.config.retryOptions.maxToLastCall != 0 && time.Since(startTime) > c.config.retryOptions.maxToLastCall {
+			break
+		}
+
+		// The put function only returns WriteMeta if the call was successful;
+		// don't retry
+		if wm != nil {
+			break
+		}
+
+		// If WriteMeta is nil, we need to process the error to decide if a retry is
+		// necessary or not
+		var callErr UnexpectedResponseError
+		ok := errors.As(err, &callErr)
+
+		// If it is not an UnexpectedResponseError, it is an error while performing
+		// the call; don't retry
+		if !ok {
+			break
+		}
+
+		// Only 500+ or 429 status calls may be retried, otherwise
+		// don't retry
+		if !isCallRetriable(callErr.StatusCode()) {
+			break
+		}
+	}
+
+	return wm, err
+}
+
+// According to the HTTP protocol, it only makes sense to retry calls
+// when the error is caused by a temporary situation, like a server being down
+// (500s+) or the call being rate limited (429); this function checks whether
+// the statusCode is among the errors worth retrying.
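// A sketch of the pacing and retry rule described above (fmt import assumed):
// exponential delays grow as delayBase<<(attempt-1), and only 429 or >500
// statuses are retried by the code below.
//
//	for attempt := int64(1); attempt <= 4; attempt++ {
//		fmt.Println(defaultDelayTimeBase << (attempt - 1)) // 1s, 2s, 4s, 8s
//	}
//	fmt.Println(isCallRetriable(429), isCallRetriable(502)) // true true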
+func isCallRetriable(statusCode int) bool { + return statusCode > http.StatusInternalServerError && + statusCode < http.StatusNetworkAuthenticationRequired || + statusCode == http.StatusTooManyRequests +} + +func (c *Client) calculateDelay(attempt int64) time.Duration { + if c.config.retryOptions.fixedDelay != 0 { + return c.config.retryOptions.fixedDelay + } + + if attempt == 0 { + return 0 + } + + if attempt > c.config.retryOptions.maxValidAttempt { + return c.config.retryOptions.maxBackoffDelay + } + + newDelay := c.config.retryOptions.delayBase << (attempt - 1) + if c.config.retryOptions.maxBackoffDelay != defaultMaxBackoffDelay && + newDelay > c.config.retryOptions.maxBackoffDelay { + return c.config.retryOptions.maxBackoffDelay + } + + return newDelay +} diff --git a/vendor/github.com/hashicorp/nomad/api/search.go b/vendor/github.com/hashicorp/nomad/api/search.go index a06ee1646..3983fe148 100644 --- a/vendor/github.com/hashicorp/nomad/api/search.go +++ b/vendor/github.com/hashicorp/nomad/api/search.go @@ -64,7 +64,8 @@ func (s *Search) FuzzySearch(text string, context contexts.Context, q *QueryOpti // ID. // // e.g. A Task-level service would have scope like, -// ["", "", "", ""] +// +// ["", "", "", ""] type FuzzyMatch struct { ID string // ID is UUID or Name of object Scope []string `json:",omitempty"` // IDs of parent objects diff --git a/vendor/github.com/hashicorp/nomad/api/services.go b/vendor/github.com/hashicorp/nomad/api/services.go index 95f027810..e8f25e19a 100644 --- a/vendor/github.com/hashicorp/nomad/api/services.go +++ b/vendor/github.com/hashicorp/nomad/api/services.go @@ -222,6 +222,7 @@ type ServiceCheck struct { TaskName string `mapstructure:"task" hcl:"task,optional"` SuccessBeforePassing int `mapstructure:"success_before_passing" hcl:"success_before_passing,optional"` FailuresBeforeCritical int `mapstructure:"failures_before_critical" hcl:"failures_before_critical,optional"` + FailuresBeforeWarning int `mapstructure:"failures_before_warning" hcl:"failures_before_warning,optional"` Body string `hcl:"body,optional"` OnUpdate string `mapstructure:"on_update" hcl:"on_update,optional"` } @@ -243,10 +244,14 @@ type Service struct { TaggedAddresses map[string]string `hcl:"tagged_addresses,block"` TaskName string `mapstructure:"task" hcl:"task,optional"` OnUpdate string `mapstructure:"on_update" hcl:"on_update,optional"` + Identity *WorkloadIdentity `hcl:"identity,block"` // Provider defines which backend system provides the service registration, // either "consul" (default) or "nomad". 
 	Provider string `hcl:"provider,optional"`
+
+	// Cluster is valid only for Nomad Enterprise with provider: consul
+	Cluster string `hcl:"cluster,optional"`
 }
 
 const (
@@ -284,6 +289,9 @@ func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
 	if s.Provider == "" {
 		s.Provider = ServiceProviderConsul
 	}
+	if s.Cluster == "" {
+		s.Cluster = "default"
+	}
 
 	if len(s.Meta) == 0 {
 		s.Meta = nil
@@ -313,6 +321,10 @@ func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
 			s.Checks[i].FailuresBeforeCritical = 0
 		}
 
+		if s.Checks[i].FailuresBeforeWarning < 0 {
+			s.Checks[i].FailuresBeforeWarning = 0
+		}
+
 		// Inherit from the Service
 		if s.Checks[i].OnUpdate == "" {
 			s.Checks[i].OnUpdate = s.OnUpdate
diff --git a/vendor/github.com/hashicorp/nomad/api/task_sched.go b/vendor/github.com/hashicorp/nomad/api/task_sched.go
new file mode 100644
index 000000000..42af2a9d9
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/api/task_sched.go
@@ -0,0 +1,14 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+type TaskSchedule struct {
+	Cron *TaskScheduleCron `hcl:"cron,block"`
+}
+
+type TaskScheduleCron struct {
+	Start    string `hcl:"start,optional"`
+	End      string `hcl:"end,optional"`
+	Timezone string `hcl:"timezone,optional"`
+}
diff --git a/vendor/github.com/hashicorp/nomad/api/tasks.go b/vendor/github.com/hashicorp/nomad/api/tasks.go
index 188fa8649..cc9d14331 100644
--- a/vendor/github.com/hashicorp/nomad/api/tasks.go
+++ b/vendor/github.com/hashicorp/nomad/api/tasks.go
@@ -11,6 +11,8 @@ import (
 	"time"
 )
 
+type ReconcileOption = string
+
 const (
 	// RestartPolicyModeDelay causes an artificial delay till the next interval is
 	// reached when the specified attempts have been reached in the interval.
@@ -19,6 +21,14 @@ const (
 	// RestartPolicyModeFail causes a job to fail if the specified number of
 	// attempts are reached within an interval.
 	RestartPolicyModeFail = "fail"
+
+	// ReconcileOption is used to specify the behavior of the reconciliation process
+	// between the original allocations and the replacements when a previously
+	// disconnected client comes back online.
+	ReconcileOptionKeepOriginal    = "keep_original"
+	ReconcileOptionKeepReplacement = "keep_replacement"
+	ReconcileOptionBestScore       = "best_score"
+	ReconcileOptionLongestRunning  = "longest_running"
 )
 
 // MemoryStats holds memory usage related stats
@@ -88,10 +98,11 @@ type AllocCheckStatuses map[string]AllocCheckStatus
 // RestartPolicy defines how the Nomad client restarts
 // tasks in a taskgroup when they fail
 type RestartPolicy struct {
-	Interval *time.Duration `hcl:"interval,optional"`
-	Attempts *int           `hcl:"attempts,optional"`
-	Delay    *time.Duration `hcl:"delay,optional"`
-	Mode     *string        `hcl:"mode,optional"`
+	Interval        *time.Duration `hcl:"interval,optional"`
+	Attempts        *int           `hcl:"attempts,optional"`
+	Delay           *time.Duration `hcl:"delay,optional"`
+	Mode            *string        `hcl:"mode,optional"`
+	RenderTemplates *bool          `mapstructure:"render_templates" hcl:"render_templates,optional"`
 }
 
 func (r *RestartPolicy) Merge(rp *RestartPolicy) {
@@ -107,6 +118,40 @@ func (r *RestartPolicy) Merge(rp *RestartPolicy) {
 	if rp.Mode != nil {
 		r.Mode = rp.Mode
 	}
+	if rp.RenderTemplates != nil {
+		r.RenderTemplates = rp.RenderTemplates
+	}
+}
+
+// DisconnectStrategy defines how both clients and servers should behave in
+// case of disconnection between them.
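// A sketch of the block this comment introduces (values are illustrative,
// not part of this diff):
//
//	ds := &DisconnectStrategy{
//		LostAfter:         pointerOf(10 * time.Minute),
//		StopOnClientAfter: pointerOf(5 * time.Minute),
//		Replace:           pointerOf(true),
//		Reconcile:         pointerOf(ReconcileOptionLongestRunning),
//	}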
+type DisconnectStrategy struct {
+	// LostAfter defines for how long the server will consider the unresponsive
+	// node disconnected but alive instead of lost.
+	LostAfter *time.Duration `mapstructure:"lost_after" hcl:"lost_after,optional"`
+
+	// StopOnClientAfter defines for how long a disconnected client will keep
+	// its allocations running.
+	StopOnClientAfter *time.Duration `mapstructure:"stop_on_client_after" hcl:"stop_on_client_after,optional"`
+
+	// Replace defines whether the allocations should be replaced while the
+	// node is considered disconnected.
+	Replace *bool `mapstructure:"replace" hcl:"replace,optional"`
+
+	// Reconcile defines which instances to keep once the disconnected node
+	// starts reporting again: the original allocations, the replacement, the one
+	// running on the node with the best score as it is currently implemented,
+	// or the allocation that has been running continuously the longest.
+	Reconcile *ReconcileOption `mapstructure:"reconcile" hcl:"reconcile,optional"`
+}
+
+func (ds *DisconnectStrategy) Canonicalize() {
+	if ds.Replace == nil {
+		ds.Replace = pointerOf(true)
+	}
+
+	if ds.Reconcile == nil {
+		ds.Reconcile = pointerOf(ReconcileOptionBestScore)
+	}
+}
 
 // Reschedule configures how Tasks are rescheduled when they crash or fail.
@@ -191,7 +236,7 @@ func NewAffinity(lTarget string, operand string, rTarget string, weight int8) *A
 		LTarget: lTarget,
 		RTarget: rTarget,
 		Operand: operand,
-		Weight:  pointerOf(int8(weight)),
+		Weight:  pointerOf(weight),
 	}
 }
 
@@ -201,6 +246,14 @@ func (a *Affinity) Canonicalize() {
 	}
 }
 
+func NewDefaultDisconnectStrategy() *DisconnectStrategy {
+	return &DisconnectStrategy{
+		LostAfter: pointerOf(0 * time.Minute),
+		Replace:   pointerOf(true),
+		Reconcile: pointerOf(ReconcileOptionBestScore),
+	}
+}
+
 func NewDefaultReschedulePolicy(jobType string) *ReschedulePolicy {
 	var dp *ReschedulePolicy
 	switch jobType {
@@ -297,7 +350,7 @@ func NewSpreadTarget(value string, percent uint8) *SpreadTarget {
 func NewSpread(attribute string, weight int8, spreadTargets []*SpreadTarget) *Spread {
 	return &Spread{
 		Attribute:    attribute,
-		Weight:       pointerOf(int8(weight)),
+		Weight:       pointerOf(weight),
 		SpreadTarget: spreadTargets,
 	}
 }
@@ -422,39 +475,50 @@ type VolumeMount struct {
 	Destination     *string `hcl:"destination,optional"`
 	ReadOnly        *bool   `mapstructure:"read_only" hcl:"read_only,optional"`
 	PropagationMode *string `mapstructure:"propagation_mode" hcl:"propagation_mode,optional"`
+	SELinuxLabel    *string `mapstructure:"selinux_label" hcl:"selinux_label,optional"`
 }
 
 func (vm *VolumeMount) Canonicalize() {
 	if vm.PropagationMode == nil {
 		vm.PropagationMode = pointerOf(VolumeMountPropagationPrivate)
 	}
+
 	if vm.ReadOnly == nil {
 		vm.ReadOnly = pointerOf(false)
 	}
+
+	if vm.SELinuxLabel == nil {
+		vm.SELinuxLabel = pointerOf("")
+	}
 }
 
 // TaskGroup is the unit of scheduling.
 type TaskGroup struct {
-	Name                      *string                   `hcl:"name,label"`
-	Count                     *int                      `hcl:"count,optional"`
-	Constraints               []*Constraint             `hcl:"constraint,block"`
-	Affinities                []*Affinity               `hcl:"affinity,block"`
-	Tasks                     []*Task                   `hcl:"task,block"`
-	Spreads                   []*Spread                 `hcl:"spread,block"`
-	Volumes                   map[string]*VolumeRequest `hcl:"volume,block"`
-	RestartPolicy             *RestartPolicy            `hcl:"restart,block"`
-	ReschedulePolicy          *ReschedulePolicy         `hcl:"reschedule,block"`
-	EphemeralDisk             *EphemeralDisk            `hcl:"ephemeral_disk,block"`
-	Update                    *UpdateStrategy           `hcl:"update,block"`
-	Migrate                   *MigrateStrategy          `hcl:"migrate,block"`
-	Networks                  []*NetworkResource        `hcl:"network,block"`
-	Meta                      map[string]string         `hcl:"meta,block"`
-	Services                  []*Service                `hcl:"service,block"`
-	ShutdownDelay             *time.Duration            `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
-	StopAfterClientDisconnect *time.Duration            `mapstructure:"stop_after_client_disconnect" hcl:"stop_after_client_disconnect,optional"`
-	MaxClientDisconnect       *time.Duration            `mapstructure:"max_client_disconnect" hcl:"max_client_disconnect,optional"`
-	Scaling                   *ScalingPolicy            `hcl:"scaling,block"`
-	Consul                    *Consul                   `hcl:"consul,block"`
+	Name             *string                   `hcl:"name,label"`
+	Count            *int                      `hcl:"count,optional"`
+	Constraints      []*Constraint             `hcl:"constraint,block"`
+	Affinities       []*Affinity               `hcl:"affinity,block"`
+	Tasks            []*Task                   `hcl:"task,block"`
+	Spreads          []*Spread                 `hcl:"spread,block"`
+	Volumes          map[string]*VolumeRequest `hcl:"volume,block"`
+	RestartPolicy    *RestartPolicy            `hcl:"restart,block"`
+	Disconnect       *DisconnectStrategy       `hcl:"disconnect,block"`
+	ReschedulePolicy *ReschedulePolicy         `hcl:"reschedule,block"`
+	EphemeralDisk    *EphemeralDisk            `hcl:"ephemeral_disk,block"`
+	Update           *UpdateStrategy           `hcl:"update,block"`
+	Migrate          *MigrateStrategy          `hcl:"migrate,block"`
+	Networks         []*NetworkResource        `hcl:"network,block"`
+	Meta             map[string]string         `hcl:"meta,block"`
+	Services         []*Service                `hcl:"service,block"`
+	ShutdownDelay    *time.Duration            `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
+	// Deprecated: StopAfterClientDisconnect is deprecated in Nomad 1.8. Use Disconnect.StopOnClientAfter instead.
+	StopAfterClientDisconnect *time.Duration `mapstructure:"stop_after_client_disconnect" hcl:"stop_after_client_disconnect,optional"`
+	// To be deprecated after 1.8.0 in favour of Disconnect.LostAfter
+	MaxClientDisconnect *time.Duration `mapstructure:"max_client_disconnect" hcl:"max_client_disconnect,optional"`
+	Scaling *ScalingPolicy `hcl:"scaling,block"`
+	Consul  *Consul        `hcl:"consul,block"`
+	// To be deprecated after 1.8.0 in favour of Disconnect.Replace
+	PreventRescheduleOnLost *bool `hcl:"prevent_reschedule_on_lost,optional"`
 }
 
 // NewTaskGroup creates a new TaskGroup.
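// A migration sketch implied by the deprecation notes above (names and
// durations are illustrative): the flat timeout fields move into the new
// disconnect block.
//
//	tg := &TaskGroup{
//		Name: pointerOf("web"),
//		Disconnect: &DisconnectStrategy{
//			LostAfter:         pointerOf(10 * time.Minute), // was MaxClientDisconnect
//			StopOnClientAfter: pointerOf(5 * time.Minute),  // was StopAfterClientDisconnect
//		},
//	}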
@@ -526,6 +590,7 @@ func (g *TaskGroup) Canonicalize(job *Job) {
 	if g.ReschedulePolicy != nil {
 		g.ReschedulePolicy.Canonicalize(*job.Type)
 	}
+
 	// Merge the migrate strategy from the job
 	if jm, tm := job.Migrate != nil, g.Migrate != nil; jm && tm {
 		jobMigrate := job.Migrate.Copy()
@@ -574,16 +639,24 @@ func (g *TaskGroup) Canonicalize(job *Job) {
 		s.Canonicalize(nil, g, job)
 	}
 
+	if g.PreventRescheduleOnLost == nil {
+		g.PreventRescheduleOnLost = pointerOf(false)
+	}
+
+	if g.Disconnect != nil {
+		g.Disconnect.Canonicalize()
+	}
 }
 
 // These need to be in sync with DefaultServiceJobRestartPolicy
 // in nomad/structs/structs.go
 func defaultServiceJobRestartPolicy() *RestartPolicy {
 	return &RestartPolicy{
-		Delay:    pointerOf(15 * time.Second),
-		Attempts: pointerOf(2),
-		Interval: pointerOf(30 * time.Minute),
-		Mode:     pointerOf(RestartPolicyModeFail),
+		Delay:           pointerOf(15 * time.Second),
+		Attempts:        pointerOf(2),
+		Interval:        pointerOf(30 * time.Minute),
+		Mode:            pointerOf(RestartPolicyModeFail),
+		RenderTemplates: pointerOf(false),
 	}
 }
 
@@ -591,10 +664,11 @@ func defaultServiceJobRestartPolicy() *RestartPolicy {
 // in nomad/structs/structs.go
 func defaultBatchJobRestartPolicy() *RestartPolicy {
 	return &RestartPolicy{
-		Delay:    pointerOf(15 * time.Second),
-		Attempts: pointerOf(3),
-		Interval: pointerOf(24 * time.Hour),
-		Mode:     pointerOf(RestartPolicyModeFail),
+		Delay:           pointerOf(15 * time.Second),
+		Attempts:        pointerOf(3),
+		Interval:        pointerOf(24 * time.Hour),
+		Mode:            pointerOf(RestartPolicyModeFail),
+		RenderTemplates: pointerOf(false),
 	}
 }
 
@@ -708,6 +782,7 @@ type Task struct {
 	LogConfig       *LogConfig             `mapstructure:"logs" hcl:"logs,block"`
 	Artifacts       []*TaskArtifact        `hcl:"artifact,block"`
 	Vault           *Vault                 `hcl:"vault,block"`
+	Consul          *Consul                `hcl:"consul,block"`
 	Templates       []*Template            `hcl:"template,block"`
 	DispatchPayload *DispatchPayloadConfig `hcl:"dispatch_payload,block"`
 	VolumeMounts    []*VolumeMount         `hcl:"volume_mount,block"`
@@ -717,7 +792,17 @@ type Task struct {
 	KillSignal      string                 `mapstructure:"kill_signal" hcl:"kill_signal,optional"`
 	Kind            string                 `hcl:"kind,optional"`
 	ScalingPolicies []*ScalingPolicy       `hcl:"scaling,block"`
-	Identity        *WorkloadIdentity      `hcl:"identity,block"`
+
+	// Identity is the default Nomad Workload Identity and will be added to
+	// Identities with the name "default".
+	Identity *WorkloadIdentity
+
+	// Workload Identities
+	Identities []*WorkloadIdentity `hcl:"identity,block"`
+
+	Actions []*Action `hcl:"action,block"`
+
+	Schedule *TaskSchedule `hcl:"schedule,block"`
 }
 
 func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {
@@ -740,6 +825,9 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {
 	if t.Vault != nil {
 		t.Vault.Canonicalize()
 	}
+	if t.Consul != nil {
+		t.Consul.Canonicalize()
+	}
 	for _, tmpl := range t.Templates {
 		tmpl.Canonicalize()
 	}
@@ -770,17 +858,21 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {
 
 // TaskArtifact is used to download artifacts before running a task.
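// A sketch of the new task-level blocks above (task/action names and the
// audience are assumptions): multiple named identities plus a pre-defined
// action that can later be run via Jobs.ActionExec.
//
//	t := &Task{
//		Name: "db",
//		Identities: []*WorkloadIdentity{
//			{Name: "vault_default", Audience: []string{"vault.io"}},
//		},
//		Actions: []*Action{
//			{Name: "flush", Command: "/bin/sh", Args: []string{"-c", "flush --all"}},
//		},
//	}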
type TaskArtifact struct { - GetterSource *string `mapstructure:"source" hcl:"source,optional"` - GetterOptions map[string]string `mapstructure:"options" hcl:"options,block"` - GetterHeaders map[string]string `mapstructure:"headers" hcl:"headers,block"` - GetterMode *string `mapstructure:"mode" hcl:"mode,optional"` - RelativeDest *string `mapstructure:"destination" hcl:"destination,optional"` + GetterSource *string `mapstructure:"source" hcl:"source,optional"` + GetterOptions map[string]string `mapstructure:"options" hcl:"options,block"` + GetterHeaders map[string]string `mapstructure:"headers" hcl:"headers,block"` + GetterMode *string `mapstructure:"mode" hcl:"mode,optional"` + GetterInsecure *bool `mapstructure:"insecure" hcl:"insecure,optional"` + RelativeDest *string `mapstructure:"destination" hcl:"destination,optional"` } func (a *TaskArtifact) Canonicalize() { if a.GetterMode == nil { a.GetterMode = pointerOf("any") } + if a.GetterInsecure == nil { + a.GetterInsecure = pointerOf(false) + } if a.GetterSource == nil { // Shouldn't be possible, but we don't want to panic a.GetterSource = pointerOf("") @@ -916,12 +1008,15 @@ func (tmpl *Template) Canonicalize() { } type Vault struct { - Policies []string `hcl:"policies,optional"` - Namespace *string `mapstructure:"namespace" hcl:"namespace,optional"` - Env *bool `hcl:"env,optional"` - DisableFile *bool `mapstructure:"disable_file" hcl:"disable_file,optional"` - ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"` - ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"` + Policies []string `hcl:"policies,optional"` + Role string `hcl:"role,optional"` + Namespace *string `mapstructure:"namespace" hcl:"namespace,optional"` + Cluster string `hcl:"cluster,optional"` + Env *bool `hcl:"env,optional"` + DisableFile *bool `mapstructure:"disable_file" hcl:"disable_file,optional"` + ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"` + ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"` + AllowTokenExpiration *bool `mapstructure:"allow_token_expiration" hcl:"allow_token_expiration,optional"` } func (v *Vault) Canonicalize() { @@ -934,12 +1029,18 @@ func (v *Vault) Canonicalize() { if v.Namespace == nil { v.Namespace = pointerOf("") } + if v.Cluster == "" { + v.Cluster = "default" + } if v.ChangeMode == nil { v.ChangeMode = pointerOf("restart") } if v.ChangeSignal == nil { v.ChangeSignal = pointerOf("SIGHUP") } + if v.AllowTokenExpiration == nil { + v.AllowTokenExpiration = pointerOf(false) + } } // NewTask creates and initializes a new Task. @@ -1139,6 +1240,18 @@ func (t *TaskCSIPluginConfig) Canonicalize() { // WorkloadIdentity is the jobspec block which determines if and how a workload // identity is exposed to tasks. 
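// A sketch of the expanded vault block above (role and cluster values are
// assumptions):
//
//	v := &Vault{
//		Role:    "nomad-workloads",
//		Cluster: "default",
//	}
//	v.Canonicalize() // fills ChangeMode ("restart"), ChangeSignal ("SIGHUP"),
//	                 // and AllowTokenExpiration (false)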
type WorkloadIdentity struct { - Env bool `hcl:"env,optional"` - File bool `hcl:"file,optional"` + Name string `hcl:"name,optional"` + Audience []string `mapstructure:"aud" hcl:"aud,optional"` + ChangeMode string `mapstructure:"change_mode" hcl:"change_mode,optional"` + ChangeSignal string `mapstructure:"change_signal" hcl:"change_signal,optional"` + Env bool `hcl:"env,optional"` + File bool `hcl:"file,optional"` + ServiceName string `hcl:"service_name,optional"` + TTL time.Duration `mapstructure:"ttl" hcl:"ttl,optional"` +} + +type Action struct { + Name string `hcl:"name,label"` + Command string `mapstructure:"command" hcl:"command"` + Args []string `mapstructure:"args" hcl:"args,optional"` } diff --git a/vendor/github.com/hashicorp/nomad/api/utils.go b/vendor/github.com/hashicorp/nomad/api/utils.go index a1cad14cc..be48c98ff 100644 --- a/vendor/github.com/hashicorp/nomad/api/utils.go +++ b/vendor/github.com/hashicorp/nomad/api/utils.go @@ -33,3 +33,12 @@ func formatFloat(f float64, maxPrec int) string { func pointerOf[A any](a A) *A { return &a } + +// pointerCopy returns a new pointer to a. +func pointerCopy[A any](a *A) *A { + if a == nil { + return nil + } + na := *a + return &na +} diff --git a/vendor/github.com/hashicorp/nomad/api/variables.go b/vendor/github.com/hashicorp/nomad/api/variables.go index 86458c13a..0c35d30d2 100644 --- a/vendor/github.com/hashicorp/nomad/api/variables.go +++ b/vendor/github.com/hashicorp/nomad/api/variables.go @@ -183,6 +183,44 @@ func (vars *Variables) GetVariableItems(path string, qo *QueryOptions) (Variable return v.Items, qm, nil } +// RenewLock renews the lease for the lock on the given variable. It has to be called +// before the lock's TTL expires or the lock will be automatically released after the +// delay period. +func (vars *Variables) RenewLock(v *Variable, qo *WriteOptions) (*VariableMetadata, *WriteMeta, error) { + v.Path = cleanPathString(v.Path) + var out VariableMetadata + + wm, err := vars.client.put("/v1/var/"+v.Path+"?lock-renew", v, &out, qo) + if err != nil { + return nil, wm, err + } + return &out, wm, nil +} + +// ReleaseLock removes the lock on the given variable. +func (vars *Variables) ReleaseLock(v *Variable, qo *WriteOptions) (*Variable, *WriteMeta, error) { + return vars.lockOperation(v, qo, "lock-release") +} + +// AcquireLock adds a lock on the given variable and starts a lease on it. In order +// to make any update on the locked variable, the lock ID has to be included in the +// request. In order to maintain ownership of the lock, the lease needs to be +// periodically renewed before the lock's TTL expires. +func (vars *Variables) AcquireLock(v *Variable, qo *WriteOptions) (*Variable, *WriteMeta, error) { + return vars.lockOperation(v, qo, "lock-acquire") +} + +func (vars *Variables) lockOperation(v *Variable, qo *WriteOptions, operation string) (*Variable, *WriteMeta, error) { + v.Path = cleanPathString(v.Path) + var out Variable + + wm, err := vars.client.put("/v1/var/"+v.Path+"?"+operation, v, &out, qo) + if err != nil { + return nil, wm, err + } + return &out, wm, nil +} + // readInternal exists because the API's higher-level read method requires // the status code to be 200 (OK). 
For Peek(), we do not consider 403 (Permission
// Denied) or 404 (Not Found) an error; this function just returns a nil in those
@@ -197,8 +235,8 @@ func (vars *Variables) readInternal(endpoint string, out **Variable, q *QueryOpt
 	}
 	r.setQueryOptions(q)
 
-	checkFn := requireStatusIn(http.StatusOK, http.StatusNotFound, http.StatusForbidden)
-	rtt, resp, err := checkFn(vars.client.doRequest(r))
+	checkFn := requireStatusIn(http.StatusOK, http.StatusNotFound, http.StatusForbidden) //nolint:bodyclose
+	rtt, resp, err := checkFn(vars.client.doRequest(r)) //nolint:bodyclose
 	if err != nil {
 		return nil, err
 	}
@@ -246,12 +284,12 @@ func (vars *Variables) deleteInternal(path string, q *WriteOptions) (*WriteMeta,
 	}
 	r.setWriteOptions(q)
 
-	checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent)
-	rtt, resp, err := checkFn(vars.client.doRequest(r))
-
+	checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent) //nolint:bodyclose
+	rtt, resp, err := checkFn(vars.client.doRequest(r)) //nolint:bodyclose
 	if err != nil {
 		return nil, err
 	}
+	defer resp.Body.Close()
 
 	wm := &WriteMeta{RequestTime: rtt}
 	_ = parseWriteMeta(resp, wm)
@@ -267,11 +305,12 @@ func (vars *Variables) deleteChecked(path string, checkIndex uint64, q *WriteOpt
 		return nil, err
 	}
 	r.setWriteOptions(q)
-	checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent, http.StatusConflict)
-	rtt, resp, err := checkFn(vars.client.doRequest(r))
+	checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent, http.StatusConflict) //nolint:bodyclose
+	rtt, resp, err := checkFn(vars.client.doRequest(r)) //nolint:bodyclose
 	if err != nil {
 		return nil, err
 	}
+	defer resp.Body.Close()
 
 	wm := &WriteMeta{RequestTime: rtt}
 	_ = parseWriteMeta(resp, wm)
@@ -303,8 +342,8 @@ func (vars *Variables) writeChecked(endpoint string, in *Variable, out *Variable
 	r.setWriteOptions(q)
 	r.obj = in
 
-	checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent, http.StatusConflict)
-	rtt, resp, err := checkFn(vars.client.doRequest(r))
+	checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent, http.StatusConflict) //nolint:bodyclose
+	rtt, resp, err := checkFn(vars.client.doRequest(r)) //nolint:bodyclose
 	if err != nil {
 		return nil, err
 
@@ -358,6 +397,9 @@ type Variable struct {
 
 	// Items contains the k/v variable component
 	Items VariableItems `hcl:"items"`
+
+	// Lock holds the information about the variable lock if it's being used.
+	Lock *VariableLock `hcl:",lock,optional" json:",omitempty"`
 }
 
 // VariableMetadata specifies the metadata for a variable and
@@ -380,6 +422,24 @@ type VariableMetadata struct {
 
 	// ModifyTime is the unix nano of the last modified time
 	ModifyTime int64 `hcl:"modify_time"`
+
+	// Lock holds the information about the variable lock if it's being used.
+	Lock *VariableLock `hcl:",lock,optional" json:",omitempty"`
+}
+
+type VariableLock struct {
+	// ID is generated by Nomad to provide a unique caller ID which can be used
+	// for renewals and unlocking.
+	ID string
+
+	// TTL describes the time-to-live of the current lock holder.
+	// This is a string version of a time.Duration like "2m".
+	TTL string
+
+	// LockDelay describes a grace period that exists after a lock is lost,
+	// before another client may acquire the lock. This helps protect against
+	// split-brains. This is a string version of a time.Duration like "2m".
+	LockDelay string
 }
 
 // VariableItems are the key/value pairs of a Variable.
@@ -446,6 +506,16 @@ func (v *Variable) AsPrettyJSON() string {
 	return string(b)
 }
 
+// LockID returns the ID of the lock.
In the event this is not held, or the +// variable is not a lock, this string will be empty. +func (v *Variable) LockID() string { + if v.Lock == nil { + return "" + } + + return v.Lock.ID +} + type ErrCASConflict struct { CheckIndex uint64 Conflict *Variable diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/action.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/action.go index 4d3821804..6fb78877b 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/action.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/action.go @@ -120,172 +120,6 @@ func (c *ActionClient) AllWithOpts(ctx context.Context, opts ActionListOpts) ([] return c.action.All(ctx, opts) } -// WatchOverallProgress watches several actions' progress until they complete -// with success or error. This watching happens in a goroutine and updates are -// provided through the two returned channels: -// -// - The first channel receives percentage updates of the progress, based on -// the number of completed versus total watched actions. The return value -// is an int between 0 and 100. -// - The second channel returned receives errors for actions that did not -// complete successfully, as well as any errors that happened while -// querying the API. -// -// By default, the method keeps watching until all actions have finished -// processing. If you want to be able to cancel the method or configure a -// timeout, use the [context.Context]. Once the method has stopped watching, -// both returned channels are closed. -// -// WatchOverallProgress uses the [WithPollBackoffFunc] of the [Client] to wait -// until sending the next request. -func (c *ActionClient) WatchOverallProgress(ctx context.Context, actions []*Action) (<-chan int, <-chan error) { - errCh := make(chan error, len(actions)) - progressCh := make(chan int) - - go func() { - defer close(errCh) - defer close(progressCh) - - completedIDs := make([]int64, 0, len(actions)) - watchIDs := make(map[int64]struct{}, len(actions)) - for _, action := range actions { - watchIDs[action.ID] = struct{}{} - } - - retries := 0 - previousProgress := 0 - - for { - select { - case <-ctx.Done(): - errCh <- ctx.Err() - return - case <-time.After(c.action.client.pollBackoffFunc(retries)): - retries++ - } - - opts := ActionListOpts{} - for watchID := range watchIDs { - opts.ID = append(opts.ID, watchID) - } - - as, err := c.AllWithOpts(ctx, opts) - if err != nil { - errCh <- err - return - } - if len(as) == 0 { - // No actions returned for the provided IDs, they do not exist in the API. - // We need to catch and fail early for this, otherwise the loop will continue - // indefinitely. 
- errCh <- fmt.Errorf("failed to wait for actions: remaining actions (%v) are not returned from API", opts.ID) - return - } - - progress := 0 - for _, a := range as { - switch a.Status { - case ActionStatusRunning: - progress += a.Progress - case ActionStatusSuccess: - delete(watchIDs, a.ID) - completedIDs = append(completedIDs, a.ID) - case ActionStatusError: - delete(watchIDs, a.ID) - completedIDs = append(completedIDs, a.ID) - errCh <- fmt.Errorf("action %d failed: %w", a.ID, a.Error()) - } - } - - progress += len(completedIDs) * 100 - if progress != 0 && progress != previousProgress { - sendProgress(progressCh, progress/len(actions)) - previousProgress = progress - } - - if len(watchIDs) == 0 { - return - } - } - }() - - return progressCh, errCh -} - -// WatchProgress watches one action's progress until it completes with success -// or error. This watching happens in a goroutine and updates are provided -// through the two returned channels: -// -// - The first channel receives percentage updates of the progress, based on -// the progress percentage indicated by the API. The return value is an int -// between 0 and 100. -// - The second channel receives any errors that happened while querying the -// API, as well as the error of the action if it did not complete -// successfully, or nil if it did. -// -// By default, the method keeps watching until the action has finished -// processing. If you want to be able to cancel the method or configure a -// timeout, use the [context.Context]. Once the method has stopped watching, -// both returned channels are closed. -// -// WatchProgress uses the [WithPollBackoffFunc] of the [Client] to wait until -// sending the next request. -func (c *ActionClient) WatchProgress(ctx context.Context, action *Action) (<-chan int, <-chan error) { - errCh := make(chan error, 1) - progressCh := make(chan int) - - go func() { - defer close(errCh) - defer close(progressCh) - - retries := 0 - - for { - select { - case <-ctx.Done(): - errCh <- ctx.Err() - return - case <-time.After(c.action.client.pollBackoffFunc(retries)): - retries++ - } - - a, _, err := c.GetByID(ctx, action.ID) - if err != nil { - errCh <- err - return - } - if a == nil { - errCh <- fmt.Errorf("failed to wait for action %d: action not returned from API", action.ID) - return - } - - switch a.Status { - case ActionStatusRunning: - sendProgress(progressCh, a.Progress) - case ActionStatusSuccess: - sendProgress(progressCh, 100) - errCh <- nil - return - case ActionStatusError: - errCh <- a.Error() - return - } - } - }() - - return progressCh, errCh -} - -// sendProgress allows the user to only read from the error channel and ignore any progress updates. -func sendProgress(progressCh chan int, p int) { - select { - case progressCh <- p: - break - default: - break - } -} - // ResourceActionClient is a client for the actions API exposed by the resource. 
type ResourceActionClient struct { resource string diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/action_waiter.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/action_waiter.go new file mode 100644 index 000000000..ebfe8ef4e --- /dev/null +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/action_waiter.go @@ -0,0 +1,116 @@ +package hcloud + +import ( + "context" + "fmt" + "maps" + "slices" + "time" +) + +type ActionWaiter interface { + WaitForFunc(ctx context.Context, handleUpdate func(update *Action) error, actions ...*Action) error + WaitFor(ctx context.Context, actions ...*Action) error +} + +var _ ActionWaiter = (*ActionClient)(nil) + +// WaitForFunc waits until all actions are completed by polling the API at the interval +// defined by [WithPollBackoffFunc]. An action is considered as complete when its status is +// either [ActionStatusSuccess] or [ActionStatusError]. +// +// The handleUpdate callback is called every time an action is updated. +func (c *ActionClient) WaitForFunc(ctx context.Context, handleUpdate func(update *Action) error, actions ...*Action) error { + running := make(map[int64]struct{}, len(actions)) + for _, action := range actions { + if action.Status == ActionStatusRunning { + running[action.ID] = struct{}{} + } else if handleUpdate != nil { + // We filter out already completed actions from the API polling loop; while + // this isn't a real update, the caller should be notified about the new + // state. + if err := handleUpdate(action); err != nil { + return err + } + } + } + + retries := 0 + for { + if len(running) == 0 { + break + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(c.action.client.pollBackoffFunc(retries)): + retries++ + } + + opts := ActionListOpts{ + Sort: []string{"status", "id"}, + ID: make([]int64, 0, len(running)), + } + for actionID := range running { + opts.ID = append(opts.ID, actionID) + } + slices.Sort(opts.ID) + + updates, err := c.AllWithOpts(ctx, opts) + if err != nil { + return err + } + + if len(updates) != len(running) { + // Some actions may not exist in the API, also fail early to prevent an + // infinite loop when updates == 0. + + notFound := maps.Clone(running) + for _, update := range updates { + delete(notFound, update.ID) + } + notFoundIDs := make([]int64, 0, len(notFound)) + for unknownID := range notFound { + notFoundIDs = append(notFoundIDs, unknownID) + } + + return fmt.Errorf("actions not found: %v", notFoundIDs) + } + + for _, update := range updates { + if update.Status != ActionStatusRunning { + delete(running, update.ID) + } + + if handleUpdate != nil { + if err := handleUpdate(update); err != nil { + return err + } + } + } + } + + return nil +} + +// WaitFor waits until all actions succeed by polling the API at the interval defined by +// [WithPollBackoffFunc]. An action is considered as succeeded when its status is either +// [ActionStatusSuccess]. +// +// If a single action fails, the function will stop waiting and the error set in the +// action will be returned as an [ActionError]. +// +// For more flexibility, see the [ActionClient.WaitForFunc] function. 
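Before the WaitFor implementation below, a brief sketch of how the two new waiters are meant to be driven. Assumptions not in the diff: a configured hcloud.Client, an HCLOUD_TOKEN environment variable, and a placeholder server ID.

	package main

	import (
		"context"
		"log"
		"os"

		"github.com/hetznercloud/hcloud-go/v2/hcloud"
	)

	func main() {
		ctx := context.Background()
		client := hcloud.NewClient(hcloud.WithToken(os.Getenv("HCLOUD_TOKEN")))

		server, _, err := client.Server.GetByID(ctx, 12345) // placeholder ID
		if err != nil || server == nil {
			log.Fatal("server lookup failed")
		}

		action, _, err := client.Server.Poweron(ctx, server)
		if err != nil {
			log.Fatal(err)
		}

		// Block until the action completes; a failed action comes back
		// as its error.
		if err := client.Action.WaitFor(ctx, action); err != nil {
			log.Fatal(err)
		}

		// WaitForFunc hands every polled update to the callback,
		// e.g. for progress reporting.
		err = client.Action.WaitForFunc(ctx, func(update *hcloud.Action) error {
			log.Printf("action %d: %s (%d%%)", update.ID, update.Status, update.Progress)
			return nil
		}, action)
		if err != nil {
			log.Fatal(err)
		}
	}
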
+func (c *ActionClient) WaitFor(ctx context.Context, actions ...*Action) error { + return c.WaitForFunc( + ctx, + func(update *Action) error { + if update.Status == ActionStatusError { + return update.Error() + } + return nil + }, + actions..., + ) +} diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/action_watch.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/action_watch.go new file mode 100644 index 000000000..db3464f11 --- /dev/null +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/action_watch.go @@ -0,0 +1,131 @@ +package hcloud + +import ( + "context" + "fmt" +) + +// WatchOverallProgress watches several actions' progress until they complete +// with success or error. This watching happens in a goroutine and updates are +// provided through the two returned channels: +// +// - The first channel receives percentage updates of the progress, based on +// the number of completed versus total watched actions. The return value +// is an int between 0 and 100. +// - The second channel returned receives errors for actions that did not +// complete successfully, as well as any errors that happened while +// querying the API. +// +// By default, the method keeps watching until all actions have finished +// processing. If you want to be able to cancel the method or configure a +// timeout, use the [context.Context]. Once the method has stopped watching, +// both returned channels are closed. +// +// WatchOverallProgress uses the [WithPollBackoffFunc] of the [Client] to wait +// until sending the next request. +// +// Deprecated: WatchOverallProgress is deprecated, use [WaitForFunc] instead. +func (c *ActionClient) WatchOverallProgress(ctx context.Context, actions []*Action) (<-chan int, <-chan error) { + errCh := make(chan error, len(actions)) + progressCh := make(chan int) + + go func() { + defer close(errCh) + defer close(progressCh) + + previousGlobalProgress := 0 + progressByAction := make(map[int64]int, len(actions)) + err := c.WaitForFunc(ctx, func(update *Action) error { + switch update.Status { + case ActionStatusRunning: + progressByAction[update.ID] = update.Progress + case ActionStatusSuccess: + progressByAction[update.ID] = 100 + case ActionStatusError: + progressByAction[update.ID] = 100 + errCh <- fmt.Errorf("action %d failed: %w", update.ID, update.Error()) + } + + // Compute global progress + progressSum := 0 + for _, value := range progressByAction { + progressSum += value + } + globalProgress := progressSum / len(actions) + + // Only send progress when it changed + if globalProgress != 0 && globalProgress != previousGlobalProgress { + sendProgress(progressCh, globalProgress) + previousGlobalProgress = globalProgress + } + + return nil + }, actions...) + + if err != nil { + errCh <- err + } + }() + + return progressCh, errCh +} + +// WatchProgress watches one action's progress until it completes with success +// or error. This watching happens in a goroutine and updates are provided +// through the two returned channels: +// +// - The first channel receives percentage updates of the progress, based on +// the progress percentage indicated by the API. The return value is an int +// between 0 and 100. +// - The second channel receives any errors that happened while querying the +// API, as well as the error of the action if it did not complete +// successfully, or nil if it did. +// +// By default, the method keeps watching until the action has finished +// processing. 
If you want to be able to cancel the method or configure a +// timeout, use the [context.Context]. Once the method has stopped watching, +// both returned channels are closed. +// +// WatchProgress uses the [WithPollBackoffFunc] of the [Client] to wait until +// sending the next request. +// +// Deprecated: WatchProgress is deprecated, use [WaitForFunc] instead. +func (c *ActionClient) WatchProgress(ctx context.Context, action *Action) (<-chan int, <-chan error) { + errCh := make(chan error, 1) + progressCh := make(chan int) + + go func() { + defer close(errCh) + defer close(progressCh) + + err := c.WaitForFunc(ctx, func(update *Action) error { + switch update.Status { + case ActionStatusRunning: + sendProgress(progressCh, update.Progress) + case ActionStatusSuccess: + sendProgress(progressCh, 100) + case ActionStatusError: + // Do not wrap the action error + return update.Error() + } + + return nil + }, action) + + if err != nil { + errCh <- err + } + }() + + return progressCh, errCh +} + +// sendProgress allows the user to only read from the error channel and ignore any progress updates. +func sendProgress(progressCh chan int, p int) { + select { + case progressCh <- p: + break + default: + break + } +} diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/client.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/client.go index 917bb513a..403f35208 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/client.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/client.go @@ -236,6 +236,8 @@ func (c *Client) NewRequest(ctx context.Context, method, path string, body io.Re } // Do performs an HTTP request against the API. +// v can be nil, an io.Writer to write the response body to or a pointer to +// a struct to json.Unmarshal the response to. func (c *Client) Do(r *http.Request, v interface{}) (*Response, error) { var retries int var body []byte @@ -377,6 +379,10 @@ func errorFromResponse(resp *Response, body []byte) error { return hcErr } +const ( + headerCorrelationID = "X-Correlation-Id" +) + // Response represents a response from the API. It embeds http.Response. type Response struct { *http.Response @@ -410,6 +416,12 @@ func (r *Response) readMeta(body []byte) error { return nil } +// internalCorrelationID returns the unique ID of the request as set by the API. This ID can help with support requests, +// as it allows the people working on identify this request in particular. +func (r *Response) internalCorrelationID() string { + return r.Header.Get(headerCorrelationID) +} + // Meta represents meta information included in an API response. type Meta struct { Pagination *Pagination diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/error.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/error.go index 653043e6d..371a92e31 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/error.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/error.go @@ -1,6 +1,7 @@ package hcloud import ( + "errors" "fmt" "net" ) @@ -99,6 +100,13 @@ type Error struct { } func (e Error) Error() string { + if resp := e.Response(); resp != nil { + correlationID := resp.internalCorrelationID() + if correlationID != "" { + // For easier debugging, the error string contains the Correlation ID of the response. 
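Two error-handling changes land together here: Error() now formats as "%s (%s, %s)" with message, code, and correlation ID, and IsError (below) switches from a type assertion to errors.As, so wrapped errors match as well. A short sketch, reusing the client and ctx from the earlier example; the server ID and wrapping message are illustrative:

	_, _, err := client.Server.DeleteWithResult(ctx, &hcloud.Server{ID: 99999}) // placeholder ID
	if err != nil {
		wrapped := fmt.Errorf("deleting server: %w", err)
		// errors.As lets IsError see through the wrapping; the old
		// type assertion would have missed this.
		if hcloud.IsError(wrapped, hcloud.ErrorCodeNotFound) {
			log.Println("server was already gone")
		} else {
			log.Println(err) // message now ends in "(<code>, <correlation id>)"
		}
	}
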
+ return fmt.Sprintf("%s (%s, %s)", e.Message, e.Code, correlationID) + } + } return fmt.Sprintf("%s (%s)", e.Message, e.Code) } @@ -120,7 +128,8 @@ type ErrorDetailsInvalidInputField struct { // IsError returns whether err is an API error with the given error code. func IsError(err error, code ErrorCode) bool { - apiErr, ok := err.(Error) + var apiErr Error + ok := errors.As(err, &apiErr) return ok && apiErr.Code == code } diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/firewall.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/firewall.go index 512c32ffe..a22191353 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/firewall.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/firewall.go @@ -299,6 +299,7 @@ type FirewallSetRulesOpts struct { // SetRules sets the rules of a Firewall. func (c *FirewallClient) SetRules(ctx context.Context, firewall *Firewall, opts FirewallSetRulesOpts) ([]*Action, *Response, error) { reqBody := firewallSetRulesOptsToSchema(opts) + reqBodyData, err := json.Marshal(reqBody) if err != nil { return nil, nil, err diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/hcloud.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/hcloud.go index fe8550bf7..2c6745c2e 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/hcloud.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/hcloud.go @@ -2,4 +2,4 @@ package hcloud // Version is the library's version following Semantic Versioning. -const Version = "2.6.0" // x-release-please-version +const Version = "2.8.0" // x-release-please-version diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/interface_gen.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/interface_gen.go index d367899dd..2ae8cecb4 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/interface_gen.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/interface_gen.go @@ -1,6 +1,6 @@ package hcloud -//go:generate go run github.com/vburenin/ifacemaker -f action.go -s ActionClient -i IActionClient -p hcloud -o zz_action_client_iface.go +//go:generate go run github.com/vburenin/ifacemaker -f action.go -f action_watch.go -f action_waiter.go -s ActionClient -i IActionClient -p hcloud -o zz_action_client_iface.go //go:generate go run github.com/vburenin/ifacemaker -f action.go -s ResourceActionClient -i IResourceActionClient -p hcloud -o zz_resource_action_client_iface.go //go:generate go run github.com/vburenin/ifacemaker -f datacenter.go -s DatacenterClient -i IDatacenterClient -p hcloud -o zz_datacenter_client_iface.go //go:generate go run github.com/vburenin/ifacemaker -f floating_ip.go -s FloatingIPClient -i IFloatingIPClient -p hcloud -o zz_floating_ip_client_iface.go diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/firewall.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/firewall.go index 371e648f1..d393f1e0e 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/firewall.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/firewall.go @@ -12,8 +12,18 @@ type Firewall 
struct { AppliedTo []FirewallResource `json:"applied_to"` } -// FirewallRule defines the schema of a Firewall rule. +// FirewallRule defines the schema of a Firewall rule in responses. type FirewallRule struct { + Direction string `json:"direction"` + SourceIPs []string `json:"source_ips"` + DestinationIPs []string `json:"destination_ips"` + Protocol string `json:"protocol"` + Port *string `json:"port"` + Description *string `json:"description"` +} + +// FirewallRuleRequest defines the schema of a Firewall rule in requests. +type FirewallRuleRequest struct { Direction string `json:"direction"` SourceIPs []string `json:"source_ips,omitempty"` DestinationIPs []string `json:"destination_ips,omitempty"` @@ -34,10 +44,10 @@ type FirewallGetResponse struct { // FirewallCreateRequest defines the schema of the request to create a Firewall. type FirewallCreateRequest struct { - Name string `json:"name"` - Labels *map[string]string `json:"labels,omitempty"` - Rules []FirewallRule `json:"rules,omitempty"` - ApplyTo []FirewallResource `json:"apply_to,omitempty"` + Name string `json:"name"` + Labels *map[string]string `json:"labels,omitempty"` + Rules []FirewallRuleRequest `json:"rules,omitempty"` + ApplyTo []FirewallResource `json:"apply_to,omitempty"` } // FirewallResource defines the schema of a resource to apply the new Firewall on. @@ -76,7 +86,7 @@ type FirewallUpdateResponse struct { // FirewallActionSetRulesRequest defines the schema of the request when setting Firewall rules. type FirewallActionSetRulesRequest struct { - Rules []FirewallRule `json:"rules"` + Rules []FirewallRuleRequest `json:"rules"` } // FirewallActionSetRulesResponse defines the schema of the response when setting Firewall rules. diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/primary_ip.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/primary_ip.go index b685c386f..1901ce800 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/primary_ip.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/primary_ip.go @@ -11,7 +11,7 @@ type PrimaryIP struct { Type string `json:"type"` Protection PrimaryIPProtection `json:"protection"` DNSPtr []PrimaryIPDNSPTR `json:"dns_ptr"` - AssigneeID int64 `json:"assignee_id"` + AssigneeID *int64 `json:"assignee_id"` AssigneeType string `json:"assignee_type"` AutoDelete bool `json:"auto_delete"` Blocked bool `json:"blocked"` diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/volume.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/volume.go index 0dd391bcc..1de645955 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/volume.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/volume.go @@ -10,6 +10,7 @@ type Volume struct { Status string `json:"status"` Location Location `json:"location"` Size int `json:"size"` + Format *string `json:"format"` Protection VolumeProtection `json:"protection"` Labels map[string]string `json:"labels"` LinuxDevice string `json:"linux_device"` diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema_gen.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema_gen.go index 1008bddd8..ea002cce7 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema_gen.go +++ 
b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema_gen.go @@ -78,6 +78,8 @@ You can find a documentation of goverter here: https://goverter.jmattheis.de/ // goverter:extend schemaFromLoadBalancerCreateOptsTargetServer // goverter:extend schemaFromLoadBalancerCreateOptsTargetIP // goverter:extend stringMapToStringMapPtr +// goverter:extend int64SlicePtrFromCertificatePtrSlice +// goverter:extend stringSlicePtrFromStringSlice type converter interface { // goverter:map Error.Code ErrorCode @@ -103,6 +105,7 @@ type converter interface { PrimaryIPFromSchema(schema.PrimaryIP) *PrimaryIP // goverter:map . IP | primaryIPToIPString + // goverter:map AssigneeID | mapZeroInt64ToNil SchemaFromPrimaryIP(*PrimaryIP) schema.PrimaryIP ISOFromSchema(schema.ISO) *ISO @@ -872,6 +875,13 @@ func stringPtrFromNetworkZone(z NetworkZone) *string { return mapEmptyStringToNil(string(z)) } +func mapZeroInt64ToNil(i int64) *int64 { + if i == 0 { + return nil + } + return &i +} + func mapZeroUint64ToNil(i uint64) *uint64 { if i == 0 { return nil @@ -926,3 +936,22 @@ func mapZeroFloat32ToNil(f float32) *float32 { func isDeprecationNotNil(d *DeprecationInfo) bool { return d != nil } + +// int64SlicePtrFromCertificatePtrSlice is needed so that a nil slice is mapped to nil instead of &nil. +func int64SlicePtrFromCertificatePtrSlice(s []*Certificate) *[]int64 { + if s == nil { + return nil + } + var ids = make([]int64, len(s)) + for i, cert := range s { + ids[i] = cert.ID + } + return &ids +} + +func stringSlicePtrFromStringSlice(s []string) *[]string { + if s == nil { + return nil + } + return &s +} diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/server.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/server.go index fe8bf934c..a9f1cb95f 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/server.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/server.go @@ -147,6 +147,7 @@ type ServerRescueType string // List of rescue types. const ( + // Deprecated: Use ServerRescueTypeLinux64 instead. ServerRescueTypeLinux32 ServerRescueType = "linux32" ServerRescueTypeLinux64 ServerRescueType = "linux64" ) diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/volume.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/volume.go index 3f23c6da7..c744b5a8c 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/volume.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/volume.go @@ -21,12 +21,18 @@ type Volume struct { Server *Server Location *Location Size int + Format *string Protection VolumeProtection Labels map[string]string LinuxDevice string Created time.Time } +const ( + VolumeFormatExt4 = "ext4" + VolumeFormatXFS = "xfs" +) + // VolumeProtection represents the protection level of a volume. 
type VolumeProtection struct { Delete bool diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/zz_action_client_iface.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/zz_action_client_iface.go index 1917574a9..13f8665df 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/zz_action_client_iface.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/zz_action_client_iface.go @@ -37,6 +37,8 @@ type IActionClient interface { // // WatchOverallProgress uses the [WithPollBackoffFunc] of the [Client] to wait // until sending the next request. + // + // Deprecated: WatchOverallProgress is deprecated, use [WaitForFunc] instead. WatchOverallProgress(ctx context.Context, actions []*Action) (<-chan int, <-chan error) // WatchProgress watches one action's progress until it completes with success // or error. This watching happens in a goroutine and updates are provided @@ -56,5 +58,22 @@ type IActionClient interface { // // WatchProgress uses the [WithPollBackoffFunc] of the [Client] to wait until // sending the next request. + // + // Deprecated: WatchProgress is deprecated, use [WaitForFunc] instead. WatchProgress(ctx context.Context, action *Action) (<-chan int, <-chan error) + // WaitForFunc waits until all actions are completed by polling the API at the interval + // defined by [WithPollBackoffFunc]. An action is considered as complete when its status is + // either [ActionStatusSuccess] or [ActionStatusError]. + // + // The handleUpdate callback is called every time an action is updated. + WaitForFunc(ctx context.Context, handleUpdate func(update *Action) error, actions ...*Action) error + // WaitFor waits until all actions succeed by polling the API at the interval defined by + // [WithPollBackoffFunc]. An action is considered as succeeded when its status is either + // [ActionStatusSuccess]. + // + // If a single action fails, the function will stop waiting and the error set in the + // action will be returned as an [ActionError]. + // + // For more flexibility, see the [ActionClient.WaitForFunc] function. 
+ WaitFor(ctx context.Context, actions ...*Action) error } diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/zz_schema.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/zz_schema.go index a908ab360..c6ca97cab 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/zz_schema.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/zz_schema.go @@ -254,13 +254,16 @@ func (c *converterImpl) LoadBalancerMetricsFromSchema(source *schema.LoadBalance hcloudLoadBalancerMetrics.Start = c.timeTimeToTimeTime((*source).Metrics.Start) hcloudLoadBalancerMetrics.End = c.timeTimeToTimeTime((*source).Metrics.End) hcloudLoadBalancerMetrics.Step = (*source).Metrics.Step - mapStringHcloudLoadBalancerMetricsValueList := make(map[string][]LoadBalancerMetricsValue, len((*source).Metrics.TimeSeries)) - for key, value := range (*source).Metrics.TimeSeries { - hcloudLoadBalancerMetricsValueList, err := loadBalancerMetricsTimeSeriesFromSchema(value) - if err != nil { - return nil, err + var mapStringHcloudLoadBalancerMetricsValueList map[string][]LoadBalancerMetricsValue + if (*source).Metrics.TimeSeries != nil { + mapStringHcloudLoadBalancerMetricsValueList = make(map[string][]LoadBalancerMetricsValue, len((*source).Metrics.TimeSeries)) + for key, value := range (*source).Metrics.TimeSeries { + hcloudLoadBalancerMetricsValueList, err := loadBalancerMetricsTimeSeriesFromSchema(value) + if err != nil { + return nil, err + } + mapStringHcloudLoadBalancerMetricsValueList[key] = hcloudLoadBalancerMetricsValueList } - mapStringHcloudLoadBalancerMetricsValueList[key] = hcloudLoadBalancerMetricsValueList } hcloudLoadBalancerMetrics.TimeSeries = mapStringHcloudLoadBalancerMetricsValueList pHcloudLoadBalancerMetrics = &hcloudLoadBalancerMetrics @@ -471,7 +474,11 @@ func (c *converterImpl) PrimaryIPFromSchema(source schema.PrimaryIP) *PrimaryIP hcloudPrimaryIP.Type = PrimaryIPType(source.Type) hcloudPrimaryIP.Protection = c.schemaPrimaryIPProtectionToHcloudPrimaryIPProtection(source.Protection) hcloudPrimaryIP.DNSPtr = mapFromPrimaryIPDNSPtrSchema(source.DNSPtr) - hcloudPrimaryIP.AssigneeID = source.AssigneeID + var xint64 int64 + if source.AssigneeID != nil { + xint64 = *source.AssigneeID + } + hcloudPrimaryIP.AssigneeID = xint64 hcloudPrimaryIP.AssigneeType = source.AssigneeType hcloudPrimaryIP.AutoDelete = source.AutoDelete hcloudPrimaryIP.Blocked = source.Blocked @@ -612,14 +619,14 @@ func (c *converterImpl) SchemaFromFirewallCreateOpts(source FirewallCreateOpts) var schemaFirewallCreateRequest schema.FirewallCreateRequest schemaFirewallCreateRequest.Name = source.Name schemaFirewallCreateRequest.Labels = stringMapToStringMapPtr(source.Labels) - var schemaFirewallRuleList []schema.FirewallRule + var schemaFirewallRuleRequestList []schema.FirewallRuleRequest if source.Rules != nil { - schemaFirewallRuleList = make([]schema.FirewallRule, len(source.Rules)) + schemaFirewallRuleRequestList = make([]schema.FirewallRuleRequest, len(source.Rules)) for i := 0; i < len(source.Rules); i++ { - schemaFirewallRuleList[i] = c.hcloudFirewallRuleToSchemaFirewallRule(source.Rules[i]) + schemaFirewallRuleRequestList[i] = c.hcloudFirewallRuleToSchemaFirewallRuleRequest(source.Rules[i]) } } - schemaFirewallCreateRequest.Rules = schemaFirewallRuleList + schemaFirewallCreateRequest.Rules = schemaFirewallRuleRequestList var schemaFirewallResourceList []schema.FirewallResource if source.ApplyTo != nil { schemaFirewallResourceList = 
make([]schema.FirewallResource, len(source.ApplyTo)) @@ -639,14 +646,14 @@ func (c *converterImpl) SchemaFromFirewallResource(source FirewallResource) sche } func (c *converterImpl) SchemaFromFirewallSetRulesOpts(source FirewallSetRulesOpts) schema.FirewallActionSetRulesRequest { var schemaFirewallActionSetRulesRequest schema.FirewallActionSetRulesRequest - var schemaFirewallRuleList []schema.FirewallRule + var schemaFirewallRuleRequestList []schema.FirewallRuleRequest if source.Rules != nil { - schemaFirewallRuleList = make([]schema.FirewallRule, len(source.Rules)) + schemaFirewallRuleRequestList = make([]schema.FirewallRuleRequest, len(source.Rules)) for i := 0; i < len(source.Rules); i++ { - schemaFirewallRuleList[i] = c.hcloudFirewallRuleToSchemaFirewallRule(source.Rules[i]) + schemaFirewallRuleRequestList[i] = c.hcloudFirewallRuleToSchemaFirewallRuleRequest(source.Rules[i]) } } - schemaFirewallActionSetRulesRequest.Rules = schemaFirewallRuleList + schemaFirewallActionSetRulesRequest.Rules = schemaFirewallRuleRequestList return schemaFirewallActionSetRulesRequest } func (c *converterImpl) SchemaFromFloatingIP(source *FloatingIP) schema.FloatingIP { @@ -1042,7 +1049,7 @@ func (c *converterImpl) SchemaFromPrimaryIP(source *PrimaryIP) schema.PrimaryIP schemaPrimaryIP2.Type = string((*source).Type) schemaPrimaryIP2.Protection = c.hcloudPrimaryIPProtectionToSchemaPrimaryIPProtection((*source).Protection) schemaPrimaryIP2.DNSPtr = primaryIPDNSPtrSchemaFromMap((*source).DNSPtr) - schemaPrimaryIP2.AssigneeID = (*source).AssigneeID + schemaPrimaryIP2.AssigneeID = mapZeroInt64ToNil((*source).AssigneeID) schemaPrimaryIP2.AssigneeType = (*source).AssigneeType schemaPrimaryIP2.AutoDelete = (*source).AutoDelete schemaPrimaryIP2.Blocked = (*source).Blocked @@ -1208,6 +1215,7 @@ func (c *converterImpl) SchemaFromVolume(source *Volume) schema.Volume { schemaVolume2.Status = string((*source).Status) schemaVolume2.Location = c.SchemaFromLocation((*source).Location) schemaVolume2.Size = (*source).Size + schemaVolume2.Format = (*source).Format schemaVolume2.Protection = c.hcloudVolumeProtectionToSchemaVolumeProtection((*source).Protection) schemaVolume2.Labels = (*source).Labels schemaVolume2.LinuxDevice = (*source).LinuxDevice @@ -1283,13 +1291,16 @@ func (c *converterImpl) ServerMetricsFromSchema(source *schema.ServerGetMetricsR hcloudServerMetrics.Start = c.timeTimeToTimeTime((*source).Metrics.Start) hcloudServerMetrics.End = c.timeTimeToTimeTime((*source).Metrics.End) hcloudServerMetrics.Step = (*source).Metrics.Step - mapStringHcloudServerMetricsValueList := make(map[string][]ServerMetricsValue, len((*source).Metrics.TimeSeries)) - for key, value := range (*source).Metrics.TimeSeries { - hcloudServerMetricsValueList, err := serverMetricsTimeSeriesFromSchema(value) - if err != nil { - return nil, err + var mapStringHcloudServerMetricsValueList map[string][]ServerMetricsValue + if (*source).Metrics.TimeSeries != nil { + mapStringHcloudServerMetricsValueList = make(map[string][]ServerMetricsValue, len((*source).Metrics.TimeSeries)) + for key, value := range (*source).Metrics.TimeSeries { + hcloudServerMetricsValueList, err := serverMetricsTimeSeriesFromSchema(value) + if err != nil { + return nil, err + } + mapStringHcloudServerMetricsValueList[key] = hcloudServerMetricsValueList } - mapStringHcloudServerMetricsValueList[key] = hcloudServerMetricsValueList } hcloudServerMetrics.TimeSeries = mapStringHcloudServerMetricsValueList pHcloudServerMetrics = &hcloudServerMetrics @@ -1387,6 +1398,7 @@ func (c 
*converterImpl) VolumeFromSchema(source schema.Volume) *Volume { hcloudVolume.Server = pHcloudServer hcloudVolume.Location = c.LocationFromSchema(source.Location) hcloudVolume.Size = source.Size + hcloudVolume.Format = source.Format hcloudVolume.Protection = c.schemaVolumeProtectionToHcloudVolumeProtection(source.Protection) hcloudVolume.Labels = source.Labels hcloudVolume.LinuxDevice = source.LinuxDevice @@ -1459,6 +1471,30 @@ func (c *converterImpl) hcloudFirewallRuleToSchemaFirewallRule(source FirewallRu schemaFirewallRule.Description = source.Description return schemaFirewallRule } +func (c *converterImpl) hcloudFirewallRuleToSchemaFirewallRuleRequest(source FirewallRule) schema.FirewallRuleRequest { + var schemaFirewallRuleRequest schema.FirewallRuleRequest + schemaFirewallRuleRequest.Direction = string(source.Direction) + var stringList []string + if source.SourceIPs != nil { + stringList = make([]string, len(source.SourceIPs)) + for i := 0; i < len(source.SourceIPs); i++ { + stringList[i] = stringFromIPNet(source.SourceIPs[i]) + } + } + schemaFirewallRuleRequest.SourceIPs = stringList + var stringList2 []string + if source.DestinationIPs != nil { + stringList2 = make([]string, len(source.DestinationIPs)) + for j := 0; j < len(source.DestinationIPs); j++ { + stringList2[j] = stringFromIPNet(source.DestinationIPs[j]) + } + } + schemaFirewallRuleRequest.DestinationIPs = stringList2 + schemaFirewallRuleRequest.Protocol = string(source.Protocol) + schemaFirewallRuleRequest.Port = source.Port + schemaFirewallRuleRequest.Description = source.Description + return schemaFirewallRuleRequest +} func (c *converterImpl) hcloudFloatingIPProtectionToSchemaFloatingIPProtection(source FloatingIPProtection) schema.FloatingIPProtection { var schemaFloatingIPProtection schema.FloatingIPProtection schemaFloatingIPProtection.Delete = source.Delete @@ -1706,14 +1742,7 @@ func (c *converterImpl) pHcloudLoadBalancerAddServiceOptsHTTPToPSchemaLoadBalanc pInt = &xint } schemaLoadBalancerActionAddServiceRequestHTTP.CookieLifetime = pInt - var int64List []int64 - if (*source).Certificates != nil { - int64List = make([]int64, len((*source).Certificates)) - for i := 0; i < len((*source).Certificates); i++ { - int64List[i] = int64FromCertificate((*source).Certificates[i]) - } - } - schemaLoadBalancerActionAddServiceRequestHTTP.Certificates = &int64List + schemaLoadBalancerActionAddServiceRequestHTTP.Certificates = int64SlicePtrFromCertificatePtrSlice((*source).Certificates) schemaLoadBalancerActionAddServiceRequestHTTP.RedirectHTTP = (*source).RedirectHTTP schemaLoadBalancerActionAddServiceRequestHTTP.StickySessions = (*source).StickySessions pSchemaLoadBalancerActionAddServiceRequestHTTP = &schemaLoadBalancerActionAddServiceRequestHTTP @@ -1727,7 +1756,7 @@ func (c *converterImpl) pHcloudLoadBalancerAddServiceOptsHealthCheckHTTPToPSchem schemaLoadBalancerActionAddServiceRequestHealthCheckHTTP.Domain = (*source).Domain schemaLoadBalancerActionAddServiceRequestHealthCheckHTTP.Path = (*source).Path schemaLoadBalancerActionAddServiceRequestHealthCheckHTTP.Response = (*source).Response - schemaLoadBalancerActionAddServiceRequestHealthCheckHTTP.StatusCodes = &(*source).StatusCodes + schemaLoadBalancerActionAddServiceRequestHealthCheckHTTP.StatusCodes = stringSlicePtrFromStringSlice((*source).StatusCodes) schemaLoadBalancerActionAddServiceRequestHealthCheckHTTP.TLS = (*source).TLS pSchemaLoadBalancerActionAddServiceRequestHealthCheckHTTP = &schemaLoadBalancerActionAddServiceRequestHealthCheckHTTP } @@ -1777,14 +1806,7 
@@ func (c *converterImpl) pHcloudLoadBalancerCreateOptsServiceHTTPToPSchemaLoadBal pInt = &xint } schemaLoadBalancerCreateRequestServiceHTTP.CookieLifetime = pInt - var int64List []int64 - if (*source).Certificates != nil { - int64List = make([]int64, len((*source).Certificates)) - for i := 0; i < len((*source).Certificates); i++ { - int64List[i] = int64FromCertificate((*source).Certificates[i]) - } - } - schemaLoadBalancerCreateRequestServiceHTTP.Certificates = &int64List + schemaLoadBalancerCreateRequestServiceHTTP.Certificates = int64SlicePtrFromCertificatePtrSlice((*source).Certificates) schemaLoadBalancerCreateRequestServiceHTTP.RedirectHTTP = (*source).RedirectHTTP schemaLoadBalancerCreateRequestServiceHTTP.StickySessions = (*source).StickySessions pSchemaLoadBalancerCreateRequestServiceHTTP = &schemaLoadBalancerCreateRequestServiceHTTP @@ -1798,7 +1820,7 @@ func (c *converterImpl) pHcloudLoadBalancerCreateOptsServiceHealthCheckHTTPToPSc schemaLoadBalancerCreateRequestServiceHealthCheckHTTP.Domain = (*source).Domain schemaLoadBalancerCreateRequestServiceHealthCheckHTTP.Path = (*source).Path schemaLoadBalancerCreateRequestServiceHealthCheckHTTP.Response = (*source).Response - schemaLoadBalancerCreateRequestServiceHealthCheckHTTP.StatusCodes = &(*source).StatusCodes + schemaLoadBalancerCreateRequestServiceHealthCheckHTTP.StatusCodes = stringSlicePtrFromStringSlice((*source).StatusCodes) schemaLoadBalancerCreateRequestServiceHealthCheckHTTP.TLS = (*source).TLS pSchemaLoadBalancerCreateRequestServiceHealthCheckHTTP = &schemaLoadBalancerCreateRequestServiceHealthCheckHTTP } @@ -1885,14 +1907,7 @@ func (c *converterImpl) pHcloudLoadBalancerUpdateServiceOptsHTTPToPSchemaLoadBal pInt = &xint } schemaLoadBalancerActionUpdateServiceRequestHTTP.CookieLifetime = pInt - var int64List []int64 - if (*source).Certificates != nil { - int64List = make([]int64, len((*source).Certificates)) - for i := 0; i < len((*source).Certificates); i++ { - int64List[i] = int64FromCertificate((*source).Certificates[i]) - } - } - schemaLoadBalancerActionUpdateServiceRequestHTTP.Certificates = &int64List + schemaLoadBalancerActionUpdateServiceRequestHTTP.Certificates = int64SlicePtrFromCertificatePtrSlice((*source).Certificates) schemaLoadBalancerActionUpdateServiceRequestHTTP.RedirectHTTP = (*source).RedirectHTTP schemaLoadBalancerActionUpdateServiceRequestHTTP.StickySessions = (*source).StickySessions pSchemaLoadBalancerActionUpdateServiceRequestHTTP = &schemaLoadBalancerActionUpdateServiceRequestHTTP @@ -1906,7 +1921,7 @@ func (c *converterImpl) pHcloudLoadBalancerUpdateServiceOptsHealthCheckHTTPToPSc schemaLoadBalancerActionUpdateServiceRequestHealthCheckHTTP.Domain = (*source).Domain schemaLoadBalancerActionUpdateServiceRequestHealthCheckHTTP.Path = (*source).Path schemaLoadBalancerActionUpdateServiceRequestHealthCheckHTTP.Response = (*source).Response - schemaLoadBalancerActionUpdateServiceRequestHealthCheckHTTP.StatusCodes = &(*source).StatusCodes + schemaLoadBalancerActionUpdateServiceRequestHealthCheckHTTP.StatusCodes = stringSlicePtrFromStringSlice((*source).StatusCodes) schemaLoadBalancerActionUpdateServiceRequestHealthCheckHTTP.TLS = (*source).TLS pSchemaLoadBalancerActionUpdateServiceRequestHealthCheckHTTP = &schemaLoadBalancerActionUpdateServiceRequestHealthCheckHTTP } diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 1f72cdde1..05c7359e4 100644 --- 
a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -55,6 +55,10 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+<details>
+	<summary>See changes to v1.16.x</summary>
+
See changes to v1.15.x @@ -560,6 +565,8 @@ the stateless compress described below. For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). +To disable all assembly add `-tags=noasm`. This works across all packages. + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go new file mode 100644 index 000000000..66d1657d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -0,0 +1,1017 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright (c) 2015 Klaus Post +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +const ( + NoCompression = 0 + BestSpeed = 1 + BestCompression = 9 + DefaultCompression = -1 + + // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman + // entropy encoding. This mode is useful in compressing data that has + // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) + // that lacks an entropy encoder. Compression gains are achieved when + // certain bytes in the input stream occur more frequently than others. + // + // Note that HuffmanOnly produces a compressed output that is + // RFC 1951 compliant. That is, any valid DEFLATE decompressor will + // continue to be able to decompress this output. + HuffmanOnly = -2 + ConstantCompression = HuffmanOnly // compatibility alias. + + logWindowSize = 15 + windowSize = 1 << logWindowSize + windowMask = windowSize - 1 + logMaxOffsetSize = 15 // Standard DEFLATE + minMatchLength = 4 // The smallest match that the compressor looks for + maxMatchLength = 258 // The longest match for the compressor + minOffsetSize = 1 // The shortest offset that makes any sense + + // The maximum number of tokens we will encode at the time. + // Smaller sizes usually creates less optimal blocks. + // Bigger can make context switching slow. + // We use this for levels 7-9, so we make it big. + maxFlateBlockTokens = 1 << 15 + maxStoreBlockSize = 65535 + hashBits = 17 // After 17 performance degrades + hashSize = 1 << hashBits + hashMask = (1 << hashBits) - 1 + hashShift = (hashBits + minMatchLength - 1) / minMatchLength + maxHashOffset = 1 << 28 + + skipNever = math.MaxInt32 + + debugDeflate = false +) + +type compressionLevel struct { + good, lazy, nice, chain, fastSkipHashing, level int +} + +// Compression levels have been rebalanced from zlib deflate defaults +// to give a bigger spread in speed and compression. +// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ +var levels = []compressionLevel{ + {}, // 0 + // Level 1-6 uses specialized algorithm - values not used + {0, 0, 0, 0, 0, 1}, + {0, 0, 0, 0, 0, 2}, + {0, 0, 0, 0, 0, 3}, + {0, 0, 0, 0, 0, 4}, + {0, 0, 0, 0, 0, 5}, + {0, 0, 0, 0, 0, 6}, + // Levels 7-9 use increasingly more lazy matching + // and increasingly stringent conditions for "good enough". + {8, 12, 16, 24, skipNever, 7}, + {16, 30, 40, 64, skipNever, 8}, + {32, 258, 258, 1024, skipNever, 9}, +} + +// advancedState contains state for the advanced levels, with bigger hash tables, etc. 
+type advancedState struct { + // deflate state + length int + offset int + maxInsertIndex int + chainHead int + hashOffset int + + ii uint16 // position of last match, intended to overflow to reset. + + // input window: unprocessed data is window[index:windowEnd] + index int + hashMatch [maxMatchLength + minMatchLength]uint32 + + // Input hash chains + // hashHead[hashValue] contains the largest inputIndex with the specified hash value + // If hashHead[hashValue] is within the current window, then + // hashPrev[hashHead[hashValue] & windowMask] contains the previous index + // with the same hash value. + hashHead [hashSize]uint32 + hashPrev [windowSize]uint32 +} + +type compressor struct { + compressionLevel + + h *huffmanEncoder + w *huffmanBitWriter + + // compression algorithm + fill func(*compressor, []byte) int // copy data to window + step func(*compressor) // process window + + window []byte + windowEnd int + blockStart int // window index where current tokens start + err error + + // queued output tokens + tokens tokens + fast fastEnc + state *advancedState + + sync bool // requesting flush + byteAvailable bool // if true, still need to process window[index-1]. +} + +func (d *compressor) fillDeflate(b []byte) int { + s := d.state + if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) { + // shift the window by windowSize + //copy(d.window[:], d.window[windowSize:2*windowSize]) + *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:]) + s.index -= windowSize + d.windowEnd -= windowSize + if d.blockStart >= windowSize { + d.blockStart -= windowSize + } else { + d.blockStart = math.MaxInt32 + } + s.hashOffset += windowSize + if s.hashOffset > maxHashOffset { + delta := s.hashOffset - 1 + s.hashOffset -= delta + s.chainHead -= delta + // Iterate over slices instead of arrays to avoid copying + // the entire table onto the stack (Issue #18625). + for i, v := range s.hashPrev[:] { + if int(v) > delta { + s.hashPrev[i] = uint32(int(v) - delta) + } else { + s.hashPrev[i] = 0 + } + } + for i, v := range s.hashHead[:] { + if int(v) > delta { + s.hashHead[i] = uint32(int(v) - delta) + } else { + s.hashHead[i] = 0 + } + } + } + } + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { + if index > 0 || eof { + var window []byte + if d.blockStart <= index { + window = d.window[d.blockStart:index] + } + d.blockStart = index + //d.w.writeBlock(tok, eof, window) + d.w.writeBlockDynamic(tok, eof, window, d.sync) + return d.w.err + } + return nil +} + +// writeBlockSkip writes the current block and uses the number of tokens +// to determine if the block should be stored on no matches, or +// only huffman encoded. +func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { + if index > 0 || eof { + if d.blockStart <= index { + window := d.window[d.blockStart:index] + // If we removed less than a 64th of all literals + // we huffman compress the block. + if int(tok.n) > len(window)-int(tok.n>>6) { + d.w.writeBlockHuff(eof, window, d.sync) + } else { + // Write a dynamic huffman block. + d.w.writeBlockDynamic(tok, eof, window, d.sync) + } + } else { + d.w.writeBlock(tok, eof, nil) + } + d.blockStart = index + return d.w.err + } + return nil +} + +// fillWindow will fill the current window with the supplied +// dictionary and calculate all hashes. +// This is much faster than doing a full encode. +// Should only be used after a start/reset. 
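The hash tables described above (hashHead plus hashPrev forming a chain per hash value) are easiest to see in a toy model. The sketch below is illustrative only, not the package's real tables: it uses a Knuth-style multiplicative hash like hash4u but a map instead of fixed arrays, and it re-verifies each candidate the way the real matcher does with matchLen, since buckets can collide.

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
	)

	func main() {
		data := []byte("abcdabcdabcd")
		const bits = 8
		const prime4 = 2654435761 // multiplicative hashing constant

		head := make(map[uint32]int) // hash -> most recent position
		prev := make([]int, len(data))

		for i := 0; i+4 <= len(data); i++ {
			u := binary.LittleEndian.Uint32(data[i:])
			h := (u * prime4) >> (32 - bits)
			p, ok := head[h]
			if !ok {
				p = -1
			}
			prev[i] = p // link to the previous position with this hash
			head[h] = i // this position is now the chain head
		}

		// Walk the chain for the last "abcd" (position 8) to find
		// earlier occurrences of the same 4 bytes.
		pos := 8
		for p := prev[pos]; p >= 0; p = prev[p] {
			if bytes.Equal(data[p:p+4], data[pos:pos+4]) { // chains may collide
				fmt.Printf("position %d repeats the 4 bytes at %d\n", pos, p)
			}
		}
	}
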
+func (d *compressor) fillWindow(b []byte) { + // Do not fill window if we are in store-only or huffman mode. + if d.level <= 0 && d.level > -MinCustomWindowSize { + return + } + if d.fast != nil { + // encode the last data, but discard the result + if len(b) > maxMatchOffset { + b = b[len(b)-maxMatchOffset:] + } + d.fast.Encode(&d.tokens, b) + d.tokens.Reset() + return + } + s := d.state + // If we are given too much, cut it. + if len(b) > windowSize { + b = b[len(b)-windowSize:] + } + // Add all to window. + n := copy(d.window[d.windowEnd:], b) + + // Calculate 256 hashes at the time (more L1 cache hits) + loops := (n + 256 - minMatchLength) / 256 + for j := 0; j < loops; j++ { + startindex := j * 256 + end := startindex + 256 + minMatchLength - 1 + if end > n { + end = n + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + + if dstSize <= 0 { + continue + } + + dst := s.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + s.hashPrev[di&windowMask] = s.hashHead[newH] + // Set the head of the hash chain to us. + s.hashHead[newH] = uint32(di + s.hashOffset) + } + } + // Update window information. + d.windowEnd += n + s.index = n +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. + tries := d.chain + length = minMatchLength - 1 + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + if minIndex < 0 { + minIndex = 0 + } + offset = 0 + + if d.chain < 100 { + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:i+minMatchLook], wPos) + if n > length { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i <= minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset + if i < minIndex { + break + } + } + return + } + + // Minimum gain to accept a match. + cGain := 4 + + // Some like it higher (CSV), some like it lower (JSON) + const baseCost = 3 + // Base is 4 bytes at with an additional cost. + // Matches must be better than this. + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:i+minMatchLook], wPos) + if n > length { + // Calculate gain. 
Estimate + newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) + + //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length) + if newGain > cGain { + length = n + offset = pos - i + cGain = newGain + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + } + if i <= minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset + if i < minIndex { + break + } + } + return +} + +func (d *compressor) writeStoredBlock(buf []byte) error { + if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { + return d.w.err + } + d.w.writeBytes(buf) + return d.w.err +} + +// hash4 returns a hash representation of the first 4 bytes +// of the supplied slice. +// The caller must ensure that len(b) >= 4. +func hash4(b []byte) uint32 { + return hash4u(binary.LittleEndian.Uint32(b), hashBits) +} + +// hash4 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4u(u uint32, h uint8) uint32 { + return (u * prime4bytes) >> (32 - h) +} + +// bulkHash4 will compute hashes using the same +// algorithm as hash4 +func bulkHash4(b []byte, dst []uint32) { + if len(b) < 4 { + return + } + hb := binary.LittleEndian.Uint32(b) + + dst[0] = hash4u(hb, hashBits) + end := len(b) - 4 + 1 + for i := 1; i < end; i++ { + hb = (hb >> 8) | uint32(b[i+3])<<24 + dst[i] = hash4u(hb, hashBits) + } +} + +func (d *compressor) initDeflate() { + d.window = make([]byte, 2*windowSize) + d.byteAvailable = false + d.err = nil + if d.state == nil { + return + } + s := d.state + s.index = 0 + s.hashOffset = 1 + s.length = minMatchLength - 1 + s.offset = 0 + s.chainHead = -1 +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazy() { + s := d.state + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = debugDeflate + + if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { + return + } + if d.windowEnd != s.index && d.chain > 100 { + // Get literal huffman coder. + if d.h == nil { + d.h = newHuffmanEncoder(maxFlateBlockTokens) + } + var tmp [256]uint16 + for _, v := range d.window[s.index:d.windowEnd] { + tmp[v]++ + } + d.h.generate(tmp[:], 15) + } + + s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + + for { + if sanity && s.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - s.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && s.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. 
+ if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.AddLiteral(d.window[s.index-1]) + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + return + } + } + if s.index < s.maxInsertIndex { + // Update the hash + hash := hash4(d.window[s.index:]) + ch := s.hashHead[hash] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[hash] = uint32(s.index + s.hashOffset) + } + prevLength := s.length + prevOffset := s.offset + s.length = minMatchLength - 1 + s.offset = 0 + minIndex := s.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { + s.length = newLength + s.offset = newOffset + } + } + + if prevLength >= minMatchLength && s.length <= prevLength { + // No better match, but check for better match at end... + // + // Skip forward a number of bytes. + // Offset of 2 seems to yield best results. 3 is sometimes better. + const checkOff = 2 + + // Check all, except full length + if prevLength < maxMatchLength-checkOff { + prevIndex := s.index - 1 + if prevIndex+prevLength < s.maxInsertIndex { + end := lookahead + if lookahead > maxMatchLength+checkOff { + end = maxMatchLength + checkOff + } + end += prevIndex + + // Hash at match end. + h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength { + prevLength = length + prevOffset = prevIndex - ch2 + + // Extend back... + for i := checkOff - 1; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } else if false { + // Check one further ahead. + // Only rarely better, disabled for now. + prevIndex++ + h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength+checkOff { + prevLength = length + prevOffset = prevIndex - ch2 + prevIndex-- + + // Extend back... 
+ for i := checkOff; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } + } + } + } + } + } + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + newIndex := s.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > s.maxInsertIndex { + end = s.maxInsertIndex + } + end += minMatchLength - 1 + startindex := s.index + 1 + if startindex > s.maxInsertIndex { + startindex = s.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := s.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + s.hashPrev[di&windowMask] = s.hashHead[newH] + // Set the head of the hash chain to us. + s.hashHead[newH] = uint32(di + s.hashOffset) + } + } + + s.index = newIndex + d.byteAvailable = false + s.length = minMatchLength - 1 + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.ii = 0 + } else { + // Reset, if we got a match this run. + if s.length >= minMatchLength { + s.ii = 0 + } + // We have a byte waiting. Emit it. + if d.byteAvailable { + s.ii++ + d.tokens.AddLiteral(d.window[s.index-1]) + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + + // If we have a long run of no matches, skip additional bytes + // Resets when s.ii overflows after 64KB. + if n := int(s.ii) - d.chain; n > 0 { + n = 1 + int(n>>6) + for j := 0; j < n; j++ { + if s.index >= d.windowEnd-1 { + break + } + d.tokens.AddLiteral(d.window[s.index-1]) + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + // Index... 
+ if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + s.index++ + } + // Flush last byte + d.tokens.AddLiteral(d.window[s.index-1]) + d.byteAvailable = false + // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + } + } else { + s.index++ + d.byteAvailable = true + } + } + } +} + +func (d *compressor) store() { + if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + d.windowEnd = 0 + } +} + +// fillWindow will fill the buffer with data for huffman-only compression. +// The number of bytes copied is returned. +func (d *compressor) fillBlock(b []byte) int { + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +// storeHuff will compress and store the currently added data, +// if enough has been accumulated or we at the end of the stream. +// Any error that occurred will be in d.err +func (d *compressor) storeHuff() { + if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { + return + } + d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + d.windowEnd = 0 +} + +// storeFast will compress and store the currently added data, +// if enough has been accumulated or we at the end of the stream. +// Any error that occurred will be in d.err +func (d *compressor) storeFast() { + // We only compress if we have maxStoreBlockSize. + if d.windowEnd < len(d.window) { + if !d.sync { + return + } + // Handle extremely small sizes. + if d.windowEnd < 128 { + if d.windowEnd == 0 { + return + } + if d.windowEnd <= 32 { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + } else { + d.w.writeBlockHuff(false, d.window[:d.windowEnd], true) + d.err = d.w.err + } + d.tokens.Reset() + d.windowEnd = 0 + d.fast.Reset() + return + } + } + + d.fast.Encode(&d.tokens, d.window[:d.windowEnd]) + // If we made zero matches, store the block as is. + if d.tokens.n == 0 { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + // If we removed less than 1/16th, huffman compress the block. + } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { + d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + } else { + d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + } + d.tokens.Reset() + d.windowEnd = 0 +} + +// write will add input byte to the stream. +// Unless an error occurs all bytes will be consumed. 
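// Illustrative sketch (not part of this diff; names are hypothetical) of the
// block-type decision storeFast makes above: store raw bytes when tokenization
// found no matches, fall back to a Huffman-only block when matching removed
// less than 1/16th of the input, and emit a dynamic-table block otherwise.

package main

import "fmt"

// chooseBlockType mirrors the storeFast thresholds; numTokens and inputLen
// stand in for d.tokens.n and d.windowEnd.
func chooseBlockType(numTokens, inputLen int) string {
	switch {
	case numTokens == 0:
		return "stored" // zero matches: raw bytes are cheapest
	case numTokens > inputLen-(inputLen>>4):
		return "huffman-only" // saved < 1/16th: a dynamic table will not pay off
	default:
		return "dynamic"
	}
}

func main() {
	fmt.Println(chooseBlockType(0, 4096))    // stored
	fmt.Println(chooseBlockType(4000, 4096)) // huffman-only
	fmt.Println(chooseBlockType(1500, 4096)) // dynamic
}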
+func (d *compressor) write(b []byte) (n int, err error) { + if d.err != nil { + return 0, d.err + } + n = len(b) + for len(b) > 0 { + if d.windowEnd == len(d.window) || d.sync { + d.step(d) + } + b = b[d.fill(d, b):] + if d.err != nil { + return 0, d.err + } + } + return n, d.err +} + +func (d *compressor) syncFlush() error { + d.sync = true + if d.err != nil { + return d.err + } + d.step(d) + if d.err == nil { + d.w.writeStoredHeader(0, false) + d.w.flush() + d.err = d.w.err + } + d.sync = false + return d.err +} + +func (d *compressor) init(w io.Writer, level int) (err error) { + d.w = newHuffmanBitWriter(w) + + switch { + case level == NoCompression: + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).store + case level == ConstantCompression: + d.w.logNewTablePenalty = 10 + d.window = make([]byte, 32<<10) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeHuff + case level == DefaultCompression: + level = 5 + fallthrough + case level >= 1 && level <= 6: + d.w.logNewTablePenalty = 7 + d.fast = newFastEnc(level) + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeFast + case 7 <= level && level <= 9: + d.w.logNewTablePenalty = 8 + d.state = &advancedState{} + d.compressionLevel = levels[level] + d.initDeflate() + d.fill = (*compressor).fillDeflate + d.step = (*compressor).deflateLazy + case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize: + d.w.logNewTablePenalty = 7 + d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize} + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeFast + default: + return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) + } + d.level = level + return nil +} + +// reset the state of the compressor. +func (d *compressor) reset(w io.Writer) { + d.w.reset(w) + d.sync = false + d.err = nil + // We only need to reset a few things for Snappy. + if d.fast != nil { + d.fast.Reset() + d.windowEnd = 0 + d.tokens.Reset() + return + } + switch d.compressionLevel.chain { + case 0: + // level was NoCompression or ConstantCompresssion. + d.windowEnd = 0 + default: + s := d.state + s.chainHead = -1 + for i := range s.hashHead { + s.hashHead[i] = 0 + } + for i := range s.hashPrev { + s.hashPrev[i] = 0 + } + s.hashOffset = 1 + s.index, d.windowEnd = 0, 0 + d.blockStart, d.byteAvailable = 0, false + d.tokens.Reset() + s.length = minMatchLength - 1 + s.offset = 0 + s.ii = 0 + s.maxInsertIndex = 0 + } +} + +func (d *compressor) close() error { + if d.err != nil { + return d.err + } + d.sync = true + d.step(d) + if d.err != nil { + return d.err + } + if d.w.writeStoredHeader(0, true); d.w.err != nil { + return d.w.err + } + d.w.flush() + d.w.reset(nil) + return d.w.err +} + +// NewWriter returns a new Writer compressing data at the given level. +// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); +// higher levels typically run slower but compress more. +// Level 0 (NoCompression) does not attempt any compression; it only adds the +// necessary DEFLATE framing. +// Level -1 (DefaultCompression) uses the default compression level. +// Level -2 (ConstantCompression) will use Huffman compression only, giving +// a very fast compression for all types of input, but sacrificing considerable +// compression efficiency. +// +// If level is in the range [-2, 9] then the error returned will be nil. 
+// Otherwise the error returned will be non-nil. +func NewWriter(w io.Writer, level int) (*Writer, error) { + var dw Writer + if err := dw.d.init(w, level); err != nil { + return nil, err + } + return &dw, nil +} + +// NewWriterDict is like NewWriter but initializes the new +// Writer with a preset dictionary. The returned Writer behaves +// as if the dictionary had been written to it without producing +// any compressed output. The compressed data written to w +// can only be decompressed by a Reader initialized with the +// same dictionary. +func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { + zw, err := NewWriter(w, level) + if err != nil { + return nil, err + } + zw.d.fillWindow(dict) + zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. + return zw, err +} + +// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow. +const MinCustomWindowSize = 32 + +// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow. +const MaxCustomWindowSize = windowSize + +// NewWriterWindow returns a new Writer compressing data with a custom window size. +// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize. +func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) { + if windowSize < MinCustomWindowSize { + return nil, errors.New("flate: requested window size less than MinWindowSize") + } + if windowSize > MaxCustomWindowSize { + return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize") + } + var dw Writer + if err := dw.d.init(w, -windowSize); err != nil { + return nil, err + } + return &dw, nil +} + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + d compressor + dict []byte +} + +// Write writes data to w, which will eventually write the +// compressed form of data to its underlying writer. +func (w *Writer) Write(data []byte) (n int, err error) { + return w.d.write(data) +} + +// Flush flushes any pending data to the underlying writer. +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. +// Flush does not return until the data has been written. +// Calling Flush when there is no pending data still causes the Writer +// to emit a sync marker of at least 4 bytes. +// If the underlying writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (w *Writer) Flush() error { + // For more about flushing: + // http://www.bolet.org/~pornin/deflate-flush.html + return w.d.syncFlush() +} + +// Close flushes and closes the writer. +func (w *Writer) Close() error { + return w.d.close() +} + +// Reset discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level and dictionary. +func (w *Writer) Reset(dst io.Writer) { + if len(w.dict) > 0 { + // w was created with NewWriterDict + w.d.reset(dst) + if dst != nil { + w.d.fillWindow(w.dict) + } + } else { + // w was created with NewWriter + w.d.reset(dst) + } +} + +// ResetDict discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level, but sets a specific dictionary. 
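// Illustrative round trip through the Writer API documented above (a sketch,
// not part of this diff; it assumes the vendored package is imported under its
// upstream path).

package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	zw, err := flate.NewWriter(&buf, flate.BestSpeed) // any level in [-2, 9]
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write([]byte("hello, hello, hello")); err != nil {
		log.Fatal(err)
	}
	// Flush emits a sync marker so a remote reader can decode what was
	// written so far; Close finishes the stream.
	if err := zw.Flush(); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed to %d bytes", buf.Len())
}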
+func (w *Writer) ResetDict(dst io.Writer, dict []byte) { + w.dict = dict + w.d.reset(dst) + w.d.fillWindow(w.dict) +} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go new file mode 100644 index 000000000..bb36351a5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// dictDecoder implements the LZ77 sliding dictionary as used in decompression. +// LZ77 decompresses data through sequences of two forms of commands: +// +// - Literal insertions: Runs of one or more symbols are inserted into the data +// stream as is. This is accomplished through the writeByte method for a +// single symbol, or combinations of writeSlice/writeMark for multiple symbols. +// Any valid stream must start with a literal insertion if no preset dictionary +// is used. +// +// - Backward copies: Runs of one or more symbols are copied from previously +// emitted data. Backward copies come as the tuple (dist, length) where dist +// determines how far back in the stream to copy from and length determines how +// many bytes to copy. Note that it is valid for the length to be greater than +// the distance. Since LZ77 uses forward copies, that situation is used to +// perform a form of run-length encoding on repeated runs of symbols. +// The writeCopy and tryWriteCopy are used to implement this command. +// +// For performance reasons, this implementation performs little to no sanity +// checks about the arguments. As such, the invariants documented for each +// method call must be respected. +type dictDecoder struct { + hist []byte // Sliding window history + + // Invariant: 0 <= rdPos <= wrPos <= len(hist) + wrPos int // Current output position in buffer + rdPos int // Have emitted hist[:rdPos] already + full bool // Has a full window length been written yet? +} + +// init initializes dictDecoder to have a sliding window dictionary of the given +// size. If a preset dict is provided, it will initialize the dictionary with +// the contents of dict. +func (dd *dictDecoder) init(size int, dict []byte) { + *dd = dictDecoder{hist: dd.hist} + + if cap(dd.hist) < size { + dd.hist = make([]byte, size) + } + dd.hist = dd.hist[:size] + + if len(dict) > len(dd.hist) { + dict = dict[len(dict)-len(dd.hist):] + } + dd.wrPos = copy(dd.hist, dict) + if dd.wrPos == len(dd.hist) { + dd.wrPos = 0 + dd.full = true + } + dd.rdPos = dd.wrPos +} + +// histSize reports the total amount of historical data in the dictionary. +func (dd *dictDecoder) histSize() int { + if dd.full { + return len(dd.hist) + } + return dd.wrPos +} + +// availRead reports the number of bytes that can be flushed by readFlush. +func (dd *dictDecoder) availRead() int { + return dd.wrPos - dd.rdPos +} + +// availWrite reports the available amount of output buffer space. +func (dd *dictDecoder) availWrite() int { + return len(dd.hist) - dd.wrPos +} + +// writeSlice returns a slice of the available buffer to write data to. +// +// This invariant will be kept: len(s) <= availWrite() +func (dd *dictDecoder) writeSlice() []byte { + return dd.hist[dd.wrPos:] +} + +// writeMark advances the writer pointer by cnt. 
+// +// This invariant must be kept: 0 <= cnt <= availWrite() +func (dd *dictDecoder) writeMark(cnt int) { + dd.wrPos += cnt +} + +// writeByte writes a single byte to the dictionary. +// +// This invariant must be kept: 0 < availWrite() +func (dd *dictDecoder) writeByte(c byte) { + dd.hist[dd.wrPos] = c + dd.wrPos++ +} + +// writeCopy copies a string at a given (dist, length) to the output. +// This returns the number of bytes copied and may be less than the requested +// length if the available space in the output buffer is too small. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) writeCopy(dist, length int) int { + dstBase := dd.wrPos + dstPos := dstBase + srcPos := dstPos - dist + endPos := dstPos + length + if endPos > len(dd.hist) { + endPos = len(dd.hist) + } + + // Copy non-overlapping section after destination position. + // + // This section is non-overlapping in that the copy length for this section + // is always less than or equal to the backwards distance. This can occur + // if a distance refers to data that wraps-around in the buffer. + // Thus, a backwards copy is performed here; that is, the exact bytes in + // the source prior to the copy is placed in the destination. + if srcPos < 0 { + srcPos += len(dd.hist) + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) + srcPos = 0 + } + + // Copy possibly overlapping section before destination position. + // + // This section can overlap if the copy length for this section is larger + // than the backwards distance. This is allowed by LZ77 so that repeated + // strings can be succinctly represented using (dist, length) pairs. + // Thus, a forwards copy is performed here; that is, the bytes copied is + // possibly dependent on the resulting bytes in the destination as the copy + // progresses along. This is functionally equivalent to the following: + // + // for i := 0; i < endPos-dstPos; i++ { + // dd.hist[dstPos+i] = dd.hist[srcPos+i] + // } + // dstPos = endPos + // + for dstPos < endPos { + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// tryWriteCopy tries to copy a string at a given (distance, length) to the +// output. This specialized version is optimized for short distances. +// +// This method is designed to be inlined for performance reasons. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) tryWriteCopy(dist, length int) int { + dstPos := dd.wrPos + endPos := dstPos + length + if dstPos < dist || endPos > len(dd.hist) { + return 0 + } + dstBase := dstPos + srcPos := dstPos - dist + + // Copy possibly overlapping section before destination position. +loop: + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + if dstPos < endPos { + goto loop // Avoid for-loop so that this function can be inlined + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// readFlush returns a slice of the historical buffer that is ready to be +// emitted to the user. The data returned by readFlush must be fully consumed +// before calling any other dictDecoder methods. 
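// Minimal standalone illustration (not part of this diff) of the overlapping
// forward copy that writeCopy implements above: when length exceeds dist, each
// copied byte may depend on bytes produced earlier in the same copy, which is
// how LZ77/RFC 1951 expresses run-length encoding with a (dist, length) pair.

package main

import "fmt"

// lz77Copy appends length bytes, each read dist positions back from the
// current end of hist; a simplified stand-in for the in-window copy above.
func lz77Copy(hist []byte, dist, length int) []byte {
	for i := 0; i < length; i++ {
		hist = append(hist, hist[len(hist)-dist])
	}
	return hist
}

func main() {
	// "ab" + copy(dist=2, length=6) -> "abababab"
	fmt.Println(string(lz77Copy([]byte("ab"), 2, 6)))
}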
+func (dd *dictDecoder) readFlush() []byte { + toRead := dd.hist[dd.rdPos:dd.wrPos] + dd.rdPos = dd.wrPos + if dd.wrPos == len(dd.hist) { + dd.wrPos, dd.rdPos = 0, 0 + dd.full = true + } + return toRead +} diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go new file mode 100644 index 000000000..c8124b5c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -0,0 +1,193 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Modified for deflate by Klaus Post (c) 2015. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "fmt" +) + +type fastEnc interface { + Encode(dst *tokens, src []byte) + Reset() +} + +func newFastEnc(level int) fastEnc { + switch level { + case 1: + return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} + case 2: + return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} + case 3: + return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} + case 4: + return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} + case 5: + return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} + case 6: + return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} + default: + panic("invalid level specified") + } +} + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. + baseMatchOffset = 1 // The smallest match offset + baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 + maxMatchOffset = 1 << 15 // The largest match offset + + bTableBits = 17 // Bits used in the big tables + bTableSize = 1 << bTableBits // Size of the table + allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history. + bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this. +) + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +type tableEntry struct { + offset int32 +} + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastGen struct { + hist []byte + cur int32 +} + +func (e *fastGen) addBlock(src []byte) int32 { + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.hist = make([]byte, 0, allocHistory) + } else { + if cap(e.hist) < maxMatchOffset*2 { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - maxMatchOffset + // copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:maxMatchOffset] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +type tableEntryPrev struct { + Cur tableEntry + Prev tableEntry +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. 
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
+}
+
+// hashLen returns a hash of the lowest mls bytes of u, using length output bits.
+// mls must be >=3 and <=8. Any other value will return a hash for 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be a constant for inlining.
+func hashLen(u uint64, length, mls uint8) uint32 {
+	switch mls {
+	case 3:
+		return (uint32(u<<8) * prime3bytes) >> (32 - length)
+	case 5:
+		return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
+	case 6:
+		return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
+	case 7:
+		return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
+	case 8:
+		return uint32((u * prime8bytes) >> (64 - length))
+	default:
+		return (uint32(u) * prime4bytes) >> (32 - length)
+	}
+}
+
+// matchlen will return the match length between offsets s and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >= 0 and s < len(src).
+func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
+	if debugDecode {
+		if t >= s {
+			panic(fmt.Sprint("t >=s:", t, s))
+		}
+		if int(s) >= len(src) {
+			panic(fmt.Sprint("s >= len(src):", s, len(src)))
+		}
+		if t < 0 {
+			panic(fmt.Sprint("t < 0:", t))
+		}
+		if s-t > maxMatchOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", maxMatchOffset, ")"))
+		}
+	}
+	s1 := int(s) + maxMatchLength - 4
+	if s1 > len(src) {
+		s1 = len(src)
+	}
+
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:s1], src[t:]))
+}
+
+// matchlenLong will return the match length between offsets s and t in src.
+// It is assumed that s > t, that t >= 0 and s < len(src).
+func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
+	if debugDeflate {
+		if t >= s {
+			panic(fmt.Sprint("t >=s:", t, s))
+		}
+		if int(s) >= len(src) {
+			panic(fmt.Sprint("s >= len(src):", s, len(src)))
+		}
+		if t < 0 {
+			panic(fmt.Sprint("t < 0:", t))
+		}
+		if s-t > maxMatchOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", maxMatchOffset, ")"))
+		}
+	}
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastGen) Reset() {
+	if cap(e.hist) < allocHistory {
+		e.hist = make([]byte, 0, allocHistory)
+	}
+	// We offset the current position so everything will be out of reach.
+	// If we are above the buffer reset it will be cleared anyway, since len(hist) == 0.
+	if e.cur <= bufferReset {
+		e.cur += maxMatchOffset + int32(len(e.hist))
+	}
+	e.hist = e.hist[:0]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
new file mode 100644
index 000000000..f70594c34
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -0,0 +1,1182 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+)
+
+const (
+	// The largest offset code.
+	offsetCodeCount = 30
+
+	// The special code used to mark the end of a block.
+	endBlockMarker = 256
+
+	// The first length code.
+	lengthCodesStart = 257
+
+	// The number of codegen codes.
+ codegenCodeCount = 19 + badCode = 255 + + // maxPredefinedTokens is the maximum number of tokens + // where we check if fixed size is smaller. + maxPredefinedTokens = 250 + + // bufferFlushSize indicates the buffer size + // after which bytes are flushed to the writer. + // Should preferably be a multiple of 6, since + // we accumulate 6 bytes between writes to the buffer. + bufferFlushSize = 246 +) + +// Minimum length code that emits bits. +const lengthExtraBitsMinCode = 8 + +// The number of extra bits needed by length code X - LENGTH_CODES_START. +var lengthExtraBits = [32]uint8{ + /* 257 */ 0, 0, 0, + /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, + /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, + /* 280 */ 4, 5, 5, 5, 5, 0, +} + +// The length indicated by length code X - LENGTH_CODES_START. +var lengthBase = [32]uint8{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, + 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, + 64, 80, 96, 112, 128, 160, 192, 224, 255, +} + +// Minimum offset code that emits bits. +const offsetExtraBitsMinCode = 4 + +// offset code word extra bits. +var offsetExtraBits = [32]int8{ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, + /* extended window */ + 14, 14, +} + +var offsetCombined = [32]uint32{} + +func init() { + var offsetBase = [32]uint32{ + /* normal deflate */ + 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, + 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, + 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, + 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, + 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, + 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, + + /* extended window */ + 0x008000, 0x00c000, + } + + for i := range offsetCombined[:] { + // Don't use extended window values... + if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { + continue + } + offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) + } +} + +// The odd order in which the codegen code sizes are written. +var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +type huffmanBitWriter struct { + // writer is the underlying writer. + // Do not use it directly; use the write method, which ensures + // that Write errors are sticky. + writer io.Writer + + // Data waiting to be written is bytes[0:nbytes] + // and then the low nbits of bits. + bits uint64 + nbits uint8 + nbytes uint8 + lastHuffMan bool + literalEncoding *huffmanEncoder + tmpLitEncoding *huffmanEncoder + offsetEncoding *huffmanEncoder + codegenEncoding *huffmanEncoder + err error + lastHeader int + // Set between 0 (reused block can be up to 2x the size) + logNewTablePenalty uint + bytes [256 + 8]byte + literalFreq [lengthCodesStart + 32]uint16 + offsetFreq [32]uint16 + codegenFreq [codegenCodeCount]uint16 + + // codegen must have an extra space for the final symbol. + codegen [literalCount + offsetCodeCount + 1]uint8 +} + +// Huffman reuse. +// +// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections. +// +// This is controlled by several variables: +// +// If lastHeader is non-zero the Huffman table can be reused. +// This also indicates that a Huffman table has been generated that can output all +// possible symbols. +// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated +// an EOB with the previous table must be written. +// +// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid. 
+// +// An incoming block estimates the output size of a new table using a 'fresh' by calculating the +// optimal size and adding a penalty in 'logNewTablePenalty'. +// A Huffman table is not optimal, which is why we add a penalty, and generating a new table +// is slower both for compression and decompression. + +func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { + return &huffmanBitWriter{ + writer: w, + literalEncoding: newHuffmanEncoder(literalCount), + tmpLitEncoding: newHuffmanEncoder(literalCount), + codegenEncoding: newHuffmanEncoder(codegenCodeCount), + offsetEncoding: newHuffmanEncoder(offsetCodeCount), + } +} + +func (w *huffmanBitWriter) reset(writer io.Writer) { + w.writer = writer + w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil + w.lastHeader = 0 + w.lastHuffMan = false +} + +func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { + a := t.offHist[:offsetCodeCount] + b := w.offsetEncoding.codes + b = b[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + + a = t.extraHist[:literalCount-256] + b = w.literalEncoding.codes[256:literalCount] + b = b[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + + a = t.litHist[:256] + b = w.literalEncoding.codes[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + return true +} + +func (w *huffmanBitWriter) flush() { + if w.err != nil { + w.nbits = 0 + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + n := w.nbytes + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + n++ + } + w.bits = 0 + w.write(w.bytes[:n]) + w.nbytes = 0 +} + +func (w *huffmanBitWriter) write(b []byte) { + if w.err != nil { + return + } + _, w.err = w.writer.Write(b) +} + +func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { + w.bits |= uint64(b) << (w.nbits & 63) + w.nbits += nb + if w.nbits >= 48 { + w.writeOutBits() + } +} + +func (w *huffmanBitWriter) writeBytes(bytes []byte) { + if w.err != nil { + return + } + n := w.nbytes + if w.nbits&7 != 0 { + w.err = InternalError("writeBytes with unfinished bits") + return + } + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + w.nbits -= 8 + n++ + } + if n != 0 { + w.write(w.bytes[:n]) + } + w.nbytes = 0 + w.write(bytes) +} + +// RFC 1951 3.2.7 specifies a special run-length encoding for specifying +// the literal and offset lengths arrays (which are concatenated into a single +// array). This method generates that run-length encoding. +// +// The result is written into the codegen array, and the frequencies +// of each code is written into the codegenFreq array. +// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional +// information. Code badCode is an end marker +// +// numLiterals The number of literals in literalEncoding +// numOffsets The number of offsets in offsetEncoding +// litenc, offenc The literal and offset encoder to use +func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { + for i := range w.codegenFreq { + w.codegenFreq[i] = 0 + } + // Note that we are using codegen both as a temporary variable for holding + // a copy of the frequencies, and as the place where we put the result. + // This is fine because the output is always shorter than the input used + // so far. 
+ codegen := w.codegen[:] // cache + // Copy the concatenated code sizes to codegen. Put a marker at the end. + cgnl := codegen[:numLiterals] + for i := range cgnl { + cgnl[i] = litEnc.codes[i].len() + } + + cgnl = codegen[numLiterals : numLiterals+numOffsets] + for i := range cgnl { + cgnl[i] = offEnc.codes[i].len() + } + codegen[numLiterals+numOffsets] = badCode + + size := codegen[0] + count := 1 + outIndex := 0 + for inIndex := 1; size != badCode; inIndex++ { + // INVARIANT: We have seen "count" copies of size that have not yet + // had output generated for them. + nextSize := codegen[inIndex] + if nextSize == size { + count++ + continue + } + // We need to generate codegen indicating "count" of size. + if size != 0 { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + count-- + for count >= 3 { + n := 6 + if n > count { + n = count + } + codegen[outIndex] = 16 + outIndex++ + codegen[outIndex] = uint8(n - 3) + outIndex++ + w.codegenFreq[16]++ + count -= n + } + } else { + for count >= 11 { + n := 138 + if n > count { + n = count + } + codegen[outIndex] = 18 + outIndex++ + codegen[outIndex] = uint8(n - 11) + outIndex++ + w.codegenFreq[18]++ + count -= n + } + if count >= 3 { + // count >= 3 && count <= 10 + codegen[outIndex] = 17 + outIndex++ + codegen[outIndex] = uint8(count - 3) + outIndex++ + w.codegenFreq[17]++ + count = 0 + } + } + count-- + for ; count >= 0; count-- { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + } + // Set up invariant for next time through the loop. + size = nextSize + count = 1 + } + // Marker indicating the end of the codegen. + codegen[outIndex] = badCode +} + +func (w *huffmanBitWriter) codegens() int { + numCodegens := len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + return numCodegens +} + +func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { + numCodegens = len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + return 3 + 5 + 5 + 4 + (3 * numCodegens) + + w.codegenEncoding.bitLength(w.codegenFreq[:]) + + int(w.codegenFreq[16])*2 + + int(w.codegenFreq[17])*3 + + int(w.codegenFreq[18])*7, numCodegens +} + +// dynamicSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) { + size = litEnc.bitLength(w.literalFreq[:]) + + offEnc.bitLength(w.offsetFreq[:]) + return size +} + +// dynamicSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { + header, numCodegens := w.headerSize() + size = header + + litEnc.bitLength(w.literalFreq[:]) + + offEnc.bitLength(w.offsetFreq[:]) + + extraBits + return size, numCodegens +} + +// extraBitSize will return the number of bits that will be written +// as "extra" bits on matches. +func (w *huffmanBitWriter) extraBitSize() int { + total := 0 + for i, n := range w.literalFreq[257:literalCount] { + total += int(n) * int(lengthExtraBits[i&31]) + } + for i, n := range w.offsetFreq[:offsetCodeCount] { + total += int(n) * int(offsetExtraBits[i&31]) + } + return total +} + +// fixedSize returns the size of dynamically encoded data in bits. 
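// A simplified sketch (not the package's function) of the RFC 1951 section
// 3.2.7 run-length encoding that generateCodegen above produces: runs of a
// repeated code length become code 16 (repeat previous 3-6 times, 2 extra
// bits), and runs of zeros become code 17 (3-10 zeros, 3 extra bits) or
// code 18 (11-138 zeros, 7 extra bits).

package main

import "fmt"

func rlEncodeCodeLengths(lengths []uint8) (out []uint8) {
	for i := 0; i < len(lengths); {
		v := lengths[i]
		run := 1
		for i+run < len(lengths) && lengths[i+run] == v {
			run++
		}
		switch {
		case v == 0 && run >= 11:
			n := min(run, 138)
			out = append(out, 18, uint8(n-11)) // n zeros
			run = n
		case v == 0 && run >= 3:
			out = append(out, 17, uint8(run-3)) // run zeros
		case v != 0 && run >= 4:
			out = append(out, v) // emit the length once...
			rep := min(run-1, 6)
			out = append(out, 16, uint8(rep-3)) // ...then repeat it rep times
			run = 1 + rep
		default:
			for j := 0; j < run; j++ {
				out = append(out, v) // short runs are emitted literally
			}
		}
		i += run
	}
	return out
}

func main() {
	// 5x8, 4x0, 9 -> [8 16 1 17 1 9]
	fmt.Println(rlEncodeCodeLengths([]uint8{8, 8, 8, 8, 8, 0, 0, 0, 0, 9}))
}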
+func (w *huffmanBitWriter) fixedSize(extraBits int) int { + return 3 + + fixedLiteralEncoding.bitLength(w.literalFreq[:]) + + fixedOffsetEncoding.bitLength(w.offsetFreq[:]) + + extraBits +} + +// storedSize calculates the stored size, including header. +// The function returns the size in bits and whether the block +// fits inside a single block. +func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { + if in == nil { + return 0, false + } + if len(in) <= maxStoreBlockSize { + return (len(in) + 5) * 8, true + } + return 0, false +} + +func (w *huffmanBitWriter) writeCode(c hcode) { + // The function does not get inlined if we "& 63" the shift. + w.bits |= c.code64() << (w.nbits & 63) + w.nbits += c.len() + if w.nbits >= 48 { + w.writeOutBits() + } +} + +// writeOutBits will write bits to the buffer. +func (w *huffmanBitWriter) writeOutBits() { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + + // We over-write, but faster... + binary.LittleEndian.PutUint64(w.bytes[n:], bits) + n += 6 + + if n >= bufferFlushSize { + if w.err != nil { + n = 0 + return + } + w.write(w.bytes[:n]) + n = 0 + } + + w.nbytes = n +} + +// Write the header of a dynamic Huffman block to the output stream. +// +// numLiterals The number of literals specified in codegen +// numOffsets The number of offsets specified in codegen +// numCodegens The number of codegens used in codegen +func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { + if w.err != nil { + return + } + var firstBits int32 = 4 + if isEof { + firstBits = 5 + } + w.writeBits(firstBits, 3) + w.writeBits(int32(numLiterals-257), 5) + w.writeBits(int32(numOffsets-1), 5) + w.writeBits(int32(numCodegens-4), 4) + + for i := 0; i < numCodegens; i++ { + value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) + w.writeBits(int32(value), 3) + } + + i := 0 + for { + var codeWord = uint32(w.codegen[i]) + i++ + if codeWord == badCode { + break + } + w.writeCode(w.codegenEncoding.codes[codeWord]) + + switch codeWord { + case 16: + w.writeBits(int32(w.codegen[i]), 2) + i++ + case 17: + w.writeBits(int32(w.codegen[i]), 3) + i++ + case 18: + w.writeBits(int32(w.codegen[i]), 7) + i++ + } + } +} + +// writeStoredHeader will write a stored header. +// If the stored block is only used for EOF, +// it is replaced with a fixed huffman block. +func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { + if w.err != nil { + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes. + if length == 0 && isEof { + w.writeFixedHeader(isEof) + // EOB: 7 bits, value: 0 + w.writeBits(0, 7) + w.flush() + return + } + + var flag int32 + if isEof { + flag = 1 + } + w.writeBits(flag, 3) + w.flush() + w.writeBits(int32(length), 16) + w.writeBits(int32(^uint16(length)), 16) +} + +func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { + if w.err != nil { + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + // Indicate that we are a fixed Huffman block + var value int32 = 2 + if isEof { + value = 3 + } + w.writeBits(value, 3) +} + +// writeBlock will write a block of tokens with the smallest encoding. +// The original input can be supplied, and if the huffman encoded data +// is larger than the original bytes, the data will be written as a +// stored block. 
+// If the input is nil, the tokens will always be Huffman encoded. +func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens.AddEOB() + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + numLiterals, numOffsets := w.indexTokens(tokens, false) + w.generate() + var extraBits int + storedSize, storable := w.storedSize(input) + if storable { + extraBits = w.extraBitSize() + } + + // Figure out smallest code. + // Fixed Huffman baseline. + var literalEncoding = fixedLiteralEncoding + var offsetEncoding = fixedOffsetEncoding + var size = math.MaxInt32 + if tokens.n < maxPredefinedTokens { + size = w.fixedSize(extraBits) + } + + // Dynamic Huffman? + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + if dynamicSize < size { + size = dynamicSize + literalEncoding = w.literalEncoding + offsetEncoding = w.offsetEncoding + } + + // Stored bytes? + if storable && storedSize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. + if literalEncoding == fixedLiteralEncoding { + w.writeFixedHeader(eof) + } else { + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + } + + // Write the tokens. + w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes) +} + +// writeBlockDynamic encodes a block using a dynamic Huffman table. +// This should be used if the symbols used have a disproportionate +// histogram distribution. +// If input is supplied and the compression savings are below 1/16th of the +// input size the block is stored. +func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) { + if w.err != nil { + return + } + + sync = sync || eof + if sync { + tokens.AddEOB() + } + + // We cannot reuse pure huffman table, and must mark as EOF. + if (w.lastHuffMan || eof) && w.lastHeader > 0 { + // We will not try to reuse. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + w.lastHuffMan = false + } + + // fillReuse enables filling of empty values. + // This will make encodings always reusable without testing. + // However, this does not appear to benefit on most cases. + const fillReuse = false + + // Check if we can reuse... + if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) { + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + numLiterals, numOffsets := w.indexTokens(tokens, !sync) + extraBits := 0 + ssize, storable := w.storedSize(input) + + const usePrefs = true + if storable || w.lastHeader > 0 { + extraBits = w.extraBitSize() + } + + var size int + + // Check if we should reuse. + if w.lastHeader > 0 { + // Estimate size for using a new table. + // Use the previous header size as the best estimate. + newSize := w.lastHeader + tokens.EstimatedBits() + newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty + + // The estimated size is calculated as an optimal table. + // We add a penalty to make it more realistic and re-use a bit more. 
+ reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits + + // Check if a new table is better. + if newSize < reuseSize { + // Write the EOB we owe. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + size = newSize + w.lastHeader = 0 + } else { + size = reuseSize + } + + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } + + // We want a new block/table + if w.lastHeader == 0 { + if fillReuse && !sync { + w.fillTokens() + numLiterals, numOffsets = maxNumLit, maxNumDist + } else { + w.literalFreq[endBlockMarker] = 1 + } + + w.generate() + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + + var numCodegens int + if fillReuse && !sync { + // Reindex for accurate size... + w.indexTokens(tokens, true) + } + size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + // Store predefined, if we don't get a reasonable improvement. + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { + // Store bytes, if we don't get an improvement. + if storable && ssize <= preSize { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + + if storable && ssize <= size { + // Store bytes, if we don't get an improvement. + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Write Huffman table. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + if !sync { + w.lastHeader, _ = w.headerSize() + } + w.lastHuffMan = false + } + + if sync { + w.lastHeader = 0 + } + // Write the tokens. + w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) +} + +func (w *huffmanBitWriter) fillTokens() { + for i, v := range w.literalFreq[:literalCount] { + if v == 0 { + w.literalFreq[i] = 1 + } + } + for i, v := range w.offsetFreq[:offsetCodeCount] { + if v == 0 { + w.offsetFreq[i] = 1 + } + } +} + +// indexTokens indexes a slice of tokens, and updates +// literalFreq and offsetFreq, and generates literalEncoding +// and offsetEncoding. +// The number of literal and offset tokens is returned. 
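// A schematic sketch (hypothetical names, heavily simplified) of the
// reuse-vs-new-table estimate described above: the optimal cost of a freshly
// generated table is inflated by size>>logNewTablePenalty before being
// compared against the bit cost of reusing the previous Huffman table.

package main

import "fmt"

func preferNewTable(newTableBits, reuseBits int, logNewTablePenalty uint) bool {
	// A computed table size is optimal, so penalize it to reflect the extra
	// header bits and the slower encode/decode of switching tables.
	newTableBits += newTableBits >> logNewTablePenalty
	return newTableBits < reuseBits
}

func main() {
	fmt.Println(preferNewTable(6900, 7000, 7)) // 6953 < 7000 -> true
	fmt.Println(preferNewTable(6990, 7000, 7)) // 7044 >= 7000 -> false
}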
+func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { + //copy(w.literalFreq[:], t.litHist[:]) + *(*[256]uint16)(w.literalFreq[:]) = t.litHist + //copy(w.literalFreq[256:], t.extraHist[:]) + *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist + w.offsetFreq = t.offHist + + if t.n == 0 { + return + } + if filled { + return maxNumLit, maxNumDist + } + // get the number of literals + numLiterals = len(w.literalFreq) + for w.literalFreq[numLiterals-1] == 0 { + numLiterals-- + } + // get the number of offsets + numOffsets = len(w.offsetFreq) + for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { + numOffsets-- + } + if numOffsets == 0 { + // We haven't found a single match. If we want to go with the dynamic encoding, + // we should count at least one offset to be sure that the offset huffman tree could be encoded. + w.offsetFreq[0] = 1 + numOffsets = 1 + } + return +} + +func (w *huffmanBitWriter) generate() { + w.literalEncoding.generate(w.literalFreq[:literalCount], 15) + w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) +} + +// writeTokens writes a slice of tokens to the output. +// codes for literal and offset encoding must be supplied. +func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { + if w.err != nil { + return + } + if len(tokens) == 0 { + return + } + + // Only last token should be endBlockMarker. + var deferEOB bool + if tokens[len(tokens)-1] == endBlockMarker { + tokens = tokens[:len(tokens)-1] + deferEOB = true + } + + // Create slices up to the next power of two to avoid bounds checks. + lits := leCodes[:256] + offs := oeCodes[:32] + lengths := leCodes[lengthCodesStart:] + lengths = lengths[:32] + + // Go 1.16 LOVES having these on stack. + bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + + for _, t := range tokens { + if t < 256 { + //w.writeCode(lits[t.literal()]) + c := lits[t] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + continue + } + + // Write the length + length := t.length() + lengthCode := lengthCode(length) & 31 + if false { + w.writeCode(lengths[lengthCode]) + } else { + // inlined + c := lengths[lengthCode] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + + if lengthCode >= lengthExtraBitsMinCode { + extraLengthBits := lengthExtraBits[lengthCode] + //w.writeBits(extraLength, extraLengthBits) + extraLength := int32(length - lengthBase[lengthCode]) + bits |= uint64(extraLength) << (nbits & 63) + nbits += extraLengthBits + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + // Write the offset + offset := t.offset() + offsetCode := (offset >> 16) & 31 + if false { 
+ w.writeCode(offs[offsetCode]) + } else { + // inlined + c := offs[offsetCode] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + + if offsetCode >= offsetExtraBitsMinCode { + offsetComb := offsetCombined[offsetCode] + //w.writeBits(extraOffset, extraOffsetBits) + bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) + nbits += uint8(offsetComb) + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + } + // Restore... + w.bits, w.nbits, w.nbytes = bits, nbits, nbytes + + if deferEOB { + w.writeCode(leCodes[endBlockMarker]) + } +} + +// huffOffset is a static offset encoder used for huffman only encoding. +// It can be reused since we will not be encoding offset values. +var huffOffset *huffmanEncoder + +func init() { + w := newHuffmanBitWriter(nil) + w.offsetFreq[0] = 1 + huffOffset = newHuffmanEncoder(offsetCodeCount) + huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15) +} + +// writeBlockHuff encodes a block of bytes as either +// Huffman encoded literals or uncompressed bytes if the +// results only gains very little from compression. +func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { + if w.err != nil { + return + } + + // Clear histogram + for i := range w.literalFreq[:] { + w.literalFreq[i] = 0 + } + if !w.lastHuffMan { + for i := range w.offsetFreq[:] { + w.offsetFreq[i] = 0 + } + } + + const numLiterals = endBlockMarker + 1 + const numOffsets = 1 + + // Add everything as literals + // We have to estimate the header size. + // Assume header is around 70 bytes: + // https://stackoverflow.com/a/25454430 + const guessHeaderSizeBits = 70 * 8 + histogram(input, w.literalFreq[:numLiterals]) + ssize, storable := w.storedSize(input) + if storable && len(input) > 1024 { + // Quick check for incompressible content. + abs := float64(0) + avg := float64(len(input)) / 256 + max := float64(len(input) * 2) + for _, v := range w.literalFreq[:256] { + diff := float64(v) - avg + abs += diff * diff + if abs > max { + break + } + } + if abs < max { + if debugDeflate { + fmt.Println("stored", abs, "<", max) + } + // No chance we can compress this... + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } + w.literalFreq[endBlockMarker] = 1 + w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) + estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals]) + if estBits < math.MaxInt32 { + estBits += w.lastHeader + if w.lastHeader == 0 { + estBits += guessHeaderSizeBits + } + estBits += estBits >> w.logNewTablePenalty + } + + // Store bytes, if we don't get a reasonable improvement. 
+ if storable && ssize <= estBits { + if debugDeflate { + fmt.Println("stored,", ssize, "<=", estBits) + } + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + if w.lastHeader > 0 { + reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256]) + + if estBits < reuseSize { + if debugDeflate { + fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") + } + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } else if debugDeflate { + fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) + } + } + + count := 0 + if w.lastHeader == 0 { + // Use the temp encoding, so swap. + w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + numCodegens := w.codegens() + + // Huffman. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + w.lastHuffMan = true + w.lastHeader, _ = w.headerSize() + if debugDeflate { + count += w.lastHeader + fmt.Println("header:", count/8) + } + } + + encoding := w.literalEncoding.codes[:256] + // Go 1.16 LOVES having these on stack. At least 1.5x the speed. + bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + + if debugDeflate { + count -= int(nbytes)*8 + int(nbits) + } + // Unroll, write 3 codes/loop. + // Fastest number of unrolls. + for len(input) > 3 { + // We must have at least 48 bits free. + if nbits >= 8 { + n := nbits >> 3 + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + bits >>= (n * 8) & 63 + nbits -= n * 8 + nbytes += n + } + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + a, b := encoding[input[0]], encoding[input[1]] + bits |= a.code64() << (nbits & 63) + bits |= b.code64() << ((nbits + a.len()) & 63) + c := encoding[input[2]] + nbits += b.len() + a.len() + bits |= c.code64() << (nbits & 63) + nbits += c.len() + input = input[3:] + } + + // Remaining... + for _, t := range input { + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + // Bitwriting inlined, ~30% speedup + c := encoding[t] + bits |= c.code64() << (nbits & 63) + + nbits += c.len() + if debugDeflate { + count += int(c.len()) + } + } + // Restore... + w.bits, w.nbits, w.nbytes = bits, nbits, nbytes + + if debugDeflate { + nb := count + int(nbytes)*8 + int(nbits) + fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") + } + // Flush if needed to have space. 
+	if w.nbits >= 48 {
+		w.writeOutBits()
+	}
+
+	if eof || sync {
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+		w.lastHuffMan = false
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
new file mode 100644
index 000000000..be7b58b47
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -0,0 +1,417 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"math"
+	"math/bits"
+)
+
+const (
+	maxBitsLimit = 16
+	// number of valid literals
+	literalCount = 286
+)
+
+// hcode is a huffman code with a bit code and bit length.
+type hcode uint32
+
+func (h hcode) len() uint8 {
+	return uint8(h)
+}
+
+func (h hcode) code64() uint64 {
+	return uint64(h >> 8)
+}
+
+func (h hcode) zero() bool {
+	return h == 0
+}
+
+type huffmanEncoder struct {
+	codes    []hcode
+	bitCount [17]int32
+
+	// Allocate a reusable buffer with the longest possible frequency table.
+	// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
+	// The largest of these is literalCount, so we allocate for that case.
+	freqcache [literalCount + 1]literalNode
+}
+
+type literalNode struct {
+	literal uint16
+	freq    uint16
+}
+
+// A levelInfo describes the state of the constructed tree for a given depth.
+type levelInfo struct {
+	// Our level. for better printing
+	level int32
+
+	// The frequency of the last node at this level
+	lastFreq int32
+
+	// The frequency of the next character to add to this level
+	nextCharFreq int32
+
+	// The frequency of the next pair (from level below) to add to this level.
+	// Only valid if the "needed" value of the next lower level is 0.
+	nextPairFreq int32
+
+	// The number of chains remaining to generate for this level before moving
+	// up to the next level
+	needed int32
+}
+
+// set sets the code and length of an hcode.
+func (h *hcode) set(code uint16, length uint8) {
+	*h = hcode(length) | (hcode(code) << 8)
+}
+
+func newhcode(code uint16, length uint8) hcode {
+	return hcode(length) | (hcode(code) << 8)
+}
+
+func reverseBits(number uint16, bitLength byte) uint16 {
+	return bits.Reverse16(number << ((16 - bitLength) & 15))
+}
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
+
+func newHuffmanEncoder(size int) *huffmanEncoder {
+	// Make capacity to next power of two.
+	c := uint(bits.Len32(uint32(size - 1)))
+	return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
+}
+
+// Generates a HuffmanCode corresponding to the fixed literal table.
+func generateFixedLiteralEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(literalCount)
+	codes := h.codes
+	var ch uint16
+	for ch = 0; ch < literalCount; ch++ {
+		var bits uint16
+		var size uint8
+		switch {
+		case ch < 144:
+			// size 8, 000110000 .. 10111111
+			bits = ch + 48
+			size = 8
+		case ch < 256:
+			// size 9, 110010000 .. 111111111
+			bits = ch - 144 + 400
+			size = 9
+		case ch < 280:
+			// size 7, 0000000 .. 0010111
+			bits = ch - 256
+			size = 7
+		default:
+			// size 8, 11000000 .. 11000111
+			bits = ch - 280 + 192
+			size = 8
+		}
+		codes[ch] = newhcode(reverseBits(bits, size), size)
+	}
+	return h
+}
+
+func generateFixedOffsetEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(30)
+	codes := h.codes
+	for ch := range codes {
+		codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
+	}
+	return h
+}
+
+var fixedLiteralEncoding = generateFixedLiteralEncoding()
+var fixedOffsetEncoding = generateFixedOffsetEncoding()
+
+func (h *huffmanEncoder) bitLength(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			total += int(f) * int(h.codes[i].len())
+		}
+	}
+	return total
+}
+
+func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
+	var total int
+	for _, f := range b {
+		total += int(h.codes[f].len())
+	}
+	return total
+}
+
+// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused.
+func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			code := h.codes[i]
+			if code.zero() {
+				return math.MaxInt32
+			}
+			total += int(f) * int(code.len())
+		}
+	}
+	return total
+}
+
+// Return the number of literals assigned to each bit size in the Huffman encoding.
+//
+// This method is only called when list.length >= 3
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+// list  An array of the literals with non-zero frequencies
+// and their associated frequencies. The array is in order of increasing
+// frequency, and has as its last element a special element with frequency
+// MaxInt32
+//
+// maxBits  The maximum number of bits that should be used to encode any literal.
+// Must be less than 16.
+//
+// return  An integer array in which array[i] indicates the number of literals
+// that should be encoded in i bits.
+func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
+	if maxBits >= maxBitsLimit {
+		panic("flate: maxBits too large")
+	}
+	n := int32(len(list))
+	list = list[0 : n+1]
+	list[n] = maxNode()
+
+	// The tree can't have greater depth than n - 1, no matter what.
This + // saves a little bit of work in some small cases + if maxBits > n-1 { + maxBits = n - 1 + } + + // Create information about each of the levels. + // A bogus "Level 0" whose sole purpose is so that + // level1.prev.needed==0. This makes level1.nextPairFreq + // be a legitimate value that never gets chosen. + var levels [maxBitsLimit]levelInfo + // leafCounts[i] counts the number of literals at the left + // of ancestors of the rightmost node at level i. + // leafCounts[i][j] is the number of literals at the left + // of the level j ancestor. + var leafCounts [maxBitsLimit][maxBitsLimit]int32 + + // Descending to only have 1 bounds check. + l2f := int32(list[2].freq) + l1f := int32(list[1].freq) + l0f := int32(list[0].freq) + int32(list[1].freq) + + for level := int32(1); level <= maxBits; level++ { + // For every level, the first two items are the first two characters. + // We initialize the levels as if we had already figured this out. + levels[level] = levelInfo{ + level: level, + lastFreq: l1f, + nextCharFreq: l2f, + nextPairFreq: l0f, + } + leafCounts[level][level] = 2 + if level == 1 { + levels[level].nextPairFreq = math.MaxInt32 + } + } + + // We need a total of 2*n - 2 items at top level and have already generated 2. + levels[maxBits].needed = 2*n - 4 + + level := uint32(maxBits) + for level < 16 { + l := &levels[level] + if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { + // We've run out of both leafs and pairs. + // End all calculations for this level. + // To make sure we never come back to this level or any lower level, + // set nextPairFreq impossibly large. + l.needed = 0 + levels[level+1].nextPairFreq = math.MaxInt32 + level++ + continue + } + + prevFreq := l.lastFreq + if l.nextCharFreq < l.nextPairFreq { + // The next item on this row is a leaf node. + n := leafCounts[level][level] + 1 + l.lastFreq = l.nextCharFreq + // Lower leafCounts are the same of the previous node. + leafCounts[level][level] = n + e := list[n] + if e.literal < math.MaxUint16 { + l.nextCharFreq = int32(e.freq) + } else { + l.nextCharFreq = math.MaxInt32 + } + } else { + // The next item on this row is a pair from the previous row. + // nextPairFreq isn't valid until we generate two + // more values in the level below + l.lastFreq = l.nextPairFreq + // Take leaf counts from the lower level, except counts[level] remains the same. + if true { + save := leafCounts[level][level] + leafCounts[level] = leafCounts[level-1] + leafCounts[level][level] = save + } else { + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + } + levels[l.level-1].needed = 2 + } + + if l.needed--; l.needed == 0 { + // We've done everything we need to do for this level. + // Continue calculating one level up. Fill in nextPairFreq + // of that level with the sum of the two nodes we've just calculated on + // this level. + if l.level == maxBits { + // All done! + break + } + levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq + level++ + } else { + // If we stole from below, move down temporarily to replenish it. + for levels[level-1].needed > 0 { + level-- + } + } + } + + // Somethings is wrong if at the end, the top level is null or hasn't used + // all of the leaves. + if leafCounts[maxBits][maxBits] != n { + panic("leafCounts[maxBits][maxBits] != n") + } + + bitCount := h.bitCount[:maxBits+1] + bits := 1 + counts := &leafCounts[maxBits] + for level := maxBits; level > 0; level-- { + // chain.leafCount gives the number of literals requiring at least "bits" + // bits to encode. 
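// Aside: the per-length counts computed above describe a complete prefix code
// exactly when they meet the Kraft equality: the sum over i of
// count[i]*2^(max-i) equals 2^max. A small standalone checker (kraftComplete
// is not a package API):
package main

import "fmt"

func kraftComplete(bitCount []int32) bool {
    max := len(bitCount) - 1
    var sum int64
    for i, c := range bitCount {
        if i > 0 {
            sum += int64(c) << uint(max-i)
        }
    }
    return sum == 1<<uint(max)
}

func main() {
    // Three symbols with code lengths {1, 2, 2}: 1/2 + 1/4 + 1/4 == 1.
    fmt.Println(kraftComplete([]int32{0, 1, 2})) // true
}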
+ bitCount[bits] = counts[level] - counts[level-1] + bits++ + } + return bitCount +} + +// Look at the leaves and assign them a bit count and an encoding as specified +// in RFC 1951 3.2.2 +func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { + code := uint16(0) + for n, bits := range bitCount { + code <<= 1 + if n == 0 || bits == 0 { + continue + } + // The literals list[len(list)-bits] .. list[len(list)-bits] + // are encoded using "bits" bits, and get the values + // code, code + 1, .... The code values are + // assigned in literal order (not frequency order). + chunk := list[len(list)-int(bits):] + + sortByLiteral(chunk) + for _, node := range chunk { + h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n)) + code++ + } + list = list[0 : len(list)-int(bits)] + } +} + +// Update this Huffman Code object to be the minimum code for the specified frequency count. +// +// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. +// maxBits The maximum number of bits to use for any literal. +func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { + list := h.freqcache[:len(freq)+1] + codes := h.codes[:len(freq)] + // Number of non-zero literals + count := 0 + // Set list to be the set of all non-zero literals and their frequencies + for i, f := range freq { + if f != 0 { + list[count] = literalNode{uint16(i), f} + count++ + } else { + codes[i] = 0 + } + } + list[count] = literalNode{} + + list = list[:count] + if count <= 2 { + // Handle the small cases here, because they are awkward for the general case code. With + // two or fewer literals, everything has bit length 1. + for i, node := range list { + // "list" is in order of increasing literal value. + h.codes[node.literal].set(uint16(i), 1) + } + return + } + sortByFreq(list) + + // Get the number of literals for each bit count + bitCount := h.bitCounts(list, maxBits) + // And do the assignment + h.assignEncodingAndSize(bitCount, list) +} + +// atLeastOne clamps the result between 1 and 15. +func atLeastOne(v float32) float32 { + if v < 1 { + return 1 + } + if v > 15 { + return 15 + } + return v +} + +func histogram(b []byte, h []uint16) { + if true && len(b) >= 8<<10 { + // Split for bigger inputs + histogramSplit(b, h) + } else { + h = h[:256] + for _, t := range b { + h[t]++ + } + } +} + +func histogramSplit(b []byte, h []uint16) { + // Tested, and slightly faster than 2-way. + // Writing to separate arrays and combining is also slightly slower. + h = h[:256] + for len(b)&3 != 0 { + h[b[0]]++ + b = b[1:] + } + n := len(b) / 4 + x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:] + y, z, w = y[:len(x)], z[:len(x)], w[:len(x)] + for i, t := range x { + v0 := &h[t] + v1 := &h[y[i]] + v3 := &h[w[i]] + v2 := &h[z[i]] + *v0++ + *v1++ + *v2++ + *v3++ + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go new file mode 100644 index 000000000..6c05ba8c1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go @@ -0,0 +1,159 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. 
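// Aside: histogramSplit above walks four interleaved slices per iteration so
// consecutive counter increments usually hit different table entries, easing
// store-to-load stalls on large inputs. The same idea, condensed (histogram4
// is illustrative only):
package main

import "fmt"

func histogram4(b []byte, h *[256]uint32) {
    for len(b)&3 != 0 { // peel to a multiple of four
        h[b[0]]++
        b = b[1:]
    }
    n := len(b) / 4
    x, y, z, w := b[:n], b[n:2*n], b[2*n:3*n], b[3*n:]
    for i := range x {
        h[x[i]]++
        h[y[i]]++
        h[z[i]]++
        h[w[i]]++
    }
}

func main() {
    var h [256]uint32
    histogram4([]byte("abracadabra"), &h)
    fmt.Println(h['a'], h['b'], h['r']) // 5 2 2
}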
+func sortByFreq(data []literalNode) { + n := len(data) + quickSortByFreq(data, 0, n, maxDepth(n)) +} + +func quickSortByFreq(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivotByFreq(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSortByFreq(data, a, mlo, maxDepth) + a = mhi // i.e., quickSortByFreq(data, mhi, b) + } else { + quickSortByFreq(data, mhi, b, maxDepth) + b = mlo // i.e., quickSortByFreq(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSortByFreq(data, a, b) + } +} + +func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. + s := (hi - lo) / 8 + medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s) + medianOfThreeSortByFreq(data, m, m-s, m+s) + medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThreeSortByFreq(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { + } + b := a + for { + for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot + } + for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. 
+ protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot + } + for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSortByFreq(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// quickSortByFreq, loosely following Bentley and McIlroy, +// ``Engineering a Sort Function,'' SP&E November 1993. + +// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. +func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go new file mode 100644 index 000000000..93f1aea10 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go @@ -0,0 +1,201 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. 
+func sortByLiteral(data []literalNode) { + n := len(data) + quickSort(data, 0, n, maxDepth(n)) +} + +func quickSort(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivot(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSort(data, a, mlo, maxDepth) + a = mhi // i.e., quickSort(data, mhi, b) + } else { + quickSort(data, mhi, b, maxDepth) + b = mlo // i.e., quickSort(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].literal < data[i-6].literal { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSort(data, a, b) + } +} +func heapSort(data []literalNode, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDown(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDown(data, lo, i, first) + } +} + +// siftDown implements the heap property on data[lo, hi). +// first is an offset into the array where the root of the heap lies. +func siftDown(data []literalNode, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && data[first+child].literal < data[first+child+1].literal { + child++ + } + if data[first+root].literal > data[first+child].literal { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} +func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. + s := (hi - lo) / 8 + medianOfThree(data, lo, lo+s, lo+2*s) + medianOfThree(data, m, m-s, m+s) + medianOfThree(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThree(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && data[a].literal < data[pivot].literal; a++ { + } + b := a + for { + for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot + } + for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. 
+ protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].literal > data[pivot].literal { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot + } + for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSort(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && data[j].literal < data[j-1].literal; j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// maxDepth returns a threshold at which quicksort should switch +// to heapsort. It returns 2*ceil(lg(n+1)). +func maxDepth(n int) int { + var depth int + for i := n; i > 0; i >>= 1 { + depth++ + } + return depth * 2 +} + +// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. +func medianOfThree(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].literal < data[m1].literal { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go new file mode 100644 index 000000000..2f410d64f --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -0,0 +1,829 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package flate implements the DEFLATE compressed data format, described in +// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file +// formats. +package flate + +import ( + "bufio" + "compress/flate" + "fmt" + "io" + "math/bits" + "sync" +) + +const ( + maxCodeLen = 16 // max length of Huffman code + maxCodeLenMask = 15 // mask for max length of Huffman code + // The next three numbers come from the RFC section 3.2.7, with the + // additional proviso in section 3.2.5 which implies that distance codes + // 30 and 31 should never occur in compressed data. + maxNumLit = 286 + maxNumDist = 30 + numCodes = 19 // number of codes in Huffman meta-code + + debugDecode = false +) + +// Value of length - 3 and extra bits. 
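// Aside: decCodeToLen below stores (length-3, extra-bit count) for the length
// codes 257..285 of RFC 1951 section 3.2.5, so decoding a match length is one
// table lookup plus extra bits. The same mapping as a plain function; the
// readBits parameter stands in for pulling bits off the stream:
package main

import "fmt"

// Base lengths and extra-bit counts for codes 257..285 (RFC 1951 3.2.5).
var lenBase = [29]int{3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
    35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258}
var lenExtra = [29]uint{0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
    3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0}

func matchLength(code int, readBits func(n uint) int) int {
    i := code - 257
    return lenBase[i] + readBits(lenExtra[i])
}

func main() {
    one := func(n uint) int { // pretend the next extra bits are all ones
        return 1<<n - 1
    }
    fmt.Println(matchLength(257, one)) // 3: no extra bits
    fmt.Println(matchLength(266, one)) // 14: base 13 plus one extra bit
}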
+type lengthExtra struct { + length, extra uint8 +} + +var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// Initialize the fixedHuffmanDecoder only once upon first use. +var fixedOnce sync.Once +var fixedHuffmanDecoder huffmanDecoder + +// A CorruptInputError reports the presence of corrupt input at a given offset. +type CorruptInputError = flate.CorruptInputError + +// An InternalError reports an error in the flate code itself. +type InternalError string + +func (e InternalError) Error() string { return "flate: internal error: " + string(e) } + +// A ReadError reports an error encountered while reading input. +// +// Deprecated: No longer returned. +type ReadError = flate.ReadError + +// A WriteError reports an error encountered while writing output. +// +// Deprecated: No longer returned. +type WriteError = flate.WriteError + +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. +type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + +// The data structure for decoding Huffman tables is based on that of +// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), +// For codes smaller than the table width, there are multiple entries +// (each combination of trailing bits has the same value). For codes +// larger than the table width, the table contains a link to an overflow +// table. The width of each entry in the link table is the maximum code +// size minus the chunk width. +// +// Note that you can do a lookup in the table even without all bits +// filled. Since the extra bits are zero, and the DEFLATE Huffman codes +// have the property that shorter codes come before longer ones, the +// bit length estimate in the result is a lower bound on the actual +// number of bits. 
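// Aside: each table entry ("chunk") described above is a uint16 holding the
// code width in its low 4 bits and the decoded value, or link-table index, in
// the rest. One lookup step written out; decodeOne, table, and peek are
// stand-ins rather than package API:
package main

import "fmt"

const chunkBits = 9

func decodeOne(table []uint16, peek uint32) (value uint16, width uint8) {
    chunk := table[peek&(1<<chunkBits-1)]
    return chunk >> 4, uint8(chunk & 15)
}

func main() {
    table := make([]uint16, 1<<chunkBits)
    // Give symbol 'A' the 3-bit code 0b101: every index whose low 3 bits are
    // 0b101 maps to it, so peeking 9 bits still resolves in a single step.
    for i := 0b101; i < len(table); i += 1 << 3 {
        table[i] = 'A'<<4 | 3
    }
    v, w := decodeOne(table, 0b110101101) // low bits are 101
    fmt.Printf("%c width=%d\n", v, w)     // A width=3
}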
+// +// See the following: +// http://www.gzip.org/algorithm.txt + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + maxRead int // the maximum number of bits we can read and not overread + chunks *[huffmanNumChunks]uint16 // chunks as described above + links [][]uint16 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(lengths []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.chunks == nil { + h.chunks = new([huffmanNumChunks]uint16) + } + + if h.maxRead != 0 { + *h = huffmanDecoder{chunks: h.chunks, links: h.links} + } + + // Count number of codes of each length, + // compute maxRead and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range lengths { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n&maxCodeLenMask]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). + if max == 0 { + return true + } + + code := 0 + var nextcode [maxCodeLen]int + for i := min; i <= max; i++ { + code <<= 1 + nextcode[i&maxCodeLenMask] = code + code += count[i&maxCodeLenMask] + } + + // Check that the coding is complete (i.e., that we've + // assigned all 2-to-the-max possible bit sequences). + // Exception: To be compatible with zlib, we also need to + // accept degenerate single-code codings. See also + // TestDegenerateHuffmanCoding. + if code != 1< huffmanChunkBits { + numLinks := 1 << (uint(max) - huffmanChunkBits) + h.linkMask = uint32(numLinks - 1) + + // create link tables + link := nextcode[huffmanChunkBits+1] >> 1 + if cap(h.links) < huffmanNumChunks-link { + h.links = make([][]uint16, huffmanNumChunks-link) + } else { + h.links = h.links[:huffmanNumChunks-link] + } + for j := uint(link); j < huffmanNumChunks; j++ { + reverse := int(bits.Reverse16(uint16(j))) + reverse >>= uint(16 - huffmanChunkBits) + off := j - uint(link) + if sanity && h.chunks[reverse] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[reverse] = uint16(off<>= uint(16 - n) + if n <= huffmanChunkBits { + for off := reverse; off < len(h.chunks); off += 1 << uint(n) { + // We should never need to overwrite + // an existing chunk. Also, 0 is + // never a valid chunk, because the + // lower 4 "count" bits should be + // between 1 and 15. 
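// Aside: the nextcode computation in init above is the canonical-code rule of
// RFC 1951 section 3.2.2: the first code of each bit length is (previous
// first code + previous count) shifted left once. Standalone, mapping lengths
// to codes (canonicalCodes is illustrative):
package main

import "fmt"

func canonicalCodes(lengths []int) []int {
    var count [16]int
    for _, n := range lengths {
        count[n]++
    }
    count[0] = 0
    var nextcode [16]int
    code := 0
    for b := 1; b < 16; b++ {
        code = (code + count[b-1]) << 1
        nextcode[b] = code
    }
    codes := make([]int, len(lengths))
    for i, n := range lengths {
        if n != 0 {
            codes[i] = nextcode[n]
            nextcode[n]++
        }
    }
    return codes
}

func main() {
    // The RFC's worked example: lengths {3,3,3,3,3,2,4,4} give the codes
    // 010..110, 00, 1110, 1111, printed here in decimal: 2 3 4 5 6 0 14 15.
    fmt.Println(canonicalCodes([]int{3, 3, 3, 3, 3, 2, 4, 4}))
}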
+ if sanity && h.chunks[off] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[off] = chunk + } + } else { + j := reverse & (huffmanNumChunks - 1) + if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { + // Longer codes should have been + // associated with a link table above. + panic("impossible: not an indirect chunk") + } + value := h.chunks[j] >> huffmanValueShift + linktab := h.links[value] + reverse >>= huffmanChunkBits + for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { + if sanity && linktab[off] != 0 { + panic("impossible: overwriting existing chunk") + } + linktab[off] = chunk + } + } + } + + if sanity { + // Above we've sanity checked that we never overwrote + // an existing entry. Here we additionally check that + // we filled the tables completely. + for i, chunk := range h.chunks { + if chunk == 0 { + // As an exception, in the degenerate + // single-code case, we allow odd + // chunks to be missing. + if code == 1 && i%2 == 1 { + continue + } + panic("impossible: missing chunk") + } + } + for _, linktab := range h.links { + for _, chunk := range linktab { + if chunk == 0 { + panic("impossible: missing chunk") + } + } + } + } + + return true +} + +// Reader is the actual read interface needed by NewReader. +// If the passed in io.Reader does not also have ReadByte, +// the NewReader will introduce its own buffering. +type Reader interface { + io.Reader + io.ByteReader +} + +type step uint8 + +const ( + copyData step = iota + 1 + nextBlock + huffmanBytesBuffer + huffmanBytesReader + huffmanBufioReader + huffmanStringsReader + huffmanGenericReader +) + +// Decompress state. +type decompressor struct { + // Input source. + r Reader + roffset int64 + + // Huffman decoders for literal/length, distance. + h1, h2 huffmanDecoder + + // Length arrays used to define Huffman codes. + bits *[maxNumLit + maxNumDist]int + codebits *[numCodes]int + + // Output history, buffer. + dict dictDecoder + + // Next step in the decompression, + // and decompression state. + step step + stepState int + err error + toRead []byte + hl, hd *huffmanDecoder + copyLen int + copyDist int + + // Temporary buffer (avoids repeated allocation). + buf [4]byte + + // Input bits, in top of b. + b uint32 + + nb uint + final bool +} + +func (f *decompressor) nextBlock() { + for f.nb < 1+2 { + if f.err = f.moreBits(); f.err != nil { + return + } + } + f.final = f.b&1 == 1 + f.b >>= 1 + typ := f.b & 3 + f.b >>= 2 + f.nb -= 1 + 2 + switch typ { + case 0: + f.dataBlock() + if debugDecode { + fmt.Println("stored block") + } + case 1: + // compressed, fixed Huffman tables + f.hl = &fixedHuffmanDecoder + f.hd = nil + f.huffmanBlockDecoder() + if debugDecode { + fmt.Println("predefinied huffman block") + } + case 2: + // compressed, dynamic Huffman tables + if f.err = f.readHuffman(); f.err != nil { + break + } + f.hl = &f.h1 + f.hd = &f.h2 + f.huffmanBlockDecoder() + if debugDecode { + fmt.Println("dynamic huffman block") + } + default: + // 3 is reserved. 
+ if debugDecode { + fmt.Println("reserved data block encountered") + } + f.err = CorruptInputError(f.roffset) + } +} + +func (f *decompressor) Read(b []byte) (int, error) { + for { + if len(f.toRead) > 0 { + n := copy(b, f.toRead) + f.toRead = f.toRead[n:] + if len(f.toRead) == 0 { + return n, f.err + } + return n, nil + } + if f.err != nil { + return 0, f.err + } + + f.doStep() + + if f.err != nil && len(f.toRead) == 0 { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + } + } +} + +// WriteTo implements the io.WriteTo interface for io.Copy and friends. +func (f *decompressor) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + flushed := false + for { + if len(f.toRead) > 0 { + n, err := w.Write(f.toRead) + total += int64(n) + if err != nil { + f.err = err + return total, err + } + if n != len(f.toRead) { + return total, io.ErrShortWrite + } + f.toRead = f.toRead[:0] + } + if f.err != nil && flushed { + if f.err == io.EOF { + return total, nil + } + return total, f.err + } + if f.err == nil { + f.doStep() + } + if len(f.toRead) == 0 && f.err != nil && !flushed { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + flushed = true + } + } +} + +func (f *decompressor) Close() error { + if f.err == io.EOF { + return nil + } + return f.err +} + +// RFC 1951 section 3.2.7. +// Compression with dynamic Huffman codes + +var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +func (f *decompressor) readHuffman() error { + // HLIT[5], HDIST[5], HCLEN[4]. + for f.nb < 5+5+4 { + if err := f.moreBits(); err != nil { + return err + } + } + nlit := int(f.b&0x1F) + 257 + if nlit > maxNumLit { + if debugDecode { + fmt.Println("nlit > maxNumLit", nlit) + } + return CorruptInputError(f.roffset) + } + f.b >>= 5 + ndist := int(f.b&0x1F) + 1 + if ndist > maxNumDist { + if debugDecode { + fmt.Println("ndist > maxNumDist", ndist) + } + return CorruptInputError(f.roffset) + } + f.b >>= 5 + nclen := int(f.b&0xF) + 4 + // numCodes is 19, so nclen is always valid. + f.b >>= 4 + f.nb -= 5 + 5 + 4 + + // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. + for i := 0; i < nclen; i++ { + for f.nb < 3 { + if err := f.moreBits(); err != nil { + return err + } + } + f.codebits[codeOrder[i]] = int(f.b & 0x7) + f.b >>= 3 + f.nb -= 3 + } + for i := nclen; i < len(codeOrder); i++ { + f.codebits[codeOrder[i]] = 0 + } + if !f.h1.init(f.codebits[0:]) { + if debugDecode { + fmt.Println("init codebits failed") + } + return CorruptInputError(f.roffset) + } + + // HLIT + 257 code lengths, HDIST + 1 code lengths, + // using the code length Huffman code. + for i, n := 0, nlit+ndist; i < n; { + x, err := f.huffSym(&f.h1) + if err != nil { + return err + } + if x < 16 { + // Actual length. + f.bits[i] = x + i++ + continue + } + // Repeat previous length or zero. 
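// Aside: symbols 16, 17 and 18 of the code-length alphabet are run-length
// escapes (RFC 1951 section 3.2.7): 16 repeats the previous length 3-6 times,
// 17 writes 3-10 zeros, 18 writes 11-138 zeros. The expansion in isolation,
// with the extra-bits value passed in directly (expand is illustrative):
package main

import "fmt"

func expand(sym, extra int, out []int) []int {
    switch {
    case sym < 16: // a literal code length
        return append(out, sym)
    case sym == 16:
        prev := out[len(out)-1]
        for i := 0; i < 3+extra; i++ {
            out = append(out, prev)
        }
        return out
    case sym == 17:
        return append(out, make([]int, 3+extra)...)
    default: // 18
        return append(out, make([]int, 11+extra)...)
    }
}

func main() {
    var lens []int
    lens = expand(8, 0, lens)  // one length-8 code
    lens = expand(16, 1, lens) // repeat it 3+1 times
    lens = expand(17, 0, lens) // three zeros
    fmt.Println(lens) // [8 8 8 8 8 0 0 0]
}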
+ var rep int + var nb uint + var b int + switch x { + default: + return InternalError("unexpected length code") + case 16: + rep = 3 + nb = 2 + if i == 0 { + if debugDecode { + fmt.Println("i==0") + } + return CorruptInputError(f.roffset) + } + b = f.bits[i-1] + case 17: + rep = 3 + nb = 3 + b = 0 + case 18: + rep = 11 + nb = 7 + b = 0 + } + for f.nb < nb { + if err := f.moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits:", err) + } + return err + } + } + rep += int(f.b & uint32(1<<(nb®SizeMaskUint32)-1)) + f.b >>= nb & regSizeMaskUint32 + f.nb -= nb + if i+rep > n { + if debugDecode { + fmt.Println("i+rep > n", i, rep, n) + } + return CorruptInputError(f.roffset) + } + for j := 0; j < rep; j++ { + f.bits[i] = b + i++ + } + } + + if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) { + if debugDecode { + fmt.Println("init2 failed") + } + return CorruptInputError(f.roffset) + } + + // As an optimization, we can initialize the maxRead bits to read at a time + // for the HLIT tree to the length of the EOB marker since we know that + // every block must terminate with one. This preserves the property that + // we never read any extra bytes after the end of the DEFLATE stream. + if f.h1.maxRead < f.bits[endBlockMarker] { + f.h1.maxRead = f.bits[endBlockMarker] + } + if !f.final { + // If not the final block, the smallest block possible is + // a predefined table, BTYPE=01, with a single EOB marker. + // This will take up 3 + 7 bits. + f.h1.maxRead += 10 + } + + return nil +} + +// Copy a single uncompressed data block from input to output. +func (f *decompressor) dataBlock() { + // Uncompressed. + // Discard current half-byte. + left := (f.nb) & 7 + f.nb -= left + f.b >>= left + + offBytes := f.nb >> 3 + // Unfilled values will be overwritten. + f.buf[0] = uint8(f.b) + f.buf[1] = uint8(f.b >> 8) + f.buf[2] = uint8(f.b >> 16) + f.buf[3] = uint8(f.b >> 24) + + f.roffset += int64(offBytes) + f.nb, f.b = 0, 0 + + // Length then ones-complement of length. + nr, err := io.ReadFull(f.r, f.buf[offBytes:4]) + f.roffset += int64(nr) + if err != nil { + f.err = noEOF(err) + return + } + n := uint16(f.buf[0]) | uint16(f.buf[1])<<8 + nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8 + if nn != ^n { + if debugDecode { + ncomp := ^n + fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp) + } + f.err = CorruptInputError(f.roffset) + return + } + + if n == 0 { + f.toRead = f.dict.readFlush() + f.finishBlock() + return + } + + f.copyLen = int(n) + f.copyData() +} + +// copyData copies f.copyLen bytes from the underlying reader into f.hist. +// It pauses for reads when f.hist is full. 
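// Aside: a stored block begins, after byte alignment, with LEN and NLEN,
// where NLEN must be the ones' complement of LEN; that is the `nn != ^n`
// check in dataBlock above. Parsing just those four header bytes (storedLen
// is illustrative):
package main

import (
    "encoding/binary"
    "fmt"
)

func storedLen(hdr [4]byte) (int, error) {
    n := binary.LittleEndian.Uint16(hdr[0:2])
    nn := binary.LittleEndian.Uint16(hdr[2:4])
    if nn != ^n {
        return 0, fmt.Errorf("corrupt stored block: LEN=%#x NLEN=%#x", n, nn)
    }
    return int(n), nil
}

func main() {
    fmt.Println(storedLen([4]byte{0x05, 0x00, 0xfa, 0xff})) // 5 <nil>
    fmt.Println(storedLen([4]byte{0x05, 0x00, 0x00, 0x00})) // 0 plus an error
}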
+func (f *decompressor) copyData() { + buf := f.dict.writeSlice() + if len(buf) > f.copyLen { + buf = buf[:f.copyLen] + } + + cnt, err := io.ReadFull(f.r, buf) + f.roffset += int64(cnt) + f.copyLen -= cnt + f.dict.writeMark(cnt) + if err != nil { + f.err = noEOF(err) + return + } + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = copyData + return + } + f.finishBlock() +} + +func (f *decompressor) finishBlock() { + if f.final { + if f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() + } + f.err = io.EOF + } + f.step = nextBlock +} + +func (f *decompressor) doStep() { + switch f.step { + case copyData: + f.copyData() + case nextBlock: + f.nextBlock() + case huffmanBytesBuffer: + f.huffmanBytesBuffer() + case huffmanBytesReader: + f.huffmanBytesReader() + case huffmanBufioReader: + f.huffmanBufioReader() + case huffmanStringsReader: + f.huffmanStringsReader() + case huffmanGenericReader: + f.huffmanGenericReader() + default: + panic("BUG: unexpected step state") + } +} + +// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF. +func noEOF(e error) error { + if e == io.EOF { + return io.ErrUnexpectedEOF + } + return e +} + +func (f *decompressor) moreBits() error { + c, err := f.r.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << (f.nb & regSizeMaskUint32) + f.nb += 8 + return nil +} + +// Read the next Huffman-encoded symbol from f according to h. +func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(h.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := f.r.ReadByte() + if err != nil { + f.b = b + f.nb = nb + return 0, noEOF(err) + } + f.roffset++ + b |= uint32(c) << (nb & regSizeMaskUint32) + nb += 8 + } + chunk := h.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return 0, f.err + } + f.b = b >> (n & regSizeMaskUint32) + f.nb = nb - n + return int(chunk >> huffmanValueShift), nil + } + } +} + +func makeReader(r io.Reader) Reader { + if rr, ok := r.(Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +func fixedHuffmanDecoderInit() { + fixedOnce.Do(func() { + // These come from the RFC section 3.2.6. 
+ var bits [288]int + for i := 0; i < 144; i++ { + bits[i] = 8 + } + for i := 144; i < 256; i++ { + bits[i] = 9 + } + for i := 256; i < 280; i++ { + bits[i] = 7 + } + for i := 280; i < 288; i++ { + bits[i] = 8 + } + fixedHuffmanDecoder.init(bits[:]) + }) +} + +func (f *decompressor) Reset(r io.Reader, dict []byte) error { + *f = decompressor{ + r: makeReader(r), + bits: f.bits, + codebits: f.codebits, + h1: f.h1, + h2: f.h2, + dict: f.dict, + step: nextBlock, + } + f.dict.init(maxMatchOffset, dict) + return nil +} + +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = nextBlock + f.dict.init(maxMatchOffset, nil) + return &f +} + +// NewReaderDict is like NewReader but initializes the reader +// with a preset dictionary. The returned Reader behaves as if +// the uncompressed data stream started with the given dictionary, +// which has already been read. NewReaderDict is typically used +// to read data compressed by NewWriterDict. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = nextBlock + f.dict.init(maxMatchOffset, dict) + return &f +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go new file mode 100644 index 000000000..2b2f993f7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -0,0 +1,1283 @@ +// Code generated by go generate gen_inflate.go. DO NOT EDIT. + +package flate + +import ( + "bufio" + "bytes" + "fmt" + "math/bits" + "strings" +) + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBytesBuffer() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bytes.Buffer) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
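// Aside: because the ReadCloser from NewReader also implements Resetter, one
// decompressor (and its window) can be reused across streams instead of being
// reallocated. A usage sketch against the vendored import path, assuming two
// buffers that each hold a complete DEFLATE stream:
package main

import (
    "bytes"
    "io"
    "os"

    "github.com/klauspost/compress/flate"
)

func main() {
    var first, second bytes.Buffer
    w, _ := flate.NewWriter(&first, flate.BestSpeed)
    w.Write([]byte("hello "))
    w.Close()
    w.Reset(&second)
    w.Write([]byte("world\n"))
    w.Close()

    r := flate.NewReader(&first)
    io.Copy(os.Stdout, r)
    r.(flate.Resetter).Reset(&second, nil) // reuse rather than allocate
    io.Copy(os.Stdout, r)
    r.Close()
}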
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanBytesBuffer + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+ extra := (dist & 1) << (nb & regSizeMaskUint32) + for fnb < nb { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 + fnb -= nb + dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra + default: + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, int(dist) + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() + f.step = huffmanBytesBuffer // We need to continue this work + f.stepState = stateDict + f.b, f.nb = fb, fnb + return + } + goto readLiteral + } + // Not reached +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBytesReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bytes.Reader) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
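// Aside: the distance switch above decodes codes 4..29 as base plus extra
// bits: nb := (code-2)>>1 extra bits, with the code's low bit forming the top
// bit of the extra value. The arithmetic as a plain function, extra bits
// supplied directly (distance is illustrative):
package main

import "fmt"

func distance(code, extraBits uint32) uint32 {
    if code < 4 {
        return code + 1
    }
    nb := (code - 2) >> 1
    extra := (code&1)<<nb | extraBits
    return 1<<(nb+1) + 1 + extra
}

func main() {
    fmt.Println(distance(0, 0))     // 1
    fmt.Println(distance(4, 0))     // 5, the first code with an extra bit
    fmt.Println(distance(5, 1))     // 8
    fmt.Println(distance(29, 8191)) // 32768, the maximum distance
}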
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanBytesReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+ extra := (dist & 1) << (nb & regSizeMaskUint32) + for fnb < nb { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 + fnb -= nb + dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra + default: + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, int(dist) + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() + f.step = huffmanBytesReader // We need to continue this work + f.stepState = stateDict + f.b, f.nb = fb, fnb + return + } + goto readLiteral + } + // Not reached +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBufioReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bufio.Reader) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanBufioReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+ extra := (dist & 1) << (nb & regSizeMaskUint32) + for fnb < nb { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 + fnb -= nb + dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra + default: + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, int(dist) + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() + f.step = huffmanBufioReader // We need to continue this work + f.stepState = stateDict + f.b, f.nb = fb, fnb + return + } + goto readLiteral + } + // Not reached +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanStringsReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*strings.Reader) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanStringsReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanStringsReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanGenericReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanGenericReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanGenericReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+func (f *decompressor) huffmanBlockDecoder() {
+ switch f.r.(type) {
+ case *bytes.Buffer:
+ f.huffmanBytesBuffer()
+ case *bytes.Reader:
+ f.huffmanBytesReader()
+ case *bufio.Reader:
+ f.huffmanBufioReader()
+ case *strings.Reader:
+ f.huffmanStringsReader()
+ case Reader:
+ f.huffmanGenericReader()
+ default:
+ f.huffmanGenericReader()
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
new file mode 100644
index 000000000..703b9a89a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -0,0 +1,241 @@
+package flate
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math/bits"
+)
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastEncL1 struct {
+ fastGen
+ table [tableSize]tableEntry
+}
+
+// EncodeL1 uses a similar algorithm to level 1
+func (e *fastEncL1) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashBytes = 5
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
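// The scan below self-throttles: each probe advances by
// doEvery + (s-nextEmit)>>skipLog, so the stride grows the longer no match
// has been emitted, bounding the time spent on incompressible stretches.
// A worked instance with the level-1 constants defined further down
// (skipLog = 5, doEvery = 2); the positions are illustrative only:
//
//	s, nextEmit := int32(100), int32(36) // 64 bytes since the last emit
//	nextS := s + 2 + (s-nextEmit)>>5     // 100 + 2 + 64>>5 = 104
//	_ = nextS                            // the next probe skips 4 bytes ahead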
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+
+ for {
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hashLen(cv, tableBits, hashBytes)
+ candidate = e.table[nextHash]
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+
+ now := load6432(src, nextS)
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+ nextHash = hashLen(now, tableBits, hashBytes)
+
+ offset := s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+ break
+ }
+
+ // Do one right away...
+ cv = now
+ s = nextS
+ nextS++
+ candidate = e.table[nextHash]
+ now >>= 8
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+ offset = s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+ break
+ }
+ cv = now
+ s = nextS
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset - e.cur
+ var l = int32(4)
+ if false {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else {
+ // inlined:
+ a := src[s+4:]
+ b := src[t+4:]
+ for len(a) >= 8 {
+ if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
+ l += int32(bits.TrailingZeros64(diff) >> 3)
+ break
+ }
+ l += 8
+ a = a[8:]
+ b = b[8:]
+ }
+ if len(a) < 8 {
+ b = b[:len(a)]
+ for i := range a {
+ if a[i] != b[i] {
+ break
+ }
+ l++
+ }
+ }
+ }
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+
+ // Save the match found
+ if false {
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ } else {
+ // Inlined...
+ xoffset := uint32(s - t - baseMatchOffset)
+ xlength := l
+ oc := offsetCode(xoffset)
+ xoffset |= oc << 16
+ for xlength > 0 {
+ xl := xlength
+ if xl > 258 {
+ if xl > 258+baseMatchLength {
+ xl = 258
+ } else {
+ xl = 258 - baseMatchLength
+ }
+ }
+ xlength -= xl
+ xl -= baseMatchLength
+ dst.extraHist[lengthCodes1[uint8(xl)]]++
+ dst.offHist[oc]++
+ dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+ dst.n++
+ }
+ }
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+l+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
+ }
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
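// Concretely, one 64-bit load at s-2 covers bytes s-2..s+5, and shifting
// the loaded value right by 16 re-centers the same register on s, so
// several table updates cost a single memory read. A minimal sketch using
// load6432/hashLen as defined in this package:
//
//	x := load6432(src, s-2)                    // bytes s-2 .. s+5
//	h0 := hashLen(x, tableBits, hashBytes)     // hash of the bytes at s-2
//	h1 := hashLen(x>>16, tableBits, hashBytes) // hash of the bytes at s
//	_, _ = h0, h1                              // two buckets updated from one load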
+ x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hashLen(x, tableBits, hashBytes) + e.table[prevHash] = tableEntry{offset: o} + x >>= 16 + currHash := hashLen(x, tableBits, hashBytes) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { + cv = x >> 8 + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go new file mode 100644 index 000000000..876dfbe30 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -0,0 +1,214 @@ +package flate + +import "fmt" + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastEncL2 struct { + fastGen + table [bTableSize]tableEntry +} + +// EncodeL2 uses a similar algorithm to level 1, but is capable +// of matching across blocks giving better compression at a small slowdown. +func (e *fastEncL2) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashBytes = 5 + ) + + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + // When should we start skipping if we haven't found matches in a long while. + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashLen(cv, bTableBits, hashBytes) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidate = e.table[nextHash] + now := load6432(src, nextS) + e.table[nextHash] = tableEntry{offset: s + e.cur} + nextHash = hashLen(now, bTableBits, hashBytes) + + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} + break + } + + // Do one right away... 
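// Throughout these encoders, table entries store the position biased by
// e.cur (offset = pos + e.cur), so entries stay comparable across the
// periodic rebase in the wraparound loop above, and candidacy reduces to a
// single distance check. A sketch of that test, written slightly more
// explicitly than the inlined form around it:
//
//	pos := candidate.offset - e.cur // absolute index into src (e.hist)
//	dist := s - pos
//	usable := dist < maxMatchOffset && // close enough to reference
//		uint32(cv) == load3232(src, pos) // and the 4 bytes really match
//	_ = usable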
+ cv = now + s = nextS + nextS++ + candidate = e.table[nextHash] + now >>= 8 + e.table[nextHash] = tableEntry{offset: s + e.cur} + + offset = s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + break + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. + if int(s+l+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur} + } + goto emitRemainder + } + + // Store every second hash in-between, but offset by 1. + for i := s - l + 2; i < s-5; i += 7 { + x := load6432(src, i) + nextHash := hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i} + // Skip one + x >>= 16 + nextHash = hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i + 2} + // Skip one + x >>= 16 + nextHash = hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i + 4} + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hashLen(x, bTableBits, hashBytes) + prevHash2 := hashLen(x>>8, bTableBits, hashBytes) + e.table[prevHash] = tableEntry{offset: o} + e.table[prevHash2] = tableEntry{offset: o + 1} + currHash := hashLen(x>>16, bTableBits, hashBytes) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { + cv = x >> 24 + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
+ if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go new file mode 100644 index 000000000..7aa2b72a1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -0,0 +1,241 @@ +package flate + +import "fmt" + +// fastEncL3 +type fastEncL3 struct { + fastGen + table [1 << 16]tableEntryPrev +} + +// Encode uses a similar algorithm to level 2, will check up to two candidates. +func (e *fastEncL3) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + tableBits = 16 + tableSize = 1 << tableBits + hashBytes = 5 + ) + + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + } + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + e.table[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // Skip if too small. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 7 + nextS := s + var candidate tableEntry + for { + nextHash := hashLen(cv, tableBits, hashBytes) + s = nextS + nextS = s + 1 + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash] + now := load6432(src, nextS) + + // Safe offset distance until s + 4... + minOffset := e.cur + s - (maxMatchOffset - 4) + e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}} + + // Check both candidates + candidate = candidates.Cur + if candidate.offset < minOffset { + cv = now + // Previous will also be invalid, we have nothing. + continue + } + + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) { + break + } + // Both match and are valid, pick longest. + offset := s - (candidate.offset - e.cur) + o2 := s - (candidates.Prev.offset - e.cur) + l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) + if l2 > l1 { + candidate = candidates.Prev + } + break + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. 
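// level3's table differs from levels 1-2: each bucket is a tableEntryPrev
// holding two candidates, and every insert demotes the previous head
// instead of dropping it, effectively a 2-way bucket. Probes check Cur
// first and then Prev, which is why the fallback to candidates.Prev
// follows just below. Insert sketch (names as in this file):
//
//	old := e.table[nextHash]
//	e.table[nextHash] = tableEntryPrev{
//		Prev: old.Cur,                       // demote the old head
//		Cur:  tableEntry{offset: s + e.cur}, // new head
//	}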
+ candidate = candidates.Prev + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + break + } + } + cv = now + } + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+8) < len(src) && t > 0 { + cv = load6432(src, t) + nextHash := hashLen(cv, tableBits, hashBytes) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + t}, + } + } + goto emitRemainder + } + + // Store every 5th hash in-between. + for i := s - l + 2; i < s-5; i += 6 { + nextHash := hashLen(load6432(src, i), tableBits, hashBytes) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + i}} + } + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. + x := load6432(src, s-2) + prevHash := hashLen(x, tableBits, hashBytes) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 2}, + } + x >>= 8 + prevHash = hashLen(x, tableBits, hashBytes) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 1}, + } + x >>= 8 + currHash := hashLen(x, tableBits, hashBytes) + candidates := e.table[currHash] + cv = x + e.table[currHash] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur}, + } + + // Check both candidates + candidate = candidates.Cur + minOffset := e.cur + s - (maxMatchOffset - 4) + + if candidate.offset > minOffset { + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Found a match... + continue + } + candidate = candidates.Prev + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Match at prev... + continue + } + } + cv = x >> 8 + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
+ if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go new file mode 100644 index 000000000..23c08b325 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -0,0 +1,221 @@ +package flate + +import "fmt" + +type fastEncL4 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntry +} + +func (e *fastEncL4) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.bTable[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + e.bTable[nextHashL] = entry + + t = lCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { + // We got a long match. Use that. + break + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + lCandidate = e.bTable[hash7(next, tableBits)] + + // If the next long is a candidate, check if we should use that instead... + lOff := nextS - (lCandidate.offset - e.cur) + if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { + l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) + if l2 > l1 { + s = nextS + t = lCandidate.offset - e.cur + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic("s-t") + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. + if int(s+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur} + e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} + } + goto emitRemainder + } + + // Store every 3rd hash in-between + if true { + i := nextS + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + + i += 3 + for ; i < s-1; i += 3 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + e.bTable[prevHashL] = tableEntry{offset: o} + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go new file mode 100644 index 000000000..1f61ec182 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -0,0 +1,708 @@ +package flate + +import "fmt" + +type fastEncL5 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL5) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
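// The rebase below preserves match distances exactly: history and table
// origin shift by the same amount, and entries that can no longer fall
// within maxMatchOffset are zeroed (offset 0 never validates, since its
// distance is always at least maxMatchOffset). A short derivation, with v
// the stored offset and s the current position:
//
//	// distance before rebase: s + e.cur - v
//	// after: v' = v - e.cur + maxMatchOffset, e.cur' = maxMatchOffset
//	// distance after: s + e.cur' - v' = s + e.cur - v (unchanged)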
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + l = e.matchlen(s+4, t+4, src) + 4 + lCandidate = e.bTable[nextHashL] + // Store the next match + + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // If the next long is a candidate, use that... 
+ t2 := lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + if l == 0 { + // Extend the 4-byte match as long as possible. + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end of best match... + if sAt := s + l; l < 30 && sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset + t2 := eLong - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if t2 >= 0 && off < maxMatchOffset && off > 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + goto emitRemainder + } + + // Store every 3rd hash in-between. + if true { + const hashEvery = 3 + i := s - l + 1 + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // Do an long at i+1 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + eLong = &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // We only have enough bits for a short entry at i+2 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + + // Skip one - otherwise we risk hitting 's' + i += 4 + for ; i < s-1; i += hashEvery { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. 
+ x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + eLong := &e.bTable[prevHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} + +// fastEncL5Window is a level 5 encoder, +// but with a custom window size. +type fastEncL5Window struct { + hist []byte + cur int32 + maxOffset int32 + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + maxMatchOffset := e.maxOffset + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. 
+ cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + l = e.matchlen(s+4, t+4, src) + 4 + lCandidate = e.bTable[nextHashL] + // Store the next match + + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // If the next long is a candidate, use that... + t2 := lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + if l == 0 { + // Extend the 4-byte match as long as possible. + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end of best match... + if sAt := s + l; l < 30 && sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. 
+ const skipBeginning = 2 + eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset + t2 := eLong - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if t2 >= 0 && off < maxMatchOffset && off > 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + goto emitRemainder + } + + // Store every 3rd hash in-between. + if true { + const hashEvery = 3 + i := s - l + 1 + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // Do an long at i+1 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + eLong = &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // We only have enough bits for a short entry at i+2 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + + // Skip one - otherwise we risk hitting 's' + i += 4 + for ; i < s-1; i += hashEvery { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + eLong := &e.bTable[prevHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} + +// Reset the encoding table. +func (e *fastEncL5Window) Reset() { + // We keep the same allocs, since we are compressing the same block sizes. + if cap(e.hist) < allocHistory { + e.hist = make([]byte, 0, allocHistory) + } + + // We offset current position so everything will be out of reach. + // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. 
+ if e.cur <= int32(bufferReset) { + e.cur += e.maxOffset + int32(len(e.hist)) + } + e.hist = e.hist[:0] +} + +func (e *fastEncL5Window) addBlock(src []byte) int32 { + // check if we have space already + maxMatchOffset := e.maxOffset + + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.hist = make([]byte, 0, allocHistory) + } else { + if cap(e.hist) < int(maxMatchOffset*2) { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - maxMatchOffset + copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:maxMatchOffset] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// matchlen will return the match length between offsets and t in src. +// The maximum length returned is maxMatchLength - 4. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 { + if debugDecode { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > e.maxOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + s1 := int(s) + maxMatchLength - 4 + if s1 > len(src) { + s1 = len(src) + } + + // Extend the match to be as long as possible. + return int32(matchLen(src[s:s1], src[t:])) +} + +// matchlenLong will return the match length between offsets and t in src. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 { + if debugDeflate { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > e.maxOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go new file mode 100644 index 000000000..f1e9d98fa --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -0,0 +1,325 @@ +package flate + +import "fmt" + +type fastEncL6 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL6) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + // Repeat MUST be > 1 and within range + repeat := int32(1) + for { + const skipLog = 7 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + // Calculate hashes of 'next' + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Long candidate matches at least 4 bytes. + + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // Check the previous long candidate as well. + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + // Current value did not match, but check if previous long value does. + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... 
+ l = e.matchlen(s+4, t+4, src) + 4 + + // Look up next long candidate (at nextS) + lCandidate = e.bTable[nextHashL] + + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // Check repeat at s + repOff + const repOff = 1 + t2 := s - repeat + repOff + if load3232(src, t2) == uint32(cv>>(8*repOff)) { + ml := e.matchlen(s+4+repOff, t2+4, src) + 4 + if ml > l { + t = t2 + l = ml + s += repOff + // Not worth checking more. + break + } + } + + // If the next long is a candidate, use that... + t2 = lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + // This is ok, but check previous as well. + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. + if l == 0 { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end-of-match... + if sAt := s + l; sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)] + // Test current + t2 := eLong.Cur.offset - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if off < maxMatchOffset { + if off > 0 && t2 >= 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + // Test next: + t2 = eLong.Prev.offset - e.cur - l + skipBeginning + off := s2 - t2 + if off > 0 && off < maxMatchOffset && t2 >= 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if false { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + repeat = s - t + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index after match end. + for i := nextS + 1; i < int32(len(src))-8; i += 2 { + cv := load6432(src, i) + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur + } + goto emitRemainder + } + + // Store every long hash in-between and every second short. 
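+ // Index positions inside the just-emitted match so later input can
+ // match into it: the long table receives entries for i and i+1 each
+ // iteration, the short table only every second position.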
+ if true { + for i := nextS + 1; i < s-1; i += 2 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong2 := &e.bTable[hash7(cv>>8, tableBits)] + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong.Cur, eLong.Prev = t, eLong.Cur + eLong2.Cur, eLong2.Prev = t2, eLong2.Cur + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + cv = load6432(src, s) + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go new file mode 100644 index 000000000..4bd388584 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package flate + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s new file mode 100644 index 000000000..9a7655c0f --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s @@ -0,0 +1,68 @@ +// Copied from S2 implementation. 
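+// The loop XORs 8 bytes at a time; the lowest set bit of a nonzero
+// difference (TZCNT, or BSF on pre-BMI CPUs) divided by 8 is the index
+// of the first mismatching byte. Tails shorter than 8 bytes fall
+// through to the 4/2/1-byte compares below.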
+ +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go new file mode 100644 index 000000000..ad5cd814b --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go new file mode 100644 index 000000000..6ed28061b --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go @@ -0,0 +1,37 @@ +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size. 
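+ // For example, v := x << (n & reg64SizeMask64) compiles to a plain
+ // shift on amd64, since the CPU masks the count to the register size
+ // anyway; in regmask_other.go the masks are all ones, so the AND is a
+ // no-op on other architectures.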
+ + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 7 + reg8SizeMask16 = 15 + reg8SizeMask32 = 31 + reg8SizeMask64 = 63 + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = reg8SizeMask8 + reg16SizeMask16 = reg8SizeMask16 + reg16SizeMask32 = reg8SizeMask32 + reg16SizeMask64 = reg8SizeMask64 + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = reg8SizeMask8 + reg32SizeMask16 = reg8SizeMask16 + reg32SizeMask32 = reg8SizeMask32 + reg32SizeMask64 = reg8SizeMask64 + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = reg8SizeMask8 + reg64SizeMask16 = reg8SizeMask16 + reg64SizeMask32 = reg8SizeMask32 + reg64SizeMask64 = reg8SizeMask64 + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = reg8SizeMask8 + regSizeMaskUint16 = reg8SizeMask16 + regSizeMaskUint32 = reg8SizeMask32 + regSizeMaskUint64 = reg8SizeMask64 +) diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go new file mode 100644 index 000000000..1b7a2cbd7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go @@ -0,0 +1,40 @@ +//go:build !amd64 +// +build !amd64 + +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size. + + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 0xff + reg8SizeMask16 = 0xff + reg8SizeMask32 = 0xff + reg8SizeMask64 = 0xff + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = 0xffff + reg16SizeMask16 = 0xffff + reg16SizeMask32 = 0xffff + reg16SizeMask64 = 0xffff + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = 0xffffffff + reg32SizeMask16 = 0xffffffff + reg32SizeMask32 = 0xffffffff + reg32SizeMask64 = 0xffffffff + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = 0xffffffffffffffff + reg64SizeMask16 = 0xffffffffffffffff + reg64SizeMask32 = 0xffffffffffffffff + reg64SizeMask64 = 0xffffffffffffffff + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = ^uint(0) + regSizeMaskUint16 = ^uint(0) + regSizeMaskUint32 = ^uint(0) + regSizeMaskUint64 = ^uint(0) +) diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go new file mode 100644 index 000000000..f3d4139ef --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -0,0 +1,318 @@ +package flate + +import ( + "io" + "math" + "sync" +) + +const ( + maxStatelessBlock = math.MaxInt16 + // dictionary will be taken from maxStatelessBlock, so limit it. 
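+ // Together with the block-size cap above this keeps every offset
+ // within the int16 range used by statelessEnc.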
+ maxStatelessDict = 8 << 10 + + slTableBits = 13 + slTableSize = 1 << slTableBits + slTableShift = 32 - slTableBits +) + +type statelessWriter struct { + dst io.Writer + closed bool +} + +func (s *statelessWriter) Close() error { + if s.closed { + return nil + } + s.closed = true + // Emit EOF block + return StatelessDeflate(s.dst, nil, true, nil) +} + +func (s *statelessWriter) Write(p []byte) (n int, err error) { + err = StatelessDeflate(s.dst, p, false, nil) + if err != nil { + return 0, err + } + return len(p), nil +} + +func (s *statelessWriter) Reset(w io.Writer) { + s.dst = w + s.closed = false +} + +// NewStatelessWriter will do compression but without maintaining any state +// between Write calls. +// There will be no memory kept between Write calls, +// but compression and speed will be suboptimal. +// Because of this, the size of actual Write calls will affect output size. +func NewStatelessWriter(dst io.Writer) io.WriteCloser { + return &statelessWriter{dst: dst} +} + +// bitWriterPool contains bit writers that can be reused. +var bitWriterPool = sync.Pool{ + New: func() interface{} { + return newHuffmanBitWriter(nil) + }, +} + +// StatelessDeflate allows compressing directly to a Writer without retaining state. +// When returning everything will be flushed. +// Up to 8KB of an optional dictionary can be given which is presumed to precede the block. +// Longer dictionaries will be truncated and will still produce valid output. +// Sending nil dictionary is perfectly fine. +func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { + var dst tokens + bw := bitWriterPool.Get().(*huffmanBitWriter) + bw.reset(out) + defer func() { + // don't keep a reference to our output + bw.reset(nil) + bitWriterPool.Put(bw) + }() + if eof && len(in) == 0 { + // Just write an EOF block. + // Could be faster... + bw.writeStoredHeader(0, true) + bw.flush() + return bw.err + } + + // Truncate dict + if len(dict) > maxStatelessDict { + dict = dict[len(dict)-maxStatelessDict:] + } + + // For subsequent loops, keep shallow dict reference to avoid alloc+copy. + var inDict []byte + + for len(in) > 0 { + todo := in + if len(inDict) > 0 { + if len(todo) > maxStatelessBlock-maxStatelessDict { + todo = todo[:maxStatelessBlock-maxStatelessDict] + } + } else if len(todo) > maxStatelessBlock-len(dict) { + todo = todo[:maxStatelessBlock-len(dict)] + } + inOrg := in + in = in[len(todo):] + uncompressed := todo + if len(dict) > 0 { + // combine dict and source + bufLen := len(todo) + len(dict) + combined := make([]byte, bufLen) + copy(combined, dict) + copy(combined[len(dict):], todo) + todo = combined + } + // Compress + if len(inDict) == 0 { + statelessEnc(&dst, todo, int16(len(dict))) + } else { + statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) + } + isEof := eof && len(in) == 0 + + if dst.n == 0 { + bw.writeStoredHeader(len(uncompressed), isEof) + if bw.err != nil { + return bw.err + } + bw.writeBytes(uncompressed) + } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 { + // If we removed less than 1/16th, huffman compress the block. + bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) + } else { + bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) + } + if len(in) > 0 { + // Retain a dict if we have more + inDict = inOrg[len(uncompressed)-maxStatelessDict:] + dict = nil + dst.Reset() + } + if bw.err != nil { + return bw.err + } + } + if !eof { + // Align, only a stored block can do that. 
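+ // A zero-length stored block is byte-aligned by definition, so this
+ // pads the stream to a byte boundary, equivalent to a sync flush.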
+ bw.writeStoredHeader(0, false) + } + bw.flush() + return bw.err +} + +func hashSL(u uint32) uint32 { + return (u * 0x1e35a7bd) >> slTableShift +} + +func load3216(b []byte, i int16) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6416(b []byte, i int16) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func statelessEnc(dst *tokens, src []byte, startAt int16) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + type tableEntry struct { + offset int16 + } + + var table [slTableSize]tableEntry + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src)-int(startAt) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = 0 + return + } + // Index until startAt + if startAt > 0 { + cv := load3232(src, 0) + for i := int16(0); i < startAt; i++ { + table[hashSL(cv)] = tableEntry{offset: i} + cv = (cv >> 8) | (uint32(src[i+4]) << 24) + } + } + + s := startAt + 1 + nextEmit := startAt + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int16(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load3216(src, s) + + for { + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashSL(cv) + candidate = table[nextHash] + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit || nextS <= 0 { + goto emitRemainder + } + + now := load6416(src, nextS) + table[nextHash] = tableEntry{offset: s} + nextHash = hashSL(uint32(now)) + + if cv == load3216(src, candidate.offset) { + table[nextHash] = tableEntry{offset: nextS} + break + } + + // Do one right away... + cv = uint32(now) + s = nextS + nextS++ + candidate = table[nextHash] + now >>= 8 + table[nextHash] = tableEntry{offset: s} + + if cv == load3216(src, candidate.offset) { + table[nextHash] = tableEntry{offset: nextS} + break + } + cv = uint32(now) + s = nextS + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. 
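+ // The first 4 bytes were already verified by the load3216 comparison
+ // above, so matchLen starts at s+4/t+4 and the 4 is added back.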
+ t := candidate.offset
+ l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+
+ // Save the match found
+ dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6416(src, s-2)
+ o := s - 2
+ prevHash := hashSL(uint32(x))
+ table[prevHash] = tableEntry{offset: o}
+ x >>= 16
+ currHash := hashSL(uint32(x))
+ candidate = table[currHash]
+ table[currHash] = tableEntry{offset: o + 2}
+
+ if uint32(x) != load3216(src, candidate.offset) {
+ cv = uint32(x >> 8)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
new file mode 100644
index 000000000..d818790c1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -0,0 +1,379 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
+ // bits 16-22 offsetcode - 5 bits
+ // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits
+ // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits
+ lengthShift = 22
+ offsetMask = 1<<lengthShift - 1
+ typeMask = 3 << 30
+ literalType = 0 << 30
+ matchType = 1 << 30
+ matchOffsetOnlyMask = 0xffff
+)
+
+// (the lengthCodes, lengthCodes1, offsetCodes and offsetCodes14 lookup tables are elided)
+
+type token uint32
+
+type tokens struct {
+ extraHist [32]uint16 // codes 256->maxnumlit
+ offHist [32]uint16 // offset codes
+ litHist [256]uint16 // codes 0->255
+ nFilled int
+ n uint16 // Must be able to contain maxStoreBlockSize
+ tokens [maxStoreBlockSize + 1]token
+}
+
+func (t *tokens) Reset() {
+ if t.n == 0 {
+ return
+ }
+ t.n = 0
+ t.nFilled = 0
+ for i := range t.litHist[:] {
+ t.litHist[i] = 0
+ }
+ for i := range t.extraHist[:] {
+ t.extraHist[i] = 0
+ }
+ for i := range t.offHist[:] {
+ t.offHist[i] = 0
+ }
+}
+
+func (t *tokens) Fill() {
+ if t.n == 0 {
+ return
+ }
+ for i, v := range t.litHist[:] {
+ if v == 0 {
+ t.litHist[i] = 1
+ t.nFilled++
+ }
+ }
+ for i, v := range t.extraHist[:literalCount-256] {
+ if v == 0 {
+ t.nFilled++
+ t.extraHist[i] = 1
+ }
+ }
+ for i, v := range t.offHist[:offsetCodeCount] {
+ if v == 0 {
+ t.offHist[i] = 1
+ }
+ }
+}
+
+func indexTokens(in []token) tokens {
+ var t tokens
+ t.indexTokens(in)
+ return t
+}
+
+func (t *tokens) indexTokens(in []token) {
+ t.Reset()
+ for _, tok := range in {
+ if tok < matchType {
+ t.AddLiteral(tok.literal())
+ continue
+ }
+ t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask)
+ }
+}
+
+// emitLiteral writes a literal chunk to the tokens.
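+// It also updates litHist, which EstimatedBits later uses for its
+// entropy estimate.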
+func emitLiteral(dst *tokens, lit []byte) {
+ for _, v := range lit {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+}
+
+func (t *tokens) AddLiteral(lit byte) {
+ t.tokens[t.n] = token(lit)
+ t.litHist[lit]++
+ t.n++
+}
+
+// from https://stackoverflow.com/a/28730362
+func mFastLog2(val float32) float32 {
+ ux := int32(math.Float32bits(val))
+ log2 := (float32)(((ux >> 23) & 255) - 128)
+ ux &= -0x7f800001
+ ux += 127 << 23
+ uval := math.Float32frombits(uint32(ux))
+ log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
+ return log2
+}
+
+// EstimatedBits will return a minimum size estimated by an *optimal*
+// compression of the block.
+func (t *tokens) EstimatedBits() int {
+ shannon := float32(0)
+ bits := int(0)
+ nMatches := 0
+ total := int(t.n) + t.nFilled
+ if total > 0 {
+ invTotal := 1.0 / float32(total)
+ for _, v := range t.litHist[:] {
+ if v > 0 {
+ n := float32(v)
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+ }
+ }
+ // Just add 15 for EOB
+ shannon += 15
+ for i, v := range t.extraHist[1 : literalCount-256] {
+ if v > 0 {
+ n := float32(v)
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+ bits += int(lengthExtraBits[i&31]) * int(v)
+ nMatches += int(v)
+ }
+ }
+ }
+ if nMatches > 0 {
+ invTotal := 1.0 / float32(nMatches)
+ for i, v := range t.offHist[:offsetCodeCount] {
+ if v > 0 {
+ n := float32(v)
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+ bits += int(offsetExtraBits[i&31]) * int(v)
+ }
+ }
+ }
+ return int(shannon) + bits
+}
+
+// AddMatch adds a match to the tokens.
+// This function is very sensitive to inlining and right on the border.
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
+ if debugDeflate {
+ if xlength >= maxMatchLength+baseMatchLength {
+ panic(fmt.Errorf("invalid length: %v", xlength))
+ }
+ if xoffset >= maxMatchOffset+baseMatchOffset {
+ panic(fmt.Errorf("invalid offset: %v", xoffset))
+ }
+ }
+ oCode := offsetCode(xoffset)
+ xoffset |= oCode << 16
+
+ t.extraHist[lengthCodes1[uint8(xlength)]]++
+ t.offHist[oCode&31]++
+ t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
+ t.n++
+}
+
+// AddMatchLong adds a match to the tokens, potentially longer than max match length.
+// Length should NOT have the base subtracted, only offset should.
+func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
+ if debugDeflate {
+ if xoffset >= maxMatchOffset+baseMatchOffset {
+ panic(fmt.Errorf("invalid offset: %v", xoffset))
+ }
+ }
+ oc := offsetCode(xoffset)
+ xoffset |= oc << 16
+ for xlength > 0 {
+ xl := xlength
+ if xl > 258 {
+ // We need to have at least baseMatchLength left over for next loop.
+ if xl > 258+baseMatchLength {
+ xl = 258
+ } else {
+ xl = 258 - baseMatchLength
+ }
+ }
+ xlength -= xl
+ xl -= baseMatchLength
+ t.extraHist[lengthCodes1[uint8(xl)]]++
+ t.offHist[oc&31]++
+ t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+ t.n++
+ }
+}
+
+func (t *tokens) AddEOB() {
+ t.tokens[t.n] = token(endBlockMarker)
+ t.extraHist[0]++
+ t.n++
+}
+
+func (t *tokens) Slice() []token {
+ return t.tokens[:t.n]
+}
+
+// Returns the literal of a literal token.
+func (t token) literal() uint8 { return uint8(t - literalType) }
+
+// Returns the extra offset of a match token.
+func (t token) offset() uint32 { return uint32(t) & offsetMask }
+
+func (t token) length() uint8 { return uint8(t >> lengthShift) }
+
+// Convert length to code.
+func lengthCode(len uint8) uint8 { return lengthCodes[len] }
+
+// Returns the offset code corresponding to a specific offset
+func offsetCode(off uint32) uint32 {
+ if false {
+ if off < uint32(len(offsetCodes)) {
+ return offsetCodes[off&255]
+ } else if off>>7 < uint32(len(offsetCodes)) {
+ return offsetCodes[(off>>7)&255] + 14
+ } else {
+ return offsetCodes[(off>>14)&255] + 28
+ }
+ }
+ if off < uint32(len(offsetCodes)) {
+ return offsetCodes[uint8(off)]
+ }
+ return offsetCodes14[uint8(off>>7)]
+}
diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip.go b/vendor/github.com/klauspost/compress/gzip/gunzip.go
new file mode 100644
index 000000000..00a0a2c38
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gzip/gunzip.go
@@ -0,0 +1,380 @@
+// Copyright 2009 The Go Authors.
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gzip implements reading and writing of gzip format compressed files, +// as specified in RFC 1952. +package gzip + +import ( + "bufio" + "compress/gzip" + "encoding/binary" + "hash/crc32" + "io" + "time" + + "github.com/klauspost/compress/flate" +) + +const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + flagText = 1 << 0 + flagHdrCrc = 1 << 1 + flagExtra = 1 << 2 + flagName = 1 << 3 + flagComment = 1 << 4 +) + +var ( + // ErrChecksum is returned when reading GZIP data that has an invalid checksum. + ErrChecksum = gzip.ErrChecksum + // ErrHeader is returned when reading GZIP data that has an invalid header. + ErrHeader = gzip.ErrHeader +) + +var le = binary.LittleEndian + +// noEOF converts io.EOF to io.ErrUnexpectedEOF. +func noEOF(err error) error { + if err == io.EOF { + return io.ErrUnexpectedEOF + } + return err +} + +// The gzip file stores a header giving metadata about the compressed file. +// That header is exposed as the fields of the Writer and Reader structs. +// +// Strings must be UTF-8 encoded and may only contain Unicode code points +// U+0001 through U+00FF, due to limitations of the GZIP file format. +type Header struct { + Comment string // comment + Extra []byte // "extra data" + ModTime time.Time // modification time + Name string // file name + OS byte // operating system type +} + +// A Reader is an io.Reader that can be read to retrieve +// uncompressed data from a gzip-format compressed file. +// +// In general, a gzip file can be a concatenation of gzip files, +// each with its own header. Reads from the Reader +// return the concatenation of the uncompressed data of each. +// Only the first header is recorded in the Reader fields. +// +// Gzip files store a length and checksum of the uncompressed data. +// The Reader will return a ErrChecksum when Read +// reaches the end of the uncompressed data if it does not +// have the expected length or checksum. Clients should treat data +// returned by Read as tentative until they receive the io.EOF +// marking the end of the data. +type Reader struct { + Header // valid after NewReader or Reader.Reset + r flate.Reader + br *bufio.Reader + decompressor io.ReadCloser + digest uint32 // CRC-32, IEEE polynomial (section 8) + size uint32 // Uncompressed size (section 2.3.1) + buf [512]byte + err error + multistream bool +} + +// NewReader creates a new Reader reading the given reader. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// +// It is the caller's responsibility to call Close on the Reader when done. +// +// The Reader.Header fields will be valid in the Reader returned. +func NewReader(r io.Reader) (*Reader, error) { + z := new(Reader) + if err := z.Reset(r); err != nil { + return nil, err + } + return z, nil +} + +// Reset discards the Reader z's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) error { + *z = Reader{ + decompressor: z.decompressor, + multistream: true, + br: z.br, + } + if rr, ok := r.(flate.Reader); ok { + z.r = rr + } else { + // Reuse if we can. 
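+ // Reusing the existing bufio.Reader avoids an allocation on each Reset.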
+ if z.br != nil { + z.br.Reset(r) + } else { + z.br = bufio.NewReader(r) + } + z.r = z.br + } + z.Header, z.err = z.readHeader() + return z.err +} + +// Multistream controls whether the reader supports multistream files. +// +// If enabled (the default), the Reader expects the input to be a sequence +// of individually gzipped data streams, each with its own header and +// trailer, ending at EOF. The effect is that the concatenation of a sequence +// of gzipped files is treated as equivalent to the gzip of the concatenation +// of the sequence. This is standard behavior for gzip readers. +// +// Calling Multistream(false) disables this behavior; disabling the behavior +// can be useful when reading file formats that distinguish individual gzip +// data streams or mix gzip data streams with other data streams. +// In this mode, when the Reader reaches the end of the data stream, +// Read returns io.EOF. If the underlying reader implements io.ByteReader, +// it will be left positioned just after the gzip stream. +// To start the next stream, call z.Reset(r) followed by z.Multistream(false). +// If there is no next stream, z.Reset(r) will return io.EOF. +func (z *Reader) Multistream(ok bool) { + z.multistream = ok +} + +// readString reads a NUL-terminated string from z.r. +// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and +// will output a string encoded using UTF-8. +// This method always updates z.digest with the data read. +func (z *Reader) readString() (string, error) { + var err error + needConv := false + for i := 0; ; i++ { + if i >= len(z.buf) { + return "", ErrHeader + } + z.buf[i], err = z.r.ReadByte() + if err != nil { + return "", err + } + if z.buf[i] > 0x7f { + needConv = true + } + if z.buf[i] == 0 { + // Digest covers the NUL terminator. + z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1]) + + // Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1). + if needConv { + s := make([]rune, 0, i) + for _, v := range z.buf[:i] { + s = append(s, rune(v)) + } + return string(s), nil + } + return string(z.buf[:i]), nil + } + } +} + +// readHeader reads the GZIP header according to section 2.3.1. +// This method does not set z.err. +func (z *Reader) readHeader() (hdr Header, err error) { + if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil { + // RFC 1952, section 2.2, says the following: + // A gzip file consists of a series of "members" (compressed data sets). + // + // Other than this, the specification does not clarify whether a + // "series" is defined as "one or more" or "zero or more". To err on the + // side of caution, Go interprets this to mean "zero or more". + // Thus, it is okay to return io.EOF here. + return hdr, err + } + if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate { + return hdr, ErrHeader + } + flg := z.buf[3] + hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0) + // z.buf[8] is XFL and is currently ignored. 
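+ // Fixed 10-byte header layout (RFC 1952): bytes 0-1 magic, 2 method,
+ // 3 flags, 4-7 mtime, 8 XFL, 9 OS.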
+ hdr.OS = z.buf[9] + z.digest = crc32.ChecksumIEEE(z.buf[:10]) + + if flg&flagExtra != 0 { + if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil { + return hdr, noEOF(err) + } + z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2]) + data := make([]byte, le.Uint16(z.buf[:2])) + if _, err = io.ReadFull(z.r, data); err != nil { + return hdr, noEOF(err) + } + z.digest = crc32.Update(z.digest, crc32.IEEETable, data) + hdr.Extra = data + } + + var s string + if flg&flagName != 0 { + if s, err = z.readString(); err != nil { + return hdr, err + } + hdr.Name = s + } + + if flg&flagComment != 0 { + if s, err = z.readString(); err != nil { + return hdr, err + } + hdr.Comment = s + } + + if flg&flagHdrCrc != 0 { + if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil { + return hdr, noEOF(err) + } + digest := le.Uint16(z.buf[:2]) + if digest != uint16(z.digest) { + return hdr, ErrHeader + } + } + + // Reserved FLG bits must be zero. + if flg>>5 != 0 { + return hdr, ErrHeader + } + + z.digest = 0 + if z.decompressor == nil { + z.decompressor = flate.NewReader(z.r) + } else { + z.decompressor.(flate.Resetter).Reset(z.r, nil) + } + return hdr, nil +} + +// Read implements io.Reader, reading uncompressed bytes from its underlying Reader. +func (z *Reader) Read(p []byte) (n int, err error) { + if z.err != nil { + return 0, z.err + } + + for n == 0 { + n, z.err = z.decompressor.Read(p) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n]) + z.size += uint32(n) + if z.err != io.EOF { + // In the normal case we return here. + return n, z.err + } + + // Finished file; check checksum and size. + if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil { + z.err = noEOF(err) + return n, z.err + } + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return n, z.err + } + z.digest, z.size = 0, 0 + + // File is ok; check if there is another. + if !z.multistream { + return n, io.EOF + } + z.err = nil // Remove io.EOF + + if _, z.err = z.readHeader(); z.err != nil { + return n, z.err + } + } + + return n, nil +} + +type crcer interface { + io.Writer + Sum32() uint32 + Reset() +} +type crcUpdater struct { + z *Reader +} + +func (c *crcUpdater) Write(p []byte) (int, error) { + c.z.digest = crc32.Update(c.z.digest, crc32.IEEETable, p) + return len(p), nil +} + +func (c *crcUpdater) Sum32() uint32 { + return c.z.digest +} + +func (c *crcUpdater) Reset() { + c.z.digest = 0 +} + +// WriteTo support the io.WriteTo interface for io.Copy and friends. +func (z *Reader) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + crcWriter := crcer(crc32.NewIEEE()) + if z.digest != 0 { + crcWriter = &crcUpdater{z: z} + } + for { + if z.err != nil { + if z.err == io.EOF { + return total, nil + } + return total, z.err + } + + // We write both to output and digest. + mw := io.MultiWriter(w, crcWriter) + n, err := z.decompressor.(io.WriterTo).WriteTo(mw) + total += n + z.size += uint32(n) + if err != nil { + z.err = err + return total, z.err + } + + // Finished file; check checksum + size. + if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + z.err = err + return total, err + } + z.digest = crcWriter.Sum32() + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return total, z.err + } + z.digest, z.size = 0, 0 + + // File is ok; check if there is another. 
+ if !z.multistream { + return total, nil + } + crcWriter.Reset() + z.err = nil // Remove io.EOF + + if _, z.err = z.readHeader(); z.err != nil { + if z.err == io.EOF { + return total, nil + } + return total, z.err + } + } +} + +// Close closes the Reader. It does not close the underlying io.Reader. +// In order for the GZIP checksum to be verified, the reader must be +// fully consumed until the io.EOF. +func (z *Reader) Close() error { return z.decompressor.Close() } diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go new file mode 100644 index 000000000..5bc720593 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/gzip.go @@ -0,0 +1,290 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gzip + +import ( + "errors" + "fmt" + "hash/crc32" + "io" + + "github.com/klauspost/compress/flate" +) + +// These constants are copied from the flate package, so that code that imports +// "compress/gzip" does not also have to import "compress/flate". +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression + HuffmanOnly = flate.HuffmanOnly + + // StatelessCompression will do compression but without maintaining any state + // between Write calls. + // There will be no memory kept between Write calls, + // but compression and speed will be suboptimal. + // Because of this, the size of actual Write calls will affect output size. + StatelessCompression = -3 +) + +// A Writer is an io.WriteCloser. +// Writes to a Writer are compressed and written to w. +type Writer struct { + Header // written at first call to Write, Flush, or Close + w io.Writer + level int + err error + compressor *flate.Writer + digest uint32 // CRC-32, IEEE polynomial (section 8) + size uint32 // Uncompressed size (section 2.3.1) + wroteHeader bool + closed bool + buf [10]byte +} + +// NewWriter returns a new Writer. +// Writes to the returned writer are compressed and written to w. +// +// It is the caller's responsibility to call Close on the WriteCloser when done. +// Writes may be buffered and not flushed until Close. +// +// Callers that wish to set the fields in Writer.Header must do so before +// the first call to Write, Flush, or Close. +func NewWriter(w io.Writer) *Writer { + z, _ := NewWriterLevel(w, DefaultCompression) + return z +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// +// The compression level can be DefaultCompression, NoCompression, or any +// integer value between BestSpeed and BestCompression inclusive. The error +// returned will be nil if the level is valid. +func NewWriterLevel(w io.Writer, level int) (*Writer, error) { + if level < StatelessCompression || level > BestCompression { + return nil, fmt.Errorf("gzip: invalid compression level: %d", level) + } + z := new(Writer) + z.init(w, level) + return z, nil +} + +// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow. +const MinCustomWindowSize = flate.MinCustomWindowSize + +// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow. 
+const MaxCustomWindowSize = flate.MaxCustomWindowSize + +// NewWriterWindow returns a new Writer compressing data with a custom window size. +// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize. +func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) { + if windowSize < MinCustomWindowSize { + return nil, errors.New("gzip: requested window size less than MinWindowSize") + } + if windowSize > MaxCustomWindowSize { + return nil, errors.New("gzip: requested window size bigger than MaxCustomWindowSize") + } + + z := new(Writer) + z.init(w, -windowSize) + return z, nil +} + +func (z *Writer) init(w io.Writer, level int) { + compressor := z.compressor + if level != StatelessCompression { + if compressor != nil { + compressor.Reset(w) + } + } + + *z = Writer{ + Header: Header{ + OS: 255, // unknown + }, + w: w, + level: level, + compressor: compressor, + } +} + +// Reset discards the Writer z's state and makes it equivalent to the +// result of its original state from NewWriter or NewWriterLevel, but +// writing to w instead. This permits reusing a Writer rather than +// allocating a new one. +func (z *Writer) Reset(w io.Writer) { + z.init(w, z.level) +} + +// writeBytes writes a length-prefixed byte slice to z.w. +func (z *Writer) writeBytes(b []byte) error { + if len(b) > 0xffff { + return errors.New("gzip.Write: Extra data is too large") + } + le.PutUint16(z.buf[:2], uint16(len(b))) + _, err := z.w.Write(z.buf[:2]) + if err != nil { + return err + } + _, err = z.w.Write(b) + return err +} + +// writeString writes a UTF-8 string s in GZIP's format to z.w. +// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). +func (z *Writer) writeString(s string) (err error) { + // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII. + needconv := false + for _, v := range s { + if v == 0 || v > 0xff { + return errors.New("gzip.Write: non-Latin-1 header string") + } + if v > 0x7f { + needconv = true + } + } + if needconv { + b := make([]byte, 0, len(s)) + for _, v := range s { + b = append(b, byte(v)) + } + _, err = z.w.Write(b) + } else { + _, err = io.WriteString(z.w, s) + } + if err != nil { + return err + } + // GZIP strings are NUL-terminated. + z.buf[0] = 0 + _, err = z.w.Write(z.buf[:1]) + return err +} + +// Write writes a compressed form of p to the underlying io.Writer. The +// compressed bytes are not necessarily flushed until the Writer is closed. +func (z *Writer) Write(p []byte) (int, error) { + if z.err != nil { + return 0, z.err + } + var n int + // Write the GZIP header lazily. 
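+ // Deferring the header until the first Write, Flush or Close lets
+ // callers set Header fields (Name, Extra, Comment, ModTime) after
+ // NewWriter and before any data is written.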
+ if !z.wroteHeader { + z.wroteHeader = true + z.buf[0] = gzipID1 + z.buf[1] = gzipID2 + z.buf[2] = gzipDeflate + z.buf[3] = 0 + if z.Extra != nil { + z.buf[3] |= 0x04 + } + if z.Name != "" { + z.buf[3] |= 0x08 + } + if z.Comment != "" { + z.buf[3] |= 0x10 + } + le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix())) + if z.level == BestCompression { + z.buf[8] = 2 + } else if z.level == BestSpeed { + z.buf[8] = 4 + } else { + z.buf[8] = 0 + } + z.buf[9] = z.OS + n, z.err = z.w.Write(z.buf[:10]) + if z.err != nil { + return n, z.err + } + if z.Extra != nil { + z.err = z.writeBytes(z.Extra) + if z.err != nil { + return n, z.err + } + } + if z.Name != "" { + z.err = z.writeString(z.Name) + if z.err != nil { + return n, z.err + } + } + if z.Comment != "" { + z.err = z.writeString(z.Comment) + if z.err != nil { + return n, z.err + } + } + + if z.compressor == nil && z.level != StatelessCompression { + z.compressor, _ = flate.NewWriter(z.w, z.level) + } + } + z.size += uint32(len(p)) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p) + if z.level == StatelessCompression { + return len(p), flate.StatelessDeflate(z.w, p, false, nil) + } + n, z.err = z.compressor.Write(p) + return n, z.err +} + +// Flush flushes any pending compressed data to the underlying writer. +// +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. Flush does +// not return until the data has been written. If the underlying +// writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (z *Writer) Flush() error { + if z.err != nil { + return z.err + } + if z.closed || z.level == StatelessCompression { + return nil + } + if !z.wroteHeader { + z.Write(nil) + if z.err != nil { + return z.err + } + } + z.err = z.compressor.Flush() + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if z.err != nil { + return z.err + } + if z.closed { + return nil + } + z.closed = true + if !z.wroteHeader { + z.Write(nil) + if z.err != nil { + return z.err + } + } + if z.level == StatelessCompression { + z.err = flate.StatelessDeflate(z.w, nil, true, nil) + } else { + z.err = z.compressor.Close() + } + if z.err != nil { + return z.err + } + le.PutUint32(z.buf[:4], z.digest) + le.PutUint32(z.buf[4:8], z.size) + _, z.err = z.w.Write(z.buf[:8]) + return z.err +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 2aa6a95a0..2754bac6f 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -51,7 +51,7 @@ func emitCopy(dst []byte, offset, length int) int { i := 0 // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. 
The // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because + // length emitted down below is a little lower (at 60 = 64 - 4), because // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 9f17ce601..03744fbc7 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -554,6 +554,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if debugDecoder { printf("Compression modes: 0b%b", compMode) } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } for i := uint(0); i < 3; i++ { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 2cfe925ad..32a7f401d 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -427,6 +427,16 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { return nil } +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + // fuzzFseEncoder can be used to fuzz the FSE encoder. func fuzzFseEncoder(data []byte) int { if len(data) > maxSequences || len(data) < 2 { @@ -479,6 +489,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if len(b.sequences) == 0 { return b.encodeLits(b.literals, rawAllLits) } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + // We want some difference to at least account for the headers. saved := b.size - len(b.literals) - (b.size >> 6) if saved < 16 { diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f04aaa21e..bbca17234 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -82,7 +82,7 @@ var ( // can run multiple concurrent stateless decodes. It is even possible to // use stateless decodes while a stream is being decoded. // -// The Reset function can be used to initiate a new stream, which is will considerably +// The Reset function can be used to initiate a new stream, which will considerably // reduce the allocations normally caused by NewReader. 
func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { initPredefined() diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 87f42879a..4613724e9 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -135,8 +135,20 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { break } + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 20d25b0e0..a4f5bf91f 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -102,9 +102,20 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { e.cur = e.maxMatchOff break } - + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] diff --git a/vendor/github.com/knadh/koanf/v2/go.work b/vendor/github.com/knadh/koanf/v2/go.work index da5b46d30..b5337e02c 100644 --- a/vendor/github.com/knadh/koanf/v2/go.work +++ b/vendor/github.com/knadh/koanf/v2/go.work @@ -2,5 +2,30 @@ go 1.18 use ( . 
+ ./examples + ./maps + ./parsers/dotenv + ./parsers/hcl + ./parsers/hjson + ./parsers/json + ./parsers/kdl + ./parsers/nestedtext + ./parsers/toml + ./parsers/yaml + ./providers/appconfig + ./providers/basicflag + ./providers/confmap + ./providers/consul + ./providers/env + ./providers/etcd + ./providers/file + ./providers/fs + ./providers/nats + ./providers/parameterstore + ./providers/posflag + ./providers/rawbytes + ./providers/s3 + ./providers/structs + ./providers/vault ./tests ) diff --git a/vendor/github.com/knadh/koanf/v2/go.work.sum b/vendor/github.com/knadh/koanf/v2/go.work.sum new file mode 100644 index 000000000..b879b3bf7 --- /dev/null +++ b/vendor/github.com/knadh/koanf/v2/go.work.sum @@ -0,0 +1,155 @@ +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= 
+cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= 
+cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= +cloud.google.com/go/serviceusage v1.6.0/go.mod 
h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0= 
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/linode/linodego/.golangci.yml b/vendor/github.com/linode/linodego/.golangci.yml index bc15e56d7..063292f35 100644 --- a/vendor/github.com/linode/linodego/.golangci.yml +++ b/vendor/github.com/linode/linodego/.golangci.yml @@ -7,22 +7,15 @@ linters-settings: check-blank: true govet: - check-shadowing: true - enable: - atomicalign - enable-all: false - disable: - shadow + enable-all: false disable-all: false - golint: - min-confidence: 0.8 gocyclo: min-complexity: 30 gocognit: min-complexity: 30 - maligned: - suggest-new: true dupl: threshold: 100 @@ -87,4 +80,5 @@ linters: - depguard - tagalign - inamedparam + - perfsprint fast: false diff --git a/vendor/github.com/linode/linodego/Makefile b/vendor/github.com/linode/linodego/Makefile index 3597c2a7b..460dd0802 100644 --- a/vendor/github.com/linode/linodego/Makefile +++ b/vendor/github.com/linode/linodego/Makefile @@ -25,9 +25,10 @@ citest: lint test testunit: go test -v $(PACKAGES) $(ARGS) + cd test && make testunit testint: - cd test && make test + cd test && make testint testcov-func: @go test -v -coverprofile="coverage.txt" . 
> /dev/null 2>&1
diff --git a/vendor/github.com/linode/linodego/README.md b/vendor/github.com/linode/linodego/README.md
index cf73b4dfd..ec07e731d 100644
--- a/vendor/github.com/linode/linodego/README.md
+++ b/vendor/github.com/linode/linodego/README.md
@@ -1,6 +1,6 @@
 # linodego
-![Tests](https://img.shields.io/github/actions/workflow/status/linode/linodego/test.yml?branch=main)
+![Tests](https://img.shields.io/github/actions/workflow/status/linode/linodego/ci.yml?branch=main)
 [![Release](https://img.shields.io/github/v/release/linode/linodego)](https://github.com/linode/linodego/releases/latest)
 [![GoDoc](https://godoc.org/github.com/linode/linodego?status.svg)](https://godoc.org/github.com/linode/linodego)
 [![Go Report Card](https://goreportcard.com/badge/github.com/linode/linodego)](https://goreportcard.com/report/github.com/linode/linodego)
diff --git a/vendor/github.com/linode/linodego/account_availability.go b/vendor/github.com/linode/linodego/account_availability.go
index 9a846415c..d0341083b 100644
--- a/vendor/github.com/linode/linodego/account_availability.go
+++ b/vendor/github.com/linode/linodego/account_availability.go
@@ -8,13 +8,16 @@ import (
 	"github.com/go-resty/resty/v2"
 )
 
-// AccountAvailability returns the resources information in a region which are NOT available to an account.
+// AccountAvailability describes the availability of resources in a region to an account.
 type AccountAvailability struct {
 	// region id
 	Region string `json:"region"`
 
 	// the unavailable resources in a region to the customer
 	Unavailable []string `json:"unavailable"`
+
+	// the available resources in a region to the customer
+	Available []string `json:"available"`
 }
 
 // AccountAvailabilityPagedResponse represents a paginated Account Availability API response
@@ -38,7 +41,7 @@ func (resp *AccountAvailabilityPagedResponse) castResult(r *resty.Request, e str
 	return castedRes.Pages, castedRes.Results, nil
 }
 
-// ListAccountAvailabilities lists all available regions and the resources which are NOT available to the account.
+// ListAccountAvailabilities lists all regions and the availability of resources in each to the account.
 func (c *Client) ListAccountAvailabilities(ctx context.Context, opts *ListOptions) ([]AccountAvailability, error) {
 	response := AccountAvailabilityPagedResponse{}
 	err := c.listHelper(ctx, &response, opts)
@@ -48,7 +51,7 @@ func (c *Client) ListAccountAvailabilities(ctx context.Context, opts *ListOption
 	return response.Data, nil
 }
 
-// GetAccountAvailability gets the unavailable resources in a region to the customer.
+// GetAccountAvailability gets the availability of resources in a region to the customer.
 func (c *Client) GetAccountAvailability(ctx context.Context, regionID string) (*AccountAvailability, error) {
 	req := c.R(ctx).SetResult(&AccountAvailability{})
 	regionID = url.PathEscape(regionID)
diff --git a/vendor/github.com/linode/linodego/account_events.go b/vendor/github.com/linode/linodego/account_events.go
index 8a9d9667f..b7f18a55d 100644
--- a/vendor/github.com/linode/linodego/account_events.go
+++ b/vendor/github.com/linode/linodego/account_events.go
@@ -62,7 +62,7 @@ const (
 	ActionBackupsRestore         EventAction = "backups_restore"
 	ActionCommunityQuestionReply EventAction = "community_question_reply"
 	ActionCommunityLike          EventAction = "community_like"
-	ActionCreateCardUpdated      EventAction = "credit_card_updated"
+	ActionCreditCardUpdated      EventAction = "credit_card_updated"
 	ActionDatabaseCreate         EventAction = "database_create"
 	ActionDatabaseDegraded       EventAction = "database_degraded"
 	ActionDatabaseDelete         EventAction = "database_delete"
@@ -127,6 +127,9 @@ const (
 	ActionLinodeConfigUpdate       EventAction = "linode_config_update"
 	ActionLishBoot                 EventAction = "lish_boot"
 	ActionLKENodeCreate            EventAction = "lke_node_create"
+	ActionLKEControlPlaneACLCreate EventAction = "lke_control_plane_acl_create"
+	ActionLKEControlPlaneACLUpdate EventAction = "lke_control_plane_acl_update"
+	ActionLKEControlPlaneACLDelete EventAction = "lke_control_plane_acl_delete"
 	ActionLongviewClientCreate     EventAction = "longviewclient_create"
 	ActionLongviewClientDelete     EventAction = "longviewclient_delete"
 	ActionLongviewClientUpdate     EventAction = "longviewclient_update"
@@ -149,6 +152,9 @@ const (
 	ActionOAuthClientDelete      EventAction = "oauth_client_delete"
 	ActionOAuthClientSecretReset EventAction = "oauth_client_secret_reset" //#nosec G101
 	ActionOAuthClientUpdate      EventAction = "oauth_client_update"
+	ActionOBJAccessKeyCreate     EventAction = "obj_access_key_create"
+	ActionOBJAccessKeyDelete     EventAction = "obj_access_key_delete"
+	ActionOBJAccessKeyUpdate     EventAction = "obj_access_key_update"
 	ActionPaymentMethodAdd       EventAction = "payment_method_add"
 	ActionPaymentSubmitted       EventAction = "payment_submitted"
 	ActionPasswordReset          EventAction = "password_reset"
@@ -190,9 +196,13 @@ const (
 	ActionVPCSubnetDelete EventAction = "subnet_delete"
 	ActionVPCSubnetUpdate EventAction = "subnet_update"
 
-	// deprecated due to incorrect spelling,
+	// Deprecated: incorrect spelling,
 	// to be removed in the next major version release.
 	ActionVolumeDelte EventAction = "volume_delete"
+
+	// Deprecated: incorrect spelling,
+	// to be removed in the next major version release.
+	ActionCreateCardUpdated = ActionCreditCardUpdated
 )
 
 // EntityType constants start with Entity and include Linode API Event Entity Types
@@ -200,14 +210,31 @@ type EntityType string
 // EntityType constants are the entities an Event can be related to.
 const (
-	EntityLinode       EntityType = "linode"
-	EntityDisk         EntityType = "disk"
-	EntityDatabase     EntityType = "database"
-	EntityDomain       EntityType = "domain"
-	EntityFirewall     EntityType = "firewall"
-	EntityNodebalancer EntityType = "nodebalancer"
-	EntityVPC          EntityType = "vpc"
-	EntityVPCSubnet    EntityType = "subnet"
+	EntityAccount        EntityType = "account"
+	EntityBackups        EntityType = "backups"
+	EntityCommunity      EntityType = "community"
+	EntityDatabase       EntityType = "database"
+	EntityDisk           EntityType = "disk"
+	EntityDomain         EntityType = "domain"
+	EntityTransfer       EntityType = "entity_transfer"
+	EntityFirewall       EntityType = "firewall"
+	EntityImage          EntityType = "image"
+	EntityIPAddress      EntityType = "ipaddress"
+	EntityLinode         EntityType = "linode"
+	EntityLongview       EntityType = "longview"
+	EntityManagedService EntityType = "managed_service"
+	EntityNodebalancer   EntityType = "nodebalancer"
+	EntityOAuthClient    EntityType = "oauth_client"
+	EntityProfile        EntityType = "profile"
+	EntityStackscript    EntityType = "stackscript"
+	EntityTag            EntityType = "tag"
+	EntityTicket         EntityType = "ticket"
+	EntityToken          EntityType = "token"
+	EntityUser           EntityType = "user"
+	EntityUserSSHKey     EntityType = "user_ssh_key"
+	EntityVolume         EntityType = "volume"
+	EntityVPC            EntityType = "vpc"
+	EntityVPCSubnet      EntityType = "subnet"
 )
 
 // EventStatus constants start with Event and include Linode API Event Status values
diff --git a/vendor/github.com/linode/linodego/client.go b/vendor/github.com/linode/linodego/client.go
index a2d676f5e..4c9eedb36 100644
--- a/vendor/github.com/linode/linodego/client.go
+++ b/vendor/github.com/linode/linodego/client.go
@@ -10,7 +10,9 @@ import (
 	"path"
 	"path/filepath"
 	"reflect"
+	"regexp"
 	"strconv"
+	"strings"
 	"sync"
 	"time"
 
@@ -39,7 +41,8 @@ const (
 	// APISecondsPerPoll how frequently to poll for new Events or Status in WaitFor functions
 	APISecondsPerPoll = 3
 	// Maximum wait time for retries
-	APIRetryMaxWaitTime = time.Duration(30) * time.Second
+	APIRetryMaxWaitTime       = time.Duration(30) * time.Second
+	APIDefaultCacheExpiration = time.Minute * 15
 )
 
 var envDebug = false
@@ -87,7 +90,7 @@ type (
 )
 
 func init() {
-	// Wether or not we will enable Resty debugging output
+	// Whether we will enable Resty debugging output
 	if apiDebug, ok := os.LookupEnv("LINODE_DEBUG"); ok {
 		if parsed, err := strconv.ParseBool(apiDebug); err == nil {
 			envDebug = parsed
@@ -133,11 +136,42 @@ func (c *Client) SetLogger(logger Logger) *Client {
 // OnBeforeRequest adds a handler to the request body to run before the request is sent
 func (c *Client) OnBeforeRequest(m func(request *Request) error) {
-	c.resty.OnBeforeRequest(func(client *resty.Client, req *resty.Request) error {
+	c.resty.OnBeforeRequest(func(_ *resty.Client, req *resty.Request) error {
 		return m(req)
 	})
 }
 
+// UseURL parses the individual components of the given API URL and configures the client
+// accordingly.
+// For example:
+//
+//	client.UseURL("https://api.test.linode.com/v4beta")
+func (c *Client) UseURL(apiURL string) (*Client, error) {
+	parsedURL, err := url.Parse(apiURL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse URL: %w", err)
+	}
+
+	// Create a new URL excluding the path to use as the base URL
+	baseURL := &url.URL{
+		Host:   parsedURL.Host,
+		Scheme: parsedURL.Scheme,
+	}
+
+	c.SetBaseURL(baseURL.String())
+
+	versionMatches := regexp.MustCompile(`/v[a-zA-Z0-9]+`).FindAllString(parsedURL.Path, -1)
+
+	// Only set the version if a version is found in the URL, else use the default
+	if len(versionMatches) > 0 {
+		c.SetAPIVersion(
+			strings.Trim(versionMatches[len(versionMatches)-1], "/"),
+		)
+	}
+
+	return c, nil
+}
+
 // SetBaseURL sets the base URL of the Linode v4 API (https://api.linode.com/v4)
 func (c *Client) SetBaseURL(baseURL string) *Client {
 	baseURLPath, _ := url.Parse(baseURL)
@@ -378,6 +412,16 @@ func (c *Client) SetHeader(name, value string) {
 	c.resty.SetHeader(name, value)
 }
 
+func (c *Client) enableLogSanitization() *Client {
+	c.resty.OnRequestLog(func(r *resty.RequestLog) error {
+		// masking authorization header
+		r.Header.Set("Authorization", "Bearer *******************************")
+		return nil
+	})
+
+	return c
+}
+
 // NewClient factory to create new Client struct
 func NewClient(hc *http.Client) (client Client) {
 	if hc != nil {
@@ -387,7 +431,7 @@ func NewClient(hc *http.Client) (client Client) {
 	}
 
 	client.shouldCache = true
-	client.cacheExpiration = time.Minute * 15
+	client.cacheExpiration = APIDefaultCacheExpiration
 	client.cachedEntries = make(map[string]clientCacheEntry)
 	client.cachedEntryLock = &sync.RWMutex{}
 
@@ -421,10 +465,11 @@ func NewClient(hc *http.Client) (client Client) {
 	}
 
 	client.
-		SetRetryWaitTime((1000 * APISecondsPerPoll) * time.Millisecond).
+		SetRetryWaitTime(APISecondsPerPoll * time.Second).
 		SetPollDelay(APISecondsPerPoll * time.Second).
 		SetRetries().
-		SetDebug(envDebug)
+		SetDebug(envDebug).
+		enableLogSanitization()
 
 	return
 }
@@ -483,7 +528,7 @@ func (c *Client) preLoadConfig(configPath string) error {
 	}
 
 	// We don't want to load the profile until the user is actually making requests
-	c.OnBeforeRequest(func(request *Request) error {
+	c.OnBeforeRequest(func(_ *Request) error {
 		if c.loadedProfile != c.selectedProfile {
 			if err := c.UseProfile(c.selectedProfile); err != nil {
 				return err
diff --git a/vendor/github.com/linode/linodego/errors.go b/vendor/github.com/linode/linodego/errors.go
index fc3eb6547..ab9613be9 100644
--- a/vendor/github.com/linode/linodego/errors.go
+++ b/vendor/github.com/linode/linodego/errors.go
@@ -149,3 +149,34 @@ func NewError(err any) *Error {
 		return &Error{Code: ErrorUnsupported, Message: fmt.Sprintf("Unsupported type to linodego.NewError: %s", reflect.TypeOf(e))}
 	}
 }
+
+// IsNotFound reports whether err indicates a 404 Not Found error from the Linode API.
+func IsNotFound(err error) bool {
+	return ErrHasStatus(err, http.StatusNotFound)
+}
+
+// ErrHasStatus checks if err is an error from the Linode API, and whether it contains the given HTTP status code.
+// More than one status code may be given.
+// If len(code) == 0, or if err is nil or is not an [Error], ErrHasStatus returns false.
+func ErrHasStatus(err error, code ...int) bool {
+	if err == nil {
+		return false
+	}
+
+	// Short-circuit if the caller did not provide any status codes.
+ if len(code) == 0 { + return false + } + + var e *Error + if !errors.As(err, &e) { + return false + } + ec := e.StatusCode() + for _, c := range code { + if ec == c { + return true + } + } + return false +} diff --git a/vendor/github.com/linode/linodego/go.work b/vendor/github.com/linode/linodego/go.work index db3a7dbc0..193259886 100644 --- a/vendor/github.com/linode/linodego/go.work +++ b/vendor/github.com/linode/linodego/go.work @@ -1,4 +1,4 @@ -go 1.20 +go 1.21 use ( . diff --git a/vendor/github.com/linode/linodego/go.work.sum b/vendor/github.com/linode/linodego/go.work.sum index 2a469314f..295c2d24a 100644 --- a/vendor/github.com/linode/linodego/go.work.sum +++ b/vendor/github.com/linode/linodego/go.work.sum @@ -1,7 +1,15 @@ cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go v0.110.2 h1:sdFPBr6xG9/wkBbfhmUz/JmZC7X6LavQgcrVINrKiVA= +cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= +cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.0 h1:nBbNSZyDpkNlo3DepaaLKVuO7ClyifSAmNloSCZrHnQ= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= @@ -34,8 +42,11 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 
h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= @@ -50,6 +61,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad h1:E github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= @@ -64,11 +76,13 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= +github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= +github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -76,25 +90,39 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5 h1:zIaiqGYDQwa4HVx5wGRTXbx38Pqxjemn4BP98wpzpXo= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= @@ -107,7 +135,7 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/linode/linodego v0.20.1 h1:Kw5Qes0E0wlKVx5EbITI+F/ambO6G+PQyK0Yi7i4EyQ= github.com/linode/linodego v0.20.1/go.mod h1:XOWXRHjqeU2uPS84tKLgfWIfTlv3TYzCS0io4GOQzEI= @@ -129,12 +157,11 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw= github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= @@ -144,7 +171,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= github.com/rogpeppe/go-internal v1.9.0/go.mod 
h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -152,12 +178,17 @@ github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzu github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM= github.com/yuin/goldmark v1.3.5 h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs= github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= @@ -170,6 +201,10 @@ golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= @@ -178,24 +213,65 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= +google.golang.org/api 
v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/linode/linodego/images.go b/vendor/github.com/linode/linodego/images.go index 7875d6fdc..6ff8a3a89 100644 --- a/vendor/github.com/linode/linodego/images.go +++ b/vendor/github.com/linode/linodego/images.go @@ -3,9 +3,7 @@ package linodego import ( "context" "encoding/json" - "fmt" "io" - "net/url" "time" "github.com/go-resty/resty/v2" @@ -35,8 +33,10 @@ type Image struct { Size int `json:"size"` IsPublic bool `json:"is_public"` Deprecated bool `json:"deprecated"` + Updated *time.Time `json:"-"` Created *time.Time `json:"-"` Expiry *time.Time `json:"-"` + EOL *time.Time `json:"-"` } // ImageCreateOptions fields are those accepted by CreateImage @@ -82,8 +82,10 @@ func (i *Image) UnmarshalJSON(b []byte) error { p := struct { *Mask + Updated *parseabletime.ParseableTime `json:"updated"` Created *parseabletime.ParseableTime `json:"created"` Expiry *parseabletime.ParseableTime `json:"expiry"` + EOL *parseabletime.ParseableTime `json:"eol"` }{ Mask: (*Mask)(i), } 
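A note on the images.go hunks above and below: linodego hides timestamp fields from the default JSON decoder (`json:"-"`) and backfills them in a custom `UnmarshalJSON`, decoding through an alias type (`Mask`) so the inner decode does not recurse back into the custom method. Below is a minimal, self-contained sketch of the same pattern; all names are illustrative, and `parseableTime` merely stands in for linodego's internal `parseabletime` package.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// resource mirrors the vendored Image type: timestamp fields are hidden
// from the default decoder (`json:"-"`) and backfilled by UnmarshalJSON.
// These names are illustrative, not linodego's.
type resource struct {
	Label   string     `json:"label"`
	Created *time.Time `json:"-"`
	EOL     *time.Time `json:"-"`
}

// parseableTime stands in for linodego's internal parseabletime package,
// which parses the API's quote-wrapped "2006-01-02T15:04:05" timestamps.
type parseableTime time.Time

func (p *parseableTime) UnmarshalJSON(b []byte) error {
	t, err := time.Parse(`"2006-01-02T15:04:05"`, string(b))
	if err != nil {
		return err
	}
	*p = parseableTime(t)
	return nil
}

func (r *resource) UnmarshalJSON(b []byte) error {
	// The alias type drops resource's method set, so decoding into it
	// uses the default struct decoder instead of recursing into this method.
	type mask resource

	aux := struct {
		*mask
		Created *parseableTime `json:"created"`
		EOL     *parseableTime `json:"eol"`
	}{
		mask: (*mask)(r),
	}

	if err := json.Unmarshal(b, &aux); err != nil {
		return err
	}

	// Backfill the real time.Time fields from the parsed values;
	// a JSON null leaves the corresponding pointer nil.
	r.Created = (*time.Time)(aux.Created)
	r.EOL = (*time.Time)(aux.EOL)
	return nil
}

func main() {
	var r resource
	data := []byte(`{"label":"my-image","created":"2024-05-01T12:00:00","eol":null}`)
	if err := json.Unmarshal(data, &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Label, r.Created, r.EOL) // my-image 2024-05-01 12:00:00 +0000 UTC <nil>
}
```

Because the alias type sheds the method set, only the outer `json.Unmarshal` goes through the custom decoder; the hunk that follows shows the backfill step for the new `Updated` and `EOL` fields.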
@@ -92,8 +94,10 @@ func (i *Image) UnmarshalJSON(b []byte) error { return err } + i.Updated = (*time.Time)(p.Updated) i.Created = (*time.Time)(p.Created) i.Expiry = (*time.Time)(p.Expiry) + i.EOL = (*time.Time)(p.EOL) return nil } @@ -105,110 +109,66 @@ func (i Image) GetUpdateOptions() (iu ImageUpdateOptions) { return } -// ImagesPagedResponse represents a linode API response for listing of images -type ImagesPagedResponse struct { - *PageOptions - Data []Image `json:"data"` -} - -func (ImagesPagedResponse) endpoint(_ ...any) string { - return "images" -} - -func (resp *ImagesPagedResponse) castResult(r *resty.Request, e string) (int, int, error) { - res, err := coupleAPIErrors(r.SetResult(ImagesPagedResponse{}).Get(e)) - if err != nil { - return 0, 0, err - } - castedRes := res.Result().(*ImagesPagedResponse) - resp.Data = append(resp.Data, castedRes.Data...) - return castedRes.Pages, castedRes.Results, nil -} - // ListImages lists Images func (c *Client) ListImages(ctx context.Context, opts *ListOptions) ([]Image, error) { - response := ImagesPagedResponse{} - err := c.listHelper(ctx, &response, opts) - if err != nil { - return nil, err - } - return response.Data, nil + return getPaginatedResults[Image]( + ctx, + c, + "images", + opts, + ) } // GetImage gets the Image with the provided ID func (c *Client) GetImage(ctx context.Context, imageID string) (*Image, error) { - imageID = url.PathEscape(imageID) - - e := fmt.Sprintf("images/%s", imageID) - req := c.R(ctx).SetResult(&Image{}) - r, err := coupleAPIErrors(req.Get(e)) - if err != nil { - return nil, err - } - return r.Result().(*Image), nil + return doGETRequest[Image]( + ctx, + c, + formatAPIPath("images/%s", imageID), + ) } // CreateImage creates an Image func (c *Client) CreateImage(ctx context.Context, opts ImageCreateOptions) (*Image, error) { - body, err := json.Marshal(opts) - if err != nil { - return nil, err - } - - e := "images" - req := c.R(ctx).SetResult(&Image{}).SetBody(string(body)) - r, err := coupleAPIErrors(req.Post(e)) - if err != nil { - return nil, err - } - return r.Result().(*Image), nil + return doPOSTRequest[Image]( + ctx, + c, + "images", + opts, + ) } // UpdateImage updates the Image with the specified id func (c *Client) UpdateImage(ctx context.Context, imageID string, opts ImageUpdateOptions) (*Image, error) { - body, err := json.Marshal(opts) - if err != nil { - return nil, err - } - - imageID = url.PathEscape(imageID) - - e := fmt.Sprintf("images/%s", imageID) - req := c.R(ctx).SetResult(&Image{}).SetBody(string(body)) - r, err := coupleAPIErrors(req.Put(e)) - if err != nil { - return nil, err - } - return r.Result().(*Image), nil + return doPUTRequest[Image]( + ctx, + c, + formatAPIPath("images/%s", imageID), + opts, + ) } // DeleteImage deletes the Image with the specified id func (c *Client) DeleteImage(ctx context.Context, imageID string) error { - imageID = url.PathEscape(imageID) - e := fmt.Sprintf("images/%s", imageID) - _, err := coupleAPIErrors(c.R(ctx).Delete(e)) - return err + return doDELETERequest( + ctx, + c, + formatAPIPath("images/%s", imageID), + ) } // CreateImageUpload creates an Image and an upload URL func (c *Client) CreateImageUpload(ctx context.Context, opts ImageCreateUploadOptions) (*Image, string, error) { - body, err := json.Marshal(opts) + result, err := doPOSTRequest[ImageCreateUploadResponse]( + ctx, + c, + "images/upload", + opts, + ) if err != nil { return nil, "", err } - e := "images/upload" - req := 
c.R(ctx).SetResult(&ImageCreateUploadResponse{}).SetBody(string(body)) - r, err := coupleAPIErrors(req.Post(e)) - if err != nil { - return nil, "", err - } - - result, ok := r.Result().(*ImageCreateUploadResponse) - if !ok { - return nil, "", fmt.Errorf("failed to parse result") - } - return result.Image, result.UploadTo, nil } diff --git a/vendor/github.com/linode/linodego/instance_config_interfaces.go b/vendor/github.com/linode/linodego/instance_config_interfaces.go index d975ddcbc..cacce476e 100644 --- a/vendor/github.com/linode/linodego/instance_config_interfaces.go +++ b/vendor/github.com/linode/linodego/instance_config_interfaces.go @@ -16,13 +16,13 @@ type InstanceConfigInterface struct { Active bool `json:"active"` VPCID *int `json:"vpc_id"` SubnetID *int `json:"subnet_id"` - IPv4 VPCIPv4 `json:"ipv4"` + IPv4 *VPCIPv4 `json:"ipv4"` IPRanges []string `json:"ip_ranges"` } type VPCIPv4 struct { - VPC string `json:"vpc,omitempty"` - NAT1To1 string `json:"nat_1_1,omitempty"` + VPC string `json:"vpc,omitempty"` + NAT1To1 *string `json:"nat_1_1,omitempty"` } type InstanceConfigInterfaceCreateOptions struct { @@ -36,9 +36,9 @@ type InstanceConfigInterfaceCreateOptions struct { } type InstanceConfigInterfaceUpdateOptions struct { - Primary bool `json:"primary,omitempty"` - IPv4 *VPCIPv4 `json:"ipv4,omitempty"` - IPRanges []string `json:"ip_ranges,omitempty"` + Primary bool `json:"primary,omitempty"` + IPv4 *VPCIPv4 `json:"ipv4,omitempty"` + IPRanges *[]string `json:"ip_ranges,omitempty"` } type InstanceConfigInterfacesReorderOptions struct { @@ -67,20 +67,14 @@ func (i InstanceConfigInterface) GetCreateOptions() InstanceConfigInterfaceCreat opts.IPRanges = i.IPRanges } - if i.Purpose == InterfacePurposeVPC && - i.IPv4.NAT1To1 != "" && i.IPv4.VPC != "" { + if i.Purpose == InterfacePurposeVPC && i.IPv4 != nil { opts.IPv4 = &VPCIPv4{ VPC: i.IPv4.VPC, NAT1To1: i.IPv4.NAT1To1, } } - // workaround for API issue - if i.IPAMAddress == "222" { - opts.IPAMAddress = "" - } else { - opts.IPAMAddress = i.IPAMAddress - } + opts.IPAMAddress = i.IPAMAddress return opts } @@ -90,15 +84,20 @@ func (i InstanceConfigInterface) GetUpdateOptions() InstanceConfigInterfaceUpdat Primary: i.Primary, } - if i.Purpose == InterfacePurposeVPC { + if i.Purpose == InterfacePurposeVPC && i.IPv4 != nil { opts.IPv4 = &VPCIPv4{ VPC: i.IPv4.VPC, NAT1To1: i.IPv4.NAT1To1, } } - if len(i.IPRanges) > 0 { - opts.IPRanges = i.IPRanges + if i.IPRanges != nil { + // Copy the slice to prevent accidental + // mutations + copiedIPRanges := make([]string, len(i.IPRanges)) + copy(copiedIPRanges, i.IPRanges) + + opts.IPRanges = &copiedIPRanges } return opts diff --git a/vendor/github.com/linode/linodego/instance_disks.go b/vendor/github.com/linode/linodego/instance_disks.go index 6be455c9e..b776d5c9b 100644 --- a/vendor/github.com/linode/linodego/instance_disks.go +++ b/vendor/github.com/linode/linodego/instance_disks.go @@ -61,15 +61,13 @@ type InstanceDiskCreateOptions struct { Filesystem string `json:"filesystem,omitempty"` AuthorizedKeys []string `json:"authorized_keys,omitempty"` AuthorizedUsers []string `json:"authorized_users,omitempty"` - ReadOnly bool `json:"read_only,omitempty"` StackscriptID int `json:"stackscript_id,omitempty"` StackscriptData map[string]string `json:"stackscript_data,omitempty"` } // InstanceDiskUpdateOptions are InstanceDisk settings that can be used in 
updates type InstanceDiskUpdateOptions struct { - Label string `json:"label"` - ReadOnly bool `json:"read_only"` + Label string `json:"label"` } // endpoint gets the endpoint URL for InstanceDisks of a given Instance diff --git a/vendor/github.com/linode/linodego/instance_ips.go b/vendor/github.com/linode/linodego/instance_ips.go index beaeb0a21..bb3eb2364 100644 --- a/vendor/github.com/linode/linodego/instance_ips.go +++ b/vendor/github.com/linode/linodego/instance_ips.go @@ -19,6 +19,7 @@ type InstanceIPv4Response struct { Private []*InstanceIP `json:"private"` Shared []*InstanceIP `json:"shared"` Reserved []*InstanceIP `json:"reserved"` + VPC []*VPCIP `json:"vpc"` } // InstanceIP represents an Instance IP with additional DNS and networking details @@ -35,6 +36,23 @@ type InstanceIP struct { VPCNAT1To1 *InstanceIPNAT1To1 `json:"vpc_nat_1_1"` } +// VPCIP represents a private IP address in a VPC subnet with additional networking details +type VPCIP struct { + Address *string `json:"address"` + AddressRange *string `json:"address_range"` + Gateway string `json:"gateway"` + SubnetMask string `json:"subnet_mask"` + Prefix int `json:"prefix"` + LinodeID int `json:"linode_id"` + Region string `json:"region"` + Active bool `json:"active"` + NAT1To1 *string `json:"nat_1_1"` + VPCID int `json:"vpc_id"` + SubnetID int `json:"subnet_id"` + ConfigID int `json:"config_id"` + InterfaceID int `json:"interface_id"` +} + // InstanceIPv6Response contains the IPv6 addresses and ranges for an Instance type InstanceIPv6Response struct { LinkLocal *InstanceIP `json:"link_local"` diff --git a/vendor/github.com/linode/linodego/instances.go b/vendor/github.com/linode/linodego/instances.go index f56597305..a56c3321b 100644 --- a/vendor/github.com/linode/linodego/instances.go +++ b/vendor/github.com/linode/linodego/instances.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "net" - "net/url" "time" "github.com/go-resty/resty/v2" @@ -117,7 +116,6 @@ type InstanceCreateOptions struct { Region string `json:"region"` Type string `json:"type"` Label string `json:"label,omitempty"` - Group string `json:"group,omitempty"` RootPass string `json:"root_pass,omitempty"` AuthorizedKeys []string `json:"authorized_keys,omitempty"` AuthorizedUsers []string `json:"authorized_users,omitempty"` @@ -135,16 +133,21 @@ type InstanceCreateOptions struct { // Creation fields that need to be set explicitly false, "", or 0 use pointers SwapSize *int `json:"swap_size,omitempty"` Booted *bool `json:"booted,omitempty"` + + // Deprecated: group is a deprecated property denoting a group label for the Linode. + Group string `json:"group,omitempty"` } // InstanceUpdateOptions is an options struct used when Updating an Instance type InstanceUpdateOptions struct { Label string `json:"label,omitempty"` - Group string `json:"group,omitempty"` Backups *InstanceBackup `json:"backups,omitempty"` Alerts *InstanceAlert `json:"alerts,omitempty"` WatchdogEnabled *bool `json:"watchdog_enabled,omitempty"` Tags *[]string `json:"tags,omitempty"` + + // Deprecated: group is a deprecated property denoting a group label for the Linode. 
+ Group *string `json:"group,omitempty"` } // UnmarshalJSON implements the json.Unmarshaler interface @@ -173,7 +176,7 @@ func (i *Instance) UnmarshalJSON(b []byte) error { func (i *Instance) GetUpdateOptions() InstanceUpdateOptions { return InstanceUpdateOptions{ Label: i.Label, - Group: i.Group, + Group: &i.Group, Backups: i.Backups, Alerts: i.Alerts, WatchdogEnabled: &i.WatchdogEnabled, @@ -189,12 +192,14 @@ type InstanceCloneOptions struct { // LinodeID is an optional existing instance to use as the target of the clone LinodeID int `json:"linode_id,omitempty"` Label string `json:"label,omitempty"` - Group string `json:"group,omitempty"` BackupsEnabled bool `json:"backups_enabled"` Disks []int `json:"disks,omitempty"` Configs []int `json:"configs,omitempty"` PrivateIP bool `json:"private_ip,omitempty"` Metadata *InstanceMetadataOptions `json:"metadata,omitempty"` + + // Deprecated: group is a deprecated property denoting a group label for the Linode. + Group string `json:"group,omitempty"` } // InstanceResizeOptions is an options struct used when resizing an instance @@ -444,8 +449,10 @@ func (c *Client) MigrateInstance(ctx context.Context, linodeID int, opts Instanc // simpleInstanceAction is a helper for Instance actions that take no parameters // and return empty responses `{}` unless they return a standard error func (c *Client) simpleInstanceAction(ctx context.Context, action string, linodeID int) error { - action = url.PathEscape(action) - e := fmt.Sprintf("linode/instances/%d/%s", linodeID, action) - _, err := coupleAPIErrors(c.R(ctx).Post(e)) + _, err := doPOSTRequest[any, any]( + ctx, + c, + fmt.Sprintf("linode/instances/%d/%s", linodeID, action), + ) return err } diff --git a/vendor/github.com/linode/linodego/lke_clusters.go b/vendor/github.com/linode/linodego/lke_clusters.go index ff9b67659..7cc6d77a5 100644 --- a/vendor/github.com/linode/linodego/lke_clusters.go +++ b/vendor/github.com/linode/linodego/lke_clusters.go @@ -35,20 +35,20 @@ type LKECluster struct { // LKEClusterCreateOptions fields are those accepted by CreateLKECluster type LKEClusterCreateOptions struct { - NodePools []LKENodePoolCreateOptions `json:"node_pools"` - Label string `json:"label"` - Region string `json:"region"` - K8sVersion string `json:"k8s_version"` - Tags []string `json:"tags,omitempty"` - ControlPlane *LKEClusterControlPlane `json:"control_plane,omitempty"` + NodePools []LKENodePoolCreateOptions `json:"node_pools"` + Label string `json:"label"` + Region string `json:"region"` + K8sVersion string `json:"k8s_version"` + Tags []string `json:"tags,omitempty"` + ControlPlane *LKEClusterControlPlaneOptions `json:"control_plane,omitempty"` } // LKEClusterUpdateOptions fields are those accepted by UpdateLKECluster type LKEClusterUpdateOptions struct { - K8sVersion string `json:"k8s_version,omitempty"` - Label string `json:"label,omitempty"` - Tags *[]string `json:"tags,omitempty"` - ControlPlane *LKEClusterControlPlane `json:"control_plane,omitempty"` + K8sVersion string `json:"k8s_version,omitempty"` + Label string `json:"label,omitempty"` + Tags *[]string `json:"tags,omitempty"` + ControlPlane *LKEClusterControlPlaneOptions `json:"control_plane,omitempty"` } // LKEClusterAPIEndpoint fields are those returned by ListLKEClusterAPIEndpoints @@ -66,11 +66,6 @@ type LKEClusterDashboard struct { URL string `json:"url"` } -// LKEClusterControlPlane fields contained within the `control_plane` attribute of an LKE cluster. 
-type LKEClusterControlPlane struct { - HighAvailability bool `json:"high_availability"` -} - // LKEVersion fields are those returned by GetLKEVersion type LKEVersion struct { ID string `json:"id"` @@ -110,7 +105,14 @@ func (i LKECluster) GetCreateOptions() (o LKEClusterCreateOptions) { o.Region = i.Region o.K8sVersion = i.K8sVersion o.Tags = i.Tags - o.ControlPlane = &i.ControlPlane + + isHA := i.ControlPlane.HighAvailability + + o.ControlPlane = &LKEClusterControlPlaneOptions{ + HighAvailability: &isHA, + // ACL will not be populated in the control plane response + } + // @TODO copy NodePools? return } @@ -120,7 +122,14 @@ func (i LKECluster) GetUpdateOptions() (o LKEClusterUpdateOptions) { o.K8sVersion = i.K8sVersion o.Label = i.Label o.Tags = &i.Tags - o.ControlPlane = &i.ControlPlane + + isHA := i.ControlPlane.HighAvailability + + o.ControlPlane = &LKEClusterControlPlaneOptions{ + HighAvailability: &isHA, + // ACL will not be populated in the control plane response + } + return } diff --git a/vendor/github.com/linode/linodego/lke_clusters_control_plane.go b/vendor/github.com/linode/linodego/lke_clusters_control_plane.go new file mode 100644 index 000000000..d71da8ab1 --- /dev/null +++ b/vendor/github.com/linode/linodego/lke_clusters_control_plane.go @@ -0,0 +1,94 @@ +package linodego + +import "context" + +// LKEClusterControlPlane fields contained within the `control_plane` attribute of an LKE cluster. +type LKEClusterControlPlane struct { + HighAvailability bool `json:"high_availability"` +} + +// LKEClusterControlPlaneACLAddresses describes the +// allowed IP ranges for an LKE cluster's control plane. +type LKEClusterControlPlaneACLAddresses struct { + IPv4 []string `json:"ipv4"` + IPv6 []string `json:"ipv6"` +} + +// LKEClusterControlPlaneACL describes the ACL configuration +// for an LKE cluster's control plane. +type LKEClusterControlPlaneACL struct { + Enabled bool `json:"enabled"` + Addresses *LKEClusterControlPlaneACLAddresses `json:"addresses"` +} + +// LKEClusterControlPlaneACLAddressesOptions are the options used to +// specify the allowed IP ranges for an LKE cluster's control plane. +type LKEClusterControlPlaneACLAddressesOptions struct { + IPv4 *[]string `json:"ipv4,omitempty"` + IPv6 *[]string `json:"ipv6,omitempty"` +} + +// LKEClusterControlPlaneACLOptions represents the options used when +// configuring an LKE cluster's control plane ACL policy. +type LKEClusterControlPlaneACLOptions struct { + Enabled *bool `json:"enabled,omitempty"` + Addresses *LKEClusterControlPlaneACLAddressesOptions `json:"addresses,omitempty"` +} + +// LKEClusterControlPlaneOptions represents the options used when +// configuring an LKE cluster's control plane. +type LKEClusterControlPlaneOptions struct { + HighAvailability *bool `json:"high_availability,omitempty"` + ACL *LKEClusterControlPlaneACLOptions `json:"acl,omitempty"` +} + +// LKEClusterControlPlaneACLUpdateOptions represents the options +// available when updating the ACL configuration of an LKE cluster's +// control plane. +type LKEClusterControlPlaneACLUpdateOptions struct { + ACL LKEClusterControlPlaneACLOptions `json:"acl"` +} + +// LKEClusterControlPlaneACLResponse represents the response structure +// for the Client.GetLKEClusterControlPlaneACL(...) method. +type LKEClusterControlPlaneACLResponse struct { + ACL LKEClusterControlPlaneACL `json:"acl"` +} + +// GetLKEClusterControlPlaneACL gets the ACL configuration for the +// given cluster's control plane. 
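With `LKEClusterCreateOptions.ControlPlane` now taking the new `LKEClusterControlPlaneOptions`, high availability and an ACL can be requested together at creation time. A hedged sketch; the label, region, node type, and Kubernetes version are placeholder values:

```go
package main

import (
	"context"

	"github.com/linode/linodego"
)

// createHAClusterWithACL sketches cluster creation with the new
// pointer-based control plane options. All literal values are examples.
func createHAClusterWithACL(ctx context.Context, client *linodego.Client) (*linodego.LKECluster, error) {
	ha, aclEnabled := true, true
	return client.CreateLKECluster(ctx, linodego.LKEClusterCreateOptions{
		Label:      "example-cluster",
		Region:     "us-ord",
		K8sVersion: "1.29",
		NodePools: []linodego.LKENodePoolCreateOptions{
			{Type: "g6-standard-2", Count: 3},
		},
		ControlPlane: &linodego.LKEClusterControlPlaneOptions{
			HighAvailability: &ha,
			ACL: &linodego.LKEClusterControlPlaneACLOptions{
				Enabled: &aclEnabled,
			},
		},
	})
}
```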
+func (c *Client) GetLKEClusterControlPlaneACL(ctx context.Context, clusterID int) (*LKEClusterControlPlaneACLResponse, error) { + return doGETRequest[LKEClusterControlPlaneACLResponse]( + ctx, + c, + formatAPIPath("lke/clusters/%d/control_plane_acl", clusterID), + ) +} + +// UpdateLKEClusterControlPlaneACL updates the ACL configuration for the +// given cluster's control plane. +func (c *Client) UpdateLKEClusterControlPlaneACL( + ctx context.Context, + clusterID int, + opts LKEClusterControlPlaneACLUpdateOptions, +) (*LKEClusterControlPlaneACLResponse, error) { + return doPUTRequest[LKEClusterControlPlaneACLResponse]( + ctx, + c, + formatAPIPath("lke/clusters/%d/control_plane_acl", clusterID), + opts, + ) +} + +// DeleteLKEClusterControlPlaneACL deletes the ACL configuration for the +// given cluster's control plane. +func (c *Client) DeleteLKEClusterControlPlaneACL( + ctx context.Context, + clusterID int, +) error { + return doDELETERequest( + ctx, + c, + formatAPIPath("lke/clusters/%d/control_plane_acl", clusterID), + ) +} diff --git a/vendor/github.com/linode/linodego/nodebalancer_configs.go b/vendor/github.com/linode/linodego/nodebalancer_configs.go index c9f1870da..462c8d67f 100644 --- a/vendor/github.com/linode/linodego/nodebalancer_configs.go +++ b/vendor/github.com/linode/linodego/nodebalancer_configs.go @@ -120,22 +120,30 @@ type NodeBalancerConfigCreateOptions struct { // NodeBalancerConfigRebuildOptions used by RebuildNodeBalancerConfig type NodeBalancerConfigRebuildOptions struct { - Port int `json:"port"` - Protocol ConfigProtocol `json:"protocol,omitempty"` - ProxyProtocol ConfigProxyProtocol `json:"proxy_protocol,omitempty"` - Algorithm ConfigAlgorithm `json:"algorithm,omitempty"` - Stickiness ConfigStickiness `json:"stickiness,omitempty"` - Check ConfigCheck `json:"check,omitempty"` - CheckInterval int `json:"check_interval,omitempty"` - CheckAttempts int `json:"check_attempts,omitempty"` - CheckPath string `json:"check_path,omitempty"` - CheckBody string `json:"check_body,omitempty"` - CheckPassive *bool `json:"check_passive,omitempty"` - CheckTimeout int `json:"check_timeout,omitempty"` - CipherSuite ConfigCipher `json:"cipher_suite,omitempty"` - SSLCert string `json:"ssl_cert,omitempty"` - SSLKey string `json:"ssl_key,omitempty"` - Nodes []NodeBalancerNodeCreateOptions `json:"nodes"` + Port int `json:"port"` + Protocol ConfigProtocol `json:"protocol,omitempty"` + ProxyProtocol ConfigProxyProtocol `json:"proxy_protocol,omitempty"` + Algorithm ConfigAlgorithm `json:"algorithm,omitempty"` + Stickiness ConfigStickiness `json:"stickiness,omitempty"` + Check ConfigCheck `json:"check,omitempty"` + CheckInterval int `json:"check_interval,omitempty"` + CheckAttempts int `json:"check_attempts,omitempty"` + CheckPath string `json:"check_path,omitempty"` + CheckBody string `json:"check_body,omitempty"` + CheckPassive *bool `json:"check_passive,omitempty"` + CheckTimeout int `json:"check_timeout,omitempty"` + CipherSuite ConfigCipher `json:"cipher_suite,omitempty"` + SSLCert string `json:"ssl_cert,omitempty"` + SSLKey string `json:"ssl_key,omitempty"` + Nodes []NodeBalancerConfigRebuildNodeOptions `json:"nodes"` +} + +// NodeBalancerConfigRebuildNodeOptions represents a node defined when rebuilding a +// NodeBalancer config. 
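The three control-plane ACL methods added above follow a get/update/delete shape. A usage sketch with a hypothetical cluster ID and CIDR, reading the current ACL and then restricting IPv4 access:

```go
package main

import (
	"context"
	"fmt"

	"github.com/linode/linodego"
)

// restrictControlPlaneACL reads the current ACL, then allows only one
// IPv4 range. clusterID and the CIDR are placeholder values.
func restrictControlPlaneACL(ctx context.Context, client *linodego.Client, clusterID int) error {
	current, err := client.GetLKEClusterControlPlaneACL(ctx, clusterID)
	if err != nil {
		return err
	}
	fmt.Printf("ACL currently enabled: %v\n", current.ACL.Enabled)

	enabled := true
	_, err = client.UpdateLKEClusterControlPlaneACL(ctx, clusterID,
		linodego.LKEClusterControlPlaneACLUpdateOptions{
			ACL: linodego.LKEClusterControlPlaneACLOptions{
				Enabled: &enabled,
				Addresses: &linodego.LKEClusterControlPlaneACLAddressesOptions{
					IPv4: &[]string{"203.0.113.0/24"},
				},
			},
		})
	return err
}
```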
+type NodeBalancerConfigRebuildNodeOptions struct { + NodeBalancerNodeCreateOptions + + ID int `json:"id,omitempty"` } // NodeBalancerConfigUpdateOptions are permitted by UpdateNodeBalancerConfig @@ -201,7 +209,7 @@ func (i NodeBalancerConfig) GetRebuildOptions() NodeBalancerConfigRebuildOptions CipherSuite: i.CipherSuite, SSLCert: i.SSLCert, SSLKey: i.SSLKey, - Nodes: make([]NodeBalancerNodeCreateOptions, 0), + Nodes: make([]NodeBalancerConfigRebuildNodeOptions, 0), } } diff --git a/vendor/github.com/linode/linodego/regions.go b/vendor/github.com/linode/linodego/regions.go index f4210383e..b68053b35 100644 --- a/vendor/github.com/linode/linodego/regions.go +++ b/vendor/github.com/linode/linodego/regions.go @@ -9,18 +9,54 @@ import ( "github.com/go-resty/resty/v2" ) +// This is an enumeration of Capabilities Linode offers that can be referenced +// through the user-facing parts of the application. +// Defined as strings rather than a custom type to avoid breaking change. +// Can be changed in the potential v2 version. +const ( + Linodes string = "Linodes" + NodeBalancers string = "NodeBalancers" + BlockStorage string = "Block Storage" + ObjectStorage string = "Object Storage" + ObjectStorageRegions string = "Object Storage Access Key Regions" + LKE string = "Kubernetes" + LkeHaControlPlanes string = "LKE HA Control Planes" + CloudFirewall string = "Cloud Firewall" + GPU string = "GPU Linodes" + Vlans string = "Vlans" + VPCs string = "VPCs" + VPCsExtra string = "VPCs Extra" + MachineImages string = "Machine Images" + BareMetal string = "Bare Metal" + DBAAS string = "Managed Databases" + BlockStorageMigrations string = "Block Storage Migrations" + Metadata string = "Metadata" + PremiumPlans string = "Premium Plans" + EdgePlans string = "Edge Plans" + LKEControlPlaneACL string = "LKE Network Access Control List (IP ACL)" + ACLB string = "Akamai Cloud Load Balancer" + SupportTicketSeverity string = "Support Ticket Severity" + Backups string = "Backups" + PlacementGroup string = "Placement Group" + DiskEncryption string = "Disk Encryption" +) + // Region-related endpoints have a custom expiry time as the // `status` field may update for database outages. var cacheExpiryTime = time.Minute // Region represents a linode region object type Region struct { - ID string `json:"id"` - Country string `json:"country"` - Capabilities []string `json:"capabilities"` - Status string `json:"status"` - Resolvers RegionResolvers `json:"resolvers"` - Label string `json:"label"` + ID string `json:"id"` + Country string `json:"country"` + + // A List of enums from the above constants + Capabilities []string `json:"capabilities"` + + Status string `json:"status"` + Resolvers RegionResolvers `json:"resolvers"` + Label string `json:"label"` + SiteType string `json:"site_type"` } // RegionResolvers contains the DNS resolvers of a region diff --git a/vendor/github.com/linode/linodego/request_helpers.go b/vendor/github.com/linode/linodego/request_helpers.go new file mode 100644 index 000000000..49b5dc401 --- /dev/null +++ b/vendor/github.com/linode/linodego/request_helpers.go @@ -0,0 +1,209 @@ +package linodego + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "strconv" +) + +// paginatedResponse represents a single response from a paginated +// endpoint. 
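Since `Region.Capabilities` stays a plain `[]string`, the new capability constants are mainly useful as comparison values when filtering regions. A small sketch using the existing `ListRegions` call:

```go
package main

import (
	"context"

	"github.com/linode/linodego"
)

// regionsWithDiskEncryption returns the IDs of regions advertising the
// Disk Encryption capability, matched against the new constant.
func regionsWithDiskEncryption(ctx context.Context, client *linodego.Client) ([]string, error) {
	regions, err := client.ListRegions(ctx, nil)
	if err != nil {
		return nil, err
	}
	var ids []string
	for _, region := range regions {
		for _, capability := range region.Capabilities {
			if capability == linodego.DiskEncryption {
				ids = append(ids, region.ID)
				break
			}
		}
	}
	return ids, nil
}
```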
+type paginatedResponse[T any] struct {
+	Page    int `json:"page"    url:"page,omitempty"`
+	Pages   int `json:"pages"   url:"pages,omitempty"`
+	Results int `json:"results" url:"results,omitempty"`
+	Data    []T `json:"data"`
+}
+
+// getPaginatedResults aggregates results from the given
+// paginated endpoint using the provided ListOptions.
+// nolint:funlen
+func getPaginatedResults[T any](
+	ctx context.Context,
+	client *Client,
+	endpoint string,
+	opts *ListOptions,
+) ([]T, error) {
+	var resultType paginatedResponse[T]
+
+	result := make([]T, 0)
+
+	req := client.R(ctx).SetResult(resultType)
+
+	if opts == nil {
+		opts = &ListOptions{PageOptions: &PageOptions{Page: 0}}
+	}
+
+	if opts.PageOptions == nil {
+		opts.PageOptions = &PageOptions{Page: 0}
+	}
+
+	// Apply all user-provided list options to the base request
+	if err := applyListOptionsToRequest(opts, req); err != nil {
+		return nil, err
+	}
+
+	// Makes a request to a particular page and
+	// appends the response to the result
+	handlePage := func(page int) error {
+		req.SetQueryParam("page", strconv.Itoa(page))
+
+		res, err := coupleAPIErrors(req.Get(endpoint))
+		if err != nil {
+			return err
+		}
+
+		response := res.Result().(*paginatedResponse[T])
+
+		opts.Page = page
+		opts.Pages = response.Pages
+		opts.Results = response.Results
+
+		result = append(result, response.Data...)
+		return nil
+	}
+
+	// This helps simplify the logic below
+	startingPage := 1
+	pageDefined := opts.Page > 0
+
+	if pageDefined {
+		startingPage = opts.Page
+	}
+
+	// Get the first page
+	if err := handlePage(startingPage); err != nil {
+		return nil, err
+	}
+
+	// If the user has explicitly specified a page, we don't
+	// need to get any other pages.
+	if pageDefined {
+		return result, nil
+	}
+
+	// Get the rest of the pages
+	for page := 2; page <= opts.Pages; page++ {
+		if err := handlePage(page); err != nil {
+			return nil, err
+		}
+	}
+
+	return result, nil
+}
+
+// doGETRequest runs a GET request using the given client and API endpoint,
+// and returns the result
+func doGETRequest[T any](
+	ctx context.Context,
+	client *Client,
+	endpoint string,
+) (*T, error) {
+	var resultType T
+
+	req := client.R(ctx).SetResult(&resultType)
+	r, err := coupleAPIErrors(req.Get(endpoint))
+	if err != nil {
+		return nil, err
+	}
+
+	return r.Result().(*T), nil
+}
+
+// doPOSTRequest runs a POST request using the given client, API endpoint,
+// and options/body.
+func doPOSTRequest[T, O any](
+	ctx context.Context,
+	client *Client,
+	endpoint string,
+	options ...O,
+) (*T, error) {
+	var resultType T
+
+	numOpts := len(options)
+
+	if numOpts > 1 {
+		return nil, fmt.Errorf("invalid number of options: %d", len(options))
+	}
+
+	req := client.R(ctx).SetResult(&resultType)
+
+	if numOpts > 0 {
+		body, err := json.Marshal(options[0])
+		if err != nil {
+			return nil, err
+		}
+
+		req.SetBody(string(body))
+	}
+
+	r, err := coupleAPIErrors(req.Post(endpoint))
+	if err != nil {
+		return nil, err
+	}
+
+	return r.Result().(*T), nil
+}
+
+// doPUTRequest runs a PUT request using the given client, API endpoint,
+// and options/body.
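These unexported generics are the building blocks the rest of this diff leans on (see `simpleInstanceAction` and the LKE ACL methods above). Inside the `linodego` package, a typed endpoint wrapper reduces to a one-liner; `Widget` and its endpoint are hypothetical, and `formatAPIPath` (added just below) path-escapes string arguments:

```go
// Within package linodego. Widget is illustrative only; the pattern
// matches the real wrappers elsewhere in this diff.
type Widget struct {
	ID    int    `json:"id"`
	Label string `json:"label"`
}

func (c *Client) getWidget(ctx context.Context, widgetID int) (*Widget, error) {
	// doGETRequest decodes the JSON body into Widget and couples API errors.
	return doGETRequest[Widget](ctx, c, formatAPIPath("widgets/%d", widgetID))
}
```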
+func doPUTRequest[T, O any]( + ctx context.Context, + client *Client, + endpoint string, + options ...O, +) (*T, error) { + var resultType T + + numOpts := len(options) + + if numOpts > 1 { + return nil, fmt.Errorf("invalid number of options: %d", len(options)) + } + + req := client.R(ctx).SetResult(&resultType) + + if numOpts > 0 { + body, err := json.Marshal(options[0]) + if err != nil { + return nil, err + } + + req.SetBody(string(body)) + } + + r, err := coupleAPIErrors(req.Put(endpoint)) + if err != nil { + return nil, err + } + + return r.Result().(*T), nil +} + +// doDELETERequest runs a DELETE request using the given client +// and API endpoint. +func doDELETERequest( + ctx context.Context, + client *Client, + endpoint string, +) error { + req := client.R(ctx) + _, err := coupleAPIErrors(req.Delete(endpoint)) + return err +} + +// formatAPIPath allows us to safely build an API request with path escaping +func formatAPIPath(format string, args ...any) string { + escapedArgs := make([]any, len(args)) + for i, arg := range args { + if typeStr, ok := arg.(string); ok { + arg = url.PathEscape(typeStr) + } + + escapedArgs[i] = arg + } + + return fmt.Sprintf(format, escapedArgs...) +} diff --git a/vendor/github.com/linode/linodego/retries.go b/vendor/github.com/linode/linodego/retries.go index 1474242ec..148864420 100644 --- a/vendor/github.com/linode/linodego/retries.go +++ b/vendor/github.com/linode/linodego/retries.go @@ -14,6 +14,8 @@ import ( const ( retryAfterHeaderName = "Retry-After" maintenanceModeHeaderName = "X-Maintenance-Mode" + + defaultRetryCount = 1000 ) // type RetryConditional func(r *resty.Response) (shouldRetry bool) @@ -27,7 +29,7 @@ type RetryAfter resty.RetryAfterFunc // If the Retry-After header is not set, we fall back to value of SetPollDelay. func configureRetries(c *Client) { c.resty. - SetRetryCount(1000). + SetRetryCount(defaultRetryCount). AddRetryCondition(checkRetryConditionals(c)). SetRetryAfter(respectRetryAfter) } diff --git a/vendor/github.com/linode/linodego/vpc_ips.go b/vendor/github.com/linode/linodego/vpc_ips.go new file mode 100644 index 000000000..7ccef887e --- /dev/null +++ b/vendor/github.com/linode/linodego/vpc_ips.go @@ -0,0 +1,20 @@ +package linodego + +import ( + "context" + "fmt" +) + +// ListAllVPCIPAddresses gets the list of all IP addresses of all VPCs in the Linode account. +func (c *Client) ListAllVPCIPAddresses( + ctx context.Context, opts *ListOptions, +) ([]VPCIP, error) { + return getPaginatedResults[VPCIP](ctx, c, "vpcs/ips", opts) +} + +// ListVPCIPAddresses gets the list of all IP addresses of a specific VPC. 
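The new VPC IP endpoints are thin wrappers over `getPaginatedResults`, so the pagination contract above applies: nil options (or page 0) aggregate every page, while an explicit page fetches only that page. A sketch, assuming a configured client:

```go
package main

import (
	"context"
	"fmt"

	"github.com/linode/linodego"
)

// dumpVPCIPs prints every VPC IP in the account. Passing nil options makes
// getPaginatedResults walk all pages; linodego.NewListOptions(2, "") would
// fetch only page 2 instead.
func dumpVPCIPs(ctx context.Context, client *linodego.Client) error {
	ips, err := client.ListAllVPCIPAddresses(ctx, nil)
	if err != nil {
		return err
	}
	for _, ip := range ips {
		if ip.Address != nil {
			fmt.Printf("linode %d: %s (vpc %d, subnet %d)\n",
				ip.LinodeID, *ip.Address, ip.VPCID, ip.SubnetID)
		}
	}
	return nil
}
```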
+func (c *Client) ListVPCIPAddresses( + ctx context.Context, vpcID int, opts *ListOptions, +) ([]VPCIP, error) { + return getPaginatedResults[VPCIP](ctx, c, fmt.Sprintf("vpcs/%d/ips", vpcID), opts) +} diff --git a/vendor/github.com/lufia/plan9stats/README.md b/vendor/github.com/lufia/plan9stats/README.md index a21700c0c..04bdcef73 100644 --- a/vendor/github.com/lufia/plan9stats/README.md +++ b/vendor/github.com/lufia/plan9stats/README.md @@ -1,2 +1,13 @@ # plan9stats A module for retrieving statistics of Plan 9 + +[![GoDev][godev-image]][godev-url] +[![Actions Status][actions-image]][actions-url] +[![Coverage Status][coveralls-image]][coveralls-url] + +[godev-image]: https://pkg.go.dev/badge/github.com/lufia/plan9stats +[godev-url]: https://pkg.go.dev/github.com/lufia/plan9stats +[actions-image]: https://github.com/lufia/plan9stats/workflows/Test/badge.svg?branch=main +[actions-url]: https://github.com/lufia/plan9stats/actions?workflow=Test +[coveralls-image]: https://coveralls.io/repos/github/lufia/plan9stats/badge.svg +[coveralls-url]: https://coveralls.io/github/lufia/plan9stats diff --git a/vendor/github.com/lufia/plan9stats/cpu.go b/vendor/github.com/lufia/plan9stats/cpu.go index a101b9119..eaff362c3 100644 --- a/vendor/github.com/lufia/plan9stats/cpu.go +++ b/vendor/github.com/lufia/plan9stats/cpu.go @@ -178,9 +178,12 @@ func ReadCPUStats(ctx context.Context, opts ...Option) (*CPUStats, error) { var up uint32parser pids := make([]uint32, len(names)) for i, s := range names { + if s == "trace" { + continue + } pids[i] = up.Parse(s) } - if up.err != nil { + if err := up.err; err != nil { return nil, err } sort.Slice(pids, func(i, j int) bool { diff --git a/vendor/github.com/lufia/plan9stats/disk.go b/vendor/github.com/lufia/plan9stats/disk.go new file mode 100644 index 000000000..4a4fa0cd9 --- /dev/null +++ b/vendor/github.com/lufia/plan9stats/disk.go @@ -0,0 +1,116 @@ +package stats + +import ( + "bufio" + "bytes" + "context" + "os" + "path/filepath" + "strings" +) + +// Storage represents /dev/sdXX/ctl. +type Storage struct { + Name string + Model string + Capacity int64 + Partitions []*Partition +} + +// Partition represents a part of /dev/sdXX/ctl. +type Partition struct { + Name string + Start uint64 + End uint64 +} + +func ReadStorages(ctx context.Context, opts ...Option) ([]*Storage, error) { + cfg := newConfig(opts...) 
+ sdctl := filepath.Join(cfg.rootdir, "/dev/sdctl") + f, err := os.Open(sdctl) + if err != nil { + return nil, err + } + defer f.Close() + + var a []*Storage + scanner := bufio.NewScanner(f) + for scanner.Scan() { + fields := bytes.Split(scanner.Bytes(), delim) + if len(fields) == 0 { + continue + } + exp := string(fields[0]) + "*" + if !strings.HasPrefix(exp, "sd") { + continue + } + dir := filepath.Join(cfg.rootdir, "/dev", exp) + m, err := filepath.Glob(dir) + if err != nil { + return nil, err + } + for _, dir := range m { + s, err := readStorage(dir) + if err != nil { + return nil, err + } + a = append(a, s) + } + } + if err := scanner.Err(); err != nil { + return nil, err + } + return a, nil +} + +func readStorage(dir string) (*Storage, error) { + ctl := filepath.Join(dir, "ctl") + f, err := os.Open(ctl) + if err != nil { + return nil, err + } + defer f.Close() + + var s Storage + s.Name = filepath.Base(dir) + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Bytes() + switch { + case bytes.HasPrefix(line, []byte("inquiry ")): + s.Model = string(bytes.TrimSpace(line[7:])) + case bytes.HasPrefix(line, []byte("geometry ")): + fields := bytes.Split(line, delim) + if len(fields) < 3 { + continue + } + var p intParser + sec := p.ParseInt64(string(fields[1]), 10) + size := p.ParseInt64(string(fields[2]), 10) + if err := p.Err(); err != nil { + return nil, err + } + s.Capacity = sec * size + case bytes.HasPrefix(line, []byte("part ")): + fields := bytes.Split(line, delim) + if len(fields) < 4 { + continue + } + var p intParser + start := p.ParseUint64(string(fields[2]), 10) + end := p.ParseUint64(string(fields[3]), 10) + if err := p.Err(); err != nil { + return nil, err + } + s.Partitions = append(s.Partitions, &Partition{ + Name: string(fields[1]), + Start: start, + End: end, + }) + } + } + if err := scanner.Err(); err != nil { + return nil, err + } + return &s, nil +} diff --git a/vendor/github.com/lufia/plan9stats/host.go b/vendor/github.com/lufia/plan9stats/host.go index 957e90348..a3921c0e3 100644 --- a/vendor/github.com/lufia/plan9stats/host.go +++ b/vendor/github.com/lufia/plan9stats/host.go @@ -109,12 +109,6 @@ func parseGauge(s string, r *Gauge) error { return nil } -type Storage struct { - Name string - Model string - Capacity int64 -} - type Interface struct { Name string Addr string @@ -177,7 +171,7 @@ func ReadHost(ctx context.Context, opts ...Option) (*Host, error) { } h.Sysname = name - a, err := readStorages(cfg.rootdir) + a, err := ReadStorages(ctx, opts...) 
if err != nil { return nil, err } @@ -203,80 +197,6 @@ func readSysname(rootdir string) (string, error) { return string(bytes.TrimSpace(b)), nil } -func readStorages(rootdir string) ([]*Storage, error) { - sdctl := filepath.Join(rootdir, "/dev/sdctl") - f, err := os.Open(sdctl) - if err != nil { - return nil, err - } - defer f.Close() - - var a []*Storage - scanner := bufio.NewScanner(f) - for scanner.Scan() { - fields := bytes.Split(scanner.Bytes(), delim) - if len(fields) == 0 { - continue - } - exp := string(fields[0]) + "*" - if !strings.HasPrefix(exp, "sd") { - continue - } - dir := filepath.Join(rootdir, "/dev", exp) - m, err := filepath.Glob(dir) - if err != nil { - return nil, err - } - for _, dir := range m { - s, err := readStorage(dir) - if err != nil { - return nil, err - } - a = append(a, s) - } - } - if err := scanner.Err(); err != nil { - return nil, err - } - return a, nil -} - -func readStorage(dir string) (*Storage, error) { - ctl := filepath.Join(dir, "ctl") - f, err := os.Open(ctl) - if err != nil { - return nil, err - } - defer f.Close() - - var s Storage - s.Name = filepath.Base(dir) - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Bytes() - switch { - case bytes.HasPrefix(line, []byte("inquiry")): - s.Model = string(bytes.TrimSpace(line[7:])) - case bytes.HasPrefix(line, []byte("geometry")): - fields := bytes.Split(line, delim) - if len(fields) < 3 { - continue - } - var p intParser - sec := p.ParseInt64(string(fields[1]), 10) - size := p.ParseInt64(string(fields[2]), 10) - if err := p.Err(); err != nil { - return nil, err - } - s.Capacity = sec * size - } - } - if err := scanner.Err(); err != nil { - return nil, err - } - return &s, nil -} - type IPStats struct { ID int // number of interface in ipifc dir Device string // associated physical device diff --git a/vendor/github.com/lufia/plan9stats/int.go b/vendor/github.com/lufia/plan9stats/int.go index db133c43e..e3c9dc834 100644 --- a/vendor/github.com/lufia/plan9stats/int.go +++ b/vendor/github.com/lufia/plan9stats/int.go @@ -26,6 +26,15 @@ func (p *intParser) ParseInt64(s string, base int) int64 { return n } +func (p *intParser) ParseUint64(s string, base int) uint64 { + if p.err != nil { + return 0 + } + var n uint64 + n, p.err = strconv.ParseUint(s, base, 64) + return n +} + func (p *intParser) Err() error { return p.err } diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index d569c0c94..d0ea68f40 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -1,6 +1,7 @@ -//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine && !tinygo // +build darwin freebsd openbsd netbsd dragonfly hurd // +build !appengine +// +build !tinygo package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go index 31503226f..7402e0618 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_others.go +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -1,5 +1,6 @@ -//go:build appengine || js || nacl || wasm -// +build appengine js nacl wasm +//go:build (appengine 
|| js || nacl || tinygo || wasm) && !windows +// +build appengine js nacl tinygo wasm +// +build !windows package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go index 67787657f..0337d8cf6 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -1,6 +1,7 @@ -//go:build (linux || aix || zos) && !appengine +//go:build (linux || aix || zos) && !appengine && !tinygo // +build linux aix zos // +build !appengine +// +build !tinygo package isatty diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index e57d86afe..58275db3b 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -83,6 +83,8 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://github.com/egbakou/domainverifier * https://github.com/semihalev/sdns * https://github.com/wintbiit/NineDNS +* https://linuxcontainers.org/incus/ +* https://ifconfig.es Send pull request if you want to be listed here. diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go index 02d9199a4..68e766c68 100644 --- a/vendor/github.com/miekg/dns/defaults.go +++ b/vendor/github.com/miekg/dns/defaults.go @@ -198,10 +198,12 @@ func IsDomainName(s string) (labels int, ok bool) { off int begin int wasDot bool + escape bool ) for i := 0; i < len(s); i++ { switch s[i] { case '\\': + escape = !escape if off+1 > lenmsg { return labels, false } @@ -217,6 +219,7 @@ func IsDomainName(s string) (labels int, ok bool) { wasDot = false case '.': + escape = false if i == 0 && len(s) > 1 { // leading dots are not legal except for the root zone return labels, false @@ -243,10 +246,13 @@ func IsDomainName(s string) (labels int, ok bool) { labels++ begin = i + 1 default: + escape = false wasDot = false } } - + if escape { + return labels, false + } return labels, true } diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go index 8294d0395..5fa7f9e83 100644 --- a/vendor/github.com/miekg/dns/msg.go +++ b/vendor/github.com/miekg/dns/msg.go @@ -714,7 +714,7 @@ func (h *MsgHdr) String() string { return s } -// Pack packs a Msg: it is converted to to wire format. +// Pack packs a Msg: it is converted to wire format. // If the dns.Compress is true the message will be in compressed wire format. func (dns *Msg) Pack() (msg []byte, err error) { return dns.PackBuffer(nil) diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go index 1f92ae421..e26e8027a 100644 --- a/vendor/github.com/miekg/dns/scan.go +++ b/vendor/github.com/miekg/dns/scan.go @@ -101,12 +101,13 @@ type ttlState struct { isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive } -// NewRR reads the RR contained in the string s. Only the first RR is returned. +// NewRR reads a string s and returns the first RR. // If s contains no records, NewRR will return nil with no error. 
// -// The class defaults to IN and TTL defaults to 3600. The full zone file syntax -// like $TTL, $ORIGIN, etc. is supported. All fields of the returned RR are -// set, except RR.Header().Rdlength which is set to 0. +// The class defaults to IN, TTL defaults to 3600, and +// origin for resolving relative domain names defaults to the DNS root (.). +// Full zone file syntax is supported, including directives like $TTL and $ORIGIN. +// All fields of the returned RR are set from the read data, except RR.Header().Rdlength which is set to 0. func NewRR(s string) (RR, error) { if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline return ReadRR(strings.NewReader(s+"\n"), "") @@ -1282,7 +1283,7 @@ func stringToCm(token string) (e, m uint8, ok bool) { cmeters *= 10 } } - // This slighly ugly condition will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm). + // This slightly ugly condition will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm). if !hasCM || mStr != "" { meters, err = strconv.Atoi(mStr) // RFC1876 states the max value is 90000000.00. The latter two conditions enforce it. diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index 1a90c61f8..7d1ade7d8 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -51,25 +51,21 @@ func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) { switch l.value { case zString: empty = false - if len(l.token) > 255 { - // split up tokens that are larger than 255 into 255-chunks - sx := []string{} - p, i := 0, 255 - for { - if i <= len(l.token) { - sx = append(sx, l.token[p:i]) - } else { - sx = append(sx, l.token[p:]) - break - - } - p, i = p+255, i+255 + // split up tokens that are larger than 255 into 255-chunks + sx := []string{} + p := 0 + for { + i := escapedStringOffset(l.token[p:], 255) + if i != -1 && p+i != len(l.token) { + sx = append(sx, l.token[p:p+i]) + } else { + sx = append(sx, l.token[p:]) + break + } - s = append(s, sx...) - break + p += i } - - s = append(s, l.token) + s = append(s, sx...) case zBlank: if quote { // zBlank can only be seen in between txt parts. @@ -1920,3 +1916,32 @@ func (rr *APL) parse(c *zlexer, o string) *ParseError { rr.Prefixes = prefixes return nil } + +// escapedStringOffset finds the offset within a string (which may contain escape +// sequences) that corresponds to a certain byte offset. If the input offset is +// out of bounds, -1 is returned. +func escapedStringOffset(s string, byteOffset int) int { + if byteOffset == 0 { + return 0 + } + + offset := 0 + for i := 0; i < len(s); i++ { + offset += 1 + + // Skip escape sequences + if s[i] != '\\' { + // Not an escape sequence; nothing to do. + } else if isDDD(s[i+1:]) { + i += 3 + } else { + i++ + } + + if offset >= byteOffset { + return i + 1 + } + } + + return -1 +} diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go index 05b3c5add..2187c456d 100644 --- a/vendor/github.com/miekg/dns/xfr.go +++ b/vendor/github.com/miekg/dns/xfr.go @@ -1,6 +1,7 @@ package dns import ( + "crypto/tls" "fmt" "time" ) @@ -20,6 +21,7 @@ type Transfer struct { TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations. 
TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) tsigTimersOnly bool + TLS *tls.Config // TLS config. If Xfr over TLS will be attempted } func (t *Transfer) tsigProvider() TsigProvider { @@ -57,7 +59,11 @@ func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { } if t.Conn == nil { - t.Conn, err = DialTimeout("tcp", a, timeout) + if t.TLS != nil { + t.Conn, err = DialTimeoutWithTLS("tcp-tls", a, t.TLS, timeout) + } else { + t.Conn, err = DialTimeout("tcp", a, timeout) + } if err != nil { return nil, err } @@ -182,7 +188,7 @@ func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { if v, ok := rr.(*SOA); ok { if v.Serial == serial { n++ - // quit if it's a full axfr or the the servers' SOA is repeated the third time + // quit if it's a full axfr or the servers' SOA is repeated the third time if axfr && n == 2 || n == 3 { c <- &Envelope{in.Answer, nil} return diff --git a/vendor/github.com/moby/docker-image-spec/LICENSE b/vendor/github.com/moby/docker-image-spec/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/moby/docker-image-spec/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
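Stepping back to the miekg/dns change above: the new `Transfer.TLS` field lets `Transfer.In` dial with `DialTimeoutWithTLS` instead of plain TCP. A hedged sketch of an AXFR over TLS; the server address and zone are placeholders:

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/miekg/dns"
)

// axfrOverTLS performs a zone transfer over TLS using the new Transfer.TLS
// field. Zone and server values are illustrative only.
func axfrOverTLS() error {
	t := &dns.Transfer{
		TLS: &tls.Config{ServerName: "ns1.example.com"},
	}
	m := new(dns.Msg)
	m.SetAxfr("example.com.")

	env, err := t.In(m, "ns1.example.com:853")
	if err != nil {
		return err
	}
	for e := range env {
		if e.Error != nil {
			return e.Error
		}
		for _, rr := range e.RR {
			fmt.Println(rr)
		}
	}
	return nil
}
```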
diff --git a/vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go b/vendor/github.com/moby/docker-image-spec/specs-go/v1/image.go similarity index 100% rename from vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go rename to vendor/github.com/moby/docker-image-spec/specs-go/v1/image.go diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/README.md index 95af41b7c..278868f08 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/README.md @@ -2,13 +2,12 @@ | Status | | | ------------- |-----------| -| Distributions | [contrib], [sumo] | +| Distributions | [contrib] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aconnector%2Fcount%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aconnector%2Fcount) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aconnector%2Fcount%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aconnector%2Fcount) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@djaglowski](https://www.github.com/djaglowski), [@jpkrohling](https://www.github.com/jpkrohling) | [development]: https://github.com/open-telemetry/opentelemetry-collector#development [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib -[sumo]: https://github.com/SumoLogic/sumologic-otel-collector ## Supported Pipeline Types @@ -101,7 +100,7 @@ connectors: If attributes are specified for custom metrics, a separate count will be generated for each unique set of attribute values. Each count will be emitted as a data point on the same metric. -Optionally, include a `default_value` for an attribute, to count data that does not contain the attribute. +Optionally, include a `default_value` for an attribute, to count data that does not contain the attribute. The `default_value` value can be of type string, integer, or float. 
```yaml receivers: diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/config.go index 998b32ceb..0c3ddfb68 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/config.go @@ -48,7 +48,7 @@ type MetricInfo struct { type AttributeConfig struct { Key string `mapstructure:"key"` - DefaultValue string `mapstructure:"default_value"` + DefaultValue any `mapstructure:"default_value"` } func (c *Config) Validate() error { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/counter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/counter.go index 52afc6db2..e08178e19 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/counter.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/counter.go @@ -41,9 +41,29 @@ func (c *counter[K]) update(ctx context.Context, attrs pcommon.Map, tCtx K) erro countAttrs := pcommon.NewMap() for _, attr := range md.attrs { if attrVal, ok := attrs.Get(attr.Key); ok { - countAttrs.PutStr(attr.Key, attrVal.Str()) - } else if attr.DefaultValue != "" { - countAttrs.PutStr(attr.Key, attr.DefaultValue) + switch typeAttr := attrVal.Type(); typeAttr { + case pcommon.ValueTypeInt: + countAttrs.PutInt(attr.Key, attrVal.Int()) + case pcommon.ValueTypeDouble: + countAttrs.PutDouble(attr.Key, attrVal.Double()) + default: + countAttrs.PutStr(attr.Key, attrVal.Str()) + } + } else if attr.DefaultValue != nil { + switch v := attr.DefaultValue.(type) { + case string: + if v != "" { + countAttrs.PutStr(attr.Key, v) + } + case int: + if v != 0 { + countAttrs.PutInt(attr.Key, int64(v)) + } + case float64: + if v != 0 { + countAttrs.PutDouble(attr.Key, float64(v)) + } + } } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/internal/metadata/generated_status.go index ea152f0aa..6bdacd9ef 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/internal/metadata/generated_status.go @@ -4,8 +4,6 @@ package metadata import ( "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" ) var ( @@ -17,11 +15,3 @@ const ( MetricsToMetricsStability = component.StabilityLevelDevelopment LogsToMetricsStability = component.StabilityLevelDevelopment ) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/countconnector") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/countconnector") -} diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/internal/metadata/generated_telemetry.go new file mode 100644 index 000000000..8a930ee16 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/internal/metadata/generated_telemetry.go @@ -0,0 +1,17 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("otelcol/countconnector") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("otelcol/countconnector") +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/metadata.yaml index b41ec7c0e..1bc57a1ff 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/metadata.yaml @@ -5,7 +5,7 @@ status: class: connector stability: development: [traces_to_metrics, metrics_to_metrics, logs_to_metrics] - distributions: [contrib, sumo] + distributions: [contrib] codeowners: active: [djaglowski, jpkrohling] diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/README.md index 0c31b7afd..27112d9ac 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/README.md @@ -13,19 +13,13 @@ | Status | | | ------------- |-----------| | Stability | [beta] | -| Distributions | [core], [contrib], [aws], [liatrio], [observiq], [redhat], [splunk], [sumo] | +| Distributions | [core], [contrib] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aextension%2Fhealthcheck%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aextension%2Fhealthcheck) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aextension%2Fhealthcheck%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aextension%2Fhealthcheck) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@jpkrohling](https://www.github.com/jpkrohling) | [beta]: 
https://github.com/open-telemetry/opentelemetry-collector#beta [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib -[aws]: https://github.com/aws-observability/aws-otel-collector -[liatrio]: https://github.com/liatrio/liatrio-otel-collector -[observiq]: https://github.com/observIQ/observiq-otel-collector -[redhat]: https://github.com/os-observability/redhat-opentelemetry-collector -[splunk]: https://github.com/signalfx/splunk-otel-collector -[sumo]: https://github.com/SumoLogic/sumologic-otel-collector Health Check extension enables an HTTP url that can be probed to check the diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/healthcheckextension.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/healthcheckextension.go index 55307b922..91ce98b5e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/healthcheckextension.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/healthcheckextension.go @@ -30,15 +30,15 @@ type healthCheckExtension struct { var _ extension.PipelineWatcher = (*healthCheckExtension)(nil) -func (hc *healthCheckExtension) Start(_ context.Context, host component.Host) error { +func (hc *healthCheckExtension) Start(ctx context.Context, host component.Host) error { hc.logger.Info("Starting health_check extension", zap.Any("config", hc.config)) - ln, err := hc.config.ToListener() + ln, err := hc.config.ToListener(ctx) if err != nil { return fmt.Errorf("failed to bind to address %s: %w", hc.config.Endpoint, err) } - hc.server, err = hc.config.ToServer(host, hc.settings, nil) + hc.server, err = hc.config.ToServer(ctx, host, hc.settings, nil) if err != nil { return err } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata/generated_status.go index 64a946c0e..dce6a5513 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata/generated_status.go @@ -4,8 +4,6 @@ package metadata import ( "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" ) var ( @@ -15,11 +13,3 @@ var ( const ( ExtensionStability = component.StabilityLevelBeta ) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/healthcheck") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/healthcheck") -} diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata/generated_telemetry.go new file mode 100644 index 000000000..e193abbb6 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata/generated_telemetry.go @@ -0,0 +1,17 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("otelcol/healthcheck") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("otelcol/healthcheck") +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/metadata.yaml index 3e97caa0e..1e200d112 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/metadata.yaml @@ -5,10 +5,15 @@ status: class: extension stability: beta: [extension] - distributions: [core, contrib, splunk, observiq, sumo, aws, redhat, liatrio] + distributions: [core, contrib] codeowners: active: [jpkrohling] tests: config: endpoint: localhost:0 + goleak: + ignore: + top: + # See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information. 
+ - "go.opencensus.io/stats/view.(*worker).start" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/README.md index 2bd541235..36fb14346 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/README.md @@ -4,15 +4,12 @@ | Status | | | ------------- |-----------| | Stability | [beta] | -| Distributions | [contrib], [observiq], [splunk], [sumo] | +| Distributions | [contrib] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aextension%2Ffilestorage%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aextension%2Ffilestorage) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aextension%2Ffilestorage%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aextension%2Ffilestorage) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@djaglowski](https://www.github.com/djaglowski) \| Seeking more code owners! | [beta]: https://github.com/open-telemetry/opentelemetry-collector#beta [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib -[observiq]: https://github.com/observIQ/observiq-otel-collector -[splunk]: https://github.com/signalfx/splunk-otel-collector -[sumo]: https://github.com/SumoLogic/sumologic-otel-collector The File Storage extension can persist state to the local file system. @@ -37,6 +34,10 @@ The default timeout is `1s`. `compaction.max_transaction_size` (default: 65536): defines maximum size of the compaction transaction. A value of zero will ignore transaction sizes. +`compaction.cleanup_on_start` (default: false) - specifies if removal of compaction temporary files is performed on start. +It will remove all temporary files in the compaction directory (those which start with `tempdb`), +temp files will be left if a previous run of the process is killed while compacting. + ### Rebound (online) compaction For rebound compaction, there are two additional parameters available: @@ -132,7 +133,36 @@ The schedule for this feature gate is: - Introduced in v0.87.0 (October 2023) as `alpha` - disabled by default. - Moved to `beta` in v0.92.0 (January 2024) - enabled by default. -- Moved to `stable` in April 2024 - cannot be disabled. -- Removed three releases after `stable`. +- Moved to `stable` in v0.99.0 (April 2024) - cannot be disabled. +- Removed in v0.102.0 (three releases after `stable`). 
[unicode_chars]: https://en.wikipedia.org/wiki/List_of_Unicode_characters + +## Troubleshooting + +_Currently, the File Storage extension uses [bbolt](https://github.com/etcd-io/bbolt) to store and read data on disk. The +following troubleshooting method works for bbolt-managed files. As such, there is no guarantee that this method will continue to work in the future, particularly if the extension switches away from bbolt._ + +When troubleshooting components that use the File Storage extension, it is sometimes helpful to read the raw contents of +files created by the extension for the component. The simplest way to read files +created by the File Storage extension is to use the strings utility ([Linux](https://linux.die.net/man/1/strings), +[Windows](https://learn.microsoft.com/en-us/sysinternals/downloads/strings)). + +For example, here are the contents of the file created by the File Storage extension when it's configured as the storage +for the `filelog` receiver. + +```sh +$ strings /tmp/otelcol/file_storage/filelogreceiver/receiver_filelog_ +default +file_input.knownFiles2 +{"Fingerprint":{"first_bytes":"MzEwNzkKMjE5Cg=="},"Offset":10,"FileAttributes":{"log.file.name":"1.log"},"HeaderFinalized":false,"FlushState":{"LastDataChange":"2024-03-20T18:16:18.164331-07:00","LastDataLength":0}} +{"Fingerprint":{"first_bytes":"MjQ0MDMK"},"Offset":6,"FileAttributes":{"log.file.name":"2.log"},"HeaderFinalized":false,"FlushState":{"LastDataChange":"2024-03-20T18:16:39.96429-07:00","LastDataLength":0}} +default +file_input.knownFiles2 +{"Fingerprint":{"first_bytes":"MzEwNzkKMjE5Cg=="},"Offset":10,"FileAttributes":{"log.file.name":"1.log"},"HeaderFinalized":false,"FlushState":{"LastDataChange":"2024-03-20T18:16:18.164331-07:00","LastDataLength":0}} +{"Fingerprint":{"first_bytes":"MjQ0MDMK"},"Offset":6,"FileAttributes":{"log.file.name":"2.log"},"HeaderFinalized":false,"FlushState":{"LastDataChange":"2024-03-20T18:16:39.96429-07:00","LastDataLength":0}} +default +file_input.knownFiles2 +{"Fingerprint":{"first_bytes":"MzEwNzkKMjE5Cg=="},"Offset":10,"FileAttributes":{"log.file.name":"1.log"},"HeaderFinalized":false,"FlushState":{"LastDataChange":"2024-03-20T18:16:18.164331-07:00","LastDataLength":0}} +{"Fingerprint":{"first_bytes":"MjQ0MDMK"},"Offset":6,"FileAttributes":{"log.file.name":"2.log"},"HeaderFinalized":false,"FlushState":{"LastDataChange":"2024-03-20T18:16:39.96429-07:00","LastDataLength":0}} +``` \ No newline at end of file diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/client.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/client.go index c8fca4ba0..b97cad73c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/client.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/client.go @@ -20,6 +20,8 @@ import ( var defaultBucket = []byte(`default`) const ( + TempDbPrefix = "tempdb" + elapsedKey = "elapsed" directoryKey = "directory" tempDirectoryKey = "tempDirectory" @@ -152,7 +154,7 @@ func (c *fileStorageClient) Compact(compactionDirectory string, timeout time.Dur var compactedDb *bbolt.DB // create temporary file in compactionDirectory - file, err = os.CreateTemp(compactionDirectory, "tempdb") + file, err = os.CreateTemp(compactionDirectory, TempDbPrefix) if err != nil { return 
err } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/config.go index d71bbe023..19e288a76 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/config.go @@ -45,6 +45,10 @@ type CompactionConfig struct { MaxTransactionSize int64 `mapstructure:"max_transaction_size,omitempty"` // CheckInterval specifies frequency of compaction check CheckInterval time.Duration `mapstructure:"check_interval,omitempty"` + // CleanupOnStart specifies whether removal of temporary files is performed on start. + // It will remove all files in the compaction directory whose names start with tempdb; + // such files can be left behind if a previous run of the process was killed while compacting. + CleanupOnStart bool `mapstructure:"cleanup_on_start,omitempty"` } func (cfg *Config) Validate() error { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/extension.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/extension.go index 2b74c5fa3..fdbf53315 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/extension.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/extension.go @@ -5,7 +5,9 @@ package filestorage // import "github.com/open-telemetry/opentelemetry-collector import ( "context" + "errors" "fmt" + "os" "path/filepath" "strings" @@ -18,10 +20,11 @@ import ( var replaceUnsafeCharactersFeatureGate = featuregate.GlobalRegistry().MustRegister( "extension.filestorage.replaceUnsafeCharacters", - featuregate.StageBeta, + featuregate.StageStable, featuregate.WithRegisterDescription("When enabled, characters that are not safe in file paths are replaced in component name using the extension.
For example, the data for component `filelog/logs/json` will be stored in file `receiver_filelog_logs~007Ejson` and not in `receiver_filelog_logs/json`."), featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/3148"), featuregate.WithRegisterFromVersion("v0.87.0"), + featuregate.WithRegisterToVersion("v0.102.0"), ) type localFileStorage struct { @@ -39,8 +42,11 @@ func newLocalFileStorage(logger *zap.Logger, config *Config) (extension.Extensio }, nil } -// Start does nothing +// Start runs cleanup if configured func (lfs *localFileStorage) Start(context.Context, component.Host) error { + if lfs.cfg.Compaction.CleanupOnStart { + return lfs.cleanup(lfs.cfg.Compaction.Directory) + } return nil } @@ -134,3 +140,30 @@ func isSafe(character rune) bool { } return false } + +// cleanup removes compaction temporary files left behind by a previously killed process +func (lfs *localFileStorage) cleanup(compactionDirectory string) error { + pattern := filepath.Join(compactionDirectory, fmt.Sprintf("%s*", TempDbPrefix)) + contents, err := filepath.Glob(pattern) + if err != nil { + lfs.logger.Info("cleanup error listing temporary files", + zap.Error(err)) + return err + } + + var errs []error + for _, item := range contents { + err = os.Remove(item) + if err == nil { + lfs.logger.Debug("cleanup", + zap.String("deletedFile", item)) + } else { + errs = append(errs, err) + } + } + if errs != nil { + lfs.logger.Info("cleanup errors", + zap.Error(errors.Join(errs...))) + } + return nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/factory.go index ef3e04e9d..18178c54a 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/factory.go @@ -45,6 +45,7 @@ func createDefaultConfig() component.Config { ReboundNeededThresholdMiB: defaultReboundNeededThresholdMib, ReboundTriggerThresholdMiB: defaultReboundTriggerThresholdMib, CheckInterval: defaultCompactionInterval, + CleanupOnStart: false, }, Timeout: time.Second, FSync: false, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/internal/metadata/generated_status.go index 7f4eae03a..f1831d98f 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/internal/metadata/generated_status.go @@ -4,8 +4,6 @@ package metadata import ( "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" ) var ( @@ -15,11 +13,3 @@ var ( const ( ExtensionStability = component.StabilityLevelBeta ) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/filestorage") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return
settings.TracerProvider.Tracer("otelcol/filestorage") -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/internal/metadata/generated_telemetry.go new file mode 100644 index 000000000..6a9852454 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/internal/metadata/generated_telemetry.go @@ -0,0 +1,17 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("otelcol/filestorage") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("otelcol/filestorage") +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/metadata.yaml index 3568a2776..6707480d4 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/metadata.yaml @@ -5,7 +5,7 @@ status: class: extension stability: beta: [extension] - distributions: [contrib, observiq, splunk, sumo] + distributions: [contrib] codeowners: active: [djaglowski] seeking_new: true diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil/client.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil/client.go index 116d2b0c7..613a12a43 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil/client.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil/client.go @@ -4,6 +4,7 @@ package ecsutil // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil" import ( + "context" "fmt" "io" "net/http" @@ -45,6 +46,7 @@ type defaultClientProvider struct { func (dcp *defaultClientProvider) BuildClient() (Client, error) { return defaultClient( + context.Background(), dcp.baseURL, dcp.clientSettings, dcp.host, @@ -53,12 +55,13 @@ func (dcp *defaultClientProvider) BuildClient() (Client, error) { } func defaultClient( + ctx context.Context, baseURL url.URL, clientSettings confighttp.ClientConfig, host component.Host, settings component.TelemetrySettings, ) (*clientImpl, error) { - client, err := clientSettings.ToClient(host, settings) + client, err := clientSettings.ToClient(ctx, host, settings) if err != nil { return nil, err } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go index ce6f38830..238d909f0 100644 --- 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go @@ -18,9 +18,9 @@ import ( const UseLocalHostAsDefaultHostID = "component.UseLocalHostAsDefaultHost" -// useLocalHostAsDefaultHostfeatureGate is the feature gate that controls whether +// UseLocalHostAsDefaultHostfeatureGate is the feature gate that controls whether // server-like receivers and extensions such as the OTLP receiver use localhost as the default host for their endpoints. -var useLocalHostAsDefaultHostfeatureGate = mustRegisterOrLoad( +var UseLocalHostAsDefaultHostfeatureGate = mustRegisterOrLoad( featuregate.GlobalRegistry(), UseLocalHostAsDefaultHostID, featuregate.StageAlpha, @@ -51,7 +51,7 @@ func mustRegisterOrLoad(reg *featuregate.Registry, id string, stage featuregate. // EndpointForPort gets the endpoint for a given port using localhost or 0.0.0.0 depending on the feature gate. func EndpointForPort(port int) string { host := "localhost" - if !useLocalHostAsDefaultHostfeatureGate.IsEnabled() { + if !UseLocalHostAsDefaultHostfeatureGate.IsEnabled() { host = "0.0.0.0" } return fmt.Sprintf("%s:%d", host, port) @@ -59,7 +59,7 @@ func EndpointForPort(port int) string { // LogAboutUseLocalHostAsDefault logs about the upcoming change from 0.0.0.0 to localhost on server-like components. func LogAboutUseLocalHostAsDefault(logger *zap.Logger) { - if !useLocalHostAsDefaultHostfeatureGate.IsEnabled() { + if !UseLocalHostAsDefaultHostfeatureGate.IsEnabled() { logger.Warn( "The default endpoints for all servers in components will change to use localhost instead of 0.0.0.0 in a future version. 
Use the feature gate to preview the new default.", zap.String("feature gate ID", UseLocalHostAsDefaultHostID), diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go index 29c520741..52170879a 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go @@ -86,10 +86,10 @@ var ctimeSubstitutes = map[string]string{ // %S - Second as a zero-padded decimal number (00, 01, ..., 59) // %L - Millisecond as a decimal number, zero-padded on the left (000, 001, ..., 999) // %f - Microsecond as a decimal number, zero-padded on the left (000000, ..., 999999) -// %s - Nanosecond as a decimal number, zero-padded on the left (000000, ..., 999999) +// %s - Nanosecond as a decimal number, zero-padded on the left (000000000, ..., 999999999) // %z - UTC offset in the form ±HHMM[SS[.ffffff]] or empty(+0000, -0400) // %Z - Timezone name or abbreviation or empty (UTC, EST, CST) -// %D, %x - Short MM/DD/YY date, equivalent to %m/%d/%y +// %D, %x - Short MM/DD/YYYY date, equivalent to %m/%d/%y // %F - Short YYYY-MM-DD date, equivalent to %Y-%m-%d // %T, %X - ISO 8601 time format (HH:MM:SS), equivalent to %H:%M:%S // %r - 12-hour clock time (02:55:02 pm) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go index f73e9f8e8..037b4888d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go @@ -25,6 +25,16 @@ func Not[K any](matcher BoolExpr[K]) BoolExpr[K] { return notMatcher[K]{matcher: matcher} } +type alwaysTrueMatcher[K any] struct{} + +func (alm alwaysTrueMatcher[K]) Eval(_ context.Context, _ K) (bool, error) { + return true, nil +} + +func AlwaysTrue[K any]() BoolExpr[K] { + return alwaysTrueMatcher[K]{} +} + type orMatcher[K any] struct { matchers []BoolExpr[K] } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go index 6324c8a35..e4dad6ee9 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go @@ -12,6 +12,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" +
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent" ) @@ -111,3 +112,19 @@ func NewBoolExprForResource(conditions []string, functions map[string]ottl.Facto c := ottlresource.NewConditionSequence(statements, set, ottlresource.WithConditionSequenceErrorMode(errorMode)) return &c, nil } + +// NewBoolExprForScope creates a BoolExpr[ottlscope.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. +// The passed in functions should use the ottlresource.TransformContext. +// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected +func NewBoolExprForScope(conditions []string, functions map[string]ottl.Factory[ottlscope.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlscope.TransformContext], error) { + parser, err := ottlscope.NewParser(functions, set) + if err != nil { + return nil, err + } + statements, err := parser.ParseConditions(conditions) + if err != nil { + return nil, err + } + c := ottlscope.NewConditionSequence(statements, set, ottlscope.WithConditionSequenceErrorMode(errorMode)) + return &c, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go index 355a148f6..c3ce56ce4 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go @@ -14,6 +14,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" @@ -40,6 +41,10 @@ func StandardDataPointFuncs() map[string]ottl.Factory[ottldatapoint.TransformCon return ottlfuncs.StandardConverters[ottldatapoint.TransformContext]() } +func StandardScopeFuncs() map[string]ottl.Factory[ottlscope.TransformContext] { + return ottlfuncs.StandardConverters[ottlscope.TransformContext]() +} + func StandardLogFuncs() map[string]ottl.Factory[ottllog.TransformContext] { return ottlfuncs.StandardConverters[ottllog.TransformContext]() } @@ -68,7 +73,7 @@ func createHasAttributeOnDatapointFunction(_ ottl.FunctionContext, oArgs ottl.Ar } func hasAttributeOnDatapoint(key string, expectedVal string) (ottl.ExprFunc[ottlmetric.TransformContext], error) { - return func(ctx context.Context, tCtx ottlmetric.TransformContext) (any, 
error) { + return func(_ context.Context, tCtx ottlmetric.TransformContext) (any, error) { return checkDataPoints(tCtx, key, &expectedVal) }, nil } @@ -92,7 +97,7 @@ func createHasAttributeKeyOnDatapointFunction(_ ottl.FunctionContext, oArgs ottl } func hasAttributeKeyOnDatapoint(key string) (ottl.ExprFunc[ottlmetric.TransformContext], error) { - return func(ctx context.Context, tCtx ottlmetric.TransformContext) (any, error) { + return func(_ context.Context, tCtx ottlmetric.TransformContext) (any, error) { return checkDataPoints(tCtx, key, nil) }, nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig/config.go index b1ef343d9..578a6fe07 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig/config.go @@ -4,16 +4,24 @@ package k8sconfig // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig" import ( + "context" "fmt" "net" "net/http" "os" + "time" quotaclientset "github.com/openshift/client-go/quota/clientset/versioned" + api_v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" k8sruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/dynamic" k8s "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" ) @@ -179,3 +187,25 @@ func MakeOpenShiftQuotaClient(apiConf APIConfig) (quotaclientset.Interface, erro return client, nil } + +func NewNodeSharedInformer(client k8s.Interface, nodeName string, watchSyncPeriod time.Duration) cache.SharedInformer { + informer := cache.NewSharedInformer( + &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + if nodeName != "" { + opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", nodeName).String() + } + return client.CoreV1().Nodes().List(context.Background(), opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + if nodeName != "" { + opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", nodeName).String() + } + return client.CoreV1().Nodes().Watch(context.Background(), opts) + }, + }, + &api_v1.Node{}, + watchSyncPeriod, + ) + return informer +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/azure/metadata.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/azure/metadata.go index b6241feab..54f73d3a2 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/azure/metadata.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/azure/metadata.go @@ -74,12 +74,13 @@ func (p *azureProviderImpl) Metadata(ctx context.Context) (*ComputeMetadata, err resp, err := p.client.Do(req) if err != nil { return nil, fmt.Errorf("failed to query Azure IMDS: %w", err) - } else if resp.StatusCode != 200 { + } + defer resp.Body.Close() + if resp.StatusCode != 200 { //lint:ignore ST1005 
Azure is a capitalized proper noun here return nil, fmt.Errorf("Azure IMDS replied with status code: %s", resp.Status) } - defer resp.Body.Close() respBody, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("failed to read Azure IMDS reply: %w", err) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md index 2c25f7460..b57b2de5c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md @@ -4,12 +4,12 @@ | ------------- |-----------| | Stability | [alpha]: traces, metrics, logs | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Apkg%2Fottl%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Apkg%2Fottl) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Apkg%2Fottl%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Apkg%2Fottl) | -| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@TylerHelmuth](https://www.github.com/TylerHelmuth), [@kentquirk](https://www.github.com/kentquirk), [@bogdandrutu](https://www.github.com/bogdandrutu), [@evan-bradley](https://www.github.com/evan-bradley) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@TylerHelmuth](https://www.github.com/TylerHelmuth), [@kentquirk](https://www.github.com/kentquirk), [@bogdandrutu](https://www.github.com/bogdandrutu), [@evan-bradley](https://www.github.com/evan-bradley) \| Seeking more code owners! | [alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha -The OpenTelemetry Transformation Language is a language for transforming open telemetry data based on the [OpenTelemetry Collector Processing Exploration](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/processing.md). +The OpenTelemetry Transformation Language is a language for transforming open telemetry data based on the [OpenTelemetry Collector Processing Exploration](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/rfcs/processing.md). This package reads in OTTL statements and converts them to invokable functions/booleans based on the OTTL's grammar. 
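The Azure IMDS hunk above moves `defer resp.Body.Close()` ahead of the status-code check: with the old ordering, a non-200 response returned early without ever closing the body, leaking the underlying connection. A minimal standalone sketch of the same pattern, using a hypothetical `fetchIMDS` helper and plain `net/http` rather than the vendored client:

```go
package imdsdemo

import (
	"fmt"
	"io"
	"net/http"
)

// fetchIMDS is a hypothetical helper illustrating the fix: Close is
// deferred immediately after the transport-level error check, so every
// later return path (including the status-code branch) releases the body.
func fetchIMDS(client *http.Client, url string) ([]byte, error) {
	resp, err := client.Get(url)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close() // runs on all paths below, unlike the pre-fix ordering
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return io.ReadAll(resp.Body)
}
```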
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go index 50d1109f0..0c20542f2 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go @@ -7,7 +7,6 @@ import ( "bytes" "time" - "go.uber.org/zap" "golang.org/x/exp/constraints" ) @@ -17,9 +16,7 @@ import ( // invalidComparison returns false for everything except ne (where it returns true to indicate that the // objects were definitely not equivalent). -// It also gives us an opportunity to log something. -func (p *Parser[K]) invalidComparison(msg string, op compareOp) bool { - p.telemetrySettings.Logger.Debug(msg, zap.Any("op", op)) +func (p *Parser[K]) invalidComparison(op compareOp) bool { return op == ne } @@ -87,7 +84,7 @@ func (p *Parser[K]) compareBool(a bool, b any, op compareOp) bool { case bool: return compareBools(a, v, op) default: - return p.invalidComparison("bool to non-bool", op) + return p.invalidComparison(op) } } @@ -96,7 +93,7 @@ func (p *Parser[K]) compareString(a string, b any, op compareOp) bool { case string: return comparePrimitives(a, v, op) default: - return p.invalidComparison("string to non-string", op) + return p.invalidComparison(op) } } @@ -110,7 +107,7 @@ func (p *Parser[K]) compareByte(a []byte, b any, op compareOp) bool { } return compareBytes(a, v, op) default: - return p.invalidComparison("Bytes to non-Bytes", op) + return p.invalidComparison(op) } } @@ -121,7 +118,7 @@ func (p *Parser[K]) compareInt64(a int64, b any, op compareOp) bool { case float64: return comparePrimitives(float64(a), v, op) default: - return p.invalidComparison("int to non-numeric value", op) + return p.invalidComparison(op) } } @@ -132,7 +129,7 @@ func (p *Parser[K]) compareFloat64(a float64, b any, op compareOp) bool { case float64: return comparePrimitives(a, v, op) default: - return p.invalidComparison("float to non-numeric value", op) + return p.invalidComparison(op) } } @@ -143,7 +140,7 @@ func (p *Parser[K]) compareDuration(a time.Duration, b any, op compareOp) bool { vnsecs := v.Nanoseconds() return comparePrimitives(ansecs, vnsecs, op) default: - return p.invalidComparison("cannot compare invalid duration", op) + return p.invalidComparison(op) } } @@ -164,10 +161,10 @@ func (p *Parser[K]) compareTime(a time.Time, b any, op compareOp) bool { case gt: return a.After(v) default: - return p.invalidComparison("invalid comparison operator", op) + return p.invalidComparison(op) } default: - return p.invalidComparison("time to non-time value", op) + return p.invalidComparison(op) } } @@ -211,7 +208,7 @@ func (p *Parser[K]) compare(a any, b any, op compareOp) bool { case ne: return a != b default: - return p.invalidComparison("unsupported type for inequality on left", op) + return p.invalidComparison(op) } } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/metric.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/metric.go index 97aa011d1..e2944a73d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/metric.go +++ 
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/metric.go @@ -53,10 +53,10 @@ func MetricPathGetSetter[K MetricContext](path ottl.Path[K]) (ottl.GetSetter[K], func accessMetric[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetMetric(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if newMetric, ok := val.(pmetric.Metric); ok { newMetric.CopyTo(tCtx.GetMetric()) } @@ -67,10 +67,10 @@ func accessMetric[K MetricContext]() ottl.StandardGetSetter[K] { func accessName[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetMetric().Name(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetMetric().SetName(str) } @@ -81,10 +81,10 @@ func accessName[K MetricContext]() ottl.StandardGetSetter[K] { func accessDescription[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetMetric().Description(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetMetric().SetDescription(str) } @@ -95,10 +95,10 @@ func accessDescription[K MetricContext]() ottl.StandardGetSetter[K] { func accessUnit[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetMetric().Unit(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetMetric().SetUnit(str) } @@ -109,10 +109,10 @@ func accessUnit[K MetricContext]() ottl.StandardGetSetter[K] { func accessType[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetMetric().Type()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, _ K, _ any) error { // TODO Implement methods so correctly convert data types. 
// https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/10130 return nil @@ -122,7 +122,7 @@ func accessType[K MetricContext]() ottl.StandardGetSetter[K] { func accessAggTemporality[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { metric := tCtx.GetMetric() switch metric.Type() { case pmetric.MetricTypeSum: @@ -134,7 +134,7 @@ func accessAggTemporality[K MetricContext]() ottl.StandardGetSetter[K] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if newAggTemporality, ok := val.(int64); ok { metric := tCtx.GetMetric() switch metric.Type() { @@ -153,14 +153,14 @@ func accessAggTemporality[K MetricContext]() ottl.StandardGetSetter[K] { func accessIsMonotonic[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { metric := tCtx.GetMetric() if metric.Type() == pmetric.MetricTypeSum { return metric.Sum().IsMonotonic(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if newIsMonotonic, ok := val.(bool); ok { metric := tCtx.GetMetric() if metric.Type() == pmetric.MetricTypeSum { @@ -174,7 +174,7 @@ func accessIsMonotonic[K MetricContext]() ottl.StandardGetSetter[K] { func accessDataPoints[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { metric := tCtx.GetMetric() switch metric.Type() { case pmetric.MetricTypeSum: @@ -190,7 +190,7 @@ func accessDataPoints[K MetricContext]() ottl.StandardGetSetter[K] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { metric := tCtx.GetMetric() switch metric.Type() { case pmetric.MetricTypeSum: diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/resource.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/resource.go index 3606f5d6b..6e86ae81d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/resource.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/resource.go @@ -34,10 +34,10 @@ func ResourcePathGetSetter[K ResourceContext](path ottl.Path[K]) (ottl.GetSetter func accessResource[K ResourceContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetResource(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if newRes, ok := val.(pcommon.Resource); ok { newRes.CopyTo(tCtx.GetResource()) } @@ -48,10 +48,10 @@ func accessResource[K ResourceContext]() ottl.StandardGetSetter[K] { func accessResourceAttributes[K ResourceContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, 
tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetResource().Attributes(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if attrs, ok := val.(pcommon.Map); ok { attrs.CopyTo(tCtx.GetResource().Attributes()) } @@ -73,10 +73,10 @@ func accessResourceAttributesKey[K ResourceContext](keys []ottl.Key[K]) ottl.Sta func accessResourceDroppedAttributesCount[K ResourceContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetResource().DroppedAttributesCount()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetResource().SetDroppedAttributesCount(uint32(i)) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/scope.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/scope.go index dcf85ae68..f207106c4 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/scope.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/scope.go @@ -39,10 +39,10 @@ func ScopePathGetSetter[K InstrumentationScopeContext](path ottl.Path[K]) (ottl. func accessInstrumentationScope[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetInstrumentationScope(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if newIl, ok := val.(pcommon.InstrumentationScope); ok { newIl.CopyTo(tCtx.GetInstrumentationScope()) } @@ -53,10 +53,10 @@ func accessInstrumentationScope[K InstrumentationScopeContext]() ottl.StandardGe func accessInstrumentationScopeAttributes[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetInstrumentationScope().Attributes(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if attrs, ok := val.(pcommon.Map); ok { attrs.CopyTo(tCtx.GetInstrumentationScope().Attributes()) } @@ -78,10 +78,10 @@ func accessInstrumentationScopeAttributesKey[K InstrumentationScopeContext](keys func accessInstrumentationScopeName[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetInstrumentationScope().Name(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetInstrumentationScope().SetName(str) } @@ -92,10 +92,10 @@ func accessInstrumentationScopeName[K InstrumentationScopeContext]() ottl.Standa func accessInstrumentationScopeVersion[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] { return 
ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetInstrumentationScope().Version(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetInstrumentationScope().SetVersion(str) } @@ -106,10 +106,10 @@ func accessInstrumentationScopeVersion[K InstrumentationScopeContext]() ottl.Sta func accessInstrumentationScopeDroppedAttributesCount[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetInstrumentationScope().DroppedAttributesCount()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetInstrumentationScope().SetDroppedAttributesCount(uint32(i)) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/span.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/span.go index e7270cbc0..607cb2e11 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/span.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/span.go @@ -134,10 +134,10 @@ func SpanPathGetSetter[K SpanContext](path ottl.Path[K]) (ottl.GetSetter[K], err func accessSpan[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if newSpan, ok := val.(ptrace.Span); ok { newSpan.CopyTo(tCtx.GetSpan()) } @@ -148,10 +148,10 @@ func accessSpan[K SpanContext]() ottl.StandardGetSetter[K] { func accessTraceID[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().TraceID(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if newTraceID, ok := val.(pcommon.TraceID); ok { tCtx.GetSpan().SetTraceID(newTraceID) } @@ -162,11 +162,11 @@ func accessTraceID[K SpanContext]() ottl.StandardGetSetter[K] { func accessStringTraceID[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { id := tCtx.GetSpan().TraceID() return hex.EncodeToString(id[:]), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { id, err := ParseTraceID(str) if err != nil { @@ -181,10 +181,10 @@ func accessStringTraceID[K SpanContext]() ottl.StandardGetSetter[K] { func accessSpanID[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, 
tCtx K) (any, error) { return tCtx.GetSpan().SpanID(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if newSpanID, ok := val.(pcommon.SpanID); ok { tCtx.GetSpan().SetSpanID(newSpanID) } @@ -195,11 +195,11 @@ func accessSpanID[K SpanContext]() ottl.StandardGetSetter[K] { func accessStringSpanID[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { id := tCtx.GetSpan().SpanID() return hex.EncodeToString(id[:]), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { id, err := ParseSpanID(str) if err != nil { @@ -214,10 +214,10 @@ func accessStringSpanID[K SpanContext]() ottl.StandardGetSetter[K] { func accessTraceState[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().TraceState().AsRaw(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetSpan().TraceState().FromRaw(str) } @@ -266,10 +266,10 @@ func accessTraceStateKey[K SpanContext](keys []ottl.Key[K]) (ottl.StandardGetSet func accessParentSpanID[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().ParentSpanID(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if newParentSpanID, ok := val.(pcommon.SpanID); ok { tCtx.GetSpan().SetParentSpanID(newParentSpanID) } @@ -280,11 +280,11 @@ func accessParentSpanID[K SpanContext]() ottl.StandardGetSetter[K] { func accessStringParentSpanID[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { id := tCtx.GetSpan().ParentSpanID() return hex.EncodeToString(id[:]), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { id, err := ParseSpanID(str) if err != nil { @@ -299,10 +299,10 @@ func accessStringParentSpanID[K SpanContext]() ottl.StandardGetSetter[K] { func accessSpanName[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().Name(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetSpan().SetName(str) } @@ -313,10 +313,10 @@ func accessSpanName[K SpanContext]() ottl.StandardGetSetter[K] { func accessKind[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetSpan().Kind()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val 
any) error { if i, ok := val.(int64); ok { tCtx.GetSpan().SetKind(ptrace.SpanKind(i)) } @@ -327,10 +327,10 @@ func accessKind[K SpanContext]() ottl.StandardGetSetter[K] { func accessStringKind[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().Kind().String(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if s, ok := val.(string); ok { var kind ptrace.SpanKind switch s { @@ -358,10 +358,10 @@ func accessStringKind[K SpanContext]() ottl.StandardGetSetter[K] { func accessDeprecatedStringKind[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return traceutil.SpanKindStr(tCtx.GetSpan().Kind()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if s, ok := val.(string); ok { var kind ptrace.SpanKind switch s { @@ -389,10 +389,10 @@ func accessDeprecatedStringKind[K SpanContext]() ottl.StandardGetSetter[K] { func accessStartTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().StartTimestamp().AsTime().UnixNano(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetSpan().SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i))) } @@ -403,10 +403,10 @@ func accessStartTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] { func accessEndTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().EndTimestamp().AsTime().UnixNano(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetSpan().SetEndTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i))) } @@ -417,10 +417,10 @@ func accessEndTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] { func accessStartTime[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().StartTimestamp().AsTime(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(time.Time); ok { tCtx.GetSpan().SetStartTimestamp(pcommon.NewTimestampFromTime(i)) } @@ -431,10 +431,10 @@ func accessStartTime[K SpanContext]() ottl.StandardGetSetter[K] { func accessEndTime[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().EndTimestamp().AsTime(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(time.Time); ok { 
tCtx.GetSpan().SetEndTimestamp(pcommon.NewTimestampFromTime(i)) } @@ -445,10 +445,10 @@ func accessEndTime[K SpanContext]() ottl.StandardGetSetter[K] { func accessAttributes[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().Attributes(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if attrs, ok := val.(pcommon.Map); ok { attrs.CopyTo(tCtx.GetSpan().Attributes()) } @@ -470,10 +470,10 @@ func accessAttributesKey[K SpanContext](keys []ottl.Key[K]) ottl.StandardGetSett func accessSpanDroppedAttributesCount[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetSpan().DroppedAttributesCount()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetSpan().SetDroppedAttributesCount(uint32(i)) } @@ -484,12 +484,12 @@ func accessSpanDroppedAttributesCount[K SpanContext]() ottl.StandardGetSetter[K] func accessEvents[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().Events(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if slc, ok := val.(ptrace.SpanEventSlice); ok { - tCtx.GetSpan().Events().RemoveIf(func(event ptrace.SpanEvent) bool { + tCtx.GetSpan().Events().RemoveIf(func(_ ptrace.SpanEvent) bool { return true }) slc.CopyTo(tCtx.GetSpan().Events()) @@ -501,10 +501,10 @@ func accessEvents[K SpanContext]() ottl.StandardGetSetter[K] { func accessDroppedEventsCount[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetSpan().DroppedEventsCount()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetSpan().SetDroppedEventsCount(uint32(i)) } @@ -515,12 +515,12 @@ func accessDroppedEventsCount[K SpanContext]() ottl.StandardGetSetter[K] { func accessLinks[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().Links(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if slc, ok := val.(ptrace.SpanLinkSlice); ok { - tCtx.GetSpan().Links().RemoveIf(func(event ptrace.SpanLink) bool { + tCtx.GetSpan().Links().RemoveIf(func(_ ptrace.SpanLink) bool { return true }) slc.CopyTo(tCtx.GetSpan().Links()) @@ -532,10 +532,10 @@ func accessLinks[K SpanContext]() ottl.StandardGetSetter[K] { func accessDroppedLinksCount[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetSpan().DroppedLinksCount()), 
nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetSpan().SetDroppedLinksCount(uint32(i)) } @@ -546,10 +546,10 @@ func accessDroppedLinksCount[K SpanContext]() ottl.StandardGetSetter[K] { func accessStatus[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().Status(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if status, ok := val.(ptrace.Status); ok { status.CopyTo(tCtx.GetSpan().Status()) } @@ -560,10 +560,10 @@ func accessStatus[K SpanContext]() ottl.StandardGetSetter[K] { func accessStatusCode[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetSpan().Status().Code()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetSpan().Status().SetCode(ptrace.StatusCode(i)) } @@ -574,10 +574,10 @@ func accessStatusCode[K SpanContext]() ottl.StandardGetSetter[K] { func accessStatusMessage[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().Status().Message(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetSpan().Status().SetMessage(str) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go index 37a39068d..6d55fa91a 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go @@ -227,10 +227,10 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } @@ -252,7 +252,7 @@ func accessCacheKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[Tra func accessAttributes() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Attributes(), 
nil @@ -265,7 +265,7 @@ func accessAttributes() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: if attrs, ok := val.(pcommon.Map); ok { @@ -322,7 +322,7 @@ func accessAttributesKey(key []ottl.Key[TransformContext]) ottl.StandardGetSette func accessStartTimeUnixNano() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: return tCtx.GetDataPoint().(pmetric.NumberDataPoint).StartTimestamp().AsTime().UnixNano(), nil @@ -335,7 +335,7 @@ func accessStartTimeUnixNano() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTime, ok := val.(int64); ok { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: @@ -355,7 +355,7 @@ func accessStartTimeUnixNano() ottl.StandardGetSetter[TransformContext] { func accessStartTime() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: return tCtx.GetDataPoint().(pmetric.NumberDataPoint).StartTimestamp().AsTime(), nil @@ -368,7 +368,7 @@ func accessStartTime() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTime, ok := val.(time.Time); ok { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: @@ -388,7 +388,7 @@ func accessStartTime() ottl.StandardGetSetter[TransformContext] { func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Timestamp().AsTime().UnixNano(), nil @@ -401,7 +401,7 @@ func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTime, ok := val.(int64); ok { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: @@ -421,7 +421,7 @@ func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] { func accessTime() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Timestamp().AsTime(), nil @@ -434,7 +434,7 @@ func accessTime() 
ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTime, ok := val.(time.Time); ok { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: @@ -454,13 +454,13 @@ func accessTime() ottl.StandardGetSetter[TransformContext] { func accessDoubleValue() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok { return numberDataPoint.DoubleValue(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newDouble, ok := val.(float64); ok { if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok { numberDataPoint.SetDoubleValue(newDouble) @@ -473,13 +473,13 @@ func accessDoubleValue() ottl.StandardGetSetter[TransformContext] { func accessIntValue() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok { return numberDataPoint.IntValue(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newInt, ok := val.(int64); ok { if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok { numberDataPoint.SetIntValue(newInt) @@ -492,7 +492,7 @@ func accessIntValue() ottl.StandardGetSetter[TransformContext] { func accessExemplars() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Exemplars(), nil @@ -503,7 +503,7 @@ func accessExemplars() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newExemplars, ok := val.(pmetric.ExemplarSlice); ok { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: @@ -521,7 +521,7 @@ func accessExemplars() ottl.StandardGetSetter[TransformContext] { func accessFlags() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: return int64(tCtx.GetDataPoint().(pmetric.NumberDataPoint).Flags()), nil @@ -534,7 +534,7 @@ func accessFlags() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newFlags, ok := val.(int64); ok { switch 
tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: @@ -554,7 +554,7 @@ func accessFlags() ottl.StandardGetSetter[TransformContext] { func accessCount() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.HistogramDataPoint: return int64(tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Count()), nil @@ -565,7 +565,7 @@ func accessCount() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newCount, ok := val.(int64); ok { switch tCtx.GetDataPoint().(type) { case pmetric.HistogramDataPoint: @@ -583,7 +583,7 @@ func accessCount() ottl.StandardGetSetter[TransformContext] { func accessSum() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.HistogramDataPoint: return tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Sum(), nil @@ -594,7 +594,7 @@ func accessSum() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newSum, ok := val.(float64); ok { switch tCtx.GetDataPoint().(type) { case pmetric.HistogramDataPoint: @@ -612,13 +612,13 @@ func accessSum() ottl.StandardGetSetter[TransformContext] { func accessExplicitBounds() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok { return histogramDataPoint.ExplicitBounds().AsRaw(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newExplicitBounds, ok := val.([]float64); ok { if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok { histogramDataPoint.ExplicitBounds().FromRaw(newExplicitBounds) @@ -631,13 +631,13 @@ func accessExplicitBounds() ottl.StandardGetSetter[TransformContext] { func accessBucketCounts() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok { return histogramDataPoint.BucketCounts().AsRaw(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newBucketCount, ok := val.([]uint64); ok { if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok { histogramDataPoint.BucketCounts().FromRaw(newBucketCount) @@ -650,13 +650,13 @@ func accessBucketCounts() ottl.StandardGetSetter[TransformContext] { func accessScale() 
ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return int64(expoHistogramDataPoint.Scale()), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newScale, ok := val.(int64); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { expoHistogramDataPoint.SetScale(int32(newScale)) @@ -669,13 +669,13 @@ func accessScale() ottl.StandardGetSetter[TransformContext] { func accessZeroCount() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return int64(expoHistogramDataPoint.ZeroCount()), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newZeroCount, ok := val.(int64); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { expoHistogramDataPoint.SetZeroCount(uint64(newZeroCount)) @@ -688,13 +688,13 @@ func accessZeroCount() ottl.StandardGetSetter[TransformContext] { func accessPositive() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return expoHistogramDataPoint.Positive(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newPositive, ok := val.(pmetric.ExponentialHistogramDataPointBuckets); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { newPositive.CopyTo(expoHistogramDataPoint.Positive()) @@ -707,13 +707,13 @@ func accessPositive() ottl.StandardGetSetter[TransformContext] { func accessPositiveOffset() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return int64(expoHistogramDataPoint.Positive().Offset()), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newPositiveOffset, ok := val.(int64); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { expoHistogramDataPoint.Positive().SetOffset(int32(newPositiveOffset)) @@ -726,13 +726,13 @@ func accessPositiveOffset() ottl.StandardGetSetter[TransformContext] { func accessPositiveBucketCounts() 
ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return expoHistogramDataPoint.Positive().BucketCounts().AsRaw(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newPositiveBucketCounts, ok := val.([]uint64); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { expoHistogramDataPoint.Positive().BucketCounts().FromRaw(newPositiveBucketCounts) @@ -745,13 +745,13 @@ func accessPositiveBucketCounts() ottl.StandardGetSetter[TransformContext] { func accessNegative() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return expoHistogramDataPoint.Negative(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newNegative, ok := val.(pmetric.ExponentialHistogramDataPointBuckets); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { newNegative.CopyTo(expoHistogramDataPoint.Negative()) @@ -764,13 +764,13 @@ func accessNegative() ottl.StandardGetSetter[TransformContext] { func accessNegativeOffset() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return int64(expoHistogramDataPoint.Negative().Offset()), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newNegativeOffset, ok := val.(int64); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { expoHistogramDataPoint.Negative().SetOffset(int32(newNegativeOffset)) @@ -783,13 +783,13 @@ func accessNegativeOffset() ottl.StandardGetSetter[TransformContext] { func accessNegativeBucketCounts() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return expoHistogramDataPoint.Negative().BucketCounts().AsRaw(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newNegativeBucketCounts, ok := val.([]uint64); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { 
expoHistogramDataPoint.Negative().BucketCounts().FromRaw(newNegativeBucketCounts) @@ -802,13 +802,13 @@ func accessNegativeBucketCounts() ottl.StandardGetSetter[TransformContext] { func accessQuantileValues() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if summaryDataPoint, ok := tCtx.GetDataPoint().(pmetric.SummaryDataPoint); ok { return summaryDataPoint.QuantileValues(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newQuantileValues, ok := val.(pmetric.SummaryDataPointValueAtQuantileSlice); ok { if summaryDataPoint, ok := tCtx.GetDataPoint().(pmetric.SummaryDataPoint); ok { newQuantileValues.CopyTo(summaryDataPoint.QuantileValues()) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go index 7e73c2dbd..3111268ec 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go @@ -222,10 +222,10 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } @@ -247,10 +247,10 @@ func accessCacheKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[Tra func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().Timestamp().AsTime().UnixNano(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i))) } @@ -261,10 +261,10 @@ func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] { func accessObservedTimeUnixNano() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().ObservedTimestamp().AsTime().UnixNano(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i))) } @@ -275,10 +275,10 @@ func 
accessObservedTimeUnixNano() ottl.StandardGetSetter[TransformContext] { func accessTime() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().Timestamp().AsTime(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(time.Time); ok { tCtx.GetLogRecord().SetTimestamp(pcommon.NewTimestampFromTime(i)) } @@ -289,10 +289,10 @@ func accessTime() ottl.StandardGetSetter[TransformContext] { func accessObservedTime() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().ObservedTimestamp().AsTime(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(time.Time); ok { tCtx.GetLogRecord().SetObservedTimestamp(pcommon.NewTimestampFromTime(i)) } @@ -303,10 +303,10 @@ func accessObservedTime() ottl.StandardGetSetter[TransformContext] { func accessSeverityNumber() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return int64(tCtx.GetLogRecord().SeverityNumber()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetSeverityNumber(plog.SeverityNumber(i)) } @@ -317,10 +317,10 @@ func accessSeverityNumber() ottl.StandardGetSetter[TransformContext] { func accessSeverityText() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().SeverityText(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if s, ok := val.(string); ok { tCtx.GetLogRecord().SetSeverityText(s) } @@ -331,10 +331,10 @@ func accessSeverityText() ottl.StandardGetSetter[TransformContext] { func accessBody() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return ottlcommon.GetValue(tCtx.GetLogRecord().Body()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { return internal.SetValue(tCtx.GetLogRecord().Body(), val) }, } @@ -369,10 +369,10 @@ func accessBodyKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[Tran func accessStringBody() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx 
TransformContext) (any, error) { return tCtx.GetLogRecord().Body().AsString(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if str, ok := val.(string); ok { tCtx.GetLogRecord().Body().SetStr(str) } @@ -383,10 +383,10 @@ func accessStringBody() ottl.StandardGetSetter[TransformContext] { func accessAttributes() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().Attributes(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if attrs, ok := val.(pcommon.Map); ok { attrs.CopyTo(tCtx.GetLogRecord().Attributes()) } @@ -408,10 +408,10 @@ func accessAttributesKey(key []ottl.Key[TransformContext]) ottl.StandardGetSette func accessDroppedAttributesCount() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return int64(tCtx.GetLogRecord().DroppedAttributesCount()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetDroppedAttributesCount(uint32(i)) } @@ -422,10 +422,10 @@ func accessDroppedAttributesCount() ottl.StandardGetSetter[TransformContext] { func accessFlags() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return int64(tCtx.GetLogRecord().Flags()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetFlags(plog.LogRecordFlags(i)) } @@ -436,10 +436,10 @@ func accessFlags() ottl.StandardGetSetter[TransformContext] { func accessTraceID() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().TraceID(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTraceID, ok := val.(pcommon.TraceID); ok { tCtx.GetLogRecord().SetTraceID(newTraceID) } @@ -450,11 +450,11 @@ func accessTraceID() ottl.StandardGetSetter[TransformContext] { func accessStringTraceID() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { id := tCtx.GetLogRecord().TraceID() return hex.EncodeToString(id[:]), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if str, ok := val.(string); ok { id, err := internal.ParseTraceID(str) 
if err != nil { @@ -469,10 +469,10 @@ func accessStringTraceID() ottl.StandardGetSetter[TransformContext] { func accessSpanID() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().SpanID(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newSpanID, ok := val.(pcommon.SpanID); ok { tCtx.GetLogRecord().SetSpanID(newSpanID) } @@ -483,11 +483,11 @@ func accessSpanID() ottl.StandardGetSetter[TransformContext] { func accessStringSpanID() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { id := tCtx.GetLogRecord().SpanID() return hex.EncodeToString(id[:]), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if str, ok := val.(string); ok { id, err := internal.ParseSpanID(str) if err != nil { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go index 0cdc916fd..964210d46 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go @@ -145,10 +145,10 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go index a7bd95612..e37440d9d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go @@ -112,10 +112,10 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, 
tCtx TransformContext, val any) error { if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go index c19e21892..f7a9d92ee 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go @@ -121,10 +121,10 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go index 8f400540b..a680fd68a 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go @@ -136,10 +136,10 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go index d8182f4ec..baf2aadcb 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go @@ -159,10 +159,10 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot } func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if m, ok := 
val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } @@ -184,10 +184,10 @@ func accessCacheKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[Tra func accessSpanEventTimeUnixNano() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetSpanEvent().Timestamp().AsTime().UnixNano(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTimestamp, ok := val.(int64); ok { tCtx.GetSpanEvent().SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, newTimestamp))) } @@ -198,10 +198,10 @@ func accessSpanEventTimeUnixNano() ottl.StandardGetSetter[TransformContext] { func accessSpanEventTime() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetSpanEvent().Timestamp().AsTime(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTimestamp, ok := val.(time.Time); ok { tCtx.GetSpanEvent().SetTimestamp(pcommon.NewTimestampFromTime(newTimestamp)) } @@ -212,10 +212,10 @@ func accessSpanEventTime() ottl.StandardGetSetter[TransformContext] { func accessSpanEventName() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetSpanEvent().Name(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newName, ok := val.(string); ok { tCtx.GetSpanEvent().SetName(newName) } @@ -226,10 +226,10 @@ func accessSpanEventName() ottl.StandardGetSetter[TransformContext] { func accessSpanEventAttributes() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetSpanEvent().Attributes(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if attrs, ok := val.(pcommon.Map); ok { attrs.CopyTo(tCtx.GetSpanEvent().Attributes()) } @@ -251,10 +251,10 @@ func accessSpanEventAttributesKey(key []ottl.Key[TransformContext]) ottl.Standar func accessSpanEventDroppedAttributeCount() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return int64(tCtx.GetSpanEvent().DroppedAttributesCount()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newCount, ok := val.(int64); ok { tCtx.GetSpanEvent().SetDroppedAttributesCount(uint32(newCount)) } diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go
index 86782be3c..98f730a24 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go
@@ -283,8 +283,8 @@ func (p *Parser[K]) buildArgs(ed editor, argsVal reflect.Value) error {
 		switch {
 		case arg.Value.Enum != nil:
 			name = string(*arg.Value.Enum)
-		case arg.Value.FunctionName != nil:
-			name = *arg.Value.FunctionName
+		case arg.FunctionName != nil:
+			name = *arg.FunctionName
 		default:
 			return fmt.Errorf("invalid function name given")
 		}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go
index ebd8e58ec..04352a6b7 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go
@@ -218,8 +218,9 @@ type converter struct {
 }

 type argument struct {
-	Name  string `parser:"(@(Lowercase(Uppercase | Lowercase)*) Equal)?"`
-	Value value  `parser:"@@"`
+	Name         string  `parser:"(@(Lowercase(Uppercase | Lowercase)*) Equal)?"`
+	Value        value   `parser:"( @@"`
+	FunctionName *string `parser:"| @(Uppercase(Uppercase | Lowercase)*) )"`
 }

 func (a *argument) checkForCustomError() error {
@@ -236,7 +237,6 @@ type value struct {
 	String *string     `parser:"| @String"`
 	Bool   *boolean    `parser:"| @Boolean"`
 	Enum   *enumSymbol `parser:"| @Uppercase (?!Lowercase)"`
-	FunctionName *string `parser:"| @(Uppercase(Uppercase | Lowercase)*)"`
 	List   *list       `parser:"| @@)"`
 }
Lowercase)"` - FunctionName *string `parser:"| @(Uppercase(Uppercase | Lowercase)*)"` List *list `parser:"| @@)"` } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml index e7333a2c5..b326d06fb 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml @@ -5,4 +5,5 @@ status: stability: alpha: [ traces, metrics, logs ] codeowners: - active: [TylerHelmuth, kentquirk, bogdandrutu, evan-bradley] \ No newline at end of file + active: [TylerHelmuth, kentquirk, bogdandrutu, evan-bradley] + seeking_new: true \ No newline at end of file diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md index 94712ca30..338bf59d6 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md @@ -392,6 +392,7 @@ Available Converters: - [IsInt](#isint) - [IsMap](#ismap) - [IsMatch](#ismatch) +- [IsList](#islist) - [IsString](#isstring) - [Len](#len) - [Log](#log) @@ -409,10 +410,12 @@ Available Converters: - [SHA256](#sha256) - [SpanID](#spanid) - [Split](#split) +- [String](#string) - [Substring](#substring) - [Time](#time) - [TraceID](#traceid) - [TruncateTime](#truncatetime) +- [Unix](#unix) - [UnixMicro](#unixmicro) - [UnixMilli](#unixmilli) - [UnixNano](#unixnano) @@ -438,9 +441,9 @@ Examples: `Concat(values[], delimiter)` -The `Concat` Converter takes a delimiter and a sequence of values and concatenates their string representation. Unsupported values, such as lists or maps that may substantially increase payload size, are not added to the resulting string. +The `Concat` Converter takes a sequence of values and a delimiter and concatenates their string representation. Unsupported values, such as lists or maps that may substantially increase payload size, are not added to the resulting string. -`values` is a list of values passed as arguments. It supports paths, primitive values, and byte slices (such as trace IDs or span IDs). +`values` is a list of values. It supports paths, primitive values, and byte slices (such as trace IDs or span IDs). `delimiter` is a string value that is placed between strings during concatenation. If no delimiter is desired, then simply pass an empty string. @@ -706,6 +709,22 @@ Examples: - `IsMatch("string", ".*ring")` +### IsList + +`IsList(value)` + +The `IsList` Converter returns true if the given value is a list. + +The `value` is either a path expression to a telemetry field to retrieve or a literal. + +If `value` is a `list`, `pcommon.ValueTypeSlice`. `pcommon.Slice`, or any other list type, then returns `true`, otherwise returns `false`. + +Examples: + +- `IsList(body)` + +- `IsList(attributes["maybe a slice"])` + ### IsString `IsString(value)` @@ -1068,6 +1087,33 @@ Examples: - `Split("A|B|C", "|")` +### String + +`String(value)` + +The `String` Converter converts the `value` to string type. + +The returned type is `string`. + +- string. 
 ### Substring

 `Substring(target, start, length)`

@@ -1085,15 +1131,69 @@ Examples:

 ### Time

-The `Time` Converter takes a string representation of a time and converts it to a Golang `time.Time`.
+`Time(target, format, Optional[location])`

-`time` is a string. `format` is a string.
+The `Time` Converter takes a string representation of a time and converts it to a Golang `time.Time`.

-If either `time` or `format` are nil, an error is returned. The parser used is the parser at [internal/coreinternal/parser](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/internal/coreinternal/timeutils). If the time and format do not follow the parsing rules used by this parser, an error is returned.
+`target` is a string. `format` is a string, `location` is an optional string.
+
+If either `target` or `format` are nil, an error is returned. The parser used is the parser at [internal/coreinternal/parser](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/internal/coreinternal/timeutils). If the `target` and `format` do not follow the parsing rules used by this parser, an error is returned.
+
+`format` denotes a textual representation of the time value formatted according to a ctime-like format string. It follows [standard Go Layout formatting](https://pkg.go.dev/time#pkg-constants) with a few additional substitutions:
+| substitution | description | examples |
+|-----|-----|-----|
+|`%Y` | Year as a zero-padded number | 0001, 0002, ..., 2019, 2020, ..., 9999 |
+|`%y` | Year, last two digits as a zero-padded number | 01, ..., 99 |
+|`%m` | Month as a zero-padded number | 01, 02, ..., 12 |
+|`%o` | Month as a space-padded number | 1, 2, ..., 12 |
+|`%q` | Month as an unpadded number | 1, 2, ..., 12 |
+|`%b`, `%h` | Abbreviated month name | Jan, Feb, ... |
+|`%B` | Full month name | January, February, ... |
+|`%d` | Day of the month as a zero-padded number | 01, 02, ..., 31 |
+|`%e` | Day of the month as a space-padded number | 1, 2, ..., 31 |
+|`%g` | Day of the month as an unpadded number | 1, 2, ..., 31 |
+|`%a` | Abbreviated weekday name | Sun, Mon, ... |
+|`%A` | Full weekday name | Sunday, Monday, ... |
+|`%H` | Hour (24-hour clock) as a zero-padded number | 00, ..., 23 |
+|`%I` | Hour (12-hour clock) as a zero-padded number | 01, ..., 12 |
+|`%l` | Hour (12-hour clock) as an unpadded number | 1, ..., 12 |
+|`%p` | Locale’s equivalent of either AM or PM | AM, PM |
+|`%P` | Locale’s equivalent of either am or pm | am, pm |
+|`%M` | Minute as a zero-padded number | 00, 01, ..., 59 |
+|`%S` | Second as a zero-padded number | 00, 01, ..., 59 |
+|`%L` | Millisecond as a zero-padded number | 000, 001, ..., 999 |
+|`%f` | Microsecond as a zero-padded number | 000000, ..., 999999 |
+|`%s` | Nanosecond as a zero-padded number | 000000000, ..., 999999999 |
+|`%z` | UTC offset in the form ±HHMM[SS[.ffffff]] or empty | +0000, -0400 |
+|`%Z` | Timezone name or abbreviation or empty | UTC, EST, CST |
+|`%D`, `%x` | Short MM/DD/YYYY date, equivalent to %m/%d/%y | 01/21/2031 |
+|`%F` | Short YYYY-MM-DD date, equivalent to %Y-%m-%d | 2031-01-21 |
+|`%T`, `%X` | ISO 8601 time format (HH:MM:SS), equivalent to %H:%M:%S | 02:55:02 |
+|`%r` | 12-hour clock time | 02:55:02 pm |
+|`%R` | 24-hour HH:MM time, equivalent to %H:%M | 13:55 |
+|`%n` | New-line character ('\n') | |
+|`%t` | Horizontal-tab character ('\t') | |
+|`%%` | A % sign | |
+|`%c` | Date and time representation | Mon Jan 02 15:04:05 2006 |
+
+`location` specifies a default time zone canonical ID to be used for date parsing in case it is not part of `format`.
+
+When loading `location`, this function will look for the IANA Time Zone database in the following locations in order:
+- a directory or uncompressed zip file named by the ZONEINFO environment variable
+- on a Unix system, the system standard installation location
+- $GOROOT/lib/time/zoneinfo.zip
+- the `time/tzdata` package, if it was imported.
+
+When building a Collector binary, importing `time/tzdata` in any Go source file will bundle the database into the binary, which guarantees the lookups will work regardless of the setup on the host. Note this will add roughly 500kB to the binary size.

 Examples:

 - `Time("02/04/2023", "%m/%d/%Y")`
+- `Time("Feb 15, 2023", "%b %d, %Y")`
+- `Time("2023-05-26 12:34:56 HST", "%Y-%m-%d %H:%M:%S %Z")`
+- `Time("1986-10-01T00:17:33 MST", "%Y-%m-%dT%H:%M:%S %Z")`
+- `Time("2012-11-01T22:08:41+0000 EST", "%Y-%m-%dT%H:%M:%S%z %Z")`
+- `Time("2023-05-26 12:34:56", "%Y-%m-%d %H:%M:%S", "America/New_York")`
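The `time/tzdata` note above is easy to verify in isolation. A minimal sketch using only the Go standard library; the layout `2006-01-02 15:04:05` is the Go-native equivalent of `%Y-%m-%d %H:%M:%S`, a translation the contrib timeutils parser otherwise performs internally:

```go
package main

import (
	"fmt"
	"time"

	// Importing time/tzdata embeds the IANA database (~500 kB), so
	// LoadLocation works even on hosts without a zoneinfo install.
	_ "time/tzdata"
)

func main() {
	loc, err := time.LoadLocation("America/New_York")
	if err != nil {
		panic(err)
	}
	// Go-native layout equivalent of the ctime-like "%Y-%m-%d %H:%M:%S".
	t, err := time.ParseInLocation("2006-01-02 15:04:05", "2023-05-26 12:34:56", loc)
	if err != nil {
		panic(err)
	}
	fmt.Println(t) // 2023-05-26 12:34:56 -0400 EDT
}
```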
 ### TraceID

@@ -1121,6 +1221,21 @@ Examples:

 - `TruncateTime(start_time, Duration("1s"))`

+### Unix
+
+`Unix(seconds, Optional[nanoseconds])`
+
+The `Unix` Converter returns an epoch timestamp as a Unix time, similar to [Golang's Unix function](https://pkg.go.dev/time#Unix).
+
+`seconds` is `int64`. If `seconds` is another type, an error is returned.
+`nanoseconds` is `int64`. It is optional and its default value is 0. If `nanoseconds` is another type, an error is returned.
+
+The returned type is `time.Time`.
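A minimal sketch of the documented semantics, assuming `Unix(seconds, nanoseconds)` maps directly onto the standard library's `time.Unix`:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Unix(1672527600) from the example below: seconds since the epoch,
	// with nanoseconds defaulting to 0.
	t := time.Unix(1672527600, 0)
	fmt.Println(t.UTC()) // 2022-12-31 23:00:00 +0000 UTC
}
```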
+ +Examples: + +- `Unix(1672527600)` + ### UnixMicro `UnixMicro(value)` diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_list.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_list.go new file mode 100644 index 000000000..137ccb470 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_list.go @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type IsListArguments[K any] struct { + Target ottl.Getter[K] +} + +func NewIsListFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("IsList", &IsListArguments[K]{}, createIsListFunction[K]) +} + +func createIsListFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*IsListArguments[K]) + + if !ok { + return nil, fmt.Errorf("IsListFactory args must be of type *IsListArguments[K]") + } + + return isList(args.Target), nil +} + +func isList[K any](target ottl.Getter[K]) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + val, err := target.Get(ctx, tCtx) + if err != nil { + return false, err + } + + switch valType := val.(type) { + case pcommon.Value: + return valType.Type() == pcommon.ValueTypeSlice, nil + + case pcommon.Slice, plog.LogRecordSlice, plog.ResourceLogsSlice, plog.ScopeLogsSlice, pmetric.ExemplarSlice, pmetric.ExponentialHistogramDataPointSlice, pmetric.HistogramDataPointSlice, pmetric.MetricSlice, pmetric.NumberDataPointSlice, pmetric.ResourceMetricsSlice, pmetric.ScopeMetricsSlice, pmetric.SummaryDataPointSlice, pmetric.SummaryDataPointValueAtQuantileSlice, ptrace.ResourceSpansSlice, ptrace.ScopeSpansSlice, ptrace.SpanEventSlice, ptrace.SpanLinkSlice, ptrace.SpanSlice, []string, []bool, []int64, []float64, [][]byte, []any: + return true, nil + } + + return false, nil + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go index 6cae0c4c3..8370414c6 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go @@ -42,7 +42,7 @@ func keepKeys[K any](target ottl.PMapGetter[K], keys []string) ottl.ExprFunc[K] if err != nil { return nil, err } - val.RemoveIf(func(key string, value pcommon.Value) bool { + val.RemoveIf(func(key string, _ pcommon.Value) bool { _, ok := keySet[key] return !ok }) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go index 
81c3a4a7f..0010c0e3d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go @@ -64,7 +64,7 @@ func limit[K any](target ottl.PMapGetter[K], limit int64, priorityKeys []string) } } - val.RemoveIf(func(key string, value pcommon.Value) bool { + val.RemoveIf(func(key string, _ pcommon.Value) bool { if _, ok := keep[key]; ok { return false } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_now.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_now.go index 51815d3f6..698a274a3 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_now.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_now.go @@ -11,7 +11,7 @@ import ( ) func now[K any]() (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { + return func(_ context.Context, _ K) (any, error) { return time.Now(), nil }, nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_all_matches.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_all_matches.go index 1e4fc2874..650630e00 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_all_matches.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_all_matches.go @@ -74,7 +74,7 @@ func replaceAllMatches[K any](target ottl.PMapGetter[K], pattern string, replace return nil, err } } - val.Range(func(key string, value pcommon.Value) bool { + val.Range(func(_ string, value pcommon.Value) bool { if glob.Match(value.Str()) { value.SetStr(replacementVal) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_string.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_string.go new file mode 100644 index 000000000..c677421e7 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_string.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type StringArguments[K any] struct { + Target ottl.StringLikeGetter[K] +} + +func NewStringFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("String", &StringArguments[K]{}, createStringFunction[K]) +} + +func createStringFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*StringArguments[K]) + + if !ok { + return nil, fmt.Errorf("StringFactory args must be of type *StringArguments[K]") + } + + return stringFunc(args.Target), nil +} + +func stringFunc[K any](target ottl.StringLikeGetter[K]) ottl.ExprFunc[K] { + return 
func(ctx context.Context, tCtx K) (any, error) { + value, err := target.Get(ctx, tCtx) + if err != nil { + return nil, err + } + if value == nil { + return nil, nil + } + return *value, nil + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_time.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_time.go index a6ef708b0..b6d793cc3 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_time.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_time.go @@ -12,8 +12,9 @@ import ( ) type TimeArguments[K any] struct { - Time ottl.StringGetter[K] - Format string + Time ottl.StringGetter[K] + Format string + Location ottl.Optional[string] } func NewTimeFactory[K any]() ottl.Factory[K] { @@ -26,14 +27,20 @@ func createTimeFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ot return nil, fmt.Errorf("TimeFactory args must be of type *TimeArguments[K]") } - return Time(args.Time, args.Format) + return Time(args.Time, args.Format, args.Location) } -func Time[K any](inputTime ottl.StringGetter[K], format string) (ottl.ExprFunc[K], error) { +func Time[K any](inputTime ottl.StringGetter[K], format string, location ottl.Optional[string]) (ottl.ExprFunc[K], error) { if format == "" { return nil, fmt.Errorf("format cannot be nil") } - loc, err := timeutils.GetLocation(nil, &format) + var defaultLocation *string + if !location.IsEmpty() { + l := location.Get() + defaultLocation = &l + } + + loc, err := timeutils.GetLocation(defaultLocation, &format) if err != nil { return nil, err } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_all.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_all.go index dd47e7e96..b10479bd6 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_all.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_all.go @@ -44,7 +44,7 @@ func TruncateAll[K any](target ottl.PMapGetter[K], limit int64) (ottl.ExprFunc[K if err != nil { return nil, err } - val.Range(func(key string, value pcommon.Value) bool { + val.Range(func(_ string, value pcommon.Value) bool { stringVal := value.Str() if int64(len(stringVal)) > limit { value.SetStr(stringVal[:limit]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix.go new file mode 100644 index 000000000..06de04b41 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + "time" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type UnixArguments[K any] struct { + Seconds ottl.IntGetter[K] + Nanoseconds 
ottl.Optional[ottl.IntGetter[K]] +} + +func NewUnixFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Unix", &UnixArguments[K]{}, createUnixFunction[K]) +} +func createUnixFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*UnixArguments[K]) + + if !ok { + return nil, fmt.Errorf("UnixFactory args must be of type *UnixArguments[K]") + } + + return Unix(args.Seconds, args.Nanoseconds) +} + +func Unix[K any](seconds ottl.IntGetter[K], nanoseconds ottl.Optional[ottl.IntGetter[K]]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + sec, err := seconds.Get(ctx, tCtx) + if err != nil { + return nil, err + } + + var nsec int64 + + if !nanoseconds.IsEmpty() { + nsec, err = nanoseconds.Get().Get(ctx, tCtx) + if err != nil { + return nil, err + } + } + + return time.Unix(sec, nsec), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_uuid.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_uuid.go index 9c03835f5..fec985989 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_uuid.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_uuid.go @@ -12,7 +12,7 @@ import ( ) func uuid[K any]() (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { + return func(_ context.Context, _ K) (any, error) { u := guuid.New() return u.String(), nil }, nil diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go index 9bb33ff32..23522e9a1 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go @@ -47,6 +47,7 @@ func converters[K any]() []ottl.Factory[K] { NewIntFactory[K](), NewIsBoolFactory[K](), NewIsDoubleFactory[K](), + NewIsListFactory[K](), NewIsIntFactory[K](), NewIsMapFactory[K](), NewIsMatchFactory[K](), @@ -67,10 +68,12 @@ func converters[K any]() []ottl.Factory[K] { NewSHA256Factory[K](), NewSpanIDFactory[K](), NewSplitFactory[K](), + NewStringFactory[K](), NewSubstringFactory[K](), NewTimeFactory[K](), NewTruncateTimeFactory[K](), NewTraceIDFactory[K](), + NewUnixFactory[K](), NewUnixMicroFactory[K](), NewUnixMilliFactory[K](), NewUnixNanoFactory[K](), diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go index 650258445..6826de769 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go @@ -82,7 +82,7 @@ func (hw *hashWriter) writeMapHash(m pcommon.Map) { // on the first call due to it being cleared of any added keys at then end of the function. 
nextIndex := len(hw.keysBuf) - m.Range(func(k string, v pcommon.Value) bool { + m.Range(func(k string, _ pcommon.Value) bool { hw.keysBuf = append(hw.keysBuf, k) return true }) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/converter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/converter.go index ada2d7582..de7c259d6 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/converter.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/converter.go @@ -15,6 +15,7 @@ import ( "sync" "github.com/cespare/xxhash/v2" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" @@ -52,6 +53,8 @@ import ( // │ downstream consumers via OutChannel() │ // └─────────────────────────────────────────────────────┘ type Converter struct { + set component.TelemetrySettings + // pLogsChan is a channel on which aggregated logs will be sent to. pLogsChan chan plog.Logs @@ -70,8 +73,6 @@ type Converter struct { // wg is a WaitGroup that makes sure that we wait for spun up goroutines exit // when Stop() is called. wg sync.WaitGroup - - logger *zap.Logger } type converterOption interface { @@ -90,14 +91,15 @@ func (o workerCountOption) apply(c *Converter) { c.workerCount = o.workerCount } -func NewConverter(logger *zap.Logger, opts ...converterOption) *Converter { +func NewConverter(set component.TelemetrySettings, opts ...converterOption) *Converter { + set.Logger = set.Logger.With(zap.String("component", "converter")) c := &Converter{ + set: set, workerChan: make(chan []*entry.Entry), workerCount: int(math.Max(1, float64(runtime.NumCPU()/4))), pLogsChan: make(chan plog.Logs), stopChan: make(chan struct{}), flushChan: make(chan plog.Logs), - logger: logger, } for _, opt := range opts { opt.apply(c) @@ -106,7 +108,7 @@ func NewConverter(logger *zap.Logger, opts ...converterOption) *Converter { } func (c *Converter) Start() { - c.logger.Debug("Starting log converter", zap.Int("worker_count", c.workerCount)) + c.set.Logger.Debug("Starting log converter", zap.Int("worker_count", c.workerCount)) c.wg.Add(c.workerCount) for i := 0; i < c.workerCount; i++ { @@ -148,19 +150,35 @@ func (c *Converter) workerLoop() { } resourceHashToIdx := make(map[uint64]int) + scopeIdxByResource := make(map[uint64]map[string]int) pLogs := plog.NewLogs() var sl plog.ScopeLogs + for _, e := range entries { resourceID := HashResource(e.Resource) + var rl plog.ResourceLogs + resourceIdx, ok := resourceHashToIdx[resourceID] if !ok { resourceHashToIdx[resourceID] = pLogs.ResourceLogs().Len() - rl := pLogs.ResourceLogs().AppendEmpty() + + rl = pLogs.ResourceLogs().AppendEmpty() upsertToMap(e.Resource, rl.Resource().Attributes()) + + scopeIdxByResource[resourceID] = map[string]int{e.ScopeName: 0} sl = rl.ScopeLogs().AppendEmpty() + sl.Scope().SetName(e.ScopeName) } else { - sl = pLogs.ResourceLogs().At(resourceIdx).ScopeLogs().At(0) + rl = pLogs.ResourceLogs().At(resourceIdx) + scopeIdxInResource, ok := scopeIdxByResource[resourceID][e.ScopeName] + if !ok { + scopeIdxByResource[resourceID][e.ScopeName] = rl.ScopeLogs().Len() + sl = rl.ScopeLogs().AppendEmpty() + sl.Scope().SetName(e.ScopeName) + } else { + sl = pLogs.ResourceLogs().At(resourceIdx).ScopeLogs().At(scopeIdxInResource) + } } 
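			// Editorial note, not part of the upstream patch: the lookup maps above
			// group entries first by resource hash and then by scope name, so one
			// ResourceLogs can now carry a separate ScopeLogs per distinct scope
			// instead of funneling every entry into ScopeLogs().At(0).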
convertInto(e, sl.LogRecords().AppendEmpty()) } @@ -186,7 +204,7 @@ func (c *Converter) flushLoop() { case pLogs := <-c.flushChan: if err := c.flush(ctx, pLogs); err != nil { - c.logger.Debug("Problem sending log entries", + c.set.Logger.Debug("Problem sending log entries", zap.Error(err), ) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/emitter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/emitter.go index aa1fe90fa..acd78ce02 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/emitter.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/emitter.go @@ -4,174 +4,16 @@ package adapter // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter" import ( - "context" - "sync" - "time" - + "go.opentelemetry.io/collector/component" "go.uber.org/zap" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" ) -// LogEmitter is a stanza operator that emits log entries to a channel -type LogEmitter struct { - helper.OutputOperator - logChan chan []*entry.Entry - stopOnce sync.Once - cancel context.CancelFunc - batchMux sync.Mutex - batch []*entry.Entry - wg sync.WaitGroup - maxBatchSize uint - flushInterval time.Duration -} - -var ( - defaultFlushInterval = 100 * time.Millisecond - defaultMaxBatchSize uint = 100 -) - -type emitterOption interface { - apply(*LogEmitter) -} - -func withMaxBatchSize(maxBatchSize uint) emitterOption { - return maxBatchSizeOption{maxBatchSize} -} - -type maxBatchSizeOption struct { - maxBatchSize uint -} - -func (o maxBatchSizeOption) apply(e *LogEmitter) { - e.maxBatchSize = o.maxBatchSize -} - -func withFlushInterval(flushInterval time.Duration) emitterOption { - return flushIntervalOption{flushInterval} -} - -type flushIntervalOption struct { - flushInterval time.Duration -} - -func (o flushIntervalOption) apply(e *LogEmitter) { - e.flushInterval = o.flushInterval -} - -// NewLogEmitter creates a new receiver output -func NewLogEmitter(logger *zap.SugaredLogger, opts ...emitterOption) *LogEmitter { - e := &LogEmitter{ - OutputOperator: helper.OutputOperator{ - BasicOperator: helper.BasicOperator{ - OperatorID: "log_emitter", - OperatorType: "log_emitter", - SugaredLogger: logger, - }, - }, - logChan: make(chan []*entry.Entry), - maxBatchSize: defaultMaxBatchSize, - batch: make([]*entry.Entry, 0, defaultMaxBatchSize), - flushInterval: defaultFlushInterval, - cancel: func() {}, - } - for _, opt := range opts { - opt.apply(e) - } - return e -} - -// Start starts the goroutine(s) required for this operator -func (e *LogEmitter) Start(_ operator.Persister) error { - ctx, cancel := context.WithCancel(context.Background()) - e.cancel = cancel - - e.wg.Add(1) - go e.flusher(ctx) - return nil -} - -// Stop will close the log channel and stop running goroutines -func (e *LogEmitter) Stop() error { - e.stopOnce.Do(func() { - e.cancel() - e.wg.Wait() - - close(e.logChan) - }) - - return nil -} - -// OutChannel returns the channel on which entries will be sent to. 
-func (e *LogEmitter) OutChannel() <-chan []*entry.Entry { - return e.logChan -} - -// Process will emit an entry to the output channel -func (e *LogEmitter) Process(ctx context.Context, ent *entry.Entry) error { - if oldBatch := e.appendEntry(ent); len(oldBatch) > 0 { - e.flush(ctx, oldBatch) - } - - return nil -} - -// appendEntry appends the entry to the current batch. If maxBatchSize is reached, a new batch will be made, and the old batch -// (which should be flushed) will be returned -func (e *LogEmitter) appendEntry(ent *entry.Entry) []*entry.Entry { - e.batchMux.Lock() - defer e.batchMux.Unlock() - - e.batch = append(e.batch, ent) - if uint(len(e.batch)) >= e.maxBatchSize { - var oldBatch []*entry.Entry - oldBatch, e.batch = e.batch, make([]*entry.Entry, 0, e.maxBatchSize) - return oldBatch - } - - return nil -} - -// flusher flushes the current batch every flush interval. Intended to be run as a goroutine -func (e *LogEmitter) flusher(ctx context.Context) { - defer e.wg.Done() - - ticker := time.NewTicker(e.flushInterval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - if oldBatch := e.makeNewBatch(); len(oldBatch) > 0 { - e.flush(ctx, oldBatch) - } - case <-ctx.Done(): - return - } - } -} - -// flush flushes the provided batch to the log channel. -func (e *LogEmitter) flush(ctx context.Context, batch []*entry.Entry) { - select { - case e.logChan <- batch: - case <-ctx.Done(): - } -} - -// makeNewBatch replaces the current batch on the log emitter with a new batch, returning the old one -func (e *LogEmitter) makeNewBatch() []*entry.Entry { - e.batchMux.Lock() - defer e.batchMux.Unlock() - - if len(e.batch) == 0 { - return nil - } +// Deprecated [v0.101.0] Use helper.LogEmitter directly instead +type LogEmitter = helper.LogEmitter - var oldBatch []*entry.Entry - oldBatch, e.batch = e.batch, make([]*entry.Entry, 0, e.maxBatchSize) - return oldBatch +// Deprecated [v0.101.0] Use helper.NewLogEmitter directly instead +func NewLogEmitter(logger *zap.SugaredLogger, opts ...helper.EmitterOption) *LogEmitter { + return helper.NewLogEmitter(component.TelemetrySettings{Logger: logger.Desugar()}, opts...) 
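	// Editorial note, not part of the upstream patch: this deprecation shim keeps
	// the old *zap.SugaredLogger signature compiling by wrapping the logger in a
	// minimal component.TelemetrySettings before delegating to the helper package.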
} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/factory.go index 34907b3d3..9c46aac9c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/factory.go @@ -13,6 +13,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/consumerretry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/pipeline" ) @@ -35,7 +36,7 @@ func NewFactory(logReceiverType LogReceiverType, sl component.StabilityLevel) rc func createLogsReceiver(logReceiverType LogReceiverType) rcvr.CreateLogsFunc { return func( - ctx context.Context, + _ context.Context, params rcvr.CreateSettings, cfg component.Config, nextConsumer consumer.Logs, @@ -45,18 +46,18 @@ func createLogsReceiver(logReceiverType LogReceiverType) rcvr.CreateLogsFunc { operators := append([]operator.Config{inputCfg}, baseCfg.Operators...) - emitterOpts := []emitterOption{} + emitterOpts := []helper.EmitterOption{} if baseCfg.maxBatchSize > 0 { - emitterOpts = append(emitterOpts, withMaxBatchSize(baseCfg.maxBatchSize)) + emitterOpts = append(emitterOpts, helper.WithMaxBatchSize(baseCfg.maxBatchSize)) } if baseCfg.flushInterval > 0 { - emitterOpts = append(emitterOpts, withFlushInterval(baseCfg.flushInterval)) + emitterOpts = append(emitterOpts, helper.WithFlushInterval(baseCfg.flushInterval)) } - emitter := NewLogEmitter(params.Logger.Sugar(), emitterOpts...) + emitter := helper.NewLogEmitter(params.TelemetrySettings, emitterOpts...) pipe, err := pipeline.Config{ Operators: operators, DefaultOutput: emitter, - }.Build(params.Logger.Sugar()) + }.Build(params.TelemetrySettings) if err != nil { return nil, err } @@ -65,7 +66,7 @@ func createLogsReceiver(logReceiverType LogReceiverType) rcvr.CreateLogsFunc { if baseCfg.numWorkers > 0 { converterOpts = append(converterOpts, withWorkerCount(baseCfg.numWorkers)) } - converter := NewConverter(params.Logger, converterOpts...) + converter := NewConverter(params.TelemetrySettings, converterOpts...) 
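	// Editorial note, not part of the upstream patch: the emitter, pipeline, and
	// converter are now all built from params.TelemetrySettings, so they share a
	// single component-scoped logger instead of separate *zap.Logger values.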
obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: params.ID, ReceiverCreateSettings: params, @@ -74,11 +75,11 @@ func createLogsReceiver(logReceiverType LogReceiverType) rcvr.CreateLogsFunc { return nil, err } return &receiver{ + set: params.TelemetrySettings, id: params.ID, pipe: pipe, emitter: emitter, consumer: consumerretry.NewLogs(baseCfg.RetryOnFailure, params.Logger, nextConsumer), - logger: params.Logger, converter: converter, obsrecv: obsrecv, storageID: baseCfg.StorageID, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/frompdataconverter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/frompdataconverter.go index 11a18e6d6..f294cdb3a 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/frompdataconverter.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/frompdataconverter.go @@ -10,6 +10,7 @@ import ( "sync" "time" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" @@ -38,6 +39,8 @@ import ( // └─┤ and sends them along entriesChan │ // └───────────────────────────────────────────────────┘ type FromPdataConverter struct { + set component.TelemetrySettings + // entriesChan is a channel on which converted logs will be sent out of the converter. entriesChan chan []*entry.Entry @@ -51,28 +54,26 @@ type FromPdataConverter struct { // wg is a WaitGroup that makes sure that we wait for spun up goroutines exit // when Stop() is called. wg sync.WaitGroup - - logger *zap.Logger } -func NewFromPdataConverter(workerCount int, logger *zap.Logger) *FromPdataConverter { - if logger == nil { - logger = zap.NewNop() +func NewFromPdataConverter(set component.TelemetrySettings, workerCount int) *FromPdataConverter { + if set.Logger == nil { + set.Logger = zap.NewNop() } if workerCount <= 0 { workerCount = int(math.Max(1, float64(runtime.NumCPU()))) } return &FromPdataConverter{ + set: set, workerChan: make(chan fromConverterWorkerItem, workerCount), entriesChan: make(chan []*entry.Entry), stopChan: make(chan struct{}), - logger: logger, } } func (c *FromPdataConverter) Start() { - c.logger.Debug("Starting log converter from pdata", zap.Int("worker_count", cap(c.workerChan))) + c.set.Logger.Debug("Starting log converter from pdata", zap.Int("worker_count", cap(c.workerChan))) for i := 0; i < cap(c.workerChan); i++ { c.wg.Add(1) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/receiver.go index ffdeeb1b9..4df6fd846 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/receiver.go @@ -16,19 +16,20 @@ import ( "go.uber.org/multierr" "go.uber.org/zap" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/pipeline" ) type receiver struct { + set component.TelemetrySettings id component.ID wg sync.WaitGroup 
cancel context.CancelFunc pipe pipeline.Pipeline - emitter *LogEmitter + emitter *helper.LogEmitter consumer consumer.Logs converter *Converter - logger *zap.Logger obsrecv *receiverhelper.ObsReport storageID *component.ID @@ -42,7 +43,7 @@ var _ rcvr.Logs = (*receiver)(nil) func (r *receiver) Start(ctx context.Context, host component.Host) error { rctx, cancel := context.WithCancel(ctx) r.cancel = cancel - r.logger.Info("Starting stanza receiver") + r.set.Logger.Info("Starting stanza receiver") if err := r.setStorageClient(ctx, host); err != nil { return fmt.Errorf("storage client: %w", err) @@ -87,16 +88,16 @@ func (r *receiver) emitterLoop(ctx context.Context) { for { select { case <-doneChan: - r.logger.Debug("Receive loop stopped") + r.set.Logger.Debug("Receive loop stopped") return - case e, ok := <-r.emitter.logChan: + case e, ok := <-r.emitter.OutChannel(): if !ok { continue } if err := r.converter.Batch(e); err != nil { - r.logger.Error("Could not add entry to batch", zap.Error(err)) + r.set.Logger.Error("Could not add entry to batch", zap.Error(err)) } } } @@ -112,19 +113,19 @@ func (r *receiver) consumerLoop(ctx context.Context) { for { select { case <-doneChan: - r.logger.Debug("Consumer loop stopped") + r.set.Logger.Debug("Consumer loop stopped") return case pLogs, ok := <-pLogsChan: if !ok { - r.logger.Debug("Converter channel got closed") + r.set.Logger.Debug("Converter channel got closed") continue } obsrecvCtx := r.obsrecv.StartLogsOp(ctx) logRecordCount := pLogs.LogRecordCount() cErr := r.consumer.ConsumeLogs(ctx, pLogs) if cErr != nil { - r.logger.Error("ConsumeLogs() failed", zap.Error(cErr)) + r.set.Logger.Error("ConsumeLogs() failed", zap.Error(cErr)) } r.obsrecv.EndLogsOp(obsrecvCtx, "stanza", logRecordCount, cErr) } @@ -137,7 +138,7 @@ func (r *receiver) Shutdown(ctx context.Context) error { return nil } - r.logger.Info("Stopping stanza receiver") + r.set.Logger.Info("Stopping stanza receiver") pipelineErr := r.pipe.Stop() r.converter.Stop() r.cancel() diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/register.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/register.go index 8105ef17d..426e456de 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/register.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/register.go @@ -6,6 +6,7 @@ package adapter // import "github.com/open-telemetry/opentelemetry-collector-con import ( _ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file" // Register parsers and transformers for stanza-based log receivers _ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout" + _ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container" _ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/csv" _ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/json" _ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/jsonarray" diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/attrs/attrs.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/attrs/attrs.go index f8b4a1c47..0b174a97a 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/attrs/attrs.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/attrs/attrs.go @@ -5,25 +5,31 @@ package attrs // import "github.com/open-telemetry/opentelemetry-collector-contr import ( "fmt" + "os" "path/filepath" "runtime" ) const ( - LogFileName = "log.file.name" - LogFilePath = "log.file.path" - LogFileNameResolved = "log.file.name_resolved" - LogFilePathResolved = "log.file.path_resolved" + LogFileName = "log.file.name" + LogFilePath = "log.file.path" + LogFileNameResolved = "log.file.name_resolved" + LogFilePathResolved = "log.file.path_resolved" + LogFileOwnerName = "log.file.owner.name" + LogFileOwnerGroupName = "log.file.owner.group.name" ) type Resolver struct { - IncludeFileName bool `mapstructure:"include_file_name,omitempty"` - IncludeFilePath bool `mapstructure:"include_file_path,omitempty"` - IncludeFileNameResolved bool `mapstructure:"include_file_name_resolved,omitempty"` - IncludeFilePathResolved bool `mapstructure:"include_file_path_resolved,omitempty"` + IncludeFileName bool `mapstructure:"include_file_name,omitempty"` + IncludeFilePath bool `mapstructure:"include_file_path,omitempty"` + IncludeFileNameResolved bool `mapstructure:"include_file_name_resolved,omitempty"` + IncludeFilePathResolved bool `mapstructure:"include_file_path_resolved,omitempty"` + IncludeFileOwnerName bool `mapstructure:"include_file_owner_name,omitempty"` + IncludeFileOwnerGroupName bool `mapstructure:"include_file_owner_group_name,omitempty"` } -func (r *Resolver) Resolve(path string) (attributes map[string]any, err error) { +func (r *Resolver) Resolve(file *os.File) (attributes map[string]any, err error) { + var path = file.Name() // size 2 is sufficient if not resolving symlinks. This optimizes for the most performant cases. 
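	// Editorial note, not part of the upstream patch: Resolve now receives the
	// open *os.File rather than a path string, so addOwnerInfo can consult
	// file.Stat() on the same handle when owner attributes are requested.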
attributes = make(map[string]any, 2) if r.IncludeFileName { @@ -32,6 +38,12 @@ func (r *Resolver) Resolve(path string) (attributes map[string]any, err error) { if r.IncludeFilePath { attributes[LogFilePath] = path } + if r.IncludeFileOwnerName || r.IncludeFileOwnerGroupName { + err = r.addOwnerInfo(file, attributes) + if err != nil { + return nil, err + } + } if !r.IncludeFileNameResolved && !r.IncludeFilePathResolved { return attributes, nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/attrs/owner_other.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/attrs/owner_other.go new file mode 100644 index 000000000..b9b7d40b5 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/attrs/owner_other.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !windows + +package attrs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/attrs" + +import ( + "fmt" + "os" + "os/user" + "syscall" +) + +func (r *Resolver) addOwnerInfo(file *os.File, attributes map[string]any) error { + fileInfo, errStat := file.Stat() + if errStat != nil { + return fmt.Errorf("resolve file stat: %w", errStat) + } + fileStat := fileInfo.Sys().(*syscall.Stat_t) + + if r.IncludeFileOwnerName { + fileOwner, errFileUser := user.LookupId(fmt.Sprint(fileStat.Uid)) + if errFileUser != nil { + return fmt.Errorf("resolve file owner name: %w", errFileUser) + } + attributes[LogFileOwnerName] = fileOwner.Username + } + if r.IncludeFileOwnerGroupName { + fileGroup, errFileGroup := user.LookupGroupId(fmt.Sprint(fileStat.Gid)) + if errFileGroup != nil { + return fmt.Errorf("resolve file group name: %w", errFileGroup) + } + attributes[LogFileOwnerGroupName] = fileGroup.Name + } + return nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/attrs/owner_windows.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/attrs/owner_windows.go new file mode 100644 index 000000000..25d5e15fe --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/attrs/owner_windows.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build windows + +package attrs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/attrs" + +import ( + "fmt" + "os" +) + +func (r *Resolver) addOwnerInfo(file *os.File, attributes map[string]any) error { + return fmt.Errorf("owner info not implemented for windows") +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/config.go index 1c59dfeed..157266ac1 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/config.go @@ -7,8 +7,10 @@ import ( "bufio" "errors" "fmt" + "runtime" "time" + 
"go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/featuregate" "go.uber.org/zap" "golang.org/x/text/encoding" @@ -16,11 +18,11 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/decode" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/attrs" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/emit" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/fileset" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/fingerprint" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/scanner" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" @@ -88,11 +90,11 @@ type HeaderConfig struct { } // Deprecated [v0.97.0] Use Build and WithSplitFunc option instead -func (c Config) BuildWithSplitFunc(logger *zap.SugaredLogger, emit emit.Callback, splitFunc bufio.SplitFunc) (*Manager, error) { - return c.Build(logger, emit, WithSplitFunc(splitFunc)) +func (c Config) BuildWithSplitFunc(set component.TelemetrySettings, emit emit.Callback, splitFunc bufio.SplitFunc) (*Manager, error) { + return c.Build(set, emit, WithSplitFunc(splitFunc)) } -func (c Config) Build(logger *zap.SugaredLogger, emit emit.Callback, opts ...Option) (*Manager, error) { +func (c Config) Build(set component.TelemetrySettings, emit emit.Callback, opts ...Option) (*Manager, error) { if err := c.validate(); err != nil { return nil, err } @@ -135,7 +137,7 @@ func (c Config) Build(logger *zap.SugaredLogger, emit emit.Callback, opts ...Opt var hCfg *header.Config if c.Header != nil { - hCfg, err = header.NewConfig(c.Header.Pattern, c.Header.MetadataOperators, enc) + hCfg, err = header.NewConfig(set, c.Header.Pattern, c.Header.MetadataOperators, enc) if err != nil { return nil, fmt.Errorf("failed to build header config: %w", err) } @@ -146,8 +148,9 @@ func (c Config) Build(logger *zap.SugaredLogger, emit emit.Callback, opts ...Opt return nil, err } + set.Logger = set.Logger.With(zap.String("component", "fileconsumer")) readerFactory := reader.Factory{ - SugaredLogger: logger.With("component", "fileconsumer"), + TelemetrySettings: set, FromBeginning: startAtBeginning, FingerprintSize: int(c.FingerprintSize), InitialBufferSize: scanner.DefaultBufferSize, @@ -161,20 +164,22 @@ func (c Config) Build(logger *zap.SugaredLogger, emit emit.Callback, opts ...Opt HeaderConfig: hCfg, DeleteAtEOF: c.DeleteAfterRead, } - knownFiles := make([]*fileset.Fileset[*reader.Metadata], 3) - for i := 0; i < len(knownFiles); i++ { - knownFiles[i] = fileset.New[*reader.Metadata](c.MaxConcurrentFiles / 2) + + var t tracker.Tracker + if o.noTracking { + t = tracker.NewNoStateTracker(set, 
c.MaxConcurrentFiles/2)
+	} else {
+		t = tracker.NewFileTracker(set, c.MaxConcurrentFiles/2)
+	}
+
 	return &Manager{
-		SugaredLogger:     logger.With("component", "fileconsumer"),
-		readerFactory:     readerFactory,
-		fileMatcher:       fileMatcher,
-		pollInterval:      c.PollInterval,
-		maxBatchFiles:     c.MaxConcurrentFiles / 2,
-		maxBatches:        c.MaxBatches,
-		currentPollFiles:  fileset.New[*reader.Reader](c.MaxConcurrentFiles / 2),
-		previousPollFiles: fileset.New[*reader.Reader](c.MaxConcurrentFiles / 2),
-		knownFiles:        knownFiles,
+		set:           set,
+		readerFactory: readerFactory,
+		fileMatcher:   fileMatcher,
+		pollInterval:  c.PollInterval,
+		maxBatchFiles: c.MaxConcurrentFiles / 2,
+		maxBatches:    c.MaxBatches,
+		tracker:       t,
 	}, nil
 }

@@ -220,16 +225,22 @@ func (c Config) validate() error {
 		if c.StartAt == "end" {
 			return fmt.Errorf("'header' cannot be specified with 'start_at: end'")
 		}
-		if _, err := header.NewConfig(c.Header.Pattern, c.Header.MetadataOperators, enc); err != nil {
-			return fmt.Errorf("invalid config for 'header': %w", err)
+		set := component.TelemetrySettings{Logger: zap.NewNop()}
+		if _, errConfig := header.NewConfig(set, c.Header.Pattern, c.Header.MetadataOperators, enc); errConfig != nil {
+			return fmt.Errorf("invalid config for 'header': %w", errConfig)
 		}
 	}

+	if runtime.GOOS == "windows" && (c.Resolver.IncludeFileOwnerName || c.Resolver.IncludeFileOwnerGroupName) {
+		return fmt.Errorf("'include_file_owner_name' and 'include_file_owner_group_name' are not supported on windows")
+	}
+
 	return nil
 }

 type options struct {
-	splitFunc bufio.SplitFunc
+	splitFunc  bufio.SplitFunc
+	noTracking bool
 }

 type Option func(*options)

@@ -240,3 +251,11 @@ func WithSplitFunc(f bufio.SplitFunc) Option {
 		o.splitFunc = f
 	}
 }
+
+// WithNoTracking forces the readerFactory to not keep track of files in memory. When used, the reader will
+// read from the beginning of each file every time it is polled.
+func WithNoTracking() Option {
+	return func(o *options) {
+		o.noTracking = true
+	}
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/design.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/design.md
index 9fa2b6958..34d7fee70 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/design.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/design.md
@@ -9,30 +9,44 @@ The effective search space (`include - exclude`) is referred to colloquially as

 # Fingerprints

-Files are identified and tracked using fingerprints. A fingerprint is the first `N` bytes of the file, with the default for `N` being `1000`.
+Files are identified and tracked using fingerprints. A fingerprint is the first `N` bytes of the file,
+with the default for `N` being `1000`.

 ### Fingerprint Growth

-When a file is smaller than `N` bytes, the fingerprint is the entire contents of the file. A fingerprint that is less than `N` bytes will be compared to other fingerprints using a prefix check. As the file grows, its fingerprint will be updated, until it reaches the full size of `N`.
+When a file is smaller than `N` bytes, the fingerprint is the entire contents of the file. A fingerprint that is
+less than `N` bytes will be compared to other fingerprints using a prefix check.
As the file grows, its fingerprint +will be updated, until it reaches the full size of `N`. ### Deduplication of Files Multiple files with the same fingerprint are handled as if they are the same file. -Most commonly, this circumstance is observed during file rotation that depends on a copy/truncate strategy. After copying the file, but before truncating the original, two files with the same content briefly exist. If the `file_input` operator happens to observe both files at the same time, it will detect a duplicate fingerprint and ingest only one of the files. +Most commonly, this circumstance is observed during file rotation that depends on a copy/truncate strategy. +After copying the file, but before truncating the original, two files with the same content briefly exist. +If the `file_input` operator happens to observe both files at the same time, it will detect a duplicate fingerprint +and ingest only one of the files. -If logs are replicated to multiple files, or if log files are copied manually, it is not understood to be of any significant value to ingest the duplicates. As a result, fingerprints are not designed to differentiate between these files, and double ingestion of the same content is not supported automatically. +If logs are replicated to multiple files, or if log files are copied manually, it is not understood to be of any +significant value to ingest the duplicates. As a result, fingerprints are not designed to differentiate between these +files, and double ingestion of the same content is not supported automatically. -In some rare circumstances, a logger may print a very verbose preamble to each log file. When this occurs, fingerprinting may fail to differentiate files from one another. This can be overcome by customizing the size of the fingerprint using the `fingerprint_size` setting. +In some rare circumstances, a logger may print a very verbose preamble to each log file. When this occurs, +fingerprinting may fail to differentiate files from one another. This can be overcome by customizing the size +of the fingerprint using the `fingerprint_size` setting. ### Log line ordering across file rotations -In general, we offer no guarantees as to the relative ordering of log lines originating from different files. For the common use case of files being rotated outside the watched pattern, we make a best-effort attempt at reading the rotated file to the end before reading the new file. This guarantees log line ordering across rotations, assuming the following conditions are met: +In general, we offer no guarantees as to the relative ordering of log lines originating from different files. +For the common use case of files being rotated outside the watched pattern, we make a best-effort attempt at reading +the rotated file to the end before reading the new file. This guarantees log line ordering across rotations, +assuming the following conditions are met: * rotated file names don't match the watched pattern * rotated files aren't written to after the rotation -A minor reordering of log lines often doesn't matter, but it can when using the recombine operator later in the pipeline, for example. +A minor reordering of log lines often doesn't matter, but it can when using the recombine operator later in the +pipeline, for example. # Readers @@ -51,24 +65,34 @@ A Reader contains the following: As implied by the name, Readers are responsible for consuming data as it is written to a file. -Before a Reader begins consuming, it will seek the file's last known offset. 
If no offset is known for the file, then the Reader will seek either the beginning or end of the file, according to the `start_at` setting. It will then begin reading from there. +Before a Reader begins consuming, it will seek the file's last known offset. If no offset is known for the file, then +the Reader will seek either the beginning or end of the file, according to the `start_at` setting. It will then begin +reading from there. -While a file is shorter than the length of a fingerprint, its Reader will continuously append to the fingerprint, as it consumes newly written data. +While a file is shorter than the length of a fingerprint, its Reader will continuously append to the fingerprint, +as it consumes newly written data. -A Reader consumes a file using a `bufio.Scanner`, with the Scanner's buffer size defined by the `max_log_size` setting, and the Scanner's split func defined by the `multiline` setting. +A Reader consumes a file using a `bufio.Scanner`, with the Scanner's buffer size defined by the `max_log_size` setting, +and the Scanner's split func defined by the `multiline` setting. -As each log is read from the file, it is decoded according to the `encoding` function, and then emitted from the operator. +As each log is read from the file, it is decoded according to the `encoding` function, and then emitted from +the operator. The Reader's offset is updated accordingly whenever a log is emitted. ### Persistence -Readers are always instantiated with an open file handle. Eventually, the file handle is closed, but the Reader is not immediately discarded. Rather, it is maintained for a fixed number of "poll cycles" (see Polling section below) as a reference to the file's metadata, which may be useful for detecting files that have been moved or copied, and for recalling metadata such as the file's previous path. +Readers are always instantiated with an open file handle. Eventually, the file handle is closed, but the Reader is +not immediately discarded. Rather, it is maintained for a fixed number of "poll cycles" (see Polling section below) +as a reference to the file's metadata, which may be useful for detecting files that have been moved or copied, +and for recalling metadata such as the file's previous path. Readers are maintained for a fixed period of time, and then discarded. -When the `file_input` operator makes use of a persistence mechanism to save and recall its state, it is simply Setting and Getting a slice of Readers. These Readers contain all the information necessary to pick up exactly where the operator left off. +When the `file_input` operator makes use of a persistence mechanism to save and recall its state, it is simply +Setting and Getting a slice of Readers. These Readers contain all the information necessary to pick up exactly +where the operator left off. # Polling @@ -80,18 +104,26 @@ Each poll cycle runs through a series of steps which are presented below. ### Detailed Poll Cycle 1. Dequeuing - 1. If any matches are queued from the previous cycle, an appropriate number are dequeued, and processed the same as would a newly matched set of files. + 1. If any matches are queued from the previous cycle, an appropriate number are dequeued, and processed the same + as would a newly matched set of files. 2. Aging - 1. If no queued files were left over from the previous cycle, then all previously matched files have been consumed, and we are ready to query the file system again. Prior to doing so, we will increment the "generation" of all historical Readers. 
Eventually, these Readers will be discarded based on their age. Until that point, they may be useful references. + 1. If no queued files were left over from the previous cycle, then all previously matched files have been consumed, + and we are ready to query the file system again. Prior to doing so, we will increment the "generation" of all + historical Readers. Eventually, these Readers will be discarded based on their age. Until that point, they may + be useful references. 3. Matching 1. The file system is searched for files with a path that matches the `include` setting. 2. Files that match the `exclude` setting are discarded. - 3. As a special case, on the first poll cycle, a warning is printed if no files are matched. Execution continues regardless. + 3. As a special case, on the first poll cycle, a warning is printed if no files are matched. + Execution continues regardless. 4. Queueing - 1. If the number of matched files is less than or equal to the maximum degree of concurrency, as defined by the `max_concurrent_files` setting, then no queueing occurs. + 1. If the number of matched files is less than or equal to the maximum degree of concurrency, as defined + by the `max_concurrent_files` setting, then no queueing occurs. 2. Else, queueing occurs, which means the following: - - Matched files are split into two sets, such that the first is small enough to respect `max_concurrent_files`, and the second contains the remaining files (called the queue). - - The current poll interval will begin processing the first set of files, just as if they were the only ones found during the matching phase. + - Matched files are split into two sets, such that the first is small enough to respect `max_concurrent_files`, + and the second contains the remaining files (called the queue). + - The current poll interval will begin processing the first set of files, just as if they were the + only ones found during the matching phase. - Subsequent poll cycles will pull matches off of the queue, until the queue is empty. - The `max_concurrent_files` setting is respected at all times. 5. Opening @@ -104,27 +136,38 @@ Each poll cycle runs through a series of steps which are presented below. 1. The first `N` bytes of each file are read. (See fingerprinting section above.) 7. Exclusion 1. Empty files are closed immediately and discarded. (There is nothing to read.) - 2. Fingerprints found in this batch are cross referenced against each other to detect duplicates. Duplicate files are closed immediately and discarded. - - In the vast majority of cases, this occurs during file rotation that uses the copy/truncate method. (See fingerprinting section above.) + 2. Fingerprints found in this batch are cross referenced against each other to detect duplicates. Duplicate + files are closed immediately and discarded. + - In the vast majority of cases, this occurs during file rotation that uses the copy/truncate method. + (See fingerprinting section above.) 8. Reader Creation 1. Each file handle is wrapped into a `Reader` along with some metadata. (See Reader section above) - - During the creation of a `Reader`, the file's fingerprint is cross referenced with previously known fingerprints. - - If a file's fingerprint matches one that has recently been seen, then metadata is copied over from the previous iteration of the Reader. Most importantly, the offset is accurately maintained in this way. 
- - If a file's fingerprint does not match any recently seen files, then its offset is initialized according to the `start_at` setting. + - During the creation of a `Reader`, the file's fingerprint is cross referenced with previously + known fingerprints. + - If a file's fingerprint matches one that has recently been seen, then metadata is copied over from the + previous iteration of the Reader. Most importantly, the offset is accurately maintained in this way. + - If a file's fingerprint does not match any recently seen files, then its offset is initialized + according to the `start_at` setting. 9. Detection of Lost Files - 1. Fingerprints are used to cross reference the matched files from this poll cycle against the matched file from the previous poll cycle. Files that were matched in the previous cycle but were not matched in this cycle are referred to as "lost files". + 1. Fingerprints are used to cross reference the matched files from this poll cycle against the matched + file from the previous poll cycle. Files that were matched in the previous cycle but were not matched + in this cycle are referred to as "lost files". 2. File become "lost" for several reasons: - The file may have been deleted, typically due to rotation limits or ttl-based pruning. - The file may have been rotated to another location. - If the file was moved, the open file handle from the previous poll cycle may be useful. 10. Consumption - 1. Lost files are consumed. In some cases, such as deletion, this operation will fail. However, if a file was moved, we may be able to consume the remainder of its content. + 1. Lost files are consumed. In some cases, such as deletion, this operation will fail. However, if a file + was moved, we may be able to consume the remainder of its content. - We do not expect to match this file again, so the best we can do is finish consuming their current contents. - We can reasonably expect in most cases that these files are no longer being written to. 2. Matched files (from this poll cycle) are consumed. - - These file handles will be left open until the next poll cycle, when they will be used to detect and potentially consume lost files. - - Typically, we can expect to find most of these files again. However, these files are consumed greedily in case we do not see them again. - 3. All open files are consumed concurrently. This includes both the lost files from the previous cycle, and the matched files from this cycle. + - These file handles will be left open until the next poll cycle, when they will be used to detect and + potentially consume lost files. + - Typically, we can expect to find most of these files again. However, these files are consumed greedily + in case we do not see them again. + 3. All open files are consumed concurrently. This includes both the lost files from the previous cycle, and the + matched files from this cycle. 11. Closing 1. All files from the previous poll cycle are closed. 12. Archiving @@ -132,7 +175,8 @@ Each poll cycle runs through a series of steps which are presented below. 2. The same Readers are also retained as a separate slice, for easy access in the next poll cycle. 13. Pruning 1. The historical record is purged of Readers that have existed for 3 generations. - - This number is somewhat arbitrary, and should probably be made configurable. However, its exact purpose is quite obscure. + - This number is somewhat arbitrary, and should probably be made configurable. However, its exact purpose + is quite obscure. 14. Persistence 1. 
    1. The historical record of readers is synced to whatever persistence mechanism was provided to the operator.
 15. End Poll Cycle
@@ -152,11 +196,15 @@ Whenever the operator starts, it:
 When the operator shuts down, the following occurs:
 - If a poll cycle is not currently underway, the operator simply closes any open files.
-- Otherwise, the current poll cycle is signaled to stop immediately, which in turn signals all Readers to stop immediately.
-  - If a Reader is idle or in between log entries, it will return immediately. Otherwise it will return after consuming one final log entry.
-  - Once all Readers have stopped, the remainder of the poll cycle completes as usual, which includes the steps labeled `Closing`, `Archiving`, `Pruning`, and `Persistence`.
+- Otherwise, the current poll cycle is signaled to stop immediately, which in turn signals all Readers to
+  stop immediately.
+  - If a Reader is idle or in between log entries, it will return immediately. Otherwise it will return
+    after consuming one final log entry.
+  - Once all Readers have stopped, the remainder of the poll cycle completes as usual, which includes
+    the steps labeled `Closing`, `Archiving`, `Pruning`, and `Persistence`.

-The net effect of the shut down routine is that all files are checkpointed in a normal manner (i.e. not in the middle of a log entry), and all checkpoints are persisted.
+The net effect of the shutdown routine is that all files are checkpointed in a normal manner
+(i.e. not in the middle of a log entry), and all checkpoints are persisted.

 # Known Limitations

@@ -164,8 +212,10 @@ The net effect of the shut down routine is that all files are checkpointed in a
 ### Potential data loss when maximum concurrency must be enforced

 The operator may lose a small percentage of logs, if both of the following conditions are true:
-1. The number of files being matched exceeds the maximum degree of concurrency allowed by the `max_concurrent_files` setting.
-2. Files are being "lost". That is, file rotation is moving files out of the operator's matching pattern, such that subsequent polling cycles will not find these files.
+1. The number of files being matched exceeds the maximum degree of concurrency allowed
+   by the `max_concurrent_files` setting.
+2. Files are being "lost". That is, file rotation is moving files out of the operator's matching pattern,
+   such that subsequent polling cycles will not find these files.

 When both of these conditions occur, it is impossible for the operator to both:
 1. Respect the specified concurrency limitation.
@@ -175,16 +225,21 @@ When this scenario occurs, a design tradeoff must be made. The choice is between
 1. Ensure that `max_concurrent_files` is always respected.
 2. Risk losing a small percentage of log entries.

-The current design chooses to guarantee the maximum degree of concurrency because failure to do so risks harming the operator's host system. While the loss of logs is not ideal, it is less likely to harm the operator's host system, and is therefore considered the more acceptable of the two options.
+The current design chooses to guarantee the maximum degree of concurrency because failure to do so risks
+harming the operator's host system. While the loss of logs is not ideal, it is less likely to harm
+the operator's host system, and is therefore considered the more acceptable of the two options.
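
To make this tradeoff concrete, the following is an editorial sketch (not part of this patch) of the batching loop that `Manager.poll` uses to enforce the cap, as shown later in this diff: matched paths are consumed in fixed-size batches, and anything beyond the cap waits for a later batch, which is the window in which rotated-away files can be missed. `maxBatchFiles` and the `consume` callback are illustrative stand-ins.

```go
// Minimal sketch of batch-limited consumption, assuming a consume callback
// that opens, reads, and closes up to one batch of files.
package main

import "fmt"

func poll(matches []string, maxBatchFiles int, consume func([]string)) {
	// Consume full batches first; paths beyond the cap wait for a later
	// batch, which is where files rotated out of the pattern can be lost.
	for len(matches) > maxBatchFiles {
		consume(matches[:maxBatchFiles])
		matches = matches[maxBatchFiles:]
	}
	consume(matches)
}

func main() {
	paths := []string{"a.log", "b.log", "c.log", "d.log", "e.log"}
	poll(paths, 2, func(batch []string) { fmt.Println("consuming", batch) })
}
```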
### Potential data loss when file rotation via copy/truncate rotates backup files out of operator's matching pattern The operator may lose a small percentage of logs, if both of the following conditions are true: 1. Files are being rotated using the copy/truncate strategy. -2. Files are being "lost". That is, file rotation is moving files out of the operator's matching pattern, such that subsequent polling cycles will not find these files. +2. Files are being "lost". That is, file rotation is moving files out of the operator's matching pattern, + such that subsequent polling cycles will not find these files. -When both of these conditions occur, it is possible that a file is written to (then copied elsewhere) and then truncated before the operator has a chance to consume the new data. +When both of these conditions occur, it is possible that a file is written to (then copied elsewhere) and +then truncated before the operator has a chance to consume the new data. ### Potential failure to consume files when file rotation via move/create is used on Windows -On Windows, rotation of files using the Move/Create strategy may cause errors and loss of data, because Golang does not currently support the Windows mechanism for `FILE_SHARE_DELETE`. +On Windows, rotation of files using the Move/Create strategy may cause errors and loss of data, +because Golang does not currently support the Windows mechanism for `FILE_SHARE_DELETE`. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file.go index 50fb27ec4..4ebc1564d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file.go @@ -10,32 +10,33 @@ import ( "sync" "time" + "go.opentelemetry.io/collector/component" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/checkpoint" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/fileset" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/fingerprint" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" ) type Manager struct { + // Deprecated [v0.101.0] *zap.SugaredLogger + + set component.TelemetrySettings wg sync.WaitGroup cancel context.CancelFunc readerFactory reader.Factory fileMatcher *matcher.Matcher + tracker tracker.Tracker pollInterval time.Duration persister operator.Persister maxBatches int maxBatchFiles int - - currentPollFiles *fileset.Fileset[*reader.Reader] - previousPollFiles *fileset.Fileset[*reader.Reader] - knownFiles []*fileset.Fileset[*reader.Metadata] } func (m *Manager) Start(persister operator.Persister) error { @@ -43,7 +44,7 @@ func (m *Manager) Start(persister operator.Persister) error { m.cancel = cancel if _, 
err := m.fileMatcher.MatchFiles(); err != nil { - m.Warnf("finding files: %v", err) + m.set.Logger.Warn("finding files", zap.Error(err)) } if persister != nil { @@ -53,9 +54,9 @@ func (m *Manager) Start(persister operator.Persister) error { return fmt.Errorf("read known files from database: %w", err) } if len(offsets) > 0 { - m.Infow("Resuming from previously known offset(s). 'start_at' setting is not applicable.") + m.set.Logger.Info("Resuming from previously known offset(s). 'start_at' setting is not applicable.") m.readerFactory.FromBeginning = true - m.knownFiles[0].Add(offsets...) + m.tracker.LoadMetadata(offsets) } } @@ -65,20 +66,6 @@ func (m *Manager) Start(persister operator.Persister) error { return nil } -func (m *Manager) closePreviousFiles() { - // m.previousPollFiles -> m.knownFiles[0] - for r, _ := m.previousPollFiles.Pop(); r != nil; r, _ = m.previousPollFiles.Pop() { - m.knownFiles[0].Add(r.Close()) - } -} - -func (m *Manager) rotateFilesets() { - // shift the filesets at end of every consume() call - // m.knownFiles[0] -> m.knownFiles[1] -> m.knownFiles[2] - copy(m.knownFiles[1:], m.knownFiles) - m.knownFiles[0] = fileset.New[*reader.Metadata](m.maxBatchFiles / 2) -} - // Stop will stop the file monitoring process func (m *Manager) Stop() error { if m.cancel != nil { @@ -86,14 +73,10 @@ func (m *Manager) Stop() error { m.cancel = nil } m.wg.Wait() - m.closePreviousFiles() + m.tracker.ClosePreviousFiles() if m.persister != nil { - checkpoints := make([]*reader.Metadata, 0, m.totalReaders()) - for _, knownFiles := range m.knownFiles { - checkpoints = append(checkpoints, knownFiles.Get()...) - } - if err := checkpoint.Save(context.Background(), m.persister, checkpoints); err != nil { - m.Errorw("save offsets", zap.Error(err)) + if err := checkpoint.Save(context.Background(), m.persister, m.tracker.GetMetadata()); err != nil { + m.set.Logger.Error("save offsets", zap.Error(err)) } } return nil @@ -128,9 +111,9 @@ func (m *Manager) poll(ctx context.Context) { // Get the list of paths on disk matches, err := m.fileMatcher.MatchFiles() if err != nil { - m.Warnf("finding files: %v", err) + m.set.Logger.Debug("finding files", zap.Error(err)) } - m.Debugw("matched files", zap.Strings("paths", matches)) + m.set.Logger.Debug("matched files", zap.Strings("paths", matches)) for len(matches) > m.maxBatchFiles { m.consume(ctx, matches[:m.maxBatchFiles]) @@ -150,31 +133,26 @@ func (m *Manager) poll(ctx context.Context) { // Any new files that appear should be consumed entirely m.readerFactory.FromBeginning = true if m.persister != nil { - allCheckpoints := make([]*reader.Metadata, 0, m.totalReaders()) - for _, knownFiles := range m.knownFiles { - allCheckpoints = append(allCheckpoints, knownFiles.Get()...) 
- } - - for _, r := range m.previousPollFiles.Get() { - allCheckpoints = append(allCheckpoints, r.Metadata) - } - if err := checkpoint.Save(context.Background(), m.persister, allCheckpoints); err != nil { - m.Errorw("save offsets", zap.Error(err)) + metadata := m.tracker.GetMetadata() + if metadata != nil { + if err := checkpoint.Save(context.Background(), m.persister, metadata); err != nil { + m.set.Logger.Error("save offsets", zap.Error(err)) + } } } // rotate at end of every poll() - m.rotateFilesets() + m.tracker.EndPoll() } func (m *Manager) consume(ctx context.Context, paths []string) { - m.Debug("Consuming files", zap.Strings("paths", paths)) + m.set.Logger.Debug("Consuming files", zap.Strings("paths", paths)) m.makeReaders(paths) m.readLostFiles(ctx) // read new readers to end var wg sync.WaitGroup - for _, r := range m.currentPollFiles.Get() { + for _, r := range m.tracker.CurrentPollFiles() { wg.Add(1) go func(r *reader.Reader) { defer wg.Done() @@ -183,20 +161,20 @@ func (m *Manager) consume(ctx context.Context, paths []string) { } wg.Wait() - m.postConsume() + m.tracker.EndConsume() } func (m *Manager) makeFingerprint(path string) (*fingerprint.Fingerprint, *os.File) { file, err := os.Open(path) // #nosec - operator must read in files defined by user if err != nil { - m.Errorw("Failed to open file", zap.Error(err)) + m.set.Logger.Error("Failed to open file", zap.Error(err)) return nil, nil } fp, err := m.readerFactory.NewFingerprint(file) if err != nil { if err = file.Close(); err != nil { - m.Debugw("problem closing file", zap.Error(err)) + m.set.Logger.Debug("problem closing file", zap.Error(err)) } return nil, nil } @@ -204,7 +182,7 @@ func (m *Manager) makeFingerprint(path string) (*fingerprint.Fingerprint, *os.Fi if fp.Len() == 0 { // Empty file, don't read it until we can compare its fingerprint if err = file.Close(); err != nil { - m.Debugw("problem closing file", zap.Error(err)) + m.set.Logger.Debug("problem closing file", zap.Error(err)) } return nil, nil } @@ -215,7 +193,6 @@ func (m *Manager) makeFingerprint(path string) (*fingerprint.Fingerprint, *os.Fi // discarding any that have a duplicate fingerprint to other files that have already // been read this polling interval func (m *Manager) makeReaders(paths []string) { - m.currentPollFiles = fileset.New[*reader.Reader](m.maxBatchFiles / 2) for _, path := range paths { fp, file := m.makeFingerprint(path) if fp == nil { @@ -224,47 +201,37 @@ func (m *Manager) makeReaders(paths []string) { // Exclude duplicate paths with the same content. This can happen when files are // being rotated with copy/truncate strategy. (After copy, prior to truncate.) 
- if r := m.currentPollFiles.Match(fp, fileset.Equal); r != nil { + if r := m.tracker.GetCurrentFile(fp); r != nil { // re-add the reader as Match() removes duplicates - m.currentPollFiles.Add(r) + m.tracker.Add(r) if err := file.Close(); err != nil { - m.Debugw("problem closing file", zap.Error(err)) + m.set.Logger.Debug("problem closing file", zap.Error(err)) } continue } r, err := m.newReader(file, fp) if err != nil { - m.Errorw("Failed to create reader", zap.Error(err)) + m.set.Logger.Error("Failed to create reader", zap.Error(err)) continue } - m.currentPollFiles.Add(r) + m.tracker.Add(r) } } func (m *Manager) newReader(file *os.File, fp *fingerprint.Fingerprint) (*reader.Reader, error) { // Check previous poll cycle for match - if oldReader := m.previousPollFiles.Match(fp, fileset.StartsWith); oldReader != nil { + if oldReader := m.tracker.GetOpenFile(fp); oldReader != nil { return m.readerFactory.NewReaderFromMetadata(file, oldReader.Close()) } - // Iterate backwards to match newest first - for i := 0; i < len(m.knownFiles); i++ { - if oldMetadata := m.knownFiles[i].Match(fp, fileset.StartsWith); oldMetadata != nil { - return m.readerFactory.NewReaderFromMetadata(file, oldMetadata) - } + // Check for closed files for match + if oldMetadata := m.tracker.GetClosedFile(fp); oldMetadata != nil { + return m.readerFactory.NewReaderFromMetadata(file, oldMetadata) } // If we don't match any previously known files, create a new reader from scratch - m.Infow("Started watching file", "path", file.Name()) + m.set.Logger.Info("Started watching file", zap.String("path", file.Name())) return m.readerFactory.NewReader(file, fp) } - -func (m *Manager) totalReaders() int { - total := m.previousPollFiles.Len() - for i := 0; i < len(m.knownFiles); i++ { - total += m.knownFiles[i].Len() - } - return total -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file_other.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file_other.go index daef0f312..816a0cee5 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file_other.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file_other.go @@ -21,10 +21,11 @@ func (m *Manager) readLostFiles(ctx context.Context) { // since we are deleting the files before they can become lost. return } - lostReaders := make([]*reader.Reader, 0, m.previousPollFiles.Len()) + previousPollFiles := m.tracker.PreviousPollFiles() + lostReaders := make([]*reader.Reader, 0, len(previousPollFiles)) OUTER: - for _, oldReader := range m.previousPollFiles.Get() { - for _, newReader := range m.currentPollFiles.Get() { + for _, oldReader := range previousPollFiles { + for _, newReader := range m.tracker.CurrentPollFiles() { if newReader.Fingerprint.StartsWith(oldReader.Fingerprint) { continue OUTER } @@ -54,12 +55,3 @@ OUTER: } lostWG.Wait() } - -// On non-windows platforms, we keep files open between poll cycles so that we can detect -// and read "lost" files, which have been moved out of the matching pattern. 
-func (m *Manager) postConsume() { - m.closePreviousFiles() - - // m.currentPollFiles -> m.previousPollFiles - m.previousPollFiles = m.currentPollFiles -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file_windows.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file_windows.go index 8eed8a967..d92679197 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file_windows.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file_windows.go @@ -12,10 +12,3 @@ import ( // Noop on windows because we close files immediately after reading. func (m *Manager) readLostFiles(ctx context.Context) { } - -// On windows, we close files immediately after reading because they cannot be moved while open. -func (m *Manager) postConsume() { - // m.currentPollFiles -> m.previousPollFiles - m.previousPollFiles = m.currentPollFiles - m.closePreviousFiles() -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/config.go index 618d1b1fa..768fc1611 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/config.go @@ -10,7 +10,7 @@ import ( "fmt" "regexp" - "go.uber.org/zap" + "go.opentelemetry.io/collector/component" "golang.org/x/text/encoding" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" @@ -25,7 +25,7 @@ type Config struct { metadataOperators []operator.Config } -func NewConfig(matchRegex string, metadataOperators []operator.Config, enc encoding.Encoding) (*Config, error) { +func NewConfig(set component.TelemetrySettings, matchRegex string, metadataOperators []operator.Config, enc encoding.Encoding) (*Config, error) { var err error if len(metadataOperators) == 0 { return nil, errors.New("at least one operator must be specified for `metadata_operators`") @@ -35,11 +35,10 @@ func NewConfig(matchRegex string, metadataOperators []operator.Config, enc encod return nil, errors.New("encoding must be specified") } - nopLogger := zap.NewNop().Sugar() p, err := pipeline.Config{ Operators: metadataOperators, - DefaultOutput: newPipelineOutput(nopLogger), - }.Build(nopLogger) + DefaultOutput: newPipelineOutput(set), + }.Build(set) if err != nil { return nil, fmt.Errorf("failed to build pipelines: %w", err) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/output.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/output.go index 7caf7f9a8..7e76cc8bb 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/output.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/output.go @@ -7,7 +7,7 @@ import ( "context" "fmt" - "go.uber.org/zap" + 
"go.opentelemetry.io/collector/component" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" @@ -22,16 +22,11 @@ type pipelineOutput struct { } // newPipelineOutput creates a new receiver output -func newPipelineOutput(logger *zap.SugaredLogger) *pipelineOutput { +func newPipelineOutput(set component.TelemetrySettings) *pipelineOutput { + op, _ := helper.NewOutputConfig(pipelineOutputType, pipelineOutputType).Build(set) return &pipelineOutput{ - OutputOperator: helper.OutputOperator{ - BasicOperator: helper.BasicOperator{ - OperatorID: pipelineOutputType, - OperatorType: pipelineOutputType, - SugaredLogger: logger, - }, - }, - logChan: make(chan *entry.Entry, 1), + OutputOperator: op, + logChan: make(chan *entry.Entry, 1), } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/reader.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/reader.go index f55b2323c..27a81c338 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/reader.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/reader.go @@ -8,8 +8,8 @@ import ( "errors" "fmt" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/extension/experimental/storage" - "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/pipeline" @@ -18,20 +18,20 @@ import ( var ErrEndOfHeader = errors.New("end of header") type Reader struct { - logger *zap.SugaredLogger + set component.TelemetrySettings cfg Config pipeline pipeline.Pipeline output *pipelineOutput } -func NewReader(logger *zap.SugaredLogger, cfg Config) (*Reader, error) { - r := &Reader{logger: logger, cfg: cfg} +func NewReader(set component.TelemetrySettings, cfg Config) (*Reader, error) { + r := &Reader{set: set, cfg: cfg} var err error - r.output = newPipelineOutput(logger) + r.output = newPipelineOutput(set) r.pipeline, err = pipeline.Config{ Operators: cfg.metadataOperators, DefaultOutput: r.output, - }.Build(logger) + }.Build(set) if err != nil { return nil, fmt.Errorf("failed to build pipeline: %w", err) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/factory.go index dab54565a..bd3508fa6 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/factory.go @@ -10,6 +10,7 @@ import ( "os" "time" + "go.opentelemetry.io/collector/component" "go.uber.org/zap" "golang.org/x/text/encoding" @@ -28,7 +29,7 @@ const ( ) type Factory struct { - *zap.SugaredLogger + component.TelemetrySettings HeaderConfig *header.Config FromBeginning bool FingerprintSize int @@ -48,7 +49,7 @@ func (f *Factory) 
NewFingerprint(file *os.File) (*fingerprint.Fingerprint, error } func (f *Factory) NewReader(file *os.File, fp *fingerprint.Fingerprint) (*Reader, error) { - attributes, err := f.Attributes.Resolve(file.Name()) + attributes, err := f.Attributes.Resolve(file) if err != nil { return nil, err } @@ -60,9 +61,10 @@ func (f *Factory) NewReader(file *os.File, fp *fingerprint.Fingerprint) (*Reader } func (f *Factory) NewReaderFromMetadata(file *os.File, m *Metadata) (r *Reader, err error) { + r = &Reader{ Metadata: m, - logger: f.SugaredLogger.With("path", file.Name()), + set: f.TelemetrySettings, file: file, fileName: file.Name(), fingerprintSize: f.FingerprintSize, @@ -72,6 +74,7 @@ func (f *Factory) NewReaderFromMetadata(file *os.File, m *Metadata) (r *Reader, lineSplitFunc: f.SplitFunc, deleteAtEOF: f.DeleteAtEOF, } + r.set.Logger = r.set.Logger.With(zap.String("path", r.fileName)) if r.Fingerprint.Len() > r.fingerprintSize { // User has reconfigured fingerprint_size @@ -100,7 +103,7 @@ func (f *Factory) NewReaderFromMetadata(file *os.File, m *Metadata) (r *Reader, r.splitFunc = r.lineSplitFunc r.processFunc = r.emitFunc } else { - r.headerReader, err = header.NewReader(f.SugaredLogger, *f.HeaderConfig) + r.headerReader, err = header.NewReader(f.TelemetrySettings, *f.HeaderConfig) if err != nil { return nil, err } @@ -108,7 +111,7 @@ func (f *Factory) NewReaderFromMetadata(file *os.File, m *Metadata) (r *Reader, r.processFunc = r.headerReader.Process } - attributes, err := f.Attributes.Resolve(file.Name()) + attributes, err := f.Attributes.Resolve(file) if err != nil { return nil, err } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/reader.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/reader.go index ae5f4a5e2..9fa22b0e2 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/reader.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/reader.go @@ -9,6 +9,7 @@ import ( "errors" "os" + "go.opentelemetry.io/collector/component" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/decode" @@ -30,7 +31,7 @@ type Metadata struct { // Reader manages a single file type Reader struct { *Metadata - logger *zap.SugaredLogger + set component.TelemetrySettings fileName string file *os.File fingerprintSize int @@ -49,7 +50,7 @@ type Reader struct { // ReadToEnd will read until the end of the file func (r *Reader) ReadToEnd(ctx context.Context) { if _, err := r.file.Seek(r.Offset, 0); err != nil { - r.logger.Errorw("Failed to seek", zap.Error(err)) + r.set.Logger.Error("Failed to seek", zap.Error(err)) return } @@ -72,7 +73,7 @@ func (r *Reader) ReadToEnd(ctx context.Context) { ok := s.Scan() if !ok { if err := s.Error(); err != nil { - r.logger.Errorw("Failed during scan", zap.Error(err)) + r.set.Logger.Error("Failed during scan", zap.Error(err)) } else if r.deleteAtEOF { r.delete() } @@ -81,7 +82,7 @@ func (r *Reader) ReadToEnd(ctx context.Context) { token, err := r.decoder.Decode(s.Bytes()) if err != nil { - r.logger.Errorw("decode: %w", zap.Error(err)) + r.set.Logger.Error("decode: %w", zap.Error(err)) r.Offset = s.Pos() // move past the bad token or we may be stuck continue } @@ -93,14 +94,14 @@ func (r *Reader) 
ReadToEnd(ctx context.Context) { } if !errors.Is(err, header.ErrEndOfHeader) { - r.logger.Errorw("process: %w", zap.Error(err)) + r.set.Logger.Error("process: %w", zap.Error(err)) r.Offset = s.Pos() // move past the bad token or we may be stuck continue } // Clean up the header machinery if err = r.headerReader.Stop(); err != nil { - r.logger.Errorw("Failed to stop header pipeline during finalization", zap.Error(err)) + r.set.Logger.Error("Failed to stop header pipeline during finalization", zap.Error(err)) } r.headerReader = nil r.HeaderFinalized = true @@ -113,7 +114,7 @@ func (r *Reader) ReadToEnd(ctx context.Context) { // Do not use the updated offset from the old scanner, as the most recent token // could be split differently with the new splitter. if _, err = r.file.Seek(r.Offset, 0); err != nil { - r.logger.Errorw("Failed to seek post-header", zap.Error(err)) + r.set.Logger.Error("Failed to seek post-header", zap.Error(err)) return } s = scanner.New(r, r.maxLogSize, scanner.DefaultBufferSize, r.Offset, r.splitFunc) @@ -124,7 +125,7 @@ func (r *Reader) ReadToEnd(ctx context.Context) { func (r *Reader) delete() { r.close() if err := os.Remove(r.fileName); err != nil { - r.logger.Errorf("could not delete %s", r.fileName) + r.set.Logger.Error("could not delete", zap.String("filename", r.fileName)) } } @@ -139,14 +140,14 @@ func (r *Reader) Close() *Metadata { func (r *Reader) close() { if r.file != nil { if err := r.file.Close(); err != nil { - r.logger.Debugw("Problem closing reader", zap.Error(err)) + r.set.Logger.Debug("Problem closing reader", zap.Error(err)) } r.file = nil } if r.headerReader != nil { if err := r.headerReader.Stop(); err != nil { - r.logger.Errorw("Failed to stop header pipeline", zap.Error(err)) + r.set.Logger.Error("Failed to stop header pipeline", zap.Error(err)) } } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker.go new file mode 100644 index 000000000..6364af28c --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker.go @@ -0,0 +1,178 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracker // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker" + +import ( + "go.opentelemetry.io/collector/component" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/fileset" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/fingerprint" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader" +) + +// Interface for tracking files that are being consumed. 
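+//
+// Two implementations follow: fileTracker, which remembers fingerprints and
+// offsets across poll cycles, and noStateTracker, which closes everything at
+// the end of each consume call so the next poll starts with fresh readers.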
+type Tracker interface {
+	Add(reader *reader.Reader)
+	GetCurrentFile(fp *fingerprint.Fingerprint) *reader.Reader
+	GetOpenFile(fp *fingerprint.Fingerprint) *reader.Reader
+	GetClosedFile(fp *fingerprint.Fingerprint) *reader.Metadata
+	GetMetadata() []*reader.Metadata
+	LoadMetadata(metadata []*reader.Metadata)
+	CurrentPollFiles() []*reader.Reader
+	PreviousPollFiles() []*reader.Reader
+	ClosePreviousFiles()
+	EndPoll()
+	EndConsume()
+	TotalReaders() int
+}
+
+// fileTracker tracks known offsets for files that are being consumed by the manager.
+type fileTracker struct {
+	set component.TelemetrySettings
+
+	maxBatchFiles int
+
+	currentPollFiles  *fileset.Fileset[*reader.Reader]
+	previousPollFiles *fileset.Fileset[*reader.Reader]
+	knownFiles        []*fileset.Fileset[*reader.Metadata]
+}
+
+func NewFileTracker(set component.TelemetrySettings, maxBatchFiles int) Tracker {
+	knownFiles := make([]*fileset.Fileset[*reader.Metadata], 3)
+	for i := 0; i < len(knownFiles); i++ {
+		knownFiles[i] = fileset.New[*reader.Metadata](maxBatchFiles)
+	}
+	set.Logger = set.Logger.With(zap.String("tracker", "fileTracker"))
+	return &fileTracker{
+		set:               set,
+		maxBatchFiles:     maxBatchFiles,
+		currentPollFiles:  fileset.New[*reader.Reader](maxBatchFiles),
+		previousPollFiles: fileset.New[*reader.Reader](maxBatchFiles),
+		knownFiles:        knownFiles,
+	}
+}
+
+func (t *fileTracker) Add(reader *reader.Reader) {
+	// add a new reader for tracking
+	t.currentPollFiles.Add(reader)
+}
+
+func (t *fileTracker) GetCurrentFile(fp *fingerprint.Fingerprint) *reader.Reader {
+	return t.currentPollFiles.Match(fp, fileset.Equal)
+}
+
+func (t *fileTracker) GetOpenFile(fp *fingerprint.Fingerprint) *reader.Reader {
+	return t.previousPollFiles.Match(fp, fileset.StartsWith)
+}
+
+func (t *fileTracker) GetClosedFile(fp *fingerprint.Fingerprint) *reader.Metadata {
+	for i := 0; i < len(t.knownFiles); i++ {
+		if oldMetadata := t.knownFiles[i].Match(fp, fileset.StartsWith); oldMetadata != nil {
+			return oldMetadata
+		}
+	}
+	return nil
+}
+
+func (t *fileTracker) GetMetadata() []*reader.Metadata {
+	// return all known metadata for checkpointing
+	allCheckpoints := make([]*reader.Metadata, 0, t.TotalReaders())
+	for _, knownFiles := range t.knownFiles {
+		allCheckpoints = append(allCheckpoints, knownFiles.Get()...)
+	}
+
+	for _, r := range t.previousPollFiles.Get() {
+		allCheckpoints = append(allCheckpoints, r.Metadata)
+	}
+	return allCheckpoints
+}
+
+func (t *fileTracker) LoadMetadata(metadata []*reader.Metadata) {
+	t.knownFiles[0].Add(metadata...)
+}
+
+func (t *fileTracker) CurrentPollFiles() []*reader.Reader {
+	return t.currentPollFiles.Get()
+}
+
+func (t *fileTracker) PreviousPollFiles() []*reader.Reader {
+	return t.previousPollFiles.Get()
+}
+
+func (t *fileTracker) ClosePreviousFiles() {
+	// t.previousPollFiles -> t.knownFiles[0]
+
+	for r, _ := t.previousPollFiles.Pop(); r != nil; r, _ = t.previousPollFiles.Pop() {
+		t.knownFiles[0].Add(r.Close())
+	}
+}
+
+func (t *fileTracker) EndPoll() {
+	// shift the filesets at end of every poll() call
+	// t.knownFiles[0] -> t.knownFiles[1] -> t.knownFiles[2]
+	copy(t.knownFiles[1:], t.knownFiles)
+	t.knownFiles[0] = fileset.New[*reader.Metadata](t.maxBatchFiles)
+}
+
+func (t *fileTracker) TotalReaders() int {
+	total := t.previousPollFiles.Len()
+	for i := 0; i < len(t.knownFiles); i++ {
+		total += t.knownFiles[i].Len()
+	}
+	return total
+}
+
+// noStateTracker only tracks the current polled files. Once the poll is
+// complete and telemetry is consumed, the tracked files are closed. The next
+// poll will create fresh readers with no previously tracked offsets.
+type noStateTracker struct {
+	set              component.TelemetrySettings
+	maxBatchFiles    int
+	currentPollFiles *fileset.Fileset[*reader.Reader]
+}
+
+func NewNoStateTracker(set component.TelemetrySettings, maxBatchFiles int) Tracker {
+	set.Logger = set.Logger.With(zap.String("tracker", "noStateTracker"))
+	return &noStateTracker{
+		set:              set,
+		maxBatchFiles:    maxBatchFiles,
+		currentPollFiles: fileset.New[*reader.Reader](maxBatchFiles),
+	}
+}
+
+func (t *noStateTracker) Add(reader *reader.Reader) {
+	// add a new reader for tracking
+	t.currentPollFiles.Add(reader)
+}
+
+func (t *noStateTracker) CurrentPollFiles() []*reader.Reader {
+	return t.currentPollFiles.Get()
+}
+
+func (t *noStateTracker) GetCurrentFile(fp *fingerprint.Fingerprint) *reader.Reader {
+	return t.currentPollFiles.Match(fp, fileset.Equal)
+}
+
+func (t *noStateTracker) EndConsume() {
+	for r, _ := t.currentPollFiles.Pop(); r != nil; r, _ = t.currentPollFiles.Pop() {
+		r.Close()
+	}
+}
+
+func (t *noStateTracker) GetOpenFile(_ *fingerprint.Fingerprint) *reader.Reader { return nil }
+
+func (t *noStateTracker) GetClosedFile(_ *fingerprint.Fingerprint) *reader.Metadata { return nil }
+
+func (t *noStateTracker) GetMetadata() []*reader.Metadata { return nil }
+
+func (t *noStateTracker) LoadMetadata(_ []*reader.Metadata) {}
+
+func (t *noStateTracker) PreviousPollFiles() []*reader.Reader { return nil }
+
+func (t *noStateTracker) ClosePreviousFiles() {}
+
+func (t *noStateTracker) EndPoll() {}
+
+func (t *noStateTracker) TotalReaders() int { return 0 }
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker_other.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker_other.go
new file mode 100644
index 000000000..f1df53fe6
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker_other.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !windows
+
+package tracker // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker"
+
+import (
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/fileset"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader"
+)
+
+// On non-windows platforms, we keep files open between poll cycles so that we can detect
+// and read "lost" files, which have been moved out of the matching pattern.
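+//
+// EndConsume closes the previous cycle's readers (archiving their metadata
+// into knownFiles[0] via ClosePreviousFiles) and then promotes
+// currentPollFiles to previousPollFiles, whose handles stay open so the next
+// poll can detect and finish reading moved files.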
+func (t *fileTracker) EndConsume() { + t.ClosePreviousFiles() + + // t.currentPollFiles -> t.previousPollFiles + t.previousPollFiles = t.currentPollFiles + t.currentPollFiles = fileset.New[*reader.Reader](t.maxBatchFiles) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker_windows.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker_windows.go new file mode 100644 index 000000000..75ce2d876 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker_windows.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build windows +// +build windows + +package tracker // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker" + +import ( + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/fileset" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader" +) + +// On windows, we close files immediately after reading because they cannot be moved while open. +func (t *fileTracker) EndConsume() { + // t.currentPollFiles -> t.previousPollFiles + t.previousPollFiles = t.currentPollFiles + t.ClosePreviousFiles() + t.currentPollFiles = fileset.New[*reader.Reader](t.maxBatchFiles) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter/exclude.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter/exclude.go new file mode 100644 index 000000000..8e6e254f5 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter/exclude.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filter // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter" +import ( + "os" + "time" + + "go.uber.org/multierr" +) + +type excludeOlderThanOption struct { + age time.Duration +} + +func (eot excludeOlderThanOption) apply(items []*item) ([]*item, error) { + filteredItems := make([]*item, 0, len(items)) + var errs error + for _, item := range items { + fi, err := os.Stat(item.value) + if err != nil { + errs = multierr.Append(errs, err) + continue + } + + // Keep (include) the file if its age (since last modification) + // is the same or less than the configured age. + fileAge := time.Since(fi.ModTime()) + if fileAge <= eot.age { + filteredItems = append(filteredItems, item) + } + } + + return filteredItems, errs +} + +// ExcludeOlderThan excludes files whose modification time is older than the specified age. 
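+//
+// Files that cannot be stat'ed are dropped from the match set and their
+// errors are aggregated via multierr.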
+func ExcludeOlderThan(age time.Duration) Option { + return excludeOlderThanOption{age: age} +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter/sort.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter/sort.go index 67002f7e3..2ca233bd3 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter/sort.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter/sort.go @@ -132,7 +132,19 @@ func SortTemporal(regexKey string, ascending bool, layout string, location strin ) } -type mtimeSortOption struct{} +type TopNOption int + +//nolint:unparam +func (t TopNOption) apply(items []*item) ([]*item, error) { + if len(items) <= int(t) { + return items, nil + } + return items[:t], nil +} + +type mtimeSortOption struct { + ascending bool +} type mtimeItem struct { mtime time.Time @@ -158,10 +170,20 @@ func (m mtimeSortOption) apply(items []*item) ([]*item, error) { }) } - sort.SliceStable(mtimeItems, func(i, j int) bool { - // This checks if item i > j, in order to reverse the sort (most recently modified file is first in the list) - return mtimeItems[i].mtime.After(mtimeItems[j].mtime) - }) + var lessFunc func(i, j int) bool + if m.ascending { + lessFunc = func(i, j int) bool { + // This checks if item i < j + return mtimeItems[i].mtime.Before(mtimeItems[j].mtime) + } + } else { + lessFunc = func(i, j int) bool { + // This checks if item i > j, in order to reverse the sort (most recently modified file is first in the list) + return mtimeItems[i].mtime.After(mtimeItems[j].mtime) + } + } + + sort.SliceStable(mtimeItems, lessFunc) filteredValues := make([]*item, 0, len(items)) for _, mtimeItem := range mtimeItems { @@ -171,6 +193,8 @@ func (m mtimeSortOption) apply(items []*item) ([]*item, error) { return filteredValues, errs } -func SortMtime() Option { - return mtimeSortOption{} +func SortMtime(ascending bool) Option { + return mtimeSortOption{ + ascending: ascending, + } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/matcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/matcher.go index a1fc7109a..948f18852 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/matcher.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/matcher.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "regexp" + "time" "go.opentelemetry.io/collector/featuregate" @@ -33,8 +34,12 @@ var mtimeSortTypeFeatureGate = featuregate.GlobalRegistry().MustRegister( ) type Criteria struct { - Include []string `mapstructure:"include,omitempty"` - Exclude []string `mapstructure:"exclude,omitempty"` + Include []string `mapstructure:"include,omitempty"` + Exclude []string `mapstructure:"exclude,omitempty"` + + // ExcludeOlderThan allows excluding files whose modification time is older + // than the specified age. 
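+	// A zero value disables the filter.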
+ ExcludeOlderThan time.Duration `mapstructure:"exclude_older_than"` OrderingCriteria OrderingCriteria `mapstructure:"ordering_criteria,omitempty"` } @@ -66,11 +71,17 @@ func New(c Criteria) (*Matcher, error) { return nil, fmt.Errorf("exclude: %w", err) } + m := &Matcher{ + include: c.Include, + exclude: c.Exclude, + } + + if c.ExcludeOlderThan != 0 { + m.filterOpts = append(m.filterOpts, filter.ExcludeOlderThan(c.ExcludeOlderThan)) + } + if len(c.OrderingCriteria.SortBy) == 0 { - return &Matcher{ - include: c.Include, - exclude: c.Exclude, - }, nil + return m, nil } if c.OrderingCriteria.TopN < 0 { @@ -92,9 +103,10 @@ func New(c Criteria) (*Matcher, error) { if err != nil { return nil, fmt.Errorf("compile regex: %w", err) } + + m.regex = regex } - var filterOpts []filter.Option for _, sc := range c.OrderingCriteria.SortBy { switch sc.SortType { case sortTypeNumeric: @@ -102,36 +114,32 @@ func New(c Criteria) (*Matcher, error) { if err != nil { return nil, fmt.Errorf("numeric sort: %w", err) } - filterOpts = append(filterOpts, f) + m.filterOpts = append(m.filterOpts, f) case sortTypeAlphabetical: f, err := filter.SortAlphabetical(sc.RegexKey, sc.Ascending) if err != nil { return nil, fmt.Errorf("alphabetical sort: %w", err) } - filterOpts = append(filterOpts, f) + m.filterOpts = append(m.filterOpts, f) case sortTypeTimestamp: f, err := filter.SortTemporal(sc.RegexKey, sc.Ascending, sc.Layout, sc.Location) if err != nil { return nil, fmt.Errorf("timestamp sort: %w", err) } - filterOpts = append(filterOpts, f) + m.filterOpts = append(m.filterOpts, f) case sortTypeMtime: if !mtimeSortTypeFeatureGate.IsEnabled() { return nil, fmt.Errorf("the %q feature gate must be enabled to use %q sort type", mtimeSortTypeFeatureGate.ID(), sortTypeMtime) } - filterOpts = append(filterOpts, filter.SortMtime()) + m.filterOpts = append(m.filterOpts, filter.SortMtime(sc.Ascending)) default: return nil, fmt.Errorf("'sort_type' must be specified") } } - return &Matcher{ - include: c.Include, - exclude: c.Exclude, - regex: regex, - topN: c.OrderingCriteria.TopN, - filterOpts: filterOpts, - }, nil + m.filterOpts = append(m.filterOpts, filter.TopNOption(c.OrderingCriteria.TopN)) + + return m, nil } // orderingCriteriaNeedsRegex returns true if any of the sort options require a regex to be set. 
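
As a usage note for the new criterion, here is a hedged editorial sketch based only on the `Criteria` fields and the `New`/`MatchFiles` signatures visible in this diff; the paths and the 24h cutoff are illustrative.

```go
// Match current application logs but skip files not modified in the last 24h.
package main

import (
	"fmt"
	"time"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher"
)

func main() {
	m, err := matcher.New(matcher.Criteria{
		Include:          []string{"/var/log/app/*.log"},
		Exclude:          []string{"/var/log/app/debug.log"},
		ExcludeOlderThan: 24 * time.Hour, // zero disables the filter
	})
	if err != nil {
		panic(err)
	}
	paths, _ := m.MatchFiles()
	fmt.Println(paths)
}
```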
@@ -149,7 +157,6 @@ type Matcher struct { include []string exclude []string regex *regexp.Regexp - topN int filterOpts []filter.Option } @@ -171,10 +178,5 @@ func (m Matcher) MatchFiles() ([]string, error) { if len(result) == 0 { return result, errors.Join(err, errs) } - - if len(result) <= m.topN { - return result, errors.Join(err, errs) - } - - return result[:m.topN], errors.Join(err, errs) + return result, errs } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/flush/flush.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/flush/flush.go index 0b239b4ae..afa03fe92 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/flush/flush.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/flush/flush.go @@ -33,7 +33,6 @@ func (s *State) Func(splitFunc bufio.SplitFunc, period time.Duration) bufio.Spli return func(data []byte, atEOF bool) (int, []byte, error) { advance, token, err := splitFunc(data, atEOF) - // Don't interfere with errors if err != nil { return advance, token, err @@ -52,6 +51,13 @@ func (s *State) Func(splitFunc bufio.SplitFunc, period time.Duration) bufio.Spli return 0, nil, nil } + // We're seeing new data so postpone the next flush + if len(data) > s.LastDataLength { + s.LastDataChange = time.Now() + s.LastDataLength = len(data) + return 0, nil, nil + } + // Flush timed out if time.Since(s.LastDataChange) > period { s.LastDataChange = time.Now() @@ -59,12 +65,6 @@ func (s *State) Func(splitFunc bufio.SplitFunc, period time.Duration) bufio.Spli return len(data), data, nil } - // We're seeing new data so postpone the next flush - if len(data) > s.LastDataLength { - s.LastDataChange = time.Now() - s.LastDataLength = len(data) - } - // Ask for more data return 0, nil, nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/config.go index f9934c6a0..f1f9781ae 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/config.go @@ -7,8 +7,8 @@ import ( "encoding/json" "fmt" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" - "go.uber.org/zap" ) // Config is the configuration of an operator @@ -25,7 +25,7 @@ func NewConfig(b Builder) Config { type Builder interface { ID() string Type() string - Build(*zap.SugaredLogger) (Operator, error) + Build(component.TelemetrySettings) (Operator, error) SetID(string) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/emitter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/emitter.go new file mode 100644 index 000000000..dcff31667 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/emitter.go @@ -0,0 +1,176 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package helper // import 
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" + +import ( + "context" + "sync" + "time" + + "go.opentelemetry.io/collector/component" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" +) + +// LogEmitter is a stanza operator that emits log entries to a channel +type LogEmitter struct { + OutputOperator + logChan chan []*entry.Entry + stopOnce sync.Once + cancel context.CancelFunc + batchMux sync.Mutex + batch []*entry.Entry + wg sync.WaitGroup + maxBatchSize uint + flushInterval time.Duration +} + +var ( + defaultFlushInterval = 100 * time.Millisecond + defaultMaxBatchSize uint = 100 +) + +type EmitterOption interface { + apply(*LogEmitter) +} + +func WithMaxBatchSize(maxBatchSize uint) EmitterOption { + return maxBatchSizeOption{maxBatchSize} +} + +type maxBatchSizeOption struct { + maxBatchSize uint +} + +func (o maxBatchSizeOption) apply(e *LogEmitter) { + e.maxBatchSize = o.maxBatchSize +} + +func WithFlushInterval(flushInterval time.Duration) EmitterOption { + return flushIntervalOption{flushInterval} +} + +type flushIntervalOption struct { + flushInterval time.Duration +} + +func (o flushIntervalOption) apply(e *LogEmitter) { + e.flushInterval = o.flushInterval +} + +// NewLogEmitter creates a new receiver output +func NewLogEmitter(set component.TelemetrySettings, opts ...EmitterOption) *LogEmitter { + op, _ := NewOutputConfig("log_emitter", "log_emitter").Build(set) + e := &LogEmitter{ + OutputOperator: op, + logChan: make(chan []*entry.Entry), + maxBatchSize: defaultMaxBatchSize, + batch: make([]*entry.Entry, 0, defaultMaxBatchSize), + flushInterval: defaultFlushInterval, + cancel: func() {}, + } + for _, opt := range opts { + opt.apply(e) + } + return e +} + +// Start starts the goroutine(s) required for this operator +func (e *LogEmitter) Start(_ operator.Persister) error { + ctx, cancel := context.WithCancel(context.Background()) + e.cancel = cancel + + e.wg.Add(1) + go e.flusher(ctx) + return nil +} + +// Stop will close the log channel and stop running goroutines +func (e *LogEmitter) Stop() error { + e.stopOnce.Do(func() { + e.cancel() + e.wg.Wait() + + close(e.logChan) + }) + + return nil +} + +// OutChannel returns the channel on which entries will be sent to. +func (e *LogEmitter) OutChannel() <-chan []*entry.Entry { + return e.logChan +} + +// OutChannelForWrite returns the channel on which entries can be sent to. +func (e *LogEmitter) OutChannelForWrite() chan []*entry.Entry { + return e.logChan +} + +// Process will emit an entry to the output channel +func (e *LogEmitter) Process(ctx context.Context, ent *entry.Entry) error { + if oldBatch := e.appendEntry(ent); len(oldBatch) > 0 { + e.flush(ctx, oldBatch) + } + + return nil +} + +// appendEntry appends the entry to the current batch. If maxBatchSize is reached, a new batch will be made, and the old batch +// (which should be flushed) will be returned +func (e *LogEmitter) appendEntry(ent *entry.Entry) []*entry.Entry { + e.batchMux.Lock() + defer e.batchMux.Unlock() + + e.batch = append(e.batch, ent) + if uint(len(e.batch)) >= e.maxBatchSize { + var oldBatch []*entry.Entry + oldBatch, e.batch = e.batch, make([]*entry.Entry, 0, e.maxBatchSize) + return oldBatch + } + + return nil +} + +// flusher flushes the current batch every flush interval. 
Intended to be run as a goroutine +func (e *LogEmitter) flusher(ctx context.Context) { + defer e.wg.Done() + + ticker := time.NewTicker(e.flushInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if oldBatch := e.makeNewBatch(); len(oldBatch) > 0 { + e.flush(ctx, oldBatch) + } + case <-ctx.Done(): + return + } + } +} + +// flush flushes the provided batch to the log channel. +func (e *LogEmitter) flush(ctx context.Context, batch []*entry.Entry) { + select { + case e.logChan <- batch: + case <-ctx.Done(): + } +} + +// makeNewBatch replaces the current batch on the log emitter with a new batch, returning the old one +func (e *LogEmitter) makeNewBatch() []*entry.Entry { + e.batchMux.Lock() + defer e.batchMux.Unlock() + + if len(e.batch) == 0 { + return nil + } + + var oldBatch []*entry.Entry + oldBatch, e.batch = e.batch, make([]*entry.Entry, 0, e.maxBatchSize) + return oldBatch +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/input.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/input.go index 214fa3242..b42b1d33c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/input.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/input.go @@ -6,7 +6,7 @@ package helper // import "github.com/open-telemetry/opentelemetry-collector-cont import ( "context" - "go.uber.org/zap" + "go.opentelemetry.io/collector/component" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/errors" @@ -29,8 +29,8 @@ type InputConfig struct { } // Build will build a base producer. -func (c InputConfig) Build(logger *zap.SugaredLogger) (InputOperator, error) { - writerOperator, err := c.WriterConfig.Build(logger) +func (c InputConfig) Build(set component.TelemetrySettings) (InputOperator, error) { + writerOperator, err := c.WriterConfig.Build(set) if err != nil { return InputOperator{}, errors.WithDetails(err, "operator_id", c.ID()) } @@ -84,7 +84,7 @@ func (i *InputOperator) CanProcess() bool { // Process will always return an error if called. 
func (i *InputOperator) Process(_ context.Context, _ *entry.Entry) error { - i.Errorw("Operator received an entry, but can not process") + i.Logger().Error("Operator received an entry, but can not process") return errors.NewError( "Operator can not process logs.", "Ensure that operator is not configured to receive logs from other operators", diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/operator.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/operator.go index 066fae48a..d7ed6f56f 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/operator.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/operator.go @@ -4,6 +4,7 @@ package helper // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" import ( + "go.opentelemetry.io/collector/component" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/errors" @@ -43,7 +44,7 @@ func (c BasicConfig) Type() string { } // Build will build a basic operator. -func (c BasicConfig) Build(logger *zap.SugaredLogger) (BasicOperator, error) { +func (c BasicConfig) Build(set component.TelemetrySettings) (BasicOperator, error) { if c.OperatorType == "" { return BasicOperator{}, errors.NewError( "missing required `type` field.", @@ -52,7 +53,7 @@ func (c BasicConfig) Build(logger *zap.SugaredLogger) (BasicOperator, error) { ) } - if logger == nil { + if set.Logger == nil { return BasicOperator{}, errors.NewError( "operator build context is missing a logger.", "this is an unexpected internal error", @@ -61,10 +62,11 @@ func (c BasicConfig) Build(logger *zap.SugaredLogger) (BasicOperator, error) { ) } + set.Logger = set.Logger.With(zap.String("operator_id", c.ID()), zap.String("operator_type", c.Type())) operator := BasicOperator{ - OperatorID: c.ID(), - OperatorType: c.Type(), - SugaredLogger: logger.With("operator_id", c.ID(), "operator_type", c.Type()), + OperatorID: c.ID(), + OperatorType: c.Type(), + set: set, } return operator, nil @@ -74,7 +76,7 @@ func (c BasicConfig) Build(logger *zap.SugaredLogger) (BasicOperator, error) { type BasicOperator struct { OperatorID string OperatorType string - *zap.SugaredLogger + set component.TelemetrySettings } // ID will return the operator id. @@ -91,8 +93,8 @@ func (p *BasicOperator) Type() string { } // Logger returns the operator's scoped logger. -func (p *BasicOperator) Logger() *zap.SugaredLogger { - return p.SugaredLogger +func (p *BasicOperator) Logger() *zap.Logger { + return p.set.Logger } // Start will start the operator. 
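
The `Build` signature change above propagates through every operator config in this patch: builders now receive `component.TelemetrySettings` instead of `*zap.SugaredLogger` and derive a scoped `*zap.Logger` from it. Here is a minimal editorial sketch of the new contract; `noopConfig` and `noopOperator` are invented for illustration and are not part of the vendored packages.

```go
// Sketch of the TelemetrySettings-based Build contract, mirroring the
// logger scoping performed by BasicConfig.Build in this diff.
package main

import (
	"go.opentelemetry.io/collector/component"
	"go.uber.org/zap"
)

type noopOperator struct {
	id  string
	log *zap.Logger
}

type noopConfig struct {
	OperatorID   string
	OperatorType string
}

func (c noopConfig) Build(set component.TelemetrySettings) (*noopOperator, error) {
	// Scope the logger with the operator's identity, as BasicConfig.Build does.
	logger := set.Logger.With(
		zap.String("operator_id", c.OperatorID),
		zap.String("operator_type", c.OperatorType),
	)
	return &noopOperator{id: c.OperatorID, log: logger}, nil
}

func main() {
	set := component.TelemetrySettings{Logger: zap.NewNop()}
	op, _ := noopConfig{OperatorID: "noop1", OperatorType: "noop"}.Build(set)
	op.log.Info("built operator", zap.String("id", op.id))
}
```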
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/output.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/output.go index 0efca7633..de777a5fe 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/output.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/output.go @@ -4,7 +4,7 @@ package helper // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" import ( - "go.uber.org/zap" + "go.opentelemetry.io/collector/component" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/errors" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" @@ -23,8 +23,8 @@ type OutputConfig struct { } // Build will build an output operator. -func (c OutputConfig) Build(logger *zap.SugaredLogger) (OutputOperator, error) { - basicOperator, err := c.BasicConfig.Build(logger) +func (c OutputConfig) Build(set component.TelemetrySettings) (OutputOperator, error) { + basicOperator, err := c.BasicConfig.Build(set) if err != nil { return OutputOperator{}, err } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/parser.go index 56a188086..84ba71035 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/parser.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/parser.go @@ -7,7 +7,7 @@ import ( "context" "fmt" - "go.uber.org/zap" + "go.opentelemetry.io/collector/component" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/errors" @@ -35,8 +35,8 @@ type ParserConfig struct { } // Build will build a parser operator. 
-func (c ParserConfig) Build(logger *zap.SugaredLogger) (ParserOperator, error) { - transformerOperator, err := c.TransformerConfig.Build(logger) +func (c ParserConfig) Build(set component.TelemetrySettings) (ParserOperator, error) { + transformerOperator, err := c.TransformerConfig.Build(set) if err != nil { return ParserOperator{}, err } @@ -60,7 +60,7 @@ func (c ParserConfig) Build(logger *zap.SugaredLogger) (ParserOperator, error) { } if c.SeverityConfig != nil { - severityParser, err := c.SeverityConfig.Build(logger) + severityParser, err := c.SeverityConfig.Build(set) if err != nil { return ParserOperator{}, err } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/regexp.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/regexp.go new file mode 100644 index 000000000..7306926ce --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/regexp.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package helper // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" + +import ( + "fmt" + "regexp" +) + +func MatchValues(value string, regexp *regexp.Regexp) (map[string]any, error) { + matches := regexp.FindStringSubmatch(value) + if matches == nil { + return nil, fmt.Errorf("regex pattern does not match") + } + + parsedValues := map[string]any{} + for i, subexp := range regexp.SubexpNames() { + if i == 0 { + // Skip whole match + continue + } + if subexp != "" { + parsedValues[subexp] = matches[i] + } + } + return parsedValues, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/severity_builder.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/severity_builder.go index f2d5cd1b6..9c488a2d4 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/severity_builder.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/severity_builder.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "go.uber.org/zap" + "go.opentelemetry.io/collector/component" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" ) @@ -118,7 +118,7 @@ type SeverityConfig struct { } // Build builds a SeverityParser from a SeverityConfig -func (c *SeverityConfig) Build(_ *zap.SugaredLogger) (SeverityParser, error) { +func (c *SeverityConfig) Build(_ component.TelemetrySettings) (SeverityParser, error) { operatorMapping := getBuiltinMapping(c.Preset) for severity, unknown := range c.Mapping { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/time.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/time.go index f9326c73d..bee6adbc2 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/time.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/time.go @@ -31,7 +31,7 @@ const NativeKey = "native" // provided for 
operator development // NewTimeParser creates a new time parser with default values func NewTimeParser() TimeParser { return TimeParser{ - LayoutType: "strptime", + LayoutType: StrptimeKey, } } @@ -47,12 +47,13 @@ type TimeParser struct { // Unmarshal starting from default settings func (t *TimeParser) Unmarshal(component *confmap.Conf) error { - cfg := NewTimeParser() - err := component.Unmarshal(&cfg, confmap.WithIgnoreUnused()) + err := component.Unmarshal(t, confmap.WithIgnoreUnused()) if err != nil { return err } - *t = cfg + if t.LayoutType == "" { + t.LayoutType = StrptimeKey + } return nil } @@ -71,10 +72,6 @@ func (t *TimeParser) Validate() error { return errors.NewError("missing required configuration parameter `layout`", "") } - if t.LayoutType == "" { - t.LayoutType = StrptimeKey - } - switch t.LayoutType { case NativeKey, GotimeKey: // ok case StrptimeKey: diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/transformer.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/transformer.go index bc19174e4..a62eba5ca 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/transformer.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/transformer.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/expr-lang/expr/vm" + "go.opentelemetry.io/collector/component" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" @@ -30,18 +31,18 @@ type TransformerConfig struct { } // Build will build a transformer operator. -func (c TransformerConfig) Build(logger *zap.SugaredLogger) (TransformerOperator, error) { - writerOperator, err := c.WriterConfig.Build(logger) +func (c TransformerConfig) Build(set component.TelemetrySettings) (TransformerOperator, error) { + writerOperator, err := c.WriterConfig.Build(set) if err != nil { return TransformerOperator{}, errors.WithDetails(err, "operator_id", c.ID()) } switch c.OnError { - case SendOnError, DropOnError: + case SendOnError, SendOnErrorQuiet, DropOnError, DropOnErrorQuiet: default: return TransformerOperator{}, errors.NewError( "operator config has an invalid `on_error` field.", - "ensure that the `on_error` field is set to either `send` or `drop`.", + "ensure that the `on_error` field is set to one of `send`, `send_quiet`, `drop`, `drop_quiet`.", "on_error", c.OnError, ) } @@ -95,8 +96,12 @@ func (t *TransformerOperator) ProcessWith(ctx context.Context, entry *entry.Entr // HandleEntryError will handle an entry error using the on_error strategy. func (t *TransformerOperator) HandleEntryError(ctx context.Context, entry *entry.Entry, err error) error { - t.Errorw("Failed to process entry", zap.Any("error", err), zap.Any("action", t.OnError)) - if t.OnError == SendOnError { + if t.OnError == SendOnErrorQuiet || t.OnError == DropOnErrorQuiet { + t.Logger().Debug("Failed to process entry", zap.Any("error", err), zap.Any("action", t.OnError)) + } else { + t.Logger().Error("Failed to process entry", zap.Any("error", err), zap.Any("action", t.OnError)) + } + if t.OnError == SendOnError || t.OnError == SendOnErrorQuiet { t.Write(ctx, entry) } return err @@ -124,5 +129,11 @@ type TransformFunction = func(*entry.Entry) error // SendOnError specifies an on_error mode for sending entries after an error. 
const SendOnError = "send" +// SendOnErrorQuiet specifies an on_error mode for sending entries after an error but without logging at error level. +const SendOnErrorQuiet = "send_quiet" + // DropOnError specifies an on_error mode for dropping entries after an error. const DropOnError = "drop" + +// DropOnErrorQuiet specifies an on_error mode for dropping entries after an error but without logging at error level. +const DropOnErrorQuiet = "drop_quiet" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/writer.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/writer.go index ced6b5533..89666ae88 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/writer.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/writer.go @@ -7,7 +7,7 @@ import ( "context" "fmt" - "go.uber.org/zap" + "go.opentelemetry.io/collector/component" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" @@ -27,8 +27,8 @@ type WriterConfig struct { } // Build will build a writer operator from the config. -func (c WriterConfig) Build(logger *zap.SugaredLogger) (WriterOperator, error) { - basicOperator, err := c.BasicConfig.Build(logger) +func (c WriterConfig) Build(set component.TelemetrySettings) (WriterOperator, error) { + basicOperator, err := c.BasicConfig.Build(set) if err != nil { return WriterOperator{}, err } @@ -94,8 +94,8 @@ func (w *WriterOperator) SetOutputs(operators []operator.Operator) error { } // SetOutputIDs will set the outputs of the operator. -func (w *WriterOperator) SetOutputIDs(opIds []string) { - w.OutputIDs = opIds +func (w *WriterOperator) SetOutputIDs(opIDs []string) { + w.OutputIDs = opIDs } // FindOperator will find an operator matching the supplied id.
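Note: the transformer hunk above also introduces two quiet on_error variants: send_quiet and drop_quiet route entries exactly like send and drop, but HandleEntryError logs the per-entry failure at debug rather than error level. A sketch of opting in, assuming the contrib tree's regex parser package and its Regex field, which are outside this excerpt:

package main

import (
	"go.opentelemetry.io/collector/component"
	"go.uber.org/zap"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/regex"
)

func main() {
	cfg := regex.NewConfig()
	cfg.Regex = `^(?P<sev>\w+): (?P<msg>.*)$`

	// Entries that fail to parse are still sent downstream, but the
	// failure is logged at debug level instead of error level.
	cfg.OnError = helper.SendOnErrorQuiet // or helper.DropOnErrorQuiet

	op, err := cfg.Build(component.TelemetrySettings{Logger: zap.NewNop()})
	if err != nil {
		panic(err)
	}
	_ = op
}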
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/config.go index 0e5a24e70..6c07b777f 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/config.go @@ -4,7 +4,7 @@ package file // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file" import ( - "go.uber.org/zap" + "go.opentelemetry.io/collector/component" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/decode" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer" @@ -38,8 +38,8 @@ type Config struct { } // Build will build a file input operator from the supplied configuration -func (c Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) { - inputOperator, err := c.InputConfig.Build(logger) +func (c Config) Build(set component.TelemetrySettings) (operator.Operator, error) { + inputOperator, err := c.InputConfig.Build(set) if err != nil { return nil, err } @@ -60,7 +60,7 @@ func (c Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) { toBody: toBody, } - input.fileConsumer, err = c.Config.Build(logger, input.emit) + input.fileConsumer, err = c.Config.Build(set, input.emit) if err != nil { return nil, err } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/file.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/input.go similarity index 75% rename from vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/file.go rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/input.go index 4e42725dc..ce20ef18c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/file.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/input.go @@ -7,6 +7,8 @@ import ( "context" "fmt" + "go.uber.org/zap" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" @@ -25,30 +27,30 @@ type Input struct { } // Start will start the file monitoring process -func (f *Input) Start(persister operator.Persister) error { - return f.fileConsumer.Start(persister) +func (i *Input) Start(persister operator.Persister) error { + return i.fileConsumer.Start(persister) } // Stop will stop the file monitoring process -func (f *Input) Stop() error { - return f.fileConsumer.Stop() +func (i *Input) Stop() error { + return i.fileConsumer.Stop() } -func (f *Input) emit(ctx context.Context, token []byte, attrs map[string]any) error { +func (i *Input) emit(ctx context.Context, token []byte, attrs map[string]any) error { if len(token) == 0 { 
return nil } - ent, err := f.NewEntry(f.toBody(token)) + ent, err := i.NewEntry(i.toBody(token)) if err != nil { return fmt.Errorf("create entry: %w", err) } for k, v := range attrs { if err := ent.Set(entry.NewAttributeField(k), v); err != nil { - f.Errorf("set attribute: %w", err) + i.Logger().Error("set attribute", zap.Error(err)) } } - f.Write(ctx, ent) + i.Write(ctx, ent) return nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/config_all.go similarity index 100% rename from vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/config.go rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/config_all.go diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/config_linux.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/config_linux.go new file mode 100644 index 000000000..b8d62175f --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/config_linux.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build linux + +package journald // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald" + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "sort" + "time" + + jsoniter "github.com/json-iterator/go" + "go.opentelemetry.io/collector/component" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" +) + +const waitDuration = 1 * time.Second + +func init() { + operator.Register(operatorType, func() operator.Builder { return NewConfig() }) +} + +// Build will build a journald input operator from the supplied configuration +func (c Config) Build(set component.TelemetrySettings) (operator.Operator, error) { + inputOperator, err := c.InputConfig.Build(set) + if err != nil { + return nil, err + } + + args, err := c.buildArgs() + if err != nil { + return nil, err + } + + return &Input{ + InputOperator: inputOperator, + newCmd: func(ctx context.Context, cursor []byte) cmd { + if cursor != nil { + args = append(args, "--after-cursor", string(cursor)) + } + return exec.CommandContext(ctx, "journalctl", args...) // #nosec - ... 
+ // journalctl is an executable that is required for this operator to function + }, + json: jsoniter.ConfigFastest, + }, nil +} + +func (c Config) buildArgs() ([]string, error) { + args := make([]string, 0, 10) + + // Export logs in UTC time + args = append(args, "--utc") + + // Export logs as JSON + args = append(args, "--output=json") + + // Continue watching logs until cancelled + args = append(args, "--follow") + + switch c.StartAt { + case "end": + case "beginning": + args = append(args, "--no-tail") + default: + return nil, fmt.Errorf("invalid value '%s' for parameter 'start_at'", c.StartAt) + } + + for _, unit := range c.Units { + args = append(args, "--unit", unit) + } + + for _, identifier := range c.Identifiers { + args = append(args, "--identifier", identifier) + } + + args = append(args, "--priority", c.Priority) + + if len(c.Grep) > 0 { + args = append(args, "--grep", c.Grep) + } + + if c.Dmesg { + args = append(args, "--dmesg") + } + + switch { + case c.Directory != nil: + args = append(args, "--directory", *c.Directory) + case len(c.Files) > 0: + for _, file := range c.Files { + args = append(args, "--file", file) + } + } + + if len(c.Matches) > 0 { + matches, err := c.buildMatchesConfig() + if err != nil { + return nil, err + } + args = append(args, matches...) + } + + if c.All { + args = append(args, "--all") + } + + return args, nil +} + +func buildMatchConfig(mc MatchConfig) ([]string, error) { + re := regexp.MustCompile("^[_A-Z]+$") + + // Sort keys to be consistent with every run and to be predictable for tests + sortedKeys := make([]string, 0, len(mc)) + for key := range mc { + if !re.MatchString(key) { + return []string{}, fmt.Errorf("'%s' is not a valid Systemd field name", key) + } + sortedKeys = append(sortedKeys, key) + } + sort.Strings(sortedKeys) + + configs := []string{} + for _, key := range sortedKeys { + configs = append(configs, fmt.Sprintf("%s=%s", key, mc[key])) + } + + return configs, nil +} + +func (c Config) buildMatchesConfig() ([]string, error) { + matches := []string{} + + for i, mc := range c.Matches { + if i > 0 { + matches = append(matches, "+") + } + mcs, err := buildMatchConfig(mc) + if err != nil { + return []string{}, err + } + + matches = append(matches, mcs...) 
+ } + + return matches, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/journald_nonlinux.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/config_nonlinux.go similarity index 76% rename from vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/journald_nonlinux.go rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/config_nonlinux.go index 5f560bbb4..d0bd996bf 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/journald_nonlinux.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/config_nonlinux.go @@ -8,11 +8,11 @@ package journald // import "github.com/open-telemetry/opentelemetry-collector-co import ( "errors" - "go.uber.org/zap" + "go.opentelemetry.io/collector/component" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" ) -func (c Config) Build(_ *zap.SugaredLogger) (operator.Operator, error) { +func (c Config) Build(_ component.TelemetrySettings) (operator.Operator, error) { return nil, errors.New("journald input operator is only supported on linux") } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/journald.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/input.go similarity index 59% rename from vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/journald.go rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/input.go index b8e07846e..f7af8c3ad 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/journald.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/journald/input.go @@ -12,8 +12,6 @@ import ( "fmt" "io" "os/exec" - "regexp" - "sort" "strconv" "strings" "sync" @@ -27,138 +25,6 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" ) -const waitDuration = 1 * time.Second - -func init() { - operator.Register(operatorType, func() operator.Builder { return NewConfig() }) -} - -// Build will build a journald input operator from the supplied configuration -func (c Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) { - inputOperator, err := c.InputConfig.Build(logger) - if err != nil { - return nil, err - } - - args, err := c.buildArgs() - if err != nil { - return nil, err - } - - return &Input{ - InputOperator: inputOperator, - newCmd: func(ctx context.Context, cursor []byte) cmd { - if cursor != nil { - args = append(args, "--after-cursor", string(cursor)) - } - return exec.CommandContext(ctx, "journalctl", args...) // #nosec - ... 
- // journalctl is an executable that is required for this operator to function - }, - json: jsoniter.ConfigFastest, - }, nil -} - -func (c Config) buildArgs() ([]string, error) { - args := make([]string, 0, 10) - - // Export logs in UTC time - args = append(args, "--utc") - - // Export logs as JSON - args = append(args, "--output=json") - - // Continue watching logs until cancelled - args = append(args, "--follow") - - switch c.StartAt { - case "end": - case "beginning": - args = append(args, "--no-tail") - default: - return nil, fmt.Errorf("invalid value '%s' for parameter 'start_at'", c.StartAt) - } - - for _, unit := range c.Units { - args = append(args, "--unit", unit) - } - - for _, identifier := range c.Identifiers { - args = append(args, "--identifier", identifier) - } - - args = append(args, "--priority", c.Priority) - - if len(c.Grep) > 0 { - args = append(args, "--grep", c.Grep) - } - - if c.Dmesg { - args = append(args, "--dmesg") - } - - switch { - case c.Directory != nil: - args = append(args, "--directory", *c.Directory) - case len(c.Files) > 0: - for _, file := range c.Files { - args = append(args, "--file", file) - } - } - - if len(c.Matches) > 0 { - matches, err := c.buildMatchesConfig() - if err != nil { - return nil, err - } - args = append(args, matches...) - } - - if c.All { - args = append(args, "--all") - } - - return args, nil -} - -func buildMatchConfig(mc MatchConfig) ([]string, error) { - re := regexp.MustCompile("^[_A-Z]+$") - - // Sort keys to be consistent with every run and to be predictable for tests - sortedKeys := make([]string, 0, len(mc)) - for key := range mc { - if !re.MatchString(key) { - return []string{}, fmt.Errorf("'%s' is not a valid Systemd field name", key) - } - sortedKeys = append(sortedKeys, key) - } - sort.Strings(sortedKeys) - - configs := []string{} - for _, key := range sortedKeys { - configs = append(configs, fmt.Sprintf("%s=%s", key, mc[key])) - } - - return configs, nil -} - -func (c Config) buildMatchesConfig() ([]string, error) { - matches := []string{} - - for i, mc := range c.Matches { - if i > 0 { - matches = append(matches, "+") - } - mcs, err := buildMatchConfig(mc) - if err != nil { - return []string{}, err - } - - matches = append(matches, mcs...) 
- } - - return matches, nil -} - // Input is an operator that process logs using journald type Input struct { helper.InputOperator @@ -238,7 +104,7 @@ func (operator *Input) Start(persister operator.Persister) error { case failedChan <- f: // log an error in case channel is closed case <-time.After(waitDuration): - operator.Logger().Errorw("journalctl command exited", "error", f.err, "output", f.output) + operator.Logger().Error("journalctl command exited", zap.String("error", f.err), zap.String("output", f.output)) } }() @@ -254,7 +120,7 @@ func (operator *Input) Start(persister operator.Persister) error { line, err := stderrBuf.ReadBytes('\n') if err != nil { if !errors.Is(err, io.EOF) { - operator.Errorw("Received error reading from journalctl stderr", zap.Error(err)) + operator.Logger().Error("Received error reading from journalctl stderr", zap.Error(err)) } stderrChan <- strings.Join(messages, "\n") return @@ -274,18 +140,18 @@ func (operator *Input) Start(persister operator.Persister) error { line, err := stdoutBuf.ReadBytes('\n') if err != nil { if !errors.Is(err, io.EOF) { - operator.Errorw("Received error reading from journalctl stdout", zap.Error(err)) + operator.Logger().Error("Received error reading from journalctl stdout", zap.Error(err)) } return } entry, cursor, err := operator.parseJournalEntry(line) if err != nil { - operator.Warnw("Failed to parse journal entry", zap.Error(err)) + operator.Logger().Warn("Failed to parse journal entry", zap.Error(err)) continue } if err := operator.persister.Set(ctx, lastReadCursorKey, []byte(cursor)); err != nil { - operator.Warnw("Failed to set offset", zap.Error(err)) + operator.Logger().Warn("Failed to set offset", zap.Error(err)) } operator.Write(ctx, entry) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/operator.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/operator.go index 1343dfd29..21d9a176c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/operator.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/operator.go @@ -39,5 +39,5 @@ type Operator interface { // Process will process an entry from an operator. 
Process(context.Context, *entry.Entry) error // Logger returns the operator's logger - Logger() *zap.SugaredLogger + Logger() *zap.Logger } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/config.go new file mode 100644 index 000000000..510b95c49 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/config.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package file // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file" + +import ( + "fmt" + "html/template" + + "go.opentelemetry.io/collector/component" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" +) + +const operatorType = "file_output" + +func init() { + operator.Register(operatorType, func() operator.Builder { return NewConfig("") }) +} + +// NewConfig creates a new file output config with default values +func NewConfig(operatorID string) *Config { + return &Config{ + OutputConfig: helper.NewOutputConfig(operatorID, operatorType), + } +} + +// Config is the configuration of a file output operator. +type Config struct { + helper.OutputConfig `mapstructure:",squash"` + + Path string `mapstructure:"path"` + Format string `mapstructure:"format"` +} + +// Build will build a file output operator. +func (c Config) Build(set component.TelemetrySettings) (operator.Operator, error) { + outputOperator, err := c.OutputConfig.Build(set) + if err != nil { + return nil, err + } + + var tmpl *template.Template + if c.Format != "" { + tmpl, err = template.New("file").Parse(c.Format) + if err != nil { + return nil, err + } + } + + if c.Path == "" { + return nil, fmt.Errorf("must provide a path to output to") + } + + return &Output{ + OutputOperator: outputOperator, + path: c.Path, + tmpl: tmpl, + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/file.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/file.go deleted file mode 100644 index 7bd676446..000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/file.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package file // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file" - -import ( - "context" - "encoding/json" - "fmt" - "html/template" - "os" - "sync" - - "go.uber.org/zap" - - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" -) - -func init() { - operator.Register("file_output", func() operator.Builder { return NewConfig("") }) -} - -// NewConfig creates a new file output config with default values -func
NewConfig(operatorID string) *Config { - return &Config{ - OutputConfig: helper.NewOutputConfig(operatorID, "file_output"), - } -} - -// Config is the configuration of a file output operatorn. -type Config struct { - helper.OutputConfig `mapstructure:",squash"` - - Path string `mapstructure:"path"` - Format string `mapstructure:"format"` -} - -// Build will build a file output operator. -func (c Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) { - outputOperator, err := c.OutputConfig.Build(logger) - if err != nil { - return nil, err - } - - var tmpl *template.Template - if c.Format != "" { - tmpl, err = template.New("file").Parse(c.Format) - if err != nil { - return nil, err - } - } - - if c.Path == "" { - return nil, fmt.Errorf("must provide a path to output to") - } - - return &Output{ - OutputOperator: outputOperator, - path: c.Path, - tmpl: tmpl, - }, nil -} - -// Output is an operator that writes logs to a file. -type Output struct { - helper.OutputOperator - - path string - tmpl *template.Template - encoder *json.Encoder - file *os.File - mux sync.Mutex -} - -// Start will open the output file. -func (fo *Output) Start(_ operator.Persister) error { - var err error - fo.file, err = os.OpenFile(fo.path, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) - if err != nil { - return err - } - - fo.encoder = json.NewEncoder(fo.file) - fo.encoder.SetEscapeHTML(false) - - return nil -} - -// Stop will close the output file. -func (fo *Output) Stop() error { - if fo.file != nil { - if err := fo.file.Close(); err != nil { - fo.Errorf(err.Error()) - } - } - return nil -} - -// Process will write an entry to the output file. -func (fo *Output) Process(_ context.Context, entry *entry.Entry) error { - fo.mux.Lock() - defer fo.mux.Unlock() - - if fo.tmpl != nil { - err := fo.tmpl.Execute(fo.file, entry) - if err != nil { - return err - } - } else { - err := fo.encoder.Encode(entry) - if err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/output.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/output.go new file mode 100644 index 000000000..6f98c3a09 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/output.go @@ -0,0 +1,73 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package file // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file" + +import ( + "context" + "encoding/json" + "html/template" + "os" + "sync" + + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" +) + +// Output is an operator that writes logs to a file. +type Output struct { + helper.OutputOperator + + path string + tmpl *template.Template + encoder *json.Encoder + file *os.File + mux sync.Mutex +} + +// Start will open the output file. 
+func (o *Output) Start(_ operator.Persister) error { + var err error + o.file, err = os.OpenFile(o.path, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) + if err != nil { + return err + } + + o.encoder = json.NewEncoder(o.file) + o.encoder.SetEscapeHTML(false) + + return nil +} + +// Stop will close the output file. +func (o *Output) Stop() error { + if o.file != nil { + if err := o.file.Close(); err != nil { + o.Logger().Error("close", zap.Error(err)) + } + } + return nil +} + +// Process will write an entry to the output file. +func (o *Output) Process(_ context.Context, entry *entry.Entry) error { + o.mux.Lock() + defer o.mux.Unlock() + + if o.tmpl != nil { + err := o.tmpl.Execute(o.file, entry) + if err != nil { + return err + } + } else { + err := o.encoder.Encode(entry) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/stdout.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/config.go similarity index 53% rename from vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/stdout.go rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/config.go index e892c7485..fca71d198 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/stdout.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/config.go @@ -4,30 +4,29 @@ package stdout // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout" import ( - "context" "encoding/json" "io" "os" - "sync" - "go.uber.org/zap" + "go.opentelemetry.io/collector/component" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" ) +const operatorType = "stdout" + // Stdout is a global handle to standard output var Stdout io.Writer = os.Stdout func init() { - operator.Register("stdout", func() operator.Builder { return NewConfig("") }) + operator.Register(operatorType, func() operator.Builder { return NewConfig("") }) } // NewConfig creates a new stdout config with default values func NewConfig(operatorID string) *Config { return &Config{ - OutputConfig: helper.NewOutputConfig(operatorID, "stdout"), + OutputConfig: helper.NewOutputConfig(operatorID, operatorType), } } @@ -37,8 +36,8 @@ type Config struct { } // Build will build a stdout operator. -func (c Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) { - outputOperator, err := c.OutputConfig.Build(logger) +func (c Config) Build(set component.TelemetrySettings) (operator.Operator, error) { + outputOperator, err := c.OutputConfig.Build(set) if err != nil { return nil, err } @@ -48,23 +47,3 @@ func (c Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) { encoder: json.NewEncoder(Stdout), }, nil } - -// Output is an operator that logs entries using stdout. 
-type Output struct { - helper.OutputOperator - encoder *json.Encoder - mux sync.Mutex -} - -// Process will log entries received. -func (o *Output) Process(_ context.Context, entry *entry.Entry) error { - o.mux.Lock() - err := o.encoder.Encode(entry) - if err != nil { - o.mux.Unlock() - o.Errorf("Failed to process entry: %s", err) - return err - } - o.mux.Unlock() - return nil -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/output.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/output.go new file mode 100644 index 000000000..e7940057d --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/output.go @@ -0,0 +1,35 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package stdout // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout" + +import ( + "context" + "encoding/json" + "sync" + + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" +) + +// Output is an operator that logs entries using stdout. +type Output struct { + helper.OutputOperator + encoder *json.Encoder + mux sync.Mutex +} + +// Process will log entries received. +func (o *Output) Process(_ context.Context, entry *entry.Entry) error { + o.mux.Lock() + err := o.encoder.Encode(entry) + if err != nil { + o.mux.Unlock() + o.Logger().Error("Failed to process entry", zap.Error(err)) + return err + } + o.mux.Unlock() + return nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container/config.go new file mode 100644 index 000000000..1310d596c --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container/config.go @@ -0,0 +1,120 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package container // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container" + +import ( + "fmt" + "sync" + + jsoniter "github.com/json-iterator/go" + "go.opentelemetry.io/collector/component" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/errors" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/transformer/recombine" +) + +const operatorType = "container" + +func init() { + operator.Register(operatorType, func() operator.Builder { return NewConfig() }) +} + +// NewConfig creates a new container parser config with default values +func NewConfig() *Config { + return NewConfigWithID(operatorType) +} + +// NewConfigWithID creates a new
container parser config with default values +func NewConfigWithID(operatorID string) *Config { + return &Config{ + ParserConfig: helper.NewParserConfig(operatorID, operatorType), + Format: "", + AddMetadataFromFilePath: true, + } +} + +// Config is the configuration of a Container parser operator. +type Config struct { + helper.ParserConfig `mapstructure:",squash"` + + Format string `mapstructure:"format"` + AddMetadataFromFilePath bool `mapstructure:"add_metadata_from_filepath"` +} + +// Build will build a Container parser operator. +func (c Config) Build(set component.TelemetrySettings) (operator.Operator, error) { + parserOperator, err := c.ParserConfig.Build(set) + if err != nil { + return nil, err + } + + cLogEmitter := helper.NewLogEmitter(set) + recombineParser, err := createRecombine(set, cLogEmitter) + if err != nil { + return nil, fmt.Errorf("failed to create internal recombine config: %w", err) + } + + wg := sync.WaitGroup{} + + if c.Format != "" { + switch c.Format { + case dockerFormat, crioFormat, containerdFormat: + default: + return &Parser{}, errors.NewError( + "operator config has an invalid `format` field.", + "ensure that the `format` field is set to one of `docker`, `crio`, `containerd`.", + "format", c.Format, + ) + } + } + + p := &Parser{ + ParserOperator: parserOperator, + recombineParser: recombineParser, + json: jsoniter.ConfigFastest, + format: c.Format, + addMetadataFromFilepath: c.AddMetadataFromFilePath, + crioLogEmitter: cLogEmitter, + criConsumers: &wg, + } + return p, nil +} + +// createRecombine creates an internal recombine operator which outputs to an async helper.LogEmitter +// the equivalent recombine config: +// +// combine_field: body +// combine_with: "" +// is_last_entry: attributes.logtag == 'F' +// max_log_size: 102400 +// source_identifier: attributes["log.file.path"] +// type: recombine +func createRecombine(set component.TelemetrySettings, cLogEmitter *helper.LogEmitter) (operator.Operator, error) { + recombineParserCfg := createRecombineConfig() + recombineParser, err := recombineParserCfg.Build(set) + if err != nil { + return nil, fmt.Errorf("failed to resolve internal recombine config: %w", err) + } + + // set the LogEmitter as the output of the recombine parser + recombineParser.SetOutputIDs([]string{cLogEmitter.OperatorID}) + if err := recombineParser.SetOutputs([]operator.Operator{cLogEmitter}); err != nil { + return nil, fmt.Errorf("failed to set outputs of internal recombine") + } + + return recombineParser, nil +} + +func createRecombineConfig() *recombine.Config { + recombineParserCfg := recombine.NewConfigWithID(recombineInternalID) + recombineParserCfg.IsLastEntry = "attributes.logtag == 'F'" + recombineParserCfg.CombineField = entry.NewBodyField() + recombineParserCfg.CombineWith = "" + recombineParserCfg.SourceIdentifier = entry.NewAttributeField("log.file.path") + recombineParserCfg.MaxLogSize = 102400 + return recombineParserCfg +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container/parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container/parser.go new file mode 100644 index 000000000..384097c53 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container/parser.go @@ -0,0 +1,358 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package container // import
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container" + +import ( + "context" + "errors" + "fmt" + "regexp" + "strings" + "sync" + "time" + + jsoniter "github.com/json-iterator/go" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" +) + +const dockerFormat = "docker" +const crioFormat = "crio" +const containerdFormat = "containerd" +const recombineInternalID = "recombine_container_internal" +const dockerPattern = "^\\{" +const crioPattern = "^(?P