diff --git a/examples/scratch-env/go.mod b/examples/scratch-env/go.mod index 8794b12ee2..8bf14708d3 100644 --- a/examples/scratch-env/go.mod +++ b/examples/scratch-env/go.mod @@ -56,11 +56,11 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.29.0 // indirect - k8s.io/apiextensions-apiserver v0.29.0 // indirect - k8s.io/apimachinery v0.29.0 // indirect - k8s.io/client-go v0.29.0 // indirect - k8s.io/component-base v0.29.0 // indirect + k8s.io/api v0.29.2 // indirect + k8s.io/apiextensions-apiserver v0.29.2 // indirect + k8s.io/apimachinery v0.29.2 // indirect + k8s.io/client-go v0.29.2 // indirect + k8s.io/component-base v0.29.2 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect diff --git a/examples/scratch-env/go.sum b/examples/scratch-env/go.sum index 56c128fe6c..1d92b20b96 100644 --- a/examples/scratch-env/go.sum +++ b/examples/scratch-env/go.sum @@ -173,16 +173,16 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= -k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= -k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= -k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= -k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= -k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= -k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= -k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= -k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= -k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= +k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= +k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= +k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= diff --git a/go.mod b/go.mod index e838081088..5c69759e98 100644 --- a/go.mod +++ b/go.mod @@ -21,12 +21,12 @@ require ( golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e golang.org/x/sys v0.16.0 gomodules.xyz/jsonpatch/v2 v2.4.0 - k8s.io/api v0.29.0 - 
k8s.io/apiextensions-apiserver v0.29.0 - k8s.io/apimachinery v0.29.0 - k8s.io/apiserver v0.29.0 - k8s.io/client-go v0.29.0 - k8s.io/component-base v0.29.0 + k8s.io/api v0.29.2 + k8s.io/apiextensions-apiserver v0.29.2 + k8s.io/apimachinery v0.29.2 + k8s.io/apiserver v0.29.2 + k8s.io/client-go v0.29.2 + k8s.io/component-base v0.29.2 k8s.io/klog/v2 v2.110.1 k8s.io/utils v0.0.0-20230726121419-3b25d923346b sigs.k8s.io/yaml v1.4.0 diff --git a/go.sum b/go.sum index db51cb8e8f..0535a201a1 100644 --- a/go.sum +++ b/go.sum @@ -226,18 +226,18 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= -k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= -k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= -k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= -k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= -k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= -k8s.io/apiserver v0.29.0 h1:Y1xEMjJkP+BIi0GSEv1BBrf1jLU9UPfAnnGGbbDdp7o= -k8s.io/apiserver v0.29.0/go.mod h1:31n78PsRKPmfpee7/l9NYEv67u6hOL6AfcE761HapDM= -k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= -k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= -k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= -k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= +k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/apiserver v0.29.2 h1:+Z9S0dSNr+CjnVXQePG8TcBWHr3Q7BmAr7NraHvsMiQ= +k8s.io/apiserver v0.29.2/go.mod h1:B0LieKVoyU7ykQvPFm7XSdIHaCHSzCzQWPFa5bqbeMQ= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= +k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= +k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= diff --git a/hack/tools/go.mod b/hack/tools/go.mod index e2c3eac332..ac27210756 100644 --- a/hack/tools/go.mod +++ b/hack/tools/go.mod @@ -50,9 +50,9 @@ require ( gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.29.0 // indirect - k8s.io/apiextensions-apiserver v0.29.0 // indirect - k8s.io/apimachinery v0.29.0 // indirect + k8s.io/api v0.29.2 // indirect + k8s.io/apiextensions-apiserver v0.29.2 // indirect + 
k8s.io/apimachinery v0.29.2 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect diff --git a/hack/tools/go.sum b/hack/tools/go.sum index 151b377222..43388fff23 100644 --- a/hack/tools/go.sum +++ b/hack/tools/go.sum @@ -220,12 +220,12 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= -k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= -k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= -k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= -k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= -k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= +k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index 1cecf88e5e..73ad68fe43 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "net/http" + "sort" "time" "golang.org/x/exp/maps" @@ -418,14 +419,6 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { } } - for namespace, cfg := range opts.DefaultNamespaces { - cfg = defaultConfig(cfg, optionDefaultsToConfig(&opts)) - if namespace == metav1.NamespaceAll { - cfg.FieldSelector = fields.AndSelectors(appendIfNotNil(namespaceAllSelector(maps.Keys(opts.DefaultNamespaces)), cfg.FieldSelector)...) - } - opts.DefaultNamespaces[namespace] = cfg - } - for obj, byObject := range opts.ByObject { isNamespaced, err := apiutil.IsObjectNamespaced(obj, opts.Scheme, opts.Mapper) if err != nil { @@ -435,7 +428,12 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { return opts, fmt.Errorf("type %T is not namespaced, but its ByObject.Namespaces setting is not nil", obj) } - // Default the namespace-level configs first, because they need to use the undefaulted type-level config. + if isNamespaced && byObject.Namespaces == nil { + byObject.Namespaces = maps.Clone(opts.DefaultNamespaces) + } + + // Default the namespace-level configs first, because they need to use the undefaulted type-level config + // to be able to potentially fall through to settings from DefaultNamespaces. for namespace, config := range byObject.Namespaces { // 1. 
Default from the undefaulted type-level config config = defaultConfig(config, byObjectToConfig(byObject)) @@ -461,19 +459,35 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { byObject.Namespaces[namespace] = config } - defaultedConfig := defaultConfig(byObjectToConfig(byObject), optionDefaultsToConfig(&opts)) - byObject.Label = defaultedConfig.LabelSelector - byObject.Field = defaultedConfig.FieldSelector - byObject.Transform = defaultedConfig.Transform - byObject.UnsafeDisableDeepCopy = defaultedConfig.UnsafeDisableDeepCopy - - if isNamespaced && byObject.Namespaces == nil { - byObject.Namespaces = opts.DefaultNamespaces + // Only default ByObject itself if it isn't namespaced or has no namespaces configured, as only + // then any of this will be honored. + if !isNamespaced || len(byObject.Namespaces) == 0 { + defaultedConfig := defaultConfig(byObjectToConfig(byObject), optionDefaultsToConfig(&opts)) + byObject.Label = defaultedConfig.LabelSelector + byObject.Field = defaultedConfig.FieldSelector + byObject.Transform = defaultedConfig.Transform + byObject.UnsafeDisableDeepCopy = defaultedConfig.UnsafeDisableDeepCopy } opts.ByObject[obj] = byObject } + // Default namespaces after byObject has been defaulted, otherwise a namespace without selectors + // will get the `Default` selectors, then get copied to byObject and then not get defaulted from + // byObject, as it already has selectors. + for namespace, cfg := range opts.DefaultNamespaces { + cfg = defaultConfig(cfg, optionDefaultsToConfig(&opts)) + if namespace == metav1.NamespaceAll { + cfg.FieldSelector = fields.AndSelectors( + appendIfNotNil( + namespaceAllSelector(maps.Keys(opts.DefaultNamespaces)), + cfg.FieldSelector, + )..., + ) + } + opts.DefaultNamespaces[namespace] = cfg + } + // Default the resync period to 10 hours if unset if opts.SyncPeriod == nil { opts.SyncPeriod = &defaultSyncPeriod @@ -498,20 +512,21 @@ func defaultConfig(toDefault, defaultFrom Config) Config { return toDefault } -func namespaceAllSelector(namespaces []string) fields.Selector { +func namespaceAllSelector(namespaces []string) []fields.Selector { selectors := make([]fields.Selector, 0, len(namespaces)-1) + sort.Strings(namespaces) for _, namespace := range namespaces { if namespace != metav1.NamespaceAll { selectors = append(selectors, fields.OneTermNotEqualSelector("metadata.namespace", namespace)) } } - return fields.AndSelectors(selectors...)
+ return selectors } -func appendIfNotNil[T comparable](a, b T) []T { +func appendIfNotNil[T comparable](a []T, b T) []T { if b != *new(T) { - return []T{a, b} + return append(a, b) } - return []T{a} + return a } diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go index cfe0856a1e..c83b2ddcf9 100644 --- a/pkg/cache/cache_test.go +++ b/pkg/cache/cache_test.go @@ -1630,6 +1630,26 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca }}, expectedPods: []string{"test-pod-4"}, }), + Entry("namespaces configured, type-level label selector matches everything, overrides global selector", selectorsTestCase{ + options: cache.Options{ + DefaultNamespaces: map[string]cache.Config{testNamespaceOne: {}}, + ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: {Label: labels.Everything()}, + }, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"does-not": "match-anything"}), + }, + expectedPods: []string{"test-pod-1", "test-pod-5"}, + }), + Entry("namespaces configured, global selector is used", selectorsTestCase{ + options: cache.Options{ + DefaultNamespaces: map[string]cache.Config{testNamespaceTwo: {}}, + ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: {}, + }, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"common-label": "common"}), + }, + expectedPods: []string{"test-pod-3"}, + }), Entry("global label selector matches one pod", selectorsTestCase{ options: cache.Options{ DefaultLabelSelector: labels.SelectorFromSet(map[string]string{ diff --git a/pkg/cache/defaulting_test.go b/pkg/cache/defaulting_test.go index bb5e6ca083..3c01bf8404 100644 --- a/pkg/cache/defaulting_test.go +++ b/pkg/cache/defaulting_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" fuzz "github.com/google/gofuzz" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -38,6 +39,22 @@ func TestDefaultOpts(t *testing.T) { t.Parallel() pod := &corev1.Pod{} + + compare := func(a, b any) string { + return cmp.Diff(a, b, + cmpopts.IgnoreUnexported(Options{}), + cmpopts.IgnoreFields(Options{}, "HTTPClient", "Scheme", "Mapper", "SyncPeriod"), + cmp.Comparer(func(a, b fields.Selector) bool { + if (a != nil) != (b != nil) { + return false + } + if a == nil { + return true + } + return a.String() == b.String() + }), + ) + } testCases := []struct { name string in Options @@ -221,6 +238,120 @@ func TestDefaultOpts(t *testing.T) { return cmp.Diff(expected, o.DefaultNamespaces) }, }, + { + name: "ByObject.Namespaces get selector from DefaultNamespaces before DefaultSelector", + in: Options{ + ByObject: map[client.Object]ByObject{ + pod: {Namespaces: map[string]Config{"default": {}}}, + }, + DefaultNamespaces: map[string]Config{"default": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "namespace"})}}, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"from": "default"}), + }, + + verification: func(o Options) string { + expected := Options{ + ByObject: map[client.Object]ByObject{ + pod: {Namespaces: map[string]Config{"default": { + LabelSelector: labels.SelectorFromSet(map[string]string{"from": "namespace"}), + }}}, + }, + DefaultNamespaces: map[string]Config{"default": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "namespace"})}}, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"from": "default"}), + } + + return compare(expected, o) + }, + }, + { + name: "Two namespaces in DefaultNamespaces with custom selection 
logic", + in: Options{DefaultNamespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {}, + }}, + + verification: func(o Options) string { + expected := Options{ + DefaultNamespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {FieldSelector: fields.ParseSelectorOrDie("metadata.namespace!=kube-public,metadata.namespace!=kube-system")}, + }, + } + + return compare(expected, o) + }, + }, + { + name: "Two namespaces in DefaultNamespaces with custom selection logic and namespace default has its own field selector", + in: Options{DefaultNamespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {FieldSelector: fields.ParseSelectorOrDie("spec.nodeName=foo")}, + }}, + + verification: func(o Options) string { + expected := Options{ + DefaultNamespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {FieldSelector: fields.ParseSelectorOrDie( + "metadata.namespace!=kube-public,metadata.namespace!=kube-system,spec.nodeName=foo", + )}, + }, + } + + return compare(expected, o) + }, + }, + { + name: "Two namespaces in ByObject.Namespaces with custom selection logic", + in: Options{ByObject: map[client.Object]ByObject{pod: { + Namespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {}, + }, + }}}, + + verification: func(o Options) string { + expected := Options{ByObject: map[client.Object]ByObject{pod: { + Namespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {FieldSelector: fields.ParseSelectorOrDie( + "metadata.namespace!=kube-public,metadata.namespace!=kube-system", + )}, + }, + }}} + + return compare(expected, o) + }, + }, + { + name: "Two namespaces in ByObject.Namespaces with custom selection logic and namespace default has its own field selector", + in: Options{ByObject: map[client.Object]ByObject{pod: { + Namespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {FieldSelector: fields.ParseSelectorOrDie("spec.nodeName=foo")}, + }, + }}}, + + verification: func(o Options) string { + expected := Options{ByObject: map[client.Object]ByObject{pod: { + Namespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {FieldSelector: fields.ParseSelectorOrDie( + 
"metadata.namespace!=kube-public,metadata.namespace!=kube-system,spec.nodeName=foo", + )}, + }, + }}} + + return compare(expected, o) + }, + }, { name: "DefaultNamespace label selector doesn't get defaulted when set", in: Options{ @@ -235,6 +366,30 @@ func TestDefaultOpts(t *testing.T) { return cmp.Diff(expected, o.DefaultNamespaces) }, }, + { + name: "Defaulted namespaces in ByObject contain ByObject's selector", + in: Options{ + ByObject: map[client.Object]ByObject{ + pod: {Label: labels.SelectorFromSet(map[string]string{"from": "pod"})}, + }, + DefaultNamespaces: map[string]Config{"default": {}}, + }, + verification: func(o Options) string { + expected := Options{ + ByObject: map[client.Object]ByObject{ + pod: { + Label: labels.SelectorFromSet(map[string]string{"from": "pod"}), + Namespaces: map[string]Config{"default": { + LabelSelector: labels.SelectorFromSet(map[string]string{"from": "pod"}), + }}, + }, + }, + + DefaultNamespaces: map[string]Config{"default": {}}, + } + return compare(expected, o) + }, + }, } for _, tc := range testCases { diff --git a/pkg/certwatcher/certwatcher.go b/pkg/certwatcher/certwatcher.go index 2b9b60d8d7..fe15fc0dd7 100644 --- a/pkg/certwatcher/certwatcher.go +++ b/pkg/certwatcher/certwatcher.go @@ -173,14 +173,14 @@ func (cw *CertWatcher) ReadCertificate() error { func (cw *CertWatcher) handleEvent(event fsnotify.Event) { // Only care about events which may modify the contents of the file. - if !(isWrite(event) || isRemove(event) || isCreate(event)) { + if !(isWrite(event) || isRemove(event) || isCreate(event) || isChmod(event)) { return } log.V(1).Info("certificate event", "event", event) - // If the file was removed, re-add the watch. - if isRemove(event) { + // If the file was removed or renamed, re-add the watch to the previous name + if isRemove(event) || isChmod(event) { if err := cw.watcher.Add(event.Name); err != nil { log.Error(err, "error re-watching file") } @@ -202,3 +202,7 @@ func isCreate(event fsnotify.Event) bool { func isRemove(event fsnotify.Event) bool { return event.Op.Has(fsnotify.Remove) } + +func isChmod(event fsnotify.Event) bool { + return event.Op.Has(fsnotify.Chmod) +} diff --git a/pkg/certwatcher/certwatcher_suite_test.go b/pkg/certwatcher/certwatcher_suite_test.go index a44a968c89..2d0f677685 100644 --- a/pkg/certwatcher/certwatcher_suite_test.go +++ b/pkg/certwatcher/certwatcher_suite_test.go @@ -41,7 +41,7 @@ var _ = BeforeSuite(func() { }) var _ = AfterSuite(func() { - for _, file := range []string{certPath, keyPath} { + for _, file := range []string{certPath, keyPath, certPath + ".new", keyPath + ".new", certPath + ".old", keyPath + ".old"} { _ = os.Remove(file) } }) diff --git a/pkg/certwatcher/certwatcher_test.go b/pkg/certwatcher/certwatcher_test.go index 7e12e42679..1fb247581f 100644 --- a/pkg/certwatcher/certwatcher_test.go +++ b/pkg/certwatcher/certwatcher_test.go @@ -121,6 +121,36 @@ var _ = Describe("CertWatcher", func() { Expect(called.Load()).To(BeNumerically(">=", 1)) }) + It("should reload currentCert when changed with rename", func() { + doneCh := startWatcher() + called := atomic.Int64{} + watcher.RegisterCallback(func(crt tls.Certificate) { + called.Add(1) + Expect(crt.Certificate).ToNot(BeEmpty()) + }) + + firstcert, _ := watcher.GetCertificate(nil) + + err := writeCerts(certPath+".new", keyPath+".new", "192.168.0.2") + Expect(err).ToNot(HaveOccurred()) + + Expect(os.Link(certPath, certPath+".old")).To(Succeed()) + Expect(os.Rename(certPath+".new", certPath)).To(Succeed()) + + Expect(os.Link(keyPath, 
keyPath+".old")).To(Succeed()) + Expect(os.Rename(keyPath+".new", keyPath)).To(Succeed()) + + Eventually(func() bool { + secondcert, _ := watcher.GetCertificate(nil) + first := firstcert.PrivateKey.(*rsa.PrivateKey) + return first.Equal(secondcert.PrivateKey) + }).ShouldNot(BeTrue()) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + Expect(called.Load()).To(BeNumerically(">=", 1)) + }) + Context("prometheus metric read_certificate_total", func() { var readCertificateTotalBefore float64 var readCertificateErrorsBefore float64 @@ -159,17 +189,18 @@ var _ = Describe("CertWatcher", func() { Expect(os.Remove(keyPath)).To(Succeed()) + // Note, we are checking two errors here, because os.Remove generates two fsnotify events: Chmod + Remove Eventually(func() error { readCertificateTotalAfter := testutil.ToFloat64(metrics.ReadCertificateTotal) - if readCertificateTotalAfter != readCertificateTotalBefore+1.0 { - return fmt.Errorf("metric read certificate total expected: %v and got: %v", readCertificateTotalBefore+1.0, readCertificateTotalAfter) + if readCertificateTotalAfter != readCertificateTotalBefore+2.0 { + return fmt.Errorf("metric read certificate total expected: %v and got: %v", readCertificateTotalBefore+2.0, readCertificateTotalAfter) } return nil }, "4s").Should(Succeed()) Eventually(func() error { readCertificateErrorsAfter := testutil.ToFloat64(metrics.ReadCertificateErrors) - if readCertificateErrorsAfter != readCertificateErrorsBefore+1.0 { - return fmt.Errorf("metric read certificate errors expected: %v and got: %v", readCertificateErrorsBefore+1.0, readCertificateErrorsAfter) + if readCertificateErrorsAfter != readCertificateErrorsBefore+2.0 { + return fmt.Errorf("metric read certificate errors expected: %v and got: %v", readCertificateErrorsBefore+2.0, readCertificateErrorsAfter) } return nil }, "4s").Should(Succeed()) diff --git a/pkg/client/apiutil/restmapper.go b/pkg/client/apiutil/restmapper.go index 5af02063b8..927be22b4e 100644 --- a/pkg/client/apiutil/restmapper.go +++ b/pkg/client/apiutil/restmapper.go @@ -53,7 +53,7 @@ func NewDynamicRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTM // client for discovery information to do REST mappings. type mapper struct { mapper meta.RESTMapper - client *discovery.DiscoveryClient + client discovery.DiscoveryInterface knownGroups map[string]*restmapper.APIGroupResources apiGroups map[string]*metav1.APIGroup @@ -182,23 +182,28 @@ func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) er Group: metav1.APIGroup{Name: groupName}, VersionedResources: make(map[string][]metav1.APIResource), } - if _, ok := m.knownGroups[groupName]; ok { - groupResources = m.knownGroups[groupName] - } // Update information for group resources about versioned resources. // The number of API calls is equal to the number of versions: /apis//. - groupVersionResources, err := m.fetchGroupVersionResources(groupName, versions...) + // If we encounter a missing API version (NotFound error), we will remove the group from + // the m.apiGroups and m.knownGroups caches. + // If this happens, in the next call the group will be added back to apiGroups + // and only the existing versions will be loaded in knownGroups. + groupVersionResources, err := m.fetchGroupVersionResourcesLocked(groupName, versions...) 
if err != nil { return fmt.Errorf("failed to get API group resources: %w", err) } - for version, resources := range groupVersionResources { - groupResources.VersionedResources[version.Version] = resources.APIResources + + if _, ok := m.knownGroups[groupName]; ok { + groupResources = m.knownGroups[groupName] } // Update information for group resources about the API group by adding new versions. // Ignore the versions that are already registered. - for _, version := range versions { + for groupVersion, resources := range groupVersionResources { + version := groupVersion.Version + + groupResources.VersionedResources[version] = resources.APIResources found := false for _, v := range groupResources.Group.Versions { if v.Version == version { @@ -265,8 +270,9 @@ func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) return m.apiGroups[groupName], nil } -// fetchGroupVersionResources fetches the resources for the specified group and its versions. -func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { +// fetchGroupVersionResourcesLocked fetches the resources for the specified group and its versions. +// This method might modify the cache so it needs to be called under the lock. +func (m *mapper) fetchGroupVersionResourcesLocked(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) failedGroups := make(map[schema.GroupVersion]error) @@ -274,9 +280,20 @@ func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string groupVersion := schema.GroupVersion{Group: groupName, Version: version} apiResourceList, err := m.client.ServerResourcesForGroupVersion(groupVersion.String()) - if err != nil && !apierrors.IsNotFound(err) { + if apierrors.IsNotFound(err) { + // If the version is not found, we remove the group from the cache + // so it gets refreshed on the next call. + if m.isAPIGroupCached(groupVersion) { + delete(m.apiGroups, groupName) + } + if m.isGroupVersionCached(groupVersion) { + delete(m.knownGroups, groupName) + } + continue + } else if err != nil { failedGroups[groupVersion] = err } + if apiResourceList != nil { // even in case of error, some fallback might have been returned. groupVersionResources[groupVersion] = apiResourceList @@ -290,3 +307,29 @@ func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string return groupVersionResources, nil } + +// isGroupVersionCached checks if a version for a group is cached in the known groups cache. +func (m *mapper) isGroupVersionCached(gv schema.GroupVersion) bool { + if cachedGroup, ok := m.knownGroups[gv.Group]; ok { + _, cached := cachedGroup.VersionedResources[gv.Version] + return cached + } + + return false +} + +// isAPIGroupCached checks if a version for a group is cached in the api groups cache. 
+func (m *mapper) isAPIGroupCached(gv schema.GroupVersion) bool { + cachedGroup, ok := m.apiGroups[gv.Group] + if !ok { + return false + } + + for _, version := range cachedGroup.Versions { + if version.Version == gv.Version { + return true + } + } + + return false +} diff --git a/pkg/client/apiutil/restmapper_test.go b/pkg/client/apiutil/restmapper_test.go index e520c24de5..2e34a98735 100644 --- a/pkg/client/apiutil/restmapper_test.go +++ b/pkg/client/apiutil/restmapper_test.go @@ -28,9 +28,11 @@ import ( gomegatypes "github.com/onsi/gomega/types" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/discovery" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -529,23 +531,7 @@ func TestLazyRestMapperProvider(t *testing.T) { g.Expect(err).NotTo(gmg.HaveOccurred()) // Register another CRD in runtime - "riders.crew.example.com". - - crd := &apiextensionsv1.CustomResourceDefinition{} - err = c.Get(context.TODO(), types.NamespacedName{Name: "drivers.crew.example.com"}, crd) - g.Expect(err).NotTo(gmg.HaveOccurred()) - g.Expect(crd.Spec.Names.Kind).To(gmg.Equal("Driver")) - - newCRD := &apiextensionsv1.CustomResourceDefinition{} - crd.DeepCopyInto(newCRD) - newCRD.Name = "riders.crew.example.com" - newCRD.Spec.Names = apiextensionsv1.CustomResourceDefinitionNames{ - Kind: "Rider", - Plural: "riders", - } - newCRD.ResourceVersion = "" - - // Create the new CRD. - g.Expect(c.Create(context.TODO(), newCRD)).To(gmg.Succeed()) + createNewCRD(context.TODO(), g, c, "crew.example.com", "Rider", "riders") // Wait a bit until the CRD is registered. g.Eventually(func() error { @@ -564,6 +550,153 @@ func TestLazyRestMapperProvider(t *testing.T) { g.Expect(err).NotTo(gmg.HaveOccurred()) g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("rider")) }) + + t.Run("LazyRESTMapper should invalidate the group cache if a version is not found", func(t *testing.T) { + g := gmg.NewWithT(t) + ctx := context.Background() + + httpClient, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + crt := newCountingRoundTripper(httpClient.Transport) + httpClient.Transport = crt + + lazyRestMapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + s := scheme.Scheme + err = apiextensionsv1.AddToScheme(s) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + c, err := client.New(restCfg, client.Options{Scheme: s}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + // Register a new CRD in a new group to avoid collisions when deleting versions - "taxis.inventory.example.com". + group := "inventory.example.com" + kind := "Taxi" + plural := "taxis" + crdName := plural + "." + group + // Create a CRD with two versions: v1alpha1 and v1 where both are served and + // v1 is the storage version so we can easily remove v1alpha1 later. + crd := newCRD(ctx, g, c, group, kind, plural) + v1alpha1 := crd.Spec.Versions[0] + v1alpha1.Name = "v1alpha1" + v1alpha1.Storage = false + v1alpha1.Served = true + v1 := crd.Spec.Versions[0] + v1.Name = "v1" + v1.Storage = true + v1.Served = true + crd.Spec.Versions = []apiextensionsv1.CustomResourceDefinitionVersion{v1alpha1, v1} + g.Expect(c.Create(ctx, crd)).To(gmg.Succeed()) + t.Cleanup(func() { + g.Expect(c.Delete(ctx, crd)).To(gmg.Succeed()) + }) + + // Wait until the CRD is registered.
+ discHTTP, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + discClient, err := discovery.NewDiscoveryClientForConfigAndClient(restCfg, discHTTP) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Eventually(func(g gmg.Gomega) { + _, err = discClient.ServerResourcesForGroupVersion(group + "/v1") + g.Expect(err).NotTo(gmg.HaveOccurred()) + }).Should(gmg.Succeed(), "v1 should be available") + + // There are no requests before any call + g.Expect(crt.GetRequestCount()).To(gmg.Equal(0)) + + // Since we don't specify what version we expect, restmapper will fetch them all and search there. + // To fetch a list of available versions + // #1: GET https://host/api + // #2: GET https://host/apis + // Then, for all available versions: + // #3: GET https://host/apis/inventory.example.com/v1alpha1 + // #4: GET https://host/apis/inventory.example.com/v1 + // This should fill the cache for apiGroups and versions. + mapping, err := lazyRestMapper.RESTMapping(schema.GroupKind{Group: group, Kind: kind}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal(kind)) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(4)) + crt.Reset() // We reset the counter to check how many additional requests are made later. + + // At this point v1alpha1 should be cached + _, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: group, Kind: kind}, "v1alpha1") + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(0)) + + // We update the CRD to only have v1 version. + g.Expect(c.Get(ctx, types.NamespacedName{Name: crdName}, crd)).To(gmg.Succeed()) + for _, version := range crd.Spec.Versions { + if version.Name == "v1" { + v1 = version + break + } + } + crd.Spec.Versions = []apiextensionsv1.CustomResourceDefinitionVersion{v1} + g.Expect(c.Update(ctx, crd)).To(gmg.Succeed()) + + // We wait until v1alpha1 is not available anymore. + g.Eventually(func(g gmg.Gomega) { + _, err = discClient.ServerResourcesForGroupVersion(group + "/v1alpha1") + g.Expect(apierrors.IsNotFound(err)).To(gmg.BeTrue(), "v1alpha1 should not be available anymore") + }).Should(gmg.Succeed()) + + // Although v1alpha1 is not available anymore, the cache is not invalidated yet so it should return a mapping. + _, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: group, Kind: kind}, "v1alpha1") + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(0)) + + // We request Limo, which is not in the mapper because it doesn't exist. + // This will trigger a reload of the lazy mapper cache. + // Reloading the cache will read v1alpha1 again and, since it's not available anymore, it should invalidate the cache. + // #1: GET https://host/apis/inventory.example.com/v1alpha1 + // #2: GET https://host/apis/inventory.example.com/v1 + _, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: group, Kind: "Limo"}) + g.Expect(err).To(beNoMatchError()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(2)) + crt.Reset() + + // Now we request v1alpha1 again and it should return an error since the cache was invalidated. + // #1: GET https://host/apis/inventory.example.com/v1alpha1 + _, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: group, Kind: kind}, "v1alpha1") + g.Expect(err).To(beNoMatchError()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(1)) + + // Verify that when requesting the mapping without a version, it doesn't error + // and it returns v1.
+ mapping, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: group, Kind: kind}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.Resource.Version).To(gmg.Equal("v1")) + }) +} + +// createNewCRD creates a new CRD with the given group, kind, and plural and returns it. +func createNewCRD(ctx context.Context, g gmg.Gomega, c client.Client, group, kind, plural string) *apiextensionsv1.CustomResourceDefinition { + newCRD := newCRD(ctx, g, c, group, kind, plural) + g.Expect(c.Create(ctx, newCRD)).To(gmg.Succeed()) + + return newCRD +} + +// newCRD returns a new CRD with the given group, kind, and plural. +func newCRD(ctx context.Context, g gmg.Gomega, c client.Client, group, kind, plural string) *apiextensionsv1.CustomResourceDefinition { + crd := &apiextensionsv1.CustomResourceDefinition{} + err := c.Get(ctx, types.NamespacedName{Name: "drivers.crew.example.com"}, crd) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(crd.Spec.Names.Kind).To(gmg.Equal("Driver")) + + newCRD := &apiextensionsv1.CustomResourceDefinition{} + crd.DeepCopyInto(newCRD) + newCRD.Spec.Group = group + newCRD.Name = plural + "." + group + newCRD.Spec.Names = apiextensionsv1.CustomResourceDefinitionNames{ + Kind: kind, + Plural: plural, + } + newCRD.ResourceVersion = "" + + return newCRD } func beNoMatchError() gomegatypes.GomegaMatcher { @@ -594,6 +727,7 @@ func (e *errorMatcher) Match(actual interface{}) (success bool, err error) { func (e *errorMatcher) FailureMessage(actual interface{}) (message string) { return format.Message(actual, fmt.Sprintf("to be %s error", e.message)) } + func (e *errorMatcher) NegatedFailureMessage(actual interface{}) (message string) { return format.Message(actual, fmt.Sprintf("not to be %s error", e.message)) } diff --git a/pkg/client/apiutil/restmapper_wb_test.go b/pkg/client/apiutil/restmapper_wb_test.go new file mode 100644 index 0000000000..96dbe79e77 --- /dev/null +++ b/pkg/client/apiutil/restmapper_wb_test.go @@ -0,0 +1,203 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apiutil + +import ( + "testing" + + gmg "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/restmapper" +) + +func TestLazyRestMapper_fetchGroupVersionResourcesLocked_CacheInvalidation(t *testing.T) { + tests := []struct { + name string + groupName string + versions []string + cachedAPIGroups, expectedAPIGroups map[string]*metav1.APIGroup + cachedKnownGroups, expectedKnownGroups map[string]*restmapper.APIGroupResources + }{ + { + name: "Not found version for cached groupVersion in apiGroups and knownGroups", + groupName: "group1", + versions: []string{"v1", "v2"}, + cachedAPIGroups: map[string]*metav1.APIGroup{ + "group1": { + Name: "group1", + Versions: []metav1.GroupVersionForDiscovery{ + { + Version: "v1", + }, + }, + }, + }, + cachedKnownGroups: map[string]*restmapper.APIGroupResources{ + "group1": { + VersionedResources: map[string][]metav1.APIResource{ + "v1": { + { + Name: "resource1", + }, + }, + }, + }, + }, + expectedAPIGroups: map[string]*metav1.APIGroup{}, + expectedKnownGroups: map[string]*restmapper.APIGroupResources{}, + }, + { + name: "Not found version for cached groupVersion only in apiGroups", + groupName: "group1", + versions: []string{"v1", "v2"}, + cachedAPIGroups: map[string]*metav1.APIGroup{ + "group1": { + Name: "group1", + Versions: []metav1.GroupVersionForDiscovery{ + { + Version: "v1", + }, + }, + }, + }, + cachedKnownGroups: map[string]*restmapper.APIGroupResources{ + "group1": { + VersionedResources: map[string][]metav1.APIResource{ + "v3": { + { + Name: "resource1", + }, + }, + }, + }, + }, + expectedAPIGroups: map[string]*metav1.APIGroup{}, + expectedKnownGroups: map[string]*restmapper.APIGroupResources{ + "group1": { + VersionedResources: map[string][]metav1.APIResource{ + "v3": { + { + Name: "resource1", + }, + }, + }, + }, + }, + }, + { + name: "Not found version for cached groupVersion only in knownGroups", + groupName: "group1", + versions: []string{"v1", "v2"}, + cachedAPIGroups: map[string]*metav1.APIGroup{ + "group1": { + Name: "group1", + Versions: []metav1.GroupVersionForDiscovery{ + { + Version: "v3", + }, + }, + }, + }, + cachedKnownGroups: map[string]*restmapper.APIGroupResources{ + "group1": { + VersionedResources: map[string][]metav1.APIResource{ + "v2": { + { + Name: "resource1", + }, + }, + }, + }, + }, + expectedAPIGroups: map[string]*metav1.APIGroup{ + "group1": { + Name: "group1", + Versions: []metav1.GroupVersionForDiscovery{ + { + Version: "v3", + }, + }, + }, + }, + expectedKnownGroups: map[string]*restmapper.APIGroupResources{}, + }, + { + name: "Not found version for non cached groupVersion", + groupName: "group1", + versions: []string{"v1", "v2"}, + cachedAPIGroups: map[string]*metav1.APIGroup{ + "group1": { + Name: "group1", + Versions: []metav1.GroupVersionForDiscovery{ + { + Version: "v3", + }, + }, + }, + }, + cachedKnownGroups: map[string]*restmapper.APIGroupResources{ + "group1": { + VersionedResources: map[string][]metav1.APIResource{ + "v3": { + { + Name: "resource1", + }, + }, + }, + }, + }, + expectedAPIGroups: map[string]*metav1.APIGroup{ + "group1": { + Name: "group1", + Versions: []metav1.GroupVersionForDiscovery{ + { + Version: "v3", + }, + }, + }, + }, + expectedKnownGroups: map[string]*restmapper.APIGroupResources{ + "group1": { + VersionedResources: map[string][]metav1.APIResource{ + "v3": { + { + Name: "resource1", + }, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := 
gmg.NewWithT(t) + m := &mapper{ + mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), + client: fake.NewSimpleClientset().Discovery(), + apiGroups: tt.cachedAPIGroups, + knownGroups: tt.cachedKnownGroups, + } + _, err := m.fetchGroupVersionResourcesLocked(tt.groupName, tt.versions...) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(m.apiGroups).To(gmg.BeComparableTo(tt.expectedAPIGroups)) + g.Expect(m.knownGroups).To(gmg.BeComparableTo(tt.expectedKnownGroups)) + }) + } +} diff --git a/pkg/client/fake/client.go b/pkg/client/fake/client.go index 790a1faab1..b90a6ebb8d 100644 --- a/pkg/client/fake/client.go +++ b/pkg/client/fake/client.go @@ -947,7 +947,7 @@ func dryPatch(action testing.PatchActionImpl, tracker testing.ObjectTracker) (ru if err := json.Unmarshal(modified, obj); err != nil { return nil, err } - case types.StrategicMergePatchType, types.ApplyPatchType: + case types.StrategicMergePatchType: mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) if err != nil { return nil, err @@ -955,8 +955,10 @@ func dryPatch(action testing.PatchActionImpl, tracker testing.ObjectTracker) (ru if err = json.Unmarshal(mergedByte, obj); err != nil { return nil, err } + case types.ApplyPatchType: + return nil, errors.New("apply patches are not supported in the fake client. Follow https://github.com/kubernetes/kubernetes/issues/115598 for the current status") default: - return nil, fmt.Errorf("PatchType is not supported") + return nil, fmt.Errorf("%s PatchType is not supported", action.GetPatchType()) } return obj, nil } diff --git a/pkg/client/fake/client_test.go b/pkg/client/fake/client_test.go index d299175ba9..e5487de21a 100644 --- a/pkg/client/fake/client_test.go +++ b/pkg/client/fake/client_test.go @@ -359,6 +359,26 @@ var _ = Describe("Fake client", func() { Expect(list.Items).To(ConsistOf(*dep2)) }) + It("should reject apply patches, they are not supported in the fake client", func() { + By("Creating a new configmap") + cm := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "new-test-cm", + Namespace: "ns2", + }, + } + err := cl.Create(context.Background(), cm) + Expect(err).ToNot(HaveOccurred()) + + cm.Data = map[string]string{"foo": "bar"} + err = cl.Patch(context.Background(), cm, client.Apply, client.ForceOwnership) + Expect(err).To(MatchError(ContainSubstring("apply patches are not supported in the fake client"))) + }) + It("should be able to Create", func() { By("Creating a new configmap") newcm := &corev1.ConfigMap{ diff --git a/pkg/client/fieldmanager_test.go b/pkg/client/fieldmanager_test.go new file mode 100644 index 0000000000..7c3d752316 --- /dev/null +++ b/pkg/client/fieldmanager_test.go @@ -0,0 +1,148 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client_test + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" +) + +func TestWithFieldOwner(t *testing.T) { + calls := 0 + fakeClient := testClient(t, "custom-field-mgr", func() { calls++ }) + wrappedClient := client.WithFieldOwner(fakeClient, "custom-field-mgr") + + ctx := context.Background() + dummyObj := &corev1.Namespace{} + + _ = wrappedClient.Create(ctx, dummyObj) + _ = wrappedClient.Update(ctx, dummyObj) + _ = wrappedClient.Patch(ctx, dummyObj, nil) + _ = wrappedClient.Status().Create(ctx, dummyObj, dummyObj) + _ = wrappedClient.Status().Update(ctx, dummyObj) + _ = wrappedClient.Status().Patch(ctx, dummyObj, nil) + _ = wrappedClient.SubResource("some-subresource").Create(ctx, dummyObj, dummyObj) + _ = wrappedClient.SubResource("some-subresource").Update(ctx, dummyObj) + _ = wrappedClient.SubResource("some-subresource").Patch(ctx, dummyObj, nil) + + if expectedCalls := 9; calls != expectedCalls { + t.Fatalf("wrong number of calls to assertions: expected=%d; got=%d", expectedCalls, calls) + } +} + +func TestWithFieldOwnerOverridden(t *testing.T) { + calls := 0 + + fakeClient := testClient(t, "new-field-manager", func() { calls++ }) + wrappedClient := client.WithFieldOwner(fakeClient, "old-field-manager") + + ctx := context.Background() + dummyObj := &corev1.Namespace{} + + _ = wrappedClient.Create(ctx, dummyObj, client.FieldOwner("new-field-manager")) + _ = wrappedClient.Update(ctx, dummyObj, client.FieldOwner("new-field-manager")) + _ = wrappedClient.Patch(ctx, dummyObj, nil, client.FieldOwner("new-field-manager")) + _ = wrappedClient.Status().Create(ctx, dummyObj, dummyObj, client.FieldOwner("new-field-manager")) + _ = wrappedClient.Status().Update(ctx, dummyObj, client.FieldOwner("new-field-manager")) + _ = wrappedClient.Status().Patch(ctx, dummyObj, nil, client.FieldOwner("new-field-manager")) + _ = wrappedClient.SubResource("some-subresource").Create(ctx, dummyObj, dummyObj, client.FieldOwner("new-field-manager")) + _ = wrappedClient.SubResource("some-subresource").Update(ctx, dummyObj, client.FieldOwner("new-field-manager")) + _ = wrappedClient.SubResource("some-subresource").Patch(ctx, dummyObj, nil, client.FieldOwner("new-field-manager")) + + if expectedCalls := 9; calls != expectedCalls { + t.Fatalf("wrong number of calls to assertions: expected=%d; got=%d", expectedCalls, calls) + } +} + +// testClient is a helper function that checks if calls have the expected field manager, +// and calls the callback function on each intercepted call. 
+func testClient(t *testing.T, expectedFieldManager string, callback func()) client.Client { + // TODO: we could use the dummyClient in interceptor pkg if we move it to an internal pkg + return fake.NewClientBuilder().WithInterceptorFuncs(interceptor.Funcs{ + Create: func(ctx context.Context, c client.WithWatch, obj client.Object, opts ...client.CreateOption) error { + callback() + out := &client.CreateOptions{} + for _, f := range opts { + f.ApplyToCreate(out) + } + if got := out.FieldManager; expectedFieldManager != got { + t.Fatalf("wrong field manager: expected=%q; got=%q", expectedFieldManager, got) + } + return nil + }, + Update: func(ctx context.Context, c client.WithWatch, obj client.Object, opts ...client.UpdateOption) error { + callback() + out := &client.UpdateOptions{} + for _, f := range opts { + f.ApplyToUpdate(out) + } + if got := out.FieldManager; expectedFieldManager != got { + t.Fatalf("wrong field manager: expected=%q; got=%q", expectedFieldManager, got) + } + return nil + }, + Patch: func(ctx context.Context, c client.WithWatch, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + callback() + out := &client.PatchOptions{} + for _, f := range opts { + f.ApplyToPatch(out) + } + if got := out.FieldManager; expectedFieldManager != got { + t.Fatalf("wrong field manager: expected=%q; got=%q", expectedFieldManager, got) + } + return nil + }, + SubResourceCreate: func(ctx context.Context, c client.Client, subResourceName string, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { + callback() + out := &client.SubResourceCreateOptions{} + for _, f := range opts { + f.ApplyToSubResourceCreate(out) + } + if got := out.FieldManager; expectedFieldManager != got { + t.Fatalf("wrong field manager: expected=%q; got=%q", expectedFieldManager, got) + } + return nil + }, + SubResourceUpdate: func(ctx context.Context, c client.Client, subResourceName string, obj client.Object, opts ...client.SubResourceUpdateOption) error { + callback() + out := &client.SubResourceUpdateOptions{} + for _, f := range opts { + f.ApplyToSubResourceUpdate(out) + } + if got := out.FieldManager; expectedFieldManager != got { + t.Fatalf("wrong field manager: expected=%q; got=%q", expectedFieldManager, got) + } + return nil + }, + SubResourcePatch: func(ctx context.Context, c client.Client, subResourceName string, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + callback() + out := &client.SubResourcePatchOptions{} + for _, f := range opts { + f.ApplyToSubResourcePatch(out) + } + if got := out.FieldManager; expectedFieldManager != got { + t.Fatalf("wrong field manager: expected=%q; got=%q", expectedFieldManager, got) + } + return nil + }, + }).Build() +} diff --git a/pkg/client/fieldowner.go b/pkg/client/fieldowner.go new file mode 100644 index 0000000000..2f2f892ef3 --- /dev/null +++ b/pkg/client/fieldowner.go @@ -0,0 +1,106 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// WithFieldOwner wraps a Client and adds the fieldOwner as the field +// manager to all write requests from this client. If additional [FieldOwner] +// options are specified on methods of this client, the value specified here +// will be overridden. +func WithFieldOwner(c Client, fieldOwner string) Client { + return &clientWithFieldManager{ + manager: fieldOwner, + c: c, + Reader: c, + } +} + +type clientWithFieldManager struct { + manager string + c Client + Reader +} + +func (f *clientWithFieldManager) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + return f.c.Create(ctx, obj, append([]CreateOption{FieldOwner(f.manager)}, opts...)...) +} + +func (f *clientWithFieldManager) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return f.c.Update(ctx, obj, append([]UpdateOption{FieldOwner(f.manager)}, opts...)...) +} + +func (f *clientWithFieldManager) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return f.c.Patch(ctx, obj, patch, append([]PatchOption{FieldOwner(f.manager)}, opts...)...) +} + +func (f *clientWithFieldManager) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + return f.c.Delete(ctx, obj, opts...) +} + +func (f *clientWithFieldManager) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + return f.c.DeleteAllOf(ctx, obj, opts...) +} + +func (f *clientWithFieldManager) Scheme() *runtime.Scheme { return f.c.Scheme() } +func (f *clientWithFieldManager) RESTMapper() meta.RESTMapper { return f.c.RESTMapper() } +func (f *clientWithFieldManager) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return f.c.GroupVersionKindFor(obj) +} +func (f *clientWithFieldManager) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return f.c.IsObjectNamespaced(obj) +} + +func (f *clientWithFieldManager) Status() StatusWriter { + return &subresourceClientWithFieldOwner{ + owner: f.manager, + subresourceWriter: f.c.Status(), + } +} + +func (f *clientWithFieldManager) SubResource(subresource string) SubResourceClient { + c := f.c.SubResource(subresource) + return &subresourceClientWithFieldOwner{ + owner: f.manager, + subresourceWriter: c, + SubResourceReader: c, + } +} + +type subresourceClientWithFieldOwner struct { + owner string + subresourceWriter SubResourceWriter + SubResourceReader +} + +func (f *subresourceClientWithFieldOwner) Create(ctx context.Context, obj Object, subresource Object, opts ...SubResourceCreateOption) error { + return f.subresourceWriter.Create(ctx, obj, subresource, append([]SubResourceCreateOption{FieldOwner(f.owner)}, opts...)...) +} + +func (f *subresourceClientWithFieldOwner) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { + return f.subresourceWriter.Update(ctx, obj, append([]SubResourceUpdateOption{FieldOwner(f.owner)}, opts...)...) +} + +func (f *subresourceClientWithFieldOwner) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { + return f.subresourceWriter.Patch(ctx, obj, patch, append([]SubResourcePatchOption{FieldOwner(f.owner)}, opts...)...) 
+} diff --git a/pkg/manager/internal.go b/pkg/manager/internal.go index a16f354a1b..2b03263de8 100644 --- a/pkg/manager/internal.go +++ b/pkg/manager/internal.go @@ -179,6 +179,24 @@ func (cm *controllerManager) add(r Runnable) error { return cm.runnables.Add(r) } +// AddMetricsServerExtraHandler adds extra handler served on path to the http server that serves metrics. +func (cm *controllerManager) AddMetricsServerExtraHandler(path string, handler http.Handler) error { + cm.Lock() + defer cm.Unlock() + if cm.started { + return fmt.Errorf("unable to add new metrics handler because metrics endpoint has already been created") + } + if cm.metricsServer == nil { + cm.GetLogger().Info("warn: metrics server is currently disabled, registering extra handler %q will be ignored", path) + return nil + } + if err := cm.metricsServer.AddExtraHandler(path, handler); err != nil { + return err + } + cm.logger.V(2).Info("Registering metrics http server extra handler", "path", path) + return nil +} + // AddHealthzCheck allows you to add Healthz checker. func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error { cm.Lock() @@ -518,6 +536,8 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e // Stop all the leader election runnables, which includes reconcilers. cm.logger.Info("Stopping and waiting for leader election runnables") + // Prevent leader election when shutting down a non-elected manager + cm.runnables.LeaderElection.startOnce.Do(func() {}) cm.runnables.LeaderElection.StopAndWait(cm.shutdownCtx) // Stop the caches before the leader election runnables, this is an important diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 25c3c7375b..0b7a865004 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -67,6 +67,15 @@ type Manager interface { // election was configured. Elected() <-chan struct{} + // AddMetricsServerExtraHandler adds an extra handler served on path to the http server that serves metrics. + // Might be useful to register some diagnostic endpoints e.g. pprof. + // + // Note that these endpoints are meant to be sensitive and shouldn't be exposed publicly. + // + // If the simple path -> handler mapping offered here is not enough, + // a new http server/listener should be added as Runnable to the manager via Add method. 
+ AddMetricsServerExtraHandler(path string, handler http.Handler) error + // AddHealthzCheck allows you to add Healthz checker AddHealthzCheck(name string, check healthz.Checker) error diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 90596e9ace..83f0bd849a 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -378,6 +378,85 @@ var _ = Describe("manger.Manager", func() { Expect(cm.gracefulShutdownTimeout.Nanoseconds()).To(Equal(int64(0))) }) + + It("should prevent leader election when shutting down a non-elected manager", func() { + var rl resourcelock.Interface + m1, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionNamespace: "default", + LeaderElectionID: "test-leader-election-id", + newResourceLock: func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) { + var err error + rl, err = leaderelection.NewResourceLock(config, recorderProvider, options) + return rl, err + }, + HealthProbeBindAddress: "0", + Metrics: metricsserver.Options{BindAddress: "0"}, + PprofBindAddress: "0", + }) + Expect(err).ToNot(HaveOccurred()) + Expect(m1).ToNot(BeNil()) + Expect(rl.Describe()).To(Equal("default/test-leader-election-id")) + + m1cm, ok := m1.(*controllerManager) + Expect(ok).To(BeTrue()) + m1cm.onStoppedLeading = func() {} + + m2, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionNamespace: "default", + LeaderElectionID: "test-leader-election-id", + newResourceLock: func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) { + var err error + rl, err = leaderelection.NewResourceLock(config, recorderProvider, options) + return rl, err + }, + HealthProbeBindAddress: "0", + Metrics: metricsserver.Options{BindAddress: "0"}, + PprofBindAddress: "0", + }) + Expect(err).ToNot(HaveOccurred()) + Expect(m2).ToNot(BeNil()) + Expect(rl.Describe()).To(Equal("default/test-leader-election-id")) + + m1done := make(chan struct{}) + Expect(m1.Add(RunnableFunc(func(ctx context.Context) error { + defer GinkgoRecover() + close(m1done) + return nil + }))).To(Succeed()) + + ctx1, cancel1 := context.WithCancel(context.Background()) + defer cancel1() + go func() { + defer GinkgoRecover() + Expect(m1.Elected()).ShouldNot(BeClosed()) + Expect(m1.Start(ctx1)).NotTo(HaveOccurred()) + }() + <-m1.Elected() + <-m1done + + electionRunnable := &needElection{make(chan struct{})} + + Expect(m2.Add(electionRunnable)).To(Succeed()) + + ctx2, cancel2 := context.WithCancel(context.Background()) + m2done := make(chan struct{}) + go func() { + defer GinkgoRecover() + Expect(m2.Start(ctx2)).NotTo(HaveOccurred()) + close(m2done) + }() + Consistently(m2.Elected()).ShouldNot(Receive()) + + go func() { + defer GinkgoRecover() + Consistently(electionRunnable.ch).ShouldNot(Receive()) + }() + cancel2() + <-m2done + }) + It("should default ID to controller-runtime if ID is not set", func() { var rl resourcelock.Interface m1, err := New(cfg, Options{ @@ -1374,6 +1453,12 @@ var _ = Describe("manger.Manager", func() { m, err := New(cfg, opts) Expect(err).NotTo(HaveOccurred()) + // Should error when we add another extra endpoint on the already registered path. 
+ err = m.AddMetricsServerExtraHandler("/debug", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + _, _ = w.Write([]byte("Another debug info")) + })) + Expect(err).To(HaveOccurred()) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() go func() { @@ -1929,3 +2014,16 @@ func (f *fakeDeferredLoader) InjectScheme(scheme *runtime.Scheme) error { type metricsDefaultServer interface { GetBindAddr() string } + +type needElection struct { + ch chan struct{} +} + +func (n *needElection) Start(_ context.Context) error { + n.ch <- struct{}{} + return nil +} + +func (n *needElection) NeedLeaderElection() bool { + return true +} diff --git a/pkg/manager/runnable_group.go b/pkg/manager/runnable_group.go index 96566f5df1..6060910485 100644 --- a/pkg/manager/runnable_group.go +++ b/pkg/manager/runnable_group.go @@ -263,6 +263,15 @@ func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error { r.start.Unlock() } + // Recheck if we're stopped and hold the readlock, given that the stop and start can be called + // at the same time, we can end up in a situation where the runnable is added + // after the group is stopped and the channel is closed. + r.stop.RLock() + defer r.stop.RUnlock() + if r.stopped { + return errRunnableGroupStopped + } + // Enqueue the runnable. r.ch <- readyRunnable return nil @@ -272,7 +281,11 @@ func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error { func (r *runnableGroup) StopAndWait(ctx context.Context) { r.stopOnce.Do(func() { // Close the reconciler channel once we're done. - defer close(r.ch) + defer func() { + r.stop.Lock() + close(r.ch) + r.stop.Unlock() + }() _ = r.Start(ctx) r.stop.Lock() diff --git a/pkg/manager/runnable_group_test.go b/pkg/manager/runnable_group_test.go index 9a55c4de9e..34d76ed0dc 100644 --- a/pkg/manager/runnable_group_test.go +++ b/pkg/manager/runnable_group_test.go @@ -161,6 +161,42 @@ var _ = Describe("runnableGroup", func() { } }) + It("should be able to handle adding runnables while stopping", func() { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + rg := newRunnableGroup(defaultBaseContext, errCh) + + go func() { + defer GinkgoRecover() + <-time.After(1 * time.Millisecond) + Expect(rg.Start(ctx)).To(Succeed()) + }() + go func() { + defer GinkgoRecover() + <-time.After(1 * time.Millisecond) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + rg.StopAndWait(ctx) + }() + + for i := 0; i < 200; i++ { + go func(i int) { + defer GinkgoRecover() + + <-time.After(time.Duration(i) * time.Microsecond) + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + <-ctx.Done() + return nil + }), func(_ context.Context) bool { + return true + })).To(SatisfyAny( + Succeed(), + Equal(errRunnableGroupStopped), + )) + }(i) + } + }) + It("should not turn ready if some readiness check fail", func() { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() diff --git a/pkg/metrics/server/server.go b/pkg/metrics/server/server.go index e10c5c2103..40eb9db8cc 100644 --- a/pkg/metrics/server/server.go +++ b/pkg/metrics/server/server.go @@ -46,6 +46,9 @@ var DefaultBindAddress = ":8080" // Server is a server that serves metrics. type Server interface { + // AddExtraHandler adds extra handler served on path to the http server that serves metrics. 
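+ // The builtin /metrics endpoint cannot be overridden and a given path can only be
+ // registered once; violating either constraint returns an error (see the
+ // defaultServer implementation further down in this file).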
+ AddExtraHandler(path string, handler http.Handler) error + // NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates // the metrics server doesn't need leader election. NeedLeaderElection() bool @@ -179,6 +182,23 @@ func (*defaultServer) NeedLeaderElection() bool { return false } +// AddExtraHandler adds extra handler served on path to the http server that serves metrics. +func (s *defaultServer) AddExtraHandler(path string, handler http.Handler) error { + s.mu.Lock() + defer s.mu.Unlock() + if s.options.ExtraHandlers == nil { + s.options.ExtraHandlers = make(map[string]http.Handler) + } + if path == defaultMetricsEndpoint { + return fmt.Errorf("overriding builtin %s endpoint is not allowed", defaultMetricsEndpoint) + } + if _, found := s.options.ExtraHandlers[path]; found { + return fmt.Errorf("can't register extra handler by duplicate path %q on metrics http server", path) + } + s.options.ExtraHandlers[path] = handler + return nil +} + // Start runs the server. // It will install the metrics related resources depend on the server configuration. func (s *defaultServer) Start(ctx context.Context) error { diff --git a/tools/setup-envtest/README.md b/tools/setup-envtest/README.md index 1bdeebbc55..cf8496d56b 100644 --- a/tools/setup-envtest/README.md +++ b/tools/setup-envtest/README.md @@ -40,6 +40,18 @@ setup-envtest use -i --use-env # sideload a pre-downloaded tarball as Kubernetes 1.16.2 into our store setup-envtest sideload 1.16.2 < downloaded-envtest.tar.gz + +# If --use-deprecated-gcs is set to false envtest binaries are downloaded from: +# https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/master/envtest-releases.yaml +# To download from a custom index use the following: +# Note: In controller-runtime v0.19.0 --use-deprecated-gcs will default to false. +setup-envtest use --use-deprecated-gcs=false --index https://custom.com/envtest-releases.yaml + +# To download from the kubebuilder-tools GCS bucket: (default behavior until v0.18) +# Note: This is a Google-owned bucket and it might be shutdown at any time +# see: https://github.com/kubernetes/k8s.io/issues/2647#event-12439345373 +# Note: This flag will also be removed soon. +setup-envtest use --use-deprecated-gcs ``` ## Where does it put all those binaries? @@ -51,16 +63,16 @@ On Linux, this is `$XDG_DATA_HOME`; on Windows, `%LocalAppData`; and on OSX, `~/Library/Application Support`. There's an overall folder that holds all files, and inside that is -a folder for each version/platform pair. The exact directory structure is -not guarnateed, except that the leaf directory will contain the names -expected by envtest. You should always use `setup-envtest fetch` or +a folder for each version/platform pair. The exact directory structure is +not guaranteed, except that the leaf directory will contain the names +expected by envtest. You should always use `setup-envtest fetch` or `setup-envtest switch` (generally with the `-p path` or `-p env` flags) to get the directory that you should use. ## Why do I have to do that `source <(blah blah blah)` thing This is a normal binary, not a shell script, so we can't set the parent -process's environment variables. If you use this by hand a lot and want +process's environment variables. If you use this by hand a lot and want to save the typing, you could put something like the following in your `~/.zshrc` (or similar for bash/fish/whatever, modified to those): @@ -79,7 +91,7 @@ setup-envtest() { There are a few options. 
First, you'll probably want to set the `-i/--installed` flag. If you want -to avoid forgetting to set this flag, set the `ENVTEST_INSTALLED_ONLY` +to avoid forgetting to set this flag, set the `ENVTEST_INSTALLED_ONLY` env variable, which will switch that flag on by default. Then, you have a few options for managing your binaries: @@ -98,13 +110,18 @@ Then, you have a few options for managing your binaries: `--use-env` on by default. - If you want to use this tool, but download your gziped tarballs - separately, you can use the `sideload` command. You'll need to use the + separately, you can use the `sideload` command. You'll need to use the `-k/--version` flag to indicate which version you're sideloading. After that, it'll be as if you'd installed the binaries with `use`. -- If you want to talk to some internal source, you can use the - `--remote-bucket` and `--remote-server` options. The former sets which +- If you want to talk to some internal source via HTTP, you can simply set `--index` + The index must contain references to envtest binary archives in the same format as: + https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/master/envtest-releases.yaml + +- If you want to talk to some internal source in a GCS "style", you can use the + `--remote-bucket` and `--remote-server` options together with `--use-deprecated-gcs`. + Note: This is deprecated and will be removed soon. The former sets which GCS bucket to download from, and the latter sets the host to talk to as if it were a GCS endpoint. Theoretically, you could use the latter version to run an internal "mirror" -- the tool expects @@ -114,7 +131,7 @@ Then, you have a few options for managing your binaries: ```json {"items": [ {"name": "kubebuilder-tools-X.Y.Z-os-arch.tar.gz", "md5Hash": ""}, - {"name": "kubebuilder-tools-X.Y.Z-os-arch.tar.gz", "md5Hash": ""}, + {"name": "kubebuilder-tools-X.Y.Z-os-arch.tar.gz", "md5Hash": ""} ]} ``` diff --git a/tools/setup-envtest/env/env.go b/tools/setup-envtest/env/env.go index e12a107352..24857916d7 100644 --- a/tools/setup-envtest/env/env.go +++ b/tools/setup-envtest/env/env.go @@ -26,24 +26,28 @@ import ( // envtest binaries. // // In general, the methods will use the Exit{,Cause} functions from this package -// to indicate errors. Catch them with a `defer HandleExitWithCode()`. +// to indicate errors. Catch them with a `defer HandleExitWithCode()`. type Env struct { // the following *must* be set on input // Platform is our current platform Platform versions.PlatformItem - // VerifiySum indicates whether or not we should run checksums. + // VerifySum indicates whether we should run checksums. VerifySum bool - // NoDownload forces us to not contact GCS, looking only - // at local files instead. + // NoDownload forces us to not contact remote services, + // looking only at local files instead. NoDownload bool // ForceDownload forces us to ignore local files and always - // contact GCS & re-download. + // contact remote services & re-download. ForceDownload bool - // Client is our remote client for contacting GCS. - Client *remote.Client + // UseDeprecatedGCS signals if the GCS client is used. + // Note: This will be removed together with remote.GCSClient. + UseDeprecatedGCS bool + + // Client is our remote client for contacting remote services. + Client remote.Client // Log allows us to log. 
Log logr.Logger @@ -133,7 +137,7 @@ func (e *Env) ListVersions(ctx context.Context) { } // LatestVersion returns the latest version matching our version selector and -// platform from the remote server, with the correspoding checksum for later +// platform from the remote server, with the corresponding checksum for later // use as well. func (e *Env) LatestVersion(ctx context.Context) (versions.Concrete, versions.PlatformItem) { vers, err := e.Client.ListVersions(ctx) @@ -193,7 +197,7 @@ func (e *Env) ExistsAndValid() bool { // // If necessary, it will enumerate on-disk and remote versions to accomplish // this, finding a version that matches our version selector and platform. -// It will always yield a concrete version, it *may* yield a concrete platorm +// It will always yield a concrete version, it *may* yield a concrete platform // as well. func (e *Env) EnsureVersionIsSet(ctx context.Context) { if e.Version.AsConcrete() != nil { @@ -247,13 +251,13 @@ func (e *Env) EnsureVersionIsSet(ctx context.Context) { // if we're not forcing a download, and we have a newer local version, just use that if !e.ForceDownload && localVer != nil && localVer.NewerThan(serverVer) { - e.Platform.Platform = localPlat // update our data with md5 + e.Platform.Platform = localPlat // update our data with hash e.Version.MakeConcrete(*localVer) return } // otherwise, use the new version from the server - e.Platform = platform // update our data with md5 + e.Platform = platform // update our data with hash e.Version.MakeConcrete(serverVer) } @@ -266,13 +270,13 @@ func (e *Env) Fetch(ctx context.Context) { log := e.Log.WithName("fetch") // if we didn't just fetch it, grab the sum to verify - if e.VerifySum && e.Platform.MD5 == "" { + if e.VerifySum && e.Platform.Hash == nil { if err := e.Client.FetchSum(ctx, *e.Version.AsConcrete(), &e.Platform); err != nil { - ExitCause(2, err, "unable to fetch checksum for requested version") + ExitCause(2, err, "unable to fetch hash for requested version") } } if !e.VerifySum { - e.Platform.MD5 = "" // skip verification + e.Platform.Hash = nil // skip verification } var packedPath string @@ -287,7 +291,7 @@ func (e *Env) Fetch(ctx context.Context) { } }) - archiveOut, err := e.FS.TempFile("", "*-"+e.Platform.ArchiveName(*e.Version.AsConcrete())) + archiveOut, err := e.FS.TempFile("", "*-"+e.Platform.ArchiveName(e.UseDeprecatedGCS, *e.Version.AsConcrete())) if err != nil { ExitCause(2, err, "unable to open file to write downloaded archive to") } @@ -365,8 +369,8 @@ func (e *Env) PrintInfo(printFmt PrintFormat) { case PrintOverview: fmt.Fprintf(e.Out, "Version: %s\n", e.Version) fmt.Fprintf(e.Out, "OS/Arch: %s\n", e.Platform) - if e.Platform.MD5 != "" { - fmt.Fprintf(e.Out, "md5: %s\n", e.Platform.MD5) + if e.Platform.Hash != nil { + fmt.Fprintf(e.Out, "%s: %s\n", e.Platform.Hash.Type, e.Platform.Hash.Value) } fmt.Fprintf(e.Out, "Path: %s\n", path) case PrintPath: @@ -409,7 +413,7 @@ func (e *Env) Sideload(ctx context.Context, input io.Reader) { } var ( - // expectedExectuables are the executables that are checked in PathMatches + // expectedExecutables are the executables that are checked in PathMatches // for non-store paths. expectedExecutables = []string{ "kube-apiserver", @@ -458,7 +462,7 @@ func (e *Env) PathMatches(value string) bool { } // versionFromPathName checks if the given path's last component looks like one -// of our versions, and, if so, what version it represents. If succesfull, +// of our versions, and, if so, what version it represents. 
If successful, // it'll set version and platform, and return true. Otherwise it returns // false. func (e *Env) versionFromPathName(value string) bool { diff --git a/tools/setup-envtest/go.mod b/tools/setup-envtest/go.mod index 405c4ab8df..ef167a20ae 100644 --- a/tools/setup-envtest/go.mod +++ b/tools/setup-envtest/go.mod @@ -3,25 +3,27 @@ module sigs.k8s.io/controller-runtime/tools/setup-envtest go 1.20 require ( - github.com/go-logr/logr v1.2.4 - github.com/go-logr/zapr v1.2.4 - github.com/onsi/ginkgo/v2 v2.12.1 - github.com/onsi/gomega v1.27.10 + github.com/go-logr/logr v1.4.1 + github.com/go-logr/zapr v1.3.0 + github.com/onsi/ginkgo/v2 v2.17.1 + github.com/onsi/gomega v1.32.0 github.com/spf13/afero v1.6.0 github.com/spf13/pflag v1.0.5 go.uber.org/zap v1.26.0 + k8s.io/apimachinery v0.28.10 + sigs.k8s.io/yaml v1.3.0 ) require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.5.9 // indirect - github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect go.uber.org/multierr v1.10.0 // indirect - golang.org/x/net v0.14.0 // indirect - golang.org/x/sys v0.12.0 // indirect - golang.org/x/text v0.12.0 // indirect - golang.org/x/tools v0.12.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.18.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/tools/setup-envtest/go.sum b/tools/setup-envtest/go.sum index de040d172d..c8ad8d8153 100644 --- a/tools/setup-envtest/go.sum +++ b/tools/setup-envtest/go.sum @@ -1,37 +1,26 @@ -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/onsi/ginkgo/v2 v2.12.1 h1:uHNEO1RP2SpuZApSkel9nEh1/Mu+hmQe7Q+Pepg5OYA= -github.com/onsi/ginkgo/v2 v2.12.1/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -40,69 +29,42 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify 
v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= -golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/apimachinery v0.28.10 h1:cWonrYsJK3lbuf9IgMs5+L5Jzw6QR3ZGA3hzwG0HDeI= +k8s.io/apimachinery v0.28.10/go.mod 
h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/tools/setup-envtest/main.go b/tools/setup-envtest/main.go index 8dca774157..62670af515 100644 --- a/tools/setup-envtest/main.go +++ b/tools/setup-envtest/main.go @@ -49,10 +49,17 @@ var ( binDir = flag.String("bin-dir", "", "directory to store binary assets (default: $OS_SPECIFIC_DATA_DIR/envtest-binaries)") - remoteBucket = flag.String("remote-bucket", "kubebuilder-tools", "remote GCS bucket to download from") + + useDeprecatedGCS = flag.Bool("use-deprecated-gcs", true, "use GCS to fetch envtest binaries. Note: This is deprecated and will be removed soon. For more details see: https://github.com/kubernetes-sigs/controller-runtime/pull/2811. In controller-runtime v0.19.0 this flag will default to false") + + // These flags are only used with --use-deprecated-gcs. + remoteBucket = flag.String("remote-bucket", "kubebuilder-tools", "remote GCS bucket to download from (only used with --use-deprecated-gcs)") remoteServer = flag.String("remote-server", "storage.googleapis.com", "remote server to query from. You can override this if you want to run "+ - "an internal storage server instead, or for testing.") + "an internal storage server instead, or for testing. (only used with --use-deprecated-gcs)") + + // This flag is only used if --use-deprecated-gcs is set to false. + index = flag.String("index", remote.DefaultIndexURL, "index to discover envtest binaries (only used if --use-deprecated-gcs is not set, or set to false)") ) // TODO(directxman12): handle interrupts? @@ -81,16 +88,29 @@ func setupEnv(globalLog logr.Logger, version string) *envp.Env { } log.V(1).Info("using binaries directory", "dir", *binDir) - env := &envp.Env{ - Log: globalLog, - Client: &remote.Client{ + var client remote.Client + if useDeprecatedGCS != nil && *useDeprecatedGCS { + client = &remote.GCSClient{ //nolint:staticcheck // deprecation accepted for now Log: globalLog.WithName("storage-client"), Bucket: *remoteBucket, Server: *remoteServer, - }, - VerifySum: *verify, - ForceDownload: *force, - NoDownload: *installedOnly, + } + log.V(1).Info("using deprecated GCS client", "bucket", *remoteBucket, "server", *remoteServer) + } else { + client = &remote.HTTPClient{ + Log: globalLog.WithName("storage-client"), + IndexURL: *index, + } + log.V(1).Info("using HTTP client", "index", *index) + } + + env := &envp.Env{ + Log: globalLog, + UseDeprecatedGCS: useDeprecatedGCS != nil && *useDeprecatedGCS, + Client: client, + VerifySum: *verify, + ForceDownload: *force, + NoDownload: *installedOnly, Platform: versions.PlatformItem{ Platform: versions.Platform{ OS: *targetOS, diff --git a/tools/setup-envtest/remote/client.go b/tools/setup-envtest/remote/client.go index be82532583..24efd6daff 100644 --- a/tools/setup-envtest/remote/client.go +++ b/tools/setup-envtest/remote/client.go @@ -1,219 +1,20 @@ // SPDX-License-Identifier: Apache-2.0 -// Copyright 2021 The Kubernetes Authors +// Copyright 2024 The Kubernetes Authors package remote import ( "context" - "crypto/md5" //nolint:gosec - "encoding/base64" - "encoding/json" - "errors" - "fmt" "io" - "net/http" - "net/url" - "path" - "sort" - "github.com/go-logr/logr" "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" ) -// objectList is the parts we need of the GCS "list-objects-in-bucket" endpoint. 
-type objectList struct { - Items []bucketObject `json:"items"` - NextPageToken string `json:"nextPageToken"` -} - -// bucketObject is the parts we need of the GCS object metadata. -type bucketObject struct { - Name string `json:"name"` - Hash string `json:"md5Hash"` -} - -// Client is a basic client for fetching versions of the envtest binary archives -// from GCS. -type Client struct { - // Bucket is the bucket to fetch from. - Bucket string - - // Server is the GCS-like storage server - Server string - - // Log allows us to log. - Log logr.Logger - - // Insecure uses http for testing - Insecure bool -} - -func (c *Client) scheme() string { - if c.Insecure { - return "http" - } - return "https" -} - -// ListVersions lists all available tools versions in the given bucket, along -// with supported os/arch combos and the corresponding hash. -// -// The results are sorted with newer versions first. -func (c *Client) ListVersions(ctx context.Context) ([]versions.Set, error) { - loc := &url.URL{ - Scheme: c.scheme(), - Host: c.Server, - Path: path.Join("/storage/v1/b/", c.Bucket, "o"), - } - query := make(url.Values) - - knownVersions := map[versions.Concrete][]versions.PlatformItem{} - for cont := true; cont; { - c.Log.V(1).Info("listing bucket to get versions", "bucket", c.Bucket) - - loc.RawQuery = query.Encode() - req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) - if err != nil { - return nil, fmt.Errorf("unable to construct request to list bucket items: %w", err) - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, fmt.Errorf("unable to perform request to list bucket items: %w", err) - } - - err = func() error { - defer resp.Body.Close() - if resp.StatusCode != 200 { - return fmt.Errorf("unable list bucket items -- got status %q from GCS", resp.Status) - } - - var list objectList - if err := json.NewDecoder(resp.Body).Decode(&list); err != nil { - return fmt.Errorf("unable unmarshal bucket items list: %w", err) - } - - // continue listing if needed - cont = list.NextPageToken != "" - query.Set("pageToken", list.NextPageToken) - - for _, item := range list.Items { - ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, item.Name) - if ver == nil { - c.Log.V(1).Info("skipping bucket object -- does not appear to be a versioned tools object", "name", item.Name) - continue - } - c.Log.V(1).Info("found version", "version", ver, "platform", details) - knownVersions[*ver] = append(knownVersions[*ver], versions.PlatformItem{ - Platform: details, - MD5: item.Hash, - }) - } - - return nil - }() - if err != nil { - return nil, err - } - } - - res := make([]versions.Set, 0, len(knownVersions)) - for ver, details := range knownVersions { - res = append(res, versions.Set{Version: ver, Platforms: details}) - } - // sort in inverse order so that the newest one is first - sort.Slice(res, func(i, j int) bool { - first, second := res[i].Version, res[j].Version - return first.NewerThan(second) - }) - - return res, nil -} - -// GetVersion downloads the given concrete version for the given concrete platform, writing it to the out. 
-func (c *Client) GetVersion(ctx context.Context, version versions.Concrete, platform versions.PlatformItem, out io.Writer) error { - itemName := platform.ArchiveName(version) - loc := &url.URL{ - Scheme: c.scheme(), - Host: c.Server, - Path: path.Join("/storage/v1/b/", c.Bucket, "o", itemName), - RawQuery: "alt=media", - } - - req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) - if err != nil { - return fmt.Errorf("unable to construct request to fetch %s: %w", itemName, err) - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return fmt.Errorf("unable to fetch %s (%s): %w", itemName, req.URL, err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return fmt.Errorf("unable fetch %s (%s) -- got status %q from GCS", itemName, req.URL, resp.Status) - } - - if platform.MD5 != "" { - // stream in chunks to do the checksum, don't load the whole thing into - // memory to avoid causing issues with big files. - buf := make([]byte, 32*1024) // 32KiB, same as io.Copy - checksum := md5.New() //nolint:gosec - for cont := true; cont; { - amt, err := resp.Body.Read(buf) - if err != nil && !errors.Is(err, io.EOF) { - return fmt.Errorf("unable read next chunk of %s: %w", itemName, err) - } - if amt > 0 { - // checksum never returns errors according to docs - checksum.Write(buf[:amt]) - if _, err := out.Write(buf[:amt]); err != nil { - return fmt.Errorf("unable write next chunk of %s: %w", itemName, err) - } - } - cont = amt > 0 && !errors.Is(err, io.EOF) - } - - sum := base64.StdEncoding.EncodeToString(checksum.Sum(nil)) - - if sum != platform.MD5 { - return fmt.Errorf("checksum mismatch for %s: %s (computed) != %s (reported from GCS)", itemName, sum, platform.MD5) - } - } else if _, err := io.Copy(out, resp.Body); err != nil { - return fmt.Errorf("unable to download %s: %w", itemName, err) - } - return nil -} - -// FetchSum fetches the checksum for the given concrete version & platform into -// the given platform item. -func (c *Client) FetchSum(ctx context.Context, ver versions.Concrete, pl *versions.PlatformItem) error { - itemName := pl.ArchiveName(ver) - loc := &url.URL{ - Scheme: c.scheme(), - Host: c.Server, - Path: path.Join("/storage/v1/b/", c.Bucket, "o", itemName), - } - - req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) - if err != nil { - return fmt.Errorf("unable to construct request to fetch metadata for %s: %w", itemName, err) - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return fmt.Errorf("unable to fetch metadata for %s: %w", itemName, err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return fmt.Errorf("unable fetch metadata for %s -- got status %q from GCS", itemName, resp.Status) - } +// Client is an interface to get and list envtest binary archives. 
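+// Two implementations are provided below: GCSClient (deprecated, reads the legacy
+// kubebuilder-tools GCS bucket) and HTTPClient (reads an envtest-releases.yaml index
+// over HTTP).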
+type Client interface { + ListVersions(ctx context.Context) ([]versions.Set, error) - var item bucketObject - if err := json.NewDecoder(resp.Body).Decode(&item); err != nil { - return fmt.Errorf("unable to unmarshal metadata for %s: %w", itemName, err) - } + GetVersion(ctx context.Context, version versions.Concrete, platform versions.PlatformItem, out io.Writer) error - pl.MD5 = item.Hash - return nil + FetchSum(ctx context.Context, ver versions.Concrete, pl *versions.PlatformItem) error } diff --git a/tools/setup-envtest/remote/gcs_client.go b/tools/setup-envtest/remote/gcs_client.go new file mode 100644 index 0000000000..85f321d5c5 --- /dev/null +++ b/tools/setup-envtest/remote/gcs_client.go @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package remote + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path" + "sort" + + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// objectList is the parts we need of the GCS "list-objects-in-bucket" endpoint. +type objectList struct { + Items []bucketObject `json:"items"` + NextPageToken string `json:"nextPageToken"` +} + +// bucketObject is the parts we need of the GCS object metadata. +type bucketObject struct { + Name string `json:"name"` + Hash string `json:"md5Hash"` +} + +var _ Client = &GCSClient{} + +// GCSClient is a basic client for fetching versions of the envtest binary archives +// from GCS. +// +// Deprecated: This client is deprecated and will be removed soon. +// The kubebuilder GCS bucket that we use with this client might be shutdown at any time, +// see: https://github.com/kubernetes/k8s.io/issues/2647. +type GCSClient struct { + // Bucket is the bucket to fetch from. + Bucket string + + // Server is the GCS-like storage server + Server string + + // Log allows us to log. + Log logr.Logger + + // Insecure uses http for testing + Insecure bool +} + +func (c *GCSClient) scheme() string { + if c.Insecure { + return "http" + } + return "https" +} + +// ListVersions lists all available tools versions in the given bucket, along +// with supported os/arch combos and the corresponding hash. +// +// The results are sorted with newer versions first. 
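+// Listing pages through the bucket using the nextPageToken returned by GCS until
+// every object has been seen.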
+func (c *GCSClient) ListVersions(ctx context.Context) ([]versions.Set, error) { + loc := &url.URL{ + Scheme: c.scheme(), + Host: c.Server, + Path: path.Join("/storage/v1/b/", c.Bucket, "o"), + } + query := make(url.Values) + + knownVersions := map[versions.Concrete][]versions.PlatformItem{} + for cont := true; cont; { + c.Log.V(1).Info("listing bucket to get versions", "bucket", c.Bucket) + + loc.RawQuery = query.Encode() + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return nil, fmt.Errorf("unable to construct request to list bucket items: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("unable to perform request to list bucket items: %w", err) + } + + err = func() error { + defer resp.Body.Close() + if resp.StatusCode != 200 { + return fmt.Errorf("unable list bucket items -- got status %q from GCS", resp.Status) + } + + var list objectList + if err := json.NewDecoder(resp.Body).Decode(&list); err != nil { + return fmt.Errorf("unable unmarshal bucket items list: %w", err) + } + + // continue listing if needed + cont = list.NextPageToken != "" + query.Set("pageToken", list.NextPageToken) + + for _, item := range list.Items { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, item.Name) + if ver == nil { + c.Log.V(1).Info("skipping bucket object -- does not appear to be a versioned tools object", "name", item.Name) + continue + } + c.Log.V(1).Info("found version", "version", ver, "platform", details) + knownVersions[*ver] = append(knownVersions[*ver], versions.PlatformItem{ + Platform: details, + Hash: &versions.Hash{ + Type: versions.MD5HashType, + Encoding: versions.Base64HashEncoding, + Value: item.Hash, + }, + }) + } + + return nil + }() + if err != nil { + return nil, err + } + } + + res := make([]versions.Set, 0, len(knownVersions)) + for ver, details := range knownVersions { + res = append(res, versions.Set{Version: ver, Platforms: details}) + } + // sort in inverse order so that the newest one is first + sort.Slice(res, func(i, j int) bool { + first, second := res[i].Version, res[j].Version + return first.NewerThan(second) + }) + + return res, nil +} + +// GetVersion downloads the given concrete version for the given concrete platform, writing it to the out. +func (c *GCSClient) GetVersion(ctx context.Context, version versions.Concrete, platform versions.PlatformItem, out io.Writer) error { + itemName := platform.ArchiveName(true, version) + loc := &url.URL{ + Scheme: c.scheme(), + Host: c.Server, + Path: path.Join("/storage/v1/b/", c.Bucket, "o", itemName), + RawQuery: "alt=media", + } + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return fmt.Errorf("unable to construct request to fetch %s: %w", itemName, err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("unable to fetch %s (%s): %w", itemName, req.URL, err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("unable fetch %s (%s) -- got status %q from GCS", itemName, req.URL, resp.Status) + } + + return readBody(resp, out, itemName, platform) +} + +// FetchSum fetches the checksum for the given concrete version & platform into +// the given platform item. 
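+// Only the object metadata is requested; the archive itself is not downloaded.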
+func (c *GCSClient) FetchSum(ctx context.Context, ver versions.Concrete, pl *versions.PlatformItem) error { + itemName := pl.ArchiveName(true, ver) + loc := &url.URL{ + Scheme: c.scheme(), + Host: c.Server, + Path: path.Join("/storage/v1/b/", c.Bucket, "o", itemName), + } + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return fmt.Errorf("unable to construct request to fetch metadata for %s: %w", itemName, err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("unable to fetch metadata for %s: %w", itemName, err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("unable fetch metadata for %s -- got status %q from GCS", itemName, resp.Status) + } + + var item bucketObject + if err := json.NewDecoder(resp.Body).Decode(&item); err != nil { + return fmt.Errorf("unable to unmarshal metadata for %s: %w", itemName, err) + } + + pl.Hash = &versions.Hash{ + Type: versions.MD5HashType, + Encoding: versions.Base64HashEncoding, + Value: item.Hash, + } + return nil +} diff --git a/tools/setup-envtest/remote/http_client.go b/tools/setup-envtest/remote/http_client.go new file mode 100644 index 0000000000..0339654a82 --- /dev/null +++ b/tools/setup-envtest/remote/http_client.go @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package remote + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "sort" + + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" + "sigs.k8s.io/yaml" +) + +// DefaultIndexURL is the default index used in HTTPClient. +var DefaultIndexURL = "https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/HEAD/envtest-releases.yaml" + +var _ Client = &HTTPClient{} + +// HTTPClient is a client for fetching versions of the envtest binary archives +// from an index via HTTP. +type HTTPClient struct { + // Log allows us to log. + Log logr.Logger + + // IndexURL is the URL of the index, defaults to DefaultIndexURL. + IndexURL string +} + +// Index represents an index of envtest binary archives. Example: +// +// releases: +// v1.28.0: +// envtest-v1.28.0-darwin-amd64.tar.gz: +// hash: <sha512-hash> +// selfLink: <url-to-archive-with-envtest-binaries> +type Index struct { + // Releases maps Kubernetes versions to Releases (envtest archives). + Releases map[string]Release `json:"releases"` +} + +// Release maps an archive name to an archive. +type Release map[string]Archive + +// Archive contains the self link to an archive and its hash. +type Archive struct { + Hash string `json:"hash"` + SelfLink string `json:"selfLink"` +} + +// ListVersions lists all available tools versions in the index, along +// with supported os/arch combos and the corresponding hash. +// +// The results are sorted with newer versions first.
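+// The index is re-fetched on every call; results are not cached by the client.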
+func (c *HTTPClient) ListVersions(ctx context.Context) ([]versions.Set, error) { + index, err := c.getIndex(ctx) + if err != nil { + return nil, err + } + + knownVersions := map[versions.Concrete][]versions.PlatformItem{} + for _, releases := range index.Releases { + for archiveName, archive := range releases { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, archiveName) + if ver == nil { + c.Log.V(1).Info("skipping archive -- does not appear to be a versioned tools archive", "name", archiveName) + continue + } + c.Log.V(1).Info("found version", "version", ver, "platform", details) + knownVersions[*ver] = append(knownVersions[*ver], versions.PlatformItem{ + Platform: details, + Hash: &versions.Hash{ + Type: versions.SHA512HashType, + Encoding: versions.HexHashEncoding, + Value: archive.Hash, + }, + }) + } + } + + res := make([]versions.Set, 0, len(knownVersions)) + for ver, details := range knownVersions { + res = append(res, versions.Set{Version: ver, Platforms: details}) + } + // sort in inverse order so that the newest one is first + sort.Slice(res, func(i, j int) bool { + first, second := res[i].Version, res[j].Version + return first.NewerThan(second) + }) + + return res, nil +} + +// GetVersion downloads the given concrete version for the given concrete platform, writing it to the out. +func (c *HTTPClient) GetVersion(ctx context.Context, version versions.Concrete, platform versions.PlatformItem, out io.Writer) error { + index, err := c.getIndex(ctx) + if err != nil { + return err + } + + var loc *url.URL + var name string + for _, releases := range index.Releases { + for archiveName, archive := range releases { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, archiveName) + if ver == nil { + c.Log.V(1).Info("skipping archive -- does not appear to be a versioned tools archive", "name", archiveName) + continue + } + + if *ver == version && details.OS == platform.OS && details.Arch == platform.Arch { + loc, err = url.Parse(archive.SelfLink) + if err != nil { + return fmt.Errorf("error parsing selfLink %q, %w", loc, err) + } + name = archiveName + break + } + } + } + if name == "" { + return fmt.Errorf("unable to find archive for %s (%s,%s)", version, platform.OS, platform.Arch) + } + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return fmt.Errorf("unable to construct request to fetch %s: %w", name, err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("unable to fetch %s (%s): %w", name, req.URL, err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("unable fetch %s (%s) -- got status %q", name, req.URL, resp.Status) + } + + return readBody(resp, out, name, platform) +} + +// FetchSum fetches the checksum for the given concrete version & platform into +// the given platform item. 
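+// The hash is taken directly from the index file, so no additional per-archive
+// request is needed.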
+func (c *HTTPClient) FetchSum(ctx context.Context, version versions.Concrete, platform *versions.PlatformItem) error { + index, err := c.getIndex(ctx) + if err != nil { + return err + } + + for _, releases := range index.Releases { + for archiveName, archive := range releases { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, archiveName) + if ver == nil { + c.Log.V(1).Info("skipping archive -- does not appear to be a versioned tools archive", "name", archiveName) + continue + } + + if *ver == version && details.OS == platform.OS && details.Arch == platform.Arch { + platform.Hash = &versions.Hash{ + Type: versions.SHA512HashType, + Encoding: versions.HexHashEncoding, + Value: archive.Hash, + } + return nil + } + } + } + + return fmt.Errorf("unable to find archive for %s (%s,%s)", version, platform.OS, platform.Arch) +} + +func (c *HTTPClient) getIndex(ctx context.Context) (*Index, error) { + indexURL := c.IndexURL + if indexURL == "" { + indexURL = DefaultIndexURL + } + + loc, err := url.Parse(indexURL) + if err != nil { + return nil, fmt.Errorf("unable to parse index URL: %w", err) + } + + c.Log.V(1).Info("listing versions", "index", indexURL) + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return nil, fmt.Errorf("unable to construct request to get index: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("unable to perform request to get index: %w", err) + } + + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, fmt.Errorf("unable to get index -- got status %q", resp.Status) + } + + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to get index -- unable to read body %w", err) + } + + var index Index + if err := yaml.Unmarshal(responseBody, &index); err != nil { + return nil, fmt.Errorf("unable to unmarshal index: %w", err) + } + return &index, nil +} diff --git a/tools/setup-envtest/remote/read_body.go b/tools/setup-envtest/remote/read_body.go new file mode 100644 index 0000000000..650e41282c --- /dev/null +++ b/tools/setup-envtest/remote/read_body.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 The Kubernetes Authors + +package remote + +import ( + //nolint:gosec // We're aware that md5 is a weak cryptographic primitive, but we don't have a choice here. + "crypto/md5" + "crypto/sha512" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "net/http" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +func readBody(resp *http.Response, out io.Writer, archiveName string, platform versions.PlatformItem) error { + if platform.Hash != nil { + // stream in chunks to do the checksum, don't load the whole thing into + // memory to avoid causing issues with big files. + buf := make([]byte, 32*1024) // 32KiB, same as io.Copy + var hasher hash.Hash + switch platform.Hash.Type { + case versions.SHA512HashType: + hasher = sha512.New() + case versions.MD5HashType: + hasher = md5.New() //nolint:gosec // We're aware that md5 is a weak cryptographic primitive, but we don't have a choice here. 
+ default: + return fmt.Errorf("hash type %s not implemented", platform.Hash.Type) + } + for cont := true; cont; { + amt, err := resp.Body.Read(buf) + if err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("unable read next chunk of %s: %w", archiveName, err) + } + if amt > 0 { + // checksum never returns errors according to docs + hasher.Write(buf[:amt]) + if _, err := out.Write(buf[:amt]); err != nil { + return fmt.Errorf("unable write next chunk of %s: %w", archiveName, err) + } + } + cont = amt > 0 && !errors.Is(err, io.EOF) + } + + var sum string + switch platform.Hash.Encoding { + case versions.Base64HashEncoding: + sum = base64.StdEncoding.EncodeToString(hasher.Sum(nil)) + case versions.HexHashEncoding: + sum = hex.EncodeToString(hasher.Sum(nil)) + default: + return fmt.Errorf("hash encoding %s not implemented", platform.Hash.Encoding) + } + if sum != platform.Hash.Value { + return fmt.Errorf("checksum mismatch for %s: %s (computed) != %s (reported)", archiveName, sum, platform.Hash.Value) + } + } else if _, err := io.Copy(out, resp.Body); err != nil { + return fmt.Errorf("unable to download %s: %w", archiveName, err) + } + return nil +} diff --git a/tools/setup-envtest/store/store_test.go b/tools/setup-envtest/store/store_test.go index d5607aede6..f0d83a1f79 100644 --- a/tools/setup-envtest/store/store_test.go +++ b/tools/setup-envtest/store/store_test.go @@ -125,23 +125,55 @@ var _ = Describe("Store", func() { }) }) - Describe("adding items", func() { + Describe("adding items (GCS archives)", func() { + archiveName := "kubebuilder-tools-1.16.3-linux-amd64.tar.gz" + It("should support .tar.gz input", func() { - Expect(st.Add(logCtx(), newItem, makeFakeArchive(newName))).To(Succeed()) + Expect(st.Add(logCtx(), newItem, makeFakeArchive(archiveName, "kubebuilder/bin/"))).To(Succeed()) Expect(st.Has(newItem)).To(BeTrue(), "should have the item after adding it") }) It("should extract binaries from the given archive to a directly to the item's directory, regardless of path", func() { - Expect(st.Add(logCtx(), newItem, makeFakeArchive(newName))).To(Succeed()) + Expect(st.Add(logCtx(), newItem, makeFakeArchive(archiveName, "kubebuilder/bin/"))).To(Succeed()) dirName := newItem.Platform.BaseName(newItem.Version) - Expect(afero.ReadFile(st.Root, filepath.Join("k8s", dirName, "some-file"))).To(HavePrefix(newName + "some-file")) - Expect(afero.ReadFile(st.Root, filepath.Join("k8s", dirName, "other-file"))).To(HavePrefix(newName + "other-file")) + Expect(afero.ReadFile(st.Root, filepath.Join("k8s", dirName, "some-file"))).To(HavePrefix(archiveName + "some-file")) + Expect(afero.ReadFile(st.Root, filepath.Join("k8s", dirName, "other-file"))).To(HavePrefix(archiveName + "other-file")) }) It("should clean up any existing item directory before creating the new one", func() { item := localVersions[0] - Expect(st.Add(logCtx(), item, makeFakeArchive(newName))).To(Succeed()) + Expect(st.Add(logCtx(), item, makeFakeArchive(archiveName, "kubebuilder/bin/"))).To(Succeed()) + Expect(st.Root.Stat(filepath.Join("k8s", item.Platform.BaseName(item.Version)))).NotTo(BeNil(), "new files should exist") + }) + It("should clean up if it errors before finishing", func() { + item := localVersions[0] + Expect(st.Add(logCtx(), item, new(bytes.Buffer))).NotTo(Succeed(), "should fail to extract") + _, err := st.Root.Stat(filepath.Join("k8s", item.Platform.BaseName(item.Version))) + Expect(err).To(HaveOccurred(), "the binaries dir for the item should be gone") + + }) + }) + + Describe("adding items 
(controller-tools archives)", func() { + archiveName := "envtest-v1.16.3-linux-amd64.tar.gz" + + It("should support .tar.gz input", func() { + Expect(st.Add(logCtx(), newItem, makeFakeArchive(archiveName, "controller-tools/envtest/"))).To(Succeed()) + Expect(st.Has(newItem)).To(BeTrue(), "should have the item after adding it") + }) + + It("should extract binaries from the given archive to a directly to the item's directory, regardless of path", func() { + Expect(st.Add(logCtx(), newItem, makeFakeArchive(archiveName, "controller-tools/envtest/"))).To(Succeed()) + + dirName := newItem.Platform.BaseName(newItem.Version) + Expect(afero.ReadFile(st.Root, filepath.Join("k8s", dirName, "some-file"))).To(HavePrefix(archiveName + "some-file")) + Expect(afero.ReadFile(st.Root, filepath.Join("k8s", dirName, "other-file"))).To(HavePrefix(archiveName + "other-file")) + }) + + It("should clean up any existing item directory before creating the new one", func() { + item := localVersions[0] + Expect(st.Add(logCtx(), item, makeFakeArchive(archiveName, "controller-tools/envtest/"))).To(Succeed()) Expect(st.Root.Stat(filepath.Join("k8s", item.Platform.BaseName(item.Version)))).NotTo(BeNil(), "new files should exist") }) It("should clean up if it errors before finishing", func() { @@ -187,8 +219,6 @@ var ( Version: ver(1, 16, 3), Platform: versions.Platform{OS: "linux", Arch: "amd64"}, } - - newName = "kubebuilder-tools-1.16.3-linux-amd64.tar.gz" ) func ver(major, minor, patch int) versions.Concrete { @@ -199,13 +229,13 @@ func ver(major, minor, patch int) versions.Concrete { } } -func makeFakeArchive(magic string) io.Reader { +func makeFakeArchive(magic, relativePath string) io.Reader { out := new(bytes.Buffer) gzipWriter := gzip.NewWriter(out) tarWriter := tar.NewWriter(gzipWriter) Expect(tarWriter.WriteHeader(&tar.Header{ Typeflag: tar.TypeDir, - Name: "kubebuilder/bin/", // so we can ensure we skip non-files + Name: relativePath, // so we can ensure we skip non-files Mode: 0777, })).To(Succeed()) for _, fileName := range []string{"some-file", "other-file"} { @@ -218,9 +248,9 @@ func makeFakeArchive(magic string) io.Reader { panic(err) } - // write to kubebuilder/bin/fileName + // write to relativePath/fileName err := tarWriter.WriteHeader(&tar.Header{ - Name: "kubebuilder/bin/" + fileName, + Name: relativePath + fileName, Size: int64(len(chunk[:])), Mode: 0777, // so we can check that we fix this later }) diff --git a/tools/setup-envtest/versions/misc_test.go b/tools/setup-envtest/versions/misc_test.go index 8a97de0410..dcb87be8b2 100644 --- a/tools/setup-envtest/versions/misc_test.go +++ b/tools/setup-envtest/versions/misc_test.go @@ -95,7 +95,8 @@ var _ = Describe("Platform", func() { Specify("knows how to produce an archive name", func() { plat := Platform{OS: "linux", Arch: "amd64"} ver := Concrete{Major: 1, Minor: 16, Patch: 3} - Expect(plat.ArchiveName(ver)).To(Equal("kubebuilder-tools-1.16.3-linux-amd64.tar.gz")) + Expect(plat.ArchiveName(true, ver)).To(Equal("kubebuilder-tools-1.16.3-linux-amd64.tar.gz")) + Expect(plat.ArchiveName(false, ver)).To(Equal("envtest-v1.16.3-linux-amd64.tar.gz")) }) Describe("parsing", func() { @@ -110,7 +111,7 @@ var _ = Describe("Platform", func() { Expect(ver).To(BeNil()) }) }) - Context("for archive names", func() { + Context("for archive names (GCS)", func() { It("should accept strings of the form kubebuilder-tools-x.y.z-os-arch.tar.gz", func() { ver, plat := ExtractWithPlatform(ArchiveRE, "kubebuilder-tools-1.16.3-linux-amd64.tar.gz") 
Expect(ver).To(Equal(&Concrete{Major: 1, Minor: 16, Patch: 3})) @@ -121,6 +122,17 @@ var _ = Describe("Platform", func() { Expect(ver).To(BeNil()) }) }) + Context("for archive names (controller-tools)", func() { + It("should accept strings of the form envtest-vx.y.z-os-arch.tar.gz", func() { + ver, plat := ExtractWithPlatform(ArchiveRE, "envtest-v1.16.3-linux-amd64.tar.gz") + Expect(ver).To(Equal(&Concrete{Major: 1, Minor: 16, Patch: 3})) + Expect(plat).To(Equal(Platform{OS: "linux", Arch: "amd64"})) + }) + It("should reject nonsense strings", func() { + ver, _ := ExtractWithPlatform(ArchiveRE, "envtest-v1.16.3-linux-amd64.tar.sum") + Expect(ver).To(BeNil()) + }) + }) }) }) diff --git a/tools/setup-envtest/versions/parse.go b/tools/setup-envtest/versions/parse.go index c053bf8757..21d38bb345 100644 --- a/tools/setup-envtest/versions/parse.go +++ b/tools/setup-envtest/versions/parse.go @@ -17,8 +17,6 @@ var ( // ConcreteVersionRE matches a concrete version anywhere in the string. ConcreteVersionRE = regexp.MustCompile(`(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)`) - // OnlyConcreteVersionRE matches a string that's just a concrete version. - OnlyConcreteVersionRE = regexp.MustCompile(`^` + ConcreteVersionRE.String() + `$`) ) // FromExpr extracts a version from a string in the form of a semver version, diff --git a/tools/setup-envtest/versions/platform.go b/tools/setup-envtest/versions/platform.go index 16c08b38ff..8b32ccd5bc 100644 --- a/tools/setup-envtest/versions/platform.go +++ b/tools/setup-envtest/versions/platform.go @@ -37,17 +37,58 @@ func (p Platform) BaseName(ver Concrete) string { } // ArchiveName returns the full archive name for this version and platform. -func (p Platform) ArchiveName(ver Concrete) string { - return "kubebuilder-tools-" + p.BaseName(ver) + ".tar.gz" +// useGCS is deprecated and will be removed when the remote.GCSClient is removed. +func (p Platform) ArchiveName(useGCS bool, ver Concrete) string { + if useGCS { + return "kubebuilder-tools-" + p.BaseName(ver) + ".tar.gz" + } + return "envtest-v" + p.BaseName(ver) + ".tar.gz" } // PlatformItem represents a platform with corresponding // known metadata for its download. type PlatformItem struct { Platform - MD5 string + + *Hash } +// Hash of an archive with envtest binaries. +type Hash struct { + // Type of the hash. + // GCS uses MD5HashType, controller-tools uses SHA512HashType. + Type HashType + + // Encoding of the hash value. + // GCS uses Base64HashEncoding, controller-tools uses HexHashEncoding. + Encoding HashEncoding + + // Value of the hash. + Value string +} + +// HashType is the type of a hash. +type HashType string + +const ( + // SHA512HashType represents a sha512 hash + SHA512HashType HashType = "sha512" + + // MD5HashType represents a md5 hash + MD5HashType HashType = "md5" +) + +// HashEncoding is the encoding of a hash +type HashEncoding string + +const ( + // Base64HashEncoding represents base64 encoding + Base64HashEncoding HashEncoding = "base64" + + // HexHashEncoding represents hex encoding + HexHashEncoding HashEncoding = "hex" +) + // Set is a concrete version and all the corresponding platforms that it's available for. type Set struct { Version Concrete @@ -81,5 +122,7 @@ var ( // VersionPlatformRE matches concrete version-platform strings. VersionPlatformRE = regexp.MustCompile(`^` + versionPlatformREBase + `$`) // ArchiveRE matches concrete version-platform.tar.gz strings. 
- ArchiveRE = regexp.MustCompile(`^kubebuilder-tools-` + versionPlatformREBase + `\.tar\.gz$`) + // The archives published to GCS by kubebuilder use the "kubebuilder-tools-" prefix (e.g. "kubebuilder-tools-1.30.0-darwin-amd64.tar.gz"). + // The archives published to GitHub releases by controller-tools use the "envtest-v" prefix (e.g. "envtest-v1.30.0-darwin-amd64.tar.gz"). + ArchiveRE = regexp.MustCompile(`^(kubebuilder-tools-|envtest-v)` + versionPlatformREBase + `\.tar\.gz$`) ) diff --git a/tools/setup-envtest/workflows/workflows_test.go b/tools/setup-envtest/workflows/workflows_test.go index a0bd7321f7..8c4007a415 100644 --- a/tools/setup-envtest/workflows/workflows_test.go +++ b/tools/setup-envtest/workflows/workflows_test.go @@ -5,15 +5,17 @@ package workflows_test import ( "bytes" + "fmt" "io/fs" "path/filepath" + "sort" "strings" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/onsi/gomega/ghttp" "github.com/spf13/afero" - + "k8s.io/apimachinery/pkg/util/sets" envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" @@ -46,280 +48,288 @@ const ( testStorePath = ".teststore" ) -var _ = Describe("Workflows", func() { - var ( - env *envp.Env - out *bytes.Buffer - server *ghttp.Server - remoteItems []item - ) - BeforeEach(func() { - out = new(bytes.Buffer) - baseFs := afero.Afero{Fs: afero.NewMemMapFs()} - env = &envp.Env{ - Log: testLog, - VerifySum: true, // on by default - FS: baseFs, - Store: &store.Store{Root: afero.NewBasePathFs(baseFs, testStorePath)}, - Out: out, - Platform: versions.PlatformItem{ // default - Platform: versions.Platform{ - OS: "linux", - Arch: "amd64", - }, - }, - Client: &remote.Client{ - Log: testLog.WithName("remote-client"), - Bucket: "kubebuilder-tools-test", // test custom bucket functionality too - Server: "localhost:-1", - Insecure: true, // no https in httptest :-( - }, - } - server = ghttp.NewServer() - env.Client.Server = server.Addr() - - fakeStore(env.FS, testStorePath) - remoteItems = remoteVersions - }) - JustBeforeEach(func() { - handleRemoteVersions(server, remoteItems) - }) - AfterEach(func() { - server.Close() - server = nil - }) +const ( + gcsMode = "GCS" + httpMode = "HTTP" +) + +var _ = Describe("GCS Client", func() { + WorkflowTest(gcsMode) +}) + +var _ = Describe("HTTP Client", func() { + WorkflowTest(httpMode) +}) - Describe("use", func() { - var flow wf.Use +func WorkflowTest(testMode string) { + Describe("Workflows", func() { + var ( + env *envp.Env + out *bytes.Buffer + server *ghttp.Server + remoteGCSItems []item + remoteHTTPItems itemsHTTP + ) BeforeEach(func() { - // some defaults for most tests - env.Version = versions.Spec{ - Selector: ver(1, 16, 0), + out = new(bytes.Buffer) + baseFs := afero.Afero{Fs: afero.NewMemMapFs()} + + server = ghttp.NewServer() + + var client remote.Client + switch testMode { + case gcsMode: + client = &remote.GCSClient{ //nolint:staticcheck // deprecation accepted for now + Log: testLog.WithName("gcs-client"), + Bucket: "kubebuilder-tools-test", // test custom bucket functionality too + Server: server.Addr(), + Insecure: true, // no https in httptest :-( + } + case httpMode: + client = &remote.HTTPClient{ + Log: testLog.WithName("http-client"), + IndexURL: fmt.Sprintf("http://%s/%s", server.Addr(), "envtest-releases.yaml"), + } } - flow = wf.Use{ - PrintFormat: envp.PrintPath, + + env = &envp.Env{ + Log: testLog, + VerifySum: true, // on by default + FS: 
baseFs, + Store: &store.Store{Root: afero.NewBasePathFs(baseFs, testStorePath)}, + Out: out, + Platform: versions.PlatformItem{ // default + Platform: versions.Platform{ + OS: "linux", + Arch: "amd64", + }, + }, + Client: client, } - }) - It("should initialize the store if it doesn't exist", func() { - Expect(env.FS.RemoveAll(testStorePath)).To(Succeed()) - // need to set this to a valid remote version cause our store is now empty - env.Version = versions.Spec{Selector: ver(1, 16, 4)} - flow.Do(env) - Expect(env.FS.Stat(testStorePath)).NotTo(BeNil()) + fakeStore(env.FS, testStorePath) + remoteGCSItems = remoteVersionsGCS + remoteHTTPItems = remoteVersionsHTTP + }) + JustBeforeEach(func() { + switch testMode { + case gcsMode: + handleRemoteVersionsGCS(server, remoteGCSItems) + case httpMode: + handleRemoteVersionsHTTP(server, remoteHTTPItems) + } + }) + AfterEach(func() { + server.Close() + server = nil }) - Context("when use env is set", func() { + Describe("use", func() { + var flow wf.Use BeforeEach(func() { - flow.UseEnv = true - }) - It("should fall back to normal behavior when the env is not set", func() { - flow.Do(env) - Expect(out.String()).To(HaveSuffix("/1.16.0-linux-amd64"), "should fall back to a local version") - }) - It("should fall back to normal behavior if binaries are missing", func() { - flow.AssetsPath = ".teststore/missing-binaries" - flow.Do(env) - Expect(out.String()).To(HaveSuffix("/1.16.0-linux-amd64"), "should fall back to a local version") - }) - It("should use the value of the env if it contains the right binaries", func() { - flow.AssetsPath = ".teststore/good-version" - flow.Do(env) - Expect(out.String()).To(Equal(flow.AssetsPath)) - }) - It("should not try and check the version of the binaries", func() { - flow.AssetsPath = ".teststore/wrong-version" - flow.Do(env) - Expect(out.String()).To(Equal(flow.AssetsPath)) + // some defaults for most tests + env.Version = versions.Spec{ + Selector: ver(1, 16, 0), + } + flow = wf.Use{ + PrintFormat: envp.PrintPath, + } }) - It("should not need to contact the network", func() { - server.Close() - flow.AssetsPath = ".teststore/good-version" + + It("should initialize the store if it doesn't exist", func() { + Expect(env.FS.RemoveAll(testStorePath)).To(Succeed()) + // need to set this to a valid remote version cause our store is now empty + env.Version = versions.Spec{Selector: ver(1, 16, 4)} flow.Do(env) - // expect to not get a panic -- if we do, it'll cause the test to fail + Expect(env.FS.Stat(testStorePath)).NotTo(BeNil()) }) - }) - Context("when downloads are disabled", func() { - BeforeEach(func() { - env.NoDownload = true - server.Close() + Context("when use env is set", func() { + BeforeEach(func() { + flow.UseEnv = true + }) + It("should fall back to normal behavior when the env is not set", func() { + flow.Do(env) + Expect(out.String()).To(HaveSuffix("/1.16.0-linux-amd64"), "should fall back to a local version") + }) + It("should fall back to normal behavior if binaries are missing", func() { + flow.AssetsPath = ".teststore/missing-binaries" + flow.Do(env) + Expect(out.String()).To(HaveSuffix("/1.16.0-linux-amd64"), "should fall back to a local version") + }) + It("should use the value of the env if it contains the right binaries", func() { + flow.AssetsPath = ".teststore/good-version" + flow.Do(env) + Expect(out.String()).To(Equal(flow.AssetsPath)) + }) + It("should not try and check the version of the binaries", func() { + flow.AssetsPath = ".teststore/wrong-version" + flow.Do(env) + 
Expect(out.String()).To(Equal(flow.AssetsPath)) + }) + It("should not need to contact the network", func() { + server.Close() + flow.AssetsPath = ".teststore/good-version" + flow.Do(env) + // expect to not get a panic -- if we do, it'll cause the test to fail + }) }) - // It("should not contact the network") is a gimme here, because we - // call server.Close() above. + Context("when downloads are disabled", func() { + BeforeEach(func() { + env.NoDownload = true + server.Close() + }) - It("should error if no matches are found locally", func() { - defer shouldHaveError() - env.Version.Selector = versions.Concrete{Major: 9001} - flow.Do(env) + // It("should not contact the network") is a gimme here, because we + // call server.Close() above. + + It("should error if no matches are found locally", func() { + defer shouldHaveError() + env.Version.Selector = versions.Concrete{Major: 9001} + flow.Do(env) + }) + It("should settle for the latest local match if latest is requested", func() { + env.Version = versions.Spec{ + CheckLatest: true, + Selector: versions.PatchSelector{ + Major: 1, + Minor: 16, + Patch: versions.AnyPoint, + }, + } + + flow.Do(env) + + // latest on "server" is 1.16.4, shouldn't use that + Expect(out.String()).To(HaveSuffix("/1.16.1-linux-amd64"), "should use the latest local version") + }) + }) + + Context("if latest is requested", func() { + It("should contact the network to see if there's anything newer", func() { + env.Version = versions.Spec{ + CheckLatest: true, + Selector: versions.PatchSelector{ + Major: 1, Minor: 16, Patch: versions.AnyPoint, + }, + } + flow.Do(env) + Expect(out.String()).To(HaveSuffix("/1.16.4-linux-amd64"), "should use the latest remote version") + }) + It("should still use the latest local if the network doesn't have anything newer", func() { + env.Version = versions.Spec{ + CheckLatest: true, + Selector: versions.PatchSelector{ + Major: 1, Minor: 14, Patch: versions.AnyPoint, + }, + } + + flow.Do(env) + + // latest on the server is 1.14.1, latest local is 1.14.26 + Expect(out.String()).To(HaveSuffix("/1.14.26-linux-amd64"), "should use the latest local version") + }) }) - It("should settle for the latest local match if latest is requested", func() { + + It("should check local for a match first", func() { + server.Close() // confirm no network env.Version = versions.Spec{ - CheckLatest: true, - Selector: versions.PatchSelector{ - Major: 1, - Minor: 16, - Patch: versions.AnyPoint, - }, + Selector: versions.TildeSelector{Concrete: ver(1, 16, 0)}, } - flow.Do(env) - - // latest on "server" is 1.16.4, shouldn't use that + // latest on the server is 1.16.4, latest local is 1.16.1 Expect(out.String()).To(HaveSuffix("/1.16.1-linux-amd64"), "should use the latest local version") }) - }) - Context("if latest is requested", func() { - It("should contact the network to see if there's anything newer", func() { + It("should fall back to the network if no local matches are found", func() { env.Version = versions.Spec{ - CheckLatest: true, - Selector: versions.PatchSelector{ - Major: 1, Minor: 16, Patch: versions.AnyPoint, - }, + Selector: versions.TildeSelector{Concrete: ver(1, 19, 0)}, } flow.Do(env) - Expect(out.String()).To(HaveSuffix("/1.16.4-linux-amd64"), "should use the latest remote version") + Expect(out.String()).To(HaveSuffix("/1.19.2-linux-amd64"), "should have a remote version") }) - It("should still use the latest local if the network doesn't have anything newer", func() { + + It("should error out if no matches can be found anywhere", func() { + 
defer shouldHaveError() env.Version = versions.Spec{ - CheckLatest: true, - Selector: versions.PatchSelector{ - Major: 1, Minor: 14, Patch: versions.AnyPoint, - }, + Selector: versions.TildeSelector{Concrete: ver(0, 0, 1)}, } - flow.Do(env) - - // latest on the server is 1.14.1, latest local is 1.14.26 - Expect(out.String()).To(HaveSuffix("/1.14.26-linux-amd64"), "should use the latest local version") }) - }) - - It("should check local for a match first", func() { - server.Close() // confirm no network - env.Version = versions.Spec{ - Selector: versions.TildeSelector{Concrete: ver(1, 16, 0)}, - } - flow.Do(env) - // latest on the server is 1.16.4, latest local is 1.16.1 - Expect(out.String()).To(HaveSuffix("/1.16.1-linux-amd64"), "should use the latest local version") - }) - - It("should fall back to the network if no local matches are found", func() { - env.Version = versions.Spec{ - Selector: versions.TildeSelector{Concrete: ver(1, 19, 0)}, - } - flow.Do(env) - Expect(out.String()).To(HaveSuffix("/1.19.2-linux-amd64"), "should have a remote version") - }) - - It("should error out if no matches can be found anywhere", func() { - defer shouldHaveError() - env.Version = versions.Spec{ - Selector: versions.TildeSelector{Concrete: ver(0, 0, 1)}, - } - flow.Do(env) - }) - It("should skip local versions matches with non-matching platforms", func() { - env.NoDownload = true // so we get an error - defer shouldHaveError() - env.Version = versions.Spec{ - // has non-matching local versions - Selector: ver(1, 13, 0), - } - - flow.Do(env) - }) - - It("should skip remote version matches with non-matching platforms", func() { - defer shouldHaveError() - env.Version = versions.Spec{ - // has a non-matching remote version - Selector: versions.TildeSelector{Concrete: ver(1, 11, 1)}, - } - flow.Do(env) - }) - - Describe("verifying the checksum", func() { - BeforeEach(func() { - remoteItems = append(remoteItems, item{ - meta: bucketObject{ - Name: "kubebuilder-tools-86.75.309-linux-amd64.tar.gz", - Hash: "nottherightone!", - }, - contents: remoteItems[0].contents, // need a valid tar.gz file to not error from that - }) + It("should skip local versions matches with non-matching platforms", func() { + env.NoDownload = true // so we get an error + defer shouldHaveError() env.Version = versions.Spec{ - Selector: ver(86, 75, 309), + // has non-matching local versions + Selector: ver(1, 13, 0), } + + flow.Do(env) }) - Specify("when enabled, should fail if the downloaded md5 checksum doesn't match", func() { + + It("should skip remote version matches with non-matching platforms", func() { defer shouldHaveError() + env.Version = versions.Spec{ + // has a non-matching remote version + Selector: versions.TildeSelector{Concrete: ver(1, 11, 1)}, + } flow.Do(env) }) - Specify("when disabled, shouldn't check the checksum at all", func() { - env.VerifySum = false - flow.Do(env) + + Describe("verifying the checksum", func() { + BeforeEach(func() { + remoteGCSItems = append(remoteGCSItems, item{ + meta: bucketObject{ + Name: "kubebuilder-tools-86.75.309-linux-amd64.tar.gz", + Hash: "nottherightone!", + }, + contents: remoteGCSItems[0].contents, // need a valid tar.gz file to not error from that + }) + // Recreate remoteHTTPItems to not impact others tests. 
+ remoteHTTPItems = makeContentsHTTP(remoteNamesHTTP) + remoteHTTPItems.index.Releases["v86.75.309"] = map[string]remote.Archive{ + "envtest-v86.75.309-linux-amd64.tar.gz": { + SelfLink: "not used in this test", + Hash: "nottherightone!", + }, + } + // need a valid tar.gz file to not error from that + remoteHTTPItems.contents["envtest-v86.75.309-linux-amd64.tar.gz"] = remoteHTTPItems.contents["envtest-v1.10-darwin-amd64.tar.gz"] + + env.Version = versions.Spec{ + Selector: ver(86, 75, 309), + } + }) + Specify("when enabled, should fail if the downloaded hash doesn't match", func() { + defer shouldHaveError() + flow.Do(env) + }) + Specify("when disabled, shouldn't check the checksum at all", func() { + env.VerifySum = false + flow.Do(env) + }) }) }) - }) - - Describe("list", func() { - // split by fields so we're not matching on whitespace - listFields := func() [][]string { - resLines := strings.Split(strings.TrimSpace(out.String()), "\n") - resFields := make([][]string, len(resLines)) - for i, line := range resLines { - resFields[i] = strings.Fields(line) - } - return resFields - } - Context("when downloads are disabled", func() { - BeforeEach(func() { - server.Close() // ensure no network - env.NoDownload = true - }) - It("should include local contents sorted by version", func() { - env.Version = versions.AnyVersion - env.Platform.Platform = versions.Platform{OS: "*", Arch: "*"} - wf.List{}.Do(env) - - Expect(listFields()).To(Equal([][]string{ - {"(installed)", "v1.17.9", "linux/amd64"}, - {"(installed)", "v1.16.2", "ifonlysingularitywasstillathing/amd64"}, - {"(installed)", "v1.16.2", "linux/yourimagination"}, - {"(installed)", "v1.16.1", "linux/amd64"}, - {"(installed)", "v1.16.0", "linux/amd64"}, - {"(installed)", "v1.14.26", "hyperwarp/pixiedust"}, - {"(installed)", "v1.14.26", "linux/amd64"}, - })) - }) - It("should skip non-matching local contents", func() { - env.Version.Selector = versions.PatchSelector{ - Major: 1, Minor: 16, Patch: versions.AnyPoint, + Describe("list", func() { + // split by fields so we're not matching on whitespace + listFields := func() [][]string { + resLines := strings.Split(strings.TrimSpace(out.String()), "\n") + resFields := make([][]string, len(resLines)) + for i, line := range resLines { + resFields[i] = strings.Fields(line) } - env.Platform.Arch = "*" - wf.List{}.Do(env) - - Expect(listFields()).To(Equal([][]string{ - {"(installed)", "v1.16.2", "linux/yourimagination"}, - {"(installed)", "v1.16.1", "linux/amd64"}, - {"(installed)", "v1.16.0", "linux/amd64"}, - })) + return resFields + } - }) - }) - Context("when downloads are enabled", func() { - Context("when sorting", func() { + Context("when downloads are disabled", func() { BeforeEach(func() { - // shorten the list a bit for expediency - remoteItems = remoteItems[:7] + server.Close() // ensure no network + env.NoDownload = true }) - It("should sort local & remote by version", func() { + It("should include local contents sorted by version", func() { env.Version = versions.AnyVersion env.Platform.Platform = versions.Platform{OS: "*", Arch: "*"} wf.List{}.Do(env) @@ -332,89 +342,160 @@ var _ = Describe("Workflows", func() { {"(installed)", "v1.16.0", "linux/amd64"}, {"(installed)", "v1.14.26", "hyperwarp/pixiedust"}, {"(installed)", "v1.14.26", "linux/amd64"}, - {"(available)", "v1.11.1", "potato/cherrypie"}, - {"(available)", "v1.11.0", "darwin/amd64"}, - {"(available)", "v1.11.0", "linux/amd64"}, - {"(available)", "v1.10.1", "darwin/amd64"}, - {"(available)", "v1.10.1", "linux/amd64"}, })) + }) + 
It("should skip non-matching local contents", func() { + env.Version.Selector = versions.PatchSelector{ + Major: 1, Minor: 16, Patch: versions.AnyPoint, + } + env.Platform.Arch = "*" + wf.List{}.Do(env) + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + })) }) }) - It("should skip non-matching remote contents", func() { - env.Version.Selector = versions.PatchSelector{ - Major: 1, Minor: 16, Patch: versions.AnyPoint, - } - env.Platform.Arch = "*" - wf.List{}.Do(env) - - Expect(listFields()).To(Equal([][]string{ - {"(installed)", "v1.16.2", "linux/yourimagination"}, - {"(installed)", "v1.16.1", "linux/amd64"}, - {"(installed)", "v1.16.0", "linux/amd64"}, - {"(available)", "v1.16.4", "linux/amd64"}, - })) + Context("when downloads are enabled", func() { + Context("when sorting", func() { + BeforeEach(func() { + // shorten the list a bit for expediency + remoteGCSItems = remoteGCSItems[:7] + + // Recreate remoteHTTPItems to not impact others tests. + remoteHTTPItems = makeContentsHTTP(remoteNamesHTTP) + // Also only keep the first 7 items. + // Get the first 7 archive names + var archiveNames []string + for _, release := range remoteHTTPItems.index.Releases { + for archiveName := range release { + archiveNames = append(archiveNames, archiveName) + } + } + sort.Strings(archiveNames) + archiveNamesSet := sets.Set[string]{}.Insert(archiveNames[:7]...) + // Delete all other archives + for _, release := range remoteHTTPItems.index.Releases { + for archiveName := range release { + if !archiveNamesSet.Has(archiveName) { + delete(release, archiveName) + } + } + } + }) + It("should sort local & remote by version", func() { + env.Version = versions.AnyVersion + env.Platform.Platform = versions.Platform{OS: "*", Arch: "*"} + wf.List{}.Do(env) + + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.17.9", "linux/amd64"}, + {"(installed)", "v1.16.2", "ifonlysingularitywasstillathing/amd64"}, + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + {"(installed)", "v1.14.26", "hyperwarp/pixiedust"}, + {"(installed)", "v1.14.26", "linux/amd64"}, + {"(available)", "v1.11.1", "potato/cherrypie"}, + {"(available)", "v1.11.0", "darwin/amd64"}, + {"(available)", "v1.11.0", "linux/amd64"}, + {"(available)", "v1.10.1", "darwin/amd64"}, + {"(available)", "v1.10.1", "linux/amd64"}, + })) + }) + }) + It("should skip non-matching remote contents", func() { + env.Version.Selector = versions.PatchSelector{ + Major: 1, Minor: 16, Patch: versions.AnyPoint, + } + env.Platform.Arch = "*" + wf.List{}.Do(env) + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + {"(available)", "v1.16.4", "linux/amd64"}, + })) + }) }) }) - }) - Describe("cleanup", func() { - BeforeEach(func() { - server.Close() // ensure no network - flow := wf.Cleanup{} - env.Version = versions.AnyVersion - env.Platform.Arch = "*" - flow.Do(env) - }) + Describe("cleanup", func() { + BeforeEach(func() { + server.Close() // ensure no network + flow := wf.Cleanup{} + env.Version = versions.AnyVersion + env.Platform.Arch = "*" + flow.Do(env) + }) - It("should remove matching versions from the store & keep non-matching ones", func() { - entries, err := env.FS.ReadDir(".teststore/k8s") - 
Expect(err).NotTo(HaveOccurred(), "should be able to read the store") - Expect(entries).To(ConsistOf( - WithTransform(fs.FileInfo.Name, Equal("1.16.2-ifonlysingularitywasstillathing-amd64")), - WithTransform(fs.FileInfo.Name, Equal("1.14.26-hyperwarp-pixiedust")), - )) + It("should remove matching versions from the store & keep non-matching ones", func() { + entries, err := env.FS.ReadDir(".teststore/k8s") + Expect(err).NotTo(HaveOccurred(), "should be able to read the store") + Expect(entries).To(ConsistOf( + WithTransform(fs.FileInfo.Name, Equal("1.16.2-ifonlysingularitywasstillathing-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.14.26-hyperwarp-pixiedust")), + )) + }) }) - }) - Describe("sideload", func() { - var ( - flow wf.Sideload - // remote version fake contents are prefixed by the - // name for easier debugging, so we can use that here - expectedPrefix = remoteVersions[0].meta.Name - ) - BeforeEach(func() { - server.Close() // ensure no network - flow.Input = bytes.NewReader(remoteVersions[0].contents) - flow.PrintFormat = envp.PrintPath - }) - It("should initialize the store if it doesn't exist", func() { - env.Version.Selector = ver(1, 10, 0) - Expect(env.FS.RemoveAll(testStorePath)).To(Succeed()) - flow.Do(env) - Expect(env.FS.Stat(testStorePath)).NotTo(BeNil()) - }) - It("should fail if a non-concrete version is given", func() { - defer shouldHaveError() - env.Version = versions.LatestVersion - flow.Do(env) - }) - It("should fail if a non-concrete platform is given", func() { - defer shouldHaveError() - env.Version.Selector = ver(1, 10, 0) - env.Platform.Arch = "*" - flow.Do(env) - }) - It("should load the given gizipped tarball into our store as the given version", func() { - env.Version.Selector = ver(1, 10, 0) - flow.Do(env) - baseName := env.Platform.BaseName(*env.Version.AsConcrete()) - expectedPath := filepath.Join(".teststore/k8s", baseName, "some-file") - outContents, err := env.FS.ReadFile(expectedPath) - Expect(err).NotTo(HaveOccurred(), "should be able to load the unzipped file") - Expect(string(outContents)).To(HavePrefix(expectedPrefix), "should have the debugging prefix") + Describe("sideload", func() { + var ( + flow wf.Sideload + ) + + var expectedPrefix string + if testMode == gcsMode { + // remote version fake contents are prefixed by the + // name for easier debugging, so we can use that here + expectedPrefix = remoteVersionsGCS[0].meta.Name + } + if testMode == httpMode { + // hard coding to one of the archives in remoteVersionsHTTP as we can't pick the "first" of a map. 
+ expectedPrefix = "envtest-v1.10-darwin-amd64.tar.gz" + } + + BeforeEach(func() { + server.Close() // ensure no network + var content []byte + if testMode == gcsMode { + content = remoteVersionsGCS[0].contents + } + if testMode == httpMode { + content = remoteVersionsHTTP.contents[expectedPrefix] + } + flow.Input = bytes.NewReader(content) + flow.PrintFormat = envp.PrintPath + }) + It("should initialize the store if it doesn't exist", func() { + env.Version.Selector = ver(1, 10, 0) + Expect(env.FS.RemoveAll(testStorePath)).To(Succeed()) + flow.Do(env) + Expect(env.FS.Stat(testStorePath)).NotTo(BeNil()) + }) + It("should fail if a non-concrete version is given", func() { + defer shouldHaveError() + env.Version = versions.LatestVersion + flow.Do(env) + }) + It("should fail if a non-concrete platform is given", func() { + defer shouldHaveError() + env.Version.Selector = ver(1, 10, 0) + env.Platform.Arch = "*" + flow.Do(env) + }) + It("should load the given gizipped tarball into our store as the given version", func() { + env.Version.Selector = ver(1, 10, 0) + flow.Do(env) + baseName := env.Platform.BaseName(*env.Version.AsConcrete()) + expectedPath := filepath.Join(".teststore/k8s", baseName, "some-file") + outContents, err := env.FS.ReadFile(expectedPath) + Expect(err).NotTo(HaveOccurred(), "should be able to load the unzipped file") + Expect(string(outContents)).To(HavePrefix(expectedPrefix), "should have the debugging prefix") + }) }) }) -}) +} diff --git a/tools/setup-envtest/workflows/workflows_testutils_test.go b/tools/setup-envtest/workflows/workflows_testutils_test.go index 93e832d763..e796e5d16c 100644 --- a/tools/setup-envtest/workflows/workflows_testutils_test.go +++ b/tools/setup-envtest/workflows/workflows_testutils_test.go @@ -9,7 +9,10 @@ import ( "compress/gzip" "crypto/md5" //nolint:gosec "crypto/rand" + "crypto/sha512" "encoding/base64" + "encoding/hex" + "fmt" "net/http" "path/filepath" @@ -17,12 +20,14 @@ import ( . 
"github.com/onsi/gomega" "github.com/onsi/gomega/ghttp" "github.com/spf13/afero" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" + "sigs.k8s.io/yaml" "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" ) var ( - remoteNames = []string{ + remoteNamesGCS = []string{ "kubebuilder-tools-1.10-darwin-amd64.tar.gz", "kubebuilder-tools-1.10-linux-amd64.tar.gz", "kubebuilder-tools-1.10.1-darwin-amd64.tar.gz", @@ -59,8 +64,68 @@ var ( "kubebuilder-tools-v1.19.2-linux-arm64.tar.gz", "kubebuilder-tools-v1.19.2-linux-ppc64le.tar.gz", } + remoteVersionsGCS = makeContentsGCS(remoteNamesGCS) - remoteVersions = makeContents(remoteNames) + remoteNamesHTTP = remote.Index{ + Releases: map[string]remote.Release{ + "v1.10.0": map[string]remote.Archive{ + "envtest-v1.10-darwin-amd64.tar.gz": {}, + "envtest-v1.10-linux-amd64.tar.gz": {}, + }, + "v1.10.1": map[string]remote.Archive{ + "envtest-v1.10.1-darwin-amd64.tar.gz": {}, + "envtest-v1.10.1-linux-amd64.tar.gz": {}, + }, + "v1.11.0": map[string]remote.Archive{ + "envtest-v1.11.0-darwin-amd64.tar.gz": {}, + "envtest-v1.11.0-linux-amd64.tar.gz": {}, + }, + "v1.11.1": map[string]remote.Archive{ + "envtest-v1.11.1-potato-cherrypie.tar.gz": {}, + }, + "v1.12.3": map[string]remote.Archive{ + "envtest-v1.12.3-darwin-amd64.tar.gz": {}, + "envtest-v1.12.3-linux-amd64.tar.gz": {}, + }, + "v1.13.1": map[string]remote.Archive{ + "envtest-v1.13.1-darwin-amd64.tar.gz": {}, + "envtest-v1.13.1-linux-amd64.tar.gz": {}, + }, + "v1.14.1": map[string]remote.Archive{ + "envtest-v1.14.1-darwin-amd64.tar.gz": {}, + "envtest-v1.14.1-linux-amd64.tar.gz": {}, + }, + "v1.15.5": map[string]remote.Archive{ + "envtest-v1.15.5-darwin-amd64.tar.gz": {}, + "envtest-v1.15.5-linux-amd64.tar.gz": {}, + }, + "v1.16.4": map[string]remote.Archive{ + "envtest-v1.16.4-darwin-amd64.tar.gz": {}, + "envtest-v1.16.4-linux-amd64.tar.gz": {}, + }, + "v1.17.9": map[string]remote.Archive{ + "envtest-v1.17.9-darwin-amd64.tar.gz": {}, + "envtest-v1.17.9-linux-amd64.tar.gz": {}, + }, + "v1.19.0": map[string]remote.Archive{ + "envtest-v1.19.0-darwin-amd64.tar.gz": {}, + "envtest-v1.19.0-linux-amd64.tar.gz": {}, + }, + "v1.19.2": map[string]remote.Archive{ + "envtest-v1.19.2-darwin-amd64.tar.gz": {}, + "envtest-v1.19.2-linux-amd64.tar.gz": {}, + "envtest-v1.19.2-linux-arm64.tar.gz": {}, + "envtest-v1.19.2-linux-ppc64le.tar.gz": {}, + }, + "v1.20.2": map[string]remote.Archive{ + "envtest-v1.20.2-darwin-amd64.tar.gz": {}, + "envtest-v1.20.2-linux-amd64.tar.gz": {}, + "envtest-v1.20.2-linux-arm64.tar.gz": {}, + "envtest-v1.20.2-linux-ppc64le.tar.gz": {}, + }, + }, + } + remoteVersionsHTTP = makeContentsHTTP(remoteNamesHTTP) // keep this sorted. 
localVersions = []versions.Set{ @@ -100,7 +165,7 @@ type bucketObject struct { Hash string `json:"md5Hash"` } -func makeContents(names []string) []item { +func makeContentsGCS(names []string) []item { res := make([]item, len(names)) for i, name := range names { var chunk [1024 * 48]byte // 1.5 times our chunk read size in GetVersion @@ -108,12 +173,12 @@ func makeContents(names []string) []item { if _, err := rand.Read(chunk[len(name):]); err != nil { panic(err) } - res[i] = verWith(name, chunk[:]) + res[i] = verWithGCS(name, chunk[:]) } return res } -func verWith(name string, contents []byte) item { +func verWithGCS(name string, contents []byte) item { out := new(bytes.Buffer) gzipWriter := gzip.NewWriter(out) tarWriter := tar.NewWriter(gzipWriter) @@ -140,7 +205,7 @@ func verWith(name string, contents []byte) item { return res } -func handleRemoteVersions(server *ghttp.Server, versions []item) { +func handleRemoteVersionsGCS(server *ghttp.Server, versions []item) { list := objectList{Items: make([]bucketObject, len(versions))} for i, ver := range versions { ver := ver // copy to avoid capturing the iteration variable @@ -163,6 +228,107 @@ func handleRemoteVersions(server *ghttp.Server, versions []item) { )) } +type itemsHTTP struct { + index remote.Index + contents map[string][]byte +} + +func makeContentsHTTP(index remote.Index) itemsHTTP { + // This creates a new copy of the index so modifying the index + // in some tests doesn't affect others. + res := itemsHTTP{ + index: remote.Index{ + Releases: map[string]remote.Release{}, + }, + contents: map[string][]byte{}, + } + + for releaseVersion, releases := range index.Releases { + res.index.Releases[releaseVersion] = remote.Release{} + for archiveName := range releases { + var chunk [1024 * 48]byte // 1.5 times our chunk read size in GetVersion + copy(chunk[:], archiveName) + if _, err := rand.Read(chunk[len(archiveName):]); err != nil { + panic(err) + } + content, hash := verWithHTTP(chunk[:]) + + res.index.Releases[releaseVersion][archiveName] = remote.Archive{ + Hash: hash, + // Note: Only storing the name of the archive for now. + // This will be expanded later to a full URL once the server is running. + SelfLink: archiveName, + } + res.contents[archiveName] = content + } + } + return res +} + +func verWithHTTP(contents []byte) ([]byte, string) { + out := new(bytes.Buffer) + gzipWriter := gzip.NewWriter(out) + tarWriter := tar.NewWriter(gzipWriter) + err := tarWriter.WriteHeader(&tar.Header{ + Name: "controller-tools/envtest/some-file", + Size: int64(len(contents)), + Mode: 0777, // so we can check that we fix this later + }) + if err != nil { + panic(err) + } + _, err = tarWriter.Write(contents) + if err != nil { + panic(err) + } + tarWriter.Close() + gzipWriter.Close() + content := out.Bytes() + // controller-tools is using sha512 + hash := sha512.Sum512(content) + hashEncoded := hex.EncodeToString(hash[:]) + return content, hashEncoded +} + +func handleRemoteVersionsHTTP(server *ghttp.Server, items itemsHTTP) { + if server.HTTPTestServer == nil { + // Just return for test cases where server is closed in BeforeEach. Otherwise server.Addr() below panics. + return + } + + // The index from items contains only relative SelfLinks. + // finalIndex will contain the full links based on server.Addr(). 
+ finalIndex := remote.Index{ + Releases: map[string]remote.Release{}, + } + + for releaseVersion, releases := range items.index.Releases { + finalIndex.Releases[releaseVersion] = remote.Release{} + + for archiveName, archive := range releases { + finalIndex.Releases[releaseVersion][archiveName] = remote.Archive{ + Hash: archive.Hash, + SelfLink: fmt.Sprintf("http://%s/%s", server.Addr(), archive.SelfLink), + } + content := items.contents[archiveName] + + // Note: Using the relative path from archive here instead of the full path. + server.RouteToHandler("GET", "/"+archive.SelfLink, func(resp http.ResponseWriter, req *http.Request) { + resp.WriteHeader(http.StatusOK) + Expect(resp.Write(content)).To(Equal(len(content))) + }) + } + } + + indexYAML, err := yaml.Marshal(finalIndex) + Expect(err).ToNot(HaveOccurred()) + + server.RouteToHandler("GET", "/envtest-releases.yaml", ghttp.RespondWith( + http.StatusOK, + indexYAML, + )) +} + func fakeStore(fs afero.Afero, dir string) { By("making the unpacked directory") unpackedBase := filepath.Join(dir, "k8s")