From cc1e48243dc500a24185382e8c37ff6b4e3ecaad Mon Sep 17 00:00:00 2001 From: Sunny Date: Fri, 28 Jan 2022 10:15:51 +0530 Subject: [PATCH 01/63] Introduce v1beta2 API package This commit introduces a v1beta2 API package for the staged breaking changes around conditions and general usage of the API objects. Signed-off-by: Hidde Beydals Signed-off-by: Sunny Co-authored-by: Hidde Beydals --- Makefile | 2 +- PROJECT | 12 + api/v1beta2/artifact_types.go | 72 +++ api/v1beta2/bucket_types.go | 208 ++++++ api/v1beta2/condition_types.go | 35 + api/v1beta2/doc.go | 20 + api/v1beta2/gitrepository_types.go | 280 ++++++++ api/v1beta2/groupversion_info.go | 33 + api/v1beta2/helmchart_types.go | 250 +++++++ api/v1beta2/helmrepository_types.go | 196 ++++++ api/v1beta2/source.go | 37 ++ api/v1beta2/zz_generated.deepcopy.go | 611 ++++++++++++++++++ .../source.toolkit.fluxcd.io_buckets.yaml | 233 +++++++ ...rce.toolkit.fluxcd.io_gitrepositories.yaml | 330 ++++++++++ .../source.toolkit.fluxcd.io_helmcharts.yaml | 255 ++++++++ ...ce.toolkit.fluxcd.io_helmrepositories.yaml | 218 +++++++ docs/api/source.md | 134 ++-- hack/boilerplate.go.txt | 2 +- 18 files changed, 2859 insertions(+), 69 deletions(-) create mode 100644 api/v1beta2/artifact_types.go create mode 100644 api/v1beta2/bucket_types.go create mode 100644 api/v1beta2/condition_types.go create mode 100644 api/v1beta2/doc.go create mode 100644 api/v1beta2/gitrepository_types.go create mode 100644 api/v1beta2/groupversion_info.go create mode 100644 api/v1beta2/helmchart_types.go create mode 100644 api/v1beta2/helmrepository_types.go create mode 100644 api/v1beta2/source.go create mode 100644 api/v1beta2/zz_generated.deepcopy.go diff --git a/Makefile b/Makefile index 3b0e5b876..bc315f6bb 100644 --- a/Makefile +++ b/Makefile @@ -127,7 +127,7 @@ manifests: controller-gen ## Generate manifests, e.g. CRD, RBAC, etc. cd api; $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role paths="./..." output:crd:artifacts:config="../config/crd/bases" api-docs: gen-crd-api-reference-docs ## Generate API reference documentation - $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1beta1 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/source.md + $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1beta2 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/source.md tidy: ## Run go mod tidy go mod tidy diff --git a/PROJECT b/PROJECT index a807390b9..776217e9f 100644 --- a/PROJECT +++ b/PROJECT @@ -1,6 +1,18 @@ domain: toolkit.fluxcd.io repo: github.com/fluxcd/source-controller resources: +- group: source + kind: GitRepository + version: v1beta2 +- group: source + kind: HelmRepository + version: v1beta2 +- group: source + kind: HelmChart + version: v1beta2 +- group: source + kind: Bucket + version: v1beta2 - group: source kind: GitRepository version: v1beta1 diff --git a/api/v1beta2/artifact_types.go b/api/v1beta2/artifact_types.go new file mode 100644 index 000000000..c1f6ab877 --- /dev/null +++ b/api/v1beta2/artifact_types.go @@ -0,0 +1,72 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "path" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Artifact represents the output of a source synchronisation. +type Artifact struct { + // Path is the relative file path of this artifact. + // +required + Path string `json:"path"` + + // URL is the HTTP address of this artifact. + // +required + URL string `json:"url"` + + // Revision is a human readable identifier traceable in the origin source + // system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm + // chart version, etc. + // +optional + Revision string `json:"revision"` + + // Checksum is the SHA256 checksum of the artifact. + // +optional + Checksum string `json:"checksum"` + + // LastUpdateTime is the timestamp corresponding to the last update of this + // artifact. + // +required + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` +} + +// HasRevision returns true if the given revision matches the current Revision +// of the Artifact. +func (in *Artifact) HasRevision(revision string) bool { + if in == nil { + return false + } + return in.Revision == revision +} + +// ArtifactDir returns the artifact dir path in the form of +// //. +func ArtifactDir(kind, namespace, name string) string { + kind = strings.ToLower(kind) + return path.Join(kind, namespace, name) +} + +// ArtifactPath returns the artifact path in the form of +// ///. +func ArtifactPath(kind, namespace, name, filename string) string { + return path.Join(ArtifactDir(kind, namespace, name), filename) +} diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go new file mode 100644 index 000000000..057b59b41 --- /dev/null +++ b/api/v1beta2/bucket_types.go @@ -0,0 +1,208 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // BucketKind is the string representation of a Bucket. + BucketKind = "Bucket" +) + +// BucketSpec defines the desired state of an S3 compatible bucket +type BucketSpec struct { + // The S3 compatible storage provider name, default ('generic'). + // +kubebuilder:validation:Enum=generic;aws;gcp + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` + + // The bucket name. + // +required + BucketName string `json:"bucketName"` + + // The bucket endpoint address. + // +required + Endpoint string `json:"endpoint"` + + // Insecure allows connecting to a non-TLS S3 HTTP endpoint. 
+ // +optional + Insecure bool `json:"insecure,omitempty"` + + // The bucket region. + // +optional + Region string `json:"region,omitempty"` + + // The name of the secret containing authentication credentials + // for the Bucket. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // The interval at which to check for bucket updates. + // +required + Interval metav1.Duration `json:"interval"` + + // The timeout for download operations, defaults to 60s. + // +kubebuilder:default="60s" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. + // +optional + Ignore *string `json:"ignore,omitempty"` + + // This flag tells the controller to suspend the reconciliation of this source. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` +} + +const ( + GenericBucketProvider string = "generic" + AmazonBucketProvider string = "aws" + GoogleBucketProvider string = "gcp" +) + +// BucketStatus defines the observed state of a bucket +type BucketStatus struct { + // ObservedGeneration is the last observed generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the Bucket. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the download link for the artifact output of the last Bucket sync. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful Bucket sync. + // +optional + Artifact *Artifact `json:"artifact,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // BucketOperationSucceedReason represents the fact that the bucket listing and + // download operations succeeded. + BucketOperationSucceedReason string = "BucketOperationSucceed" + + // BucketOperationFailedReason represents the fact that the bucket listing or + // download operations failed. + BucketOperationFailedReason string = "BucketOperationFailed" +) + +// BucketProgressing resets the conditions of the Bucket to metav1.Condition of +// type meta.ReadyCondition with status 'Unknown' and meta.ProgressingReason +// reason and message. It returns the modified Bucket. +func BucketProgressing(bucket Bucket) Bucket { + bucket.Status.ObservedGeneration = bucket.Generation + bucket.Status.URL = "" + bucket.Status.Conditions = []metav1.Condition{} + meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + return bucket +} + +// BucketReady sets the given Artifact and URL on the Bucket and sets the +// meta.ReadyCondition to 'True', with the given reason and message. It returns +// the modified Bucket. +func BucketReady(bucket Bucket, artifact Artifact, url, reason, message string) Bucket { + bucket.Status.Artifact = &artifact + bucket.Status.URL = url + meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + return bucket +} + +// BucketNotReady sets the meta.ReadyCondition on the Bucket to 'False', with +// the given reason and message. It returns the modified Bucket. 
+func BucketNotReady(bucket Bucket, reason, message string) Bucket { + meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + return bucket +} + +// BucketReadyMessage returns the message of the metav1.Condition of type +// meta.ReadyCondition with status 'True' if present, or an empty string. +func BucketReadyMessage(bucket Bucket) string { + if c := apimeta.FindStatusCondition(bucket.Status.Conditions, meta.ReadyCondition); c != nil { + if c.Status == metav1.ConditionTrue { + return c.Message + } + } + return "" +} + +// GetArtifact returns the latest artifact from the source if present in the +// status sub-resource. +func (in *Bucket) GetArtifact() *Artifact { + return in.Status.Artifact +} + +// GetStatusConditions returns a pointer to the Status.Conditions slice +func (in *Bucket) GetStatusConditions() *[]metav1.Condition { + return &in.Status.Conditions +} + +// GetInterval returns the interval at which the source is updated. +func (in *Bucket) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// +genclient +// +genclient:Namespaced +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// Bucket is the Schema for the buckets API +type Bucket struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BucketSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status BucketStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketList contains a list of Bucket +type BucketList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Bucket `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Bucket{}, &BucketList{}) +} diff --git a/api/v1beta2/condition_types.go b/api/v1beta2/condition_types.go new file mode 100644 index 000000000..7f40af6c0 --- /dev/null +++ b/api/v1beta2/condition_types.go @@ -0,0 +1,35 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +const SourceFinalizer = "finalizers.fluxcd.io" + +const ( + // URLInvalidReason represents the fact that a given source has an invalid URL. + URLInvalidReason string = "URLInvalid" + + // StorageOperationFailedReason signals a failure caused by a storage operation. + StorageOperationFailedReason string = "StorageOperationFailed" + + // AuthenticationFailedReason represents the fact that a given secret does not + // have the required fields or the provided credentials do not match. 
+ AuthenticationFailedReason string = "AuthenticationFailed" + + // VerificationFailedReason represents the fact that the cryptographic + // provenance verification for the source failed. + VerificationFailedReason string = "VerificationFailed" +) diff --git a/api/v1beta2/doc.go b/api/v1beta2/doc.go new file mode 100644 index 000000000..e9fca1650 --- /dev/null +++ b/api/v1beta2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta2 contains API Schema definitions for the source v1beta2 API group +// +kubebuilder:object:generate=true +// +groupName=source.toolkit.fluxcd.io +package v1beta2 diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go new file mode 100644 index 000000000..ac4f683ba --- /dev/null +++ b/api/v1beta2/gitrepository_types.go @@ -0,0 +1,280 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // GitRepositoryKind is the string representation of a GitRepository. + GitRepositoryKind = "GitRepository" + + // GoGitImplementation represents the go-git Git implementation kind. + GoGitImplementation = "go-git" + // LibGit2Implementation represents the git2go Git implementation kind. + LibGit2Implementation = "libgit2" +) + +// GitRepositorySpec defines the desired state of a Git repository. +type GitRepositorySpec struct { + // The repository URL, can be a HTTP/S or SSH address. + // +kubebuilder:validation:Pattern="^(http|https|ssh)://" + // +required + URL string `json:"url"` + + // The secret name containing the Git credentials. + // For HTTPS repositories the secret must contain username and password + // fields. + // For SSH repositories the secret must contain identity, identity.pub and + // known_hosts fields. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // The interval at which to check for repository updates. + // +required + Interval metav1.Duration `json:"interval"` + + // The timeout for remote Git operations like cloning, defaults to 60s. + // +kubebuilder:default="60s" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // The Git reference to checkout and monitor for changes, defaults to + // master branch. 
+ // +optional + Reference *GitRepositoryRef `json:"ref,omitempty"` + + // Verify OpenPGP signature for the Git commit HEAD points to. + // +optional + Verification *GitRepositoryVerification `json:"verify,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. + // +optional + Ignore *string `json:"ignore,omitempty"` + + // This flag tells the controller to suspend the reconciliation of this source. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // Determines which git client library to use. + // Defaults to go-git, valid values are ('go-git', 'libgit2'). + // +kubebuilder:validation:Enum=go-git;libgit2 + // +kubebuilder:default:=go-git + // +optional + GitImplementation string `json:"gitImplementation,omitempty"` + + // When enabled, after the clone is created, initializes all submodules within, + // using their default settings. + // This option is available only when using the 'go-git' GitImplementation. + // +optional + RecurseSubmodules bool `json:"recurseSubmodules,omitempty"` + + // Extra git repositories to map into the repository + Include []GitRepositoryInclude `json:"include,omitempty"` + + // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` +} + +func (in *GitRepositoryInclude) GetFromPath() string { + return in.FromPath +} + +func (in *GitRepositoryInclude) GetToPath() string { + if in.ToPath == "" { + return in.GitRepositoryRef.Name + } + return in.ToPath +} + +// GitRepositoryInclude defines a source with a from and to path. +type GitRepositoryInclude struct { + // Reference to a GitRepository to include. + GitRepositoryRef meta.LocalObjectReference `json:"repository"` + + // The path to copy contents from, defaults to the root directory. + // +optional + FromPath string `json:"fromPath"` + + // The path to copy contents to, defaults to the name of the source ref. + // +optional + ToPath string `json:"toPath"` +} + +// GitRepositoryRef defines the Git ref used for pull and checkout operations. +type GitRepositoryRef struct { + // The Git branch to checkout, defaults to master. + // +optional + Branch string `json:"branch,omitempty"` + + // The Git tag to checkout, takes precedence over Branch. + // +optional + Tag string `json:"tag,omitempty"` + + // The Git tag semver expression, takes precedence over Tag. + // +optional + SemVer string `json:"semver,omitempty"` + + // The Git commit SHA to checkout, if specified Tag filters will be ignored. + // +optional + Commit string `json:"commit,omitempty"` +} + +// GitRepositoryVerification defines the OpenPGP signature verification process. +type GitRepositoryVerification struct { + // Mode describes what git object should be verified, currently ('head'). + // +kubebuilder:validation:Enum=head + Mode string `json:"mode"` + + // The secret name containing the public keys of all trusted Git authors. + SecretRef meta.LocalObjectReference `json:"secretRef,omitempty"` +} + +// GitRepositoryStatus defines the observed state of a Git repository. +type GitRepositoryStatus struct { + // ObservedGeneration is the last observed generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the GitRepository. 
+ // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the download link for the artifact output of the last repository + // sync. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful repository sync. + // +optional + Artifact *Artifact `json:"artifact,omitempty"` + + // IncludedArtifacts represents the included artifacts from the last successful repository sync. + // +optional + IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // GitOperationSucceedReason represents the fact that the git clone, pull + // and checkout operations succeeded. + GitOperationSucceedReason string = "GitOperationSucceed" + + // GitOperationFailedReason represents the fact that the git clone, pull or + // checkout operations failed. + GitOperationFailedReason string = "GitOperationFailed" +) + +// GitRepositoryProgressing resets the conditions of the GitRepository to +// metav1.Condition of type meta.ReadyCondition with status 'Unknown' and +// meta.ProgressingReason reason and message. It returns the modified +// GitRepository. +func GitRepositoryProgressing(repository GitRepository) GitRepository { + repository.Status.ObservedGeneration = repository.Generation + repository.Status.URL = "" + repository.Status.Conditions = []metav1.Condition{} + meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + return repository +} + +// GitRepositoryReady sets the given Artifact and URL on the GitRepository and +// sets the meta.ReadyCondition to 'True', with the given reason and message. It +// returns the modified GitRepository. +func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArtifacts []*Artifact, url, reason, message string) GitRepository { + repository.Status.Artifact = &artifact + repository.Status.IncludedArtifacts = includedArtifacts + repository.Status.URL = url + meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + return repository +} + +// GitRepositoryNotReady sets the meta.ReadyCondition on the given GitRepository +// to 'False', with the given reason and message. It returns the modified +// GitRepository. +func GitRepositoryNotReady(repository GitRepository, reason, message string) GitRepository { + meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + return repository +} + +// GitRepositoryReadyMessage returns the message of the metav1.Condition of type +// meta.ReadyCondition with status 'True' if present, or an empty string. +func GitRepositoryReadyMessage(repository GitRepository) string { + if c := apimeta.FindStatusCondition(repository.Status.Conditions, meta.ReadyCondition); c != nil { + if c.Status == metav1.ConditionTrue { + return c.Message + } + } + return "" +} + +// GetArtifact returns the latest artifact from the source if present in the +// status sub-resource. +func (in *GitRepository) GetArtifact() *Artifact { + return in.Status.Artifact +} + +// GetStatusConditions returns a pointer to the Status.Conditions slice +func (in *GitRepository) GetStatusConditions() *[]metav1.Condition { + return &in.Status.Conditions +} + +// GetInterval returns the interval at which the source is updated. 
+func (in *GitRepository) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// +genclient +// +genclient:Namespaced +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=gitrepo +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// GitRepository is the Schema for the gitrepositories API +type GitRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec GitRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status GitRepositoryStatus `json:"status,omitempty"` +} + +// GitRepositoryList contains a list of GitRepository +// +kubebuilder:object:root=true +type GitRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GitRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&GitRepository{}, &GitRepositoryList{}) +} diff --git a/api/v1beta2/groupversion_info.go b/api/v1beta2/groupversion_info.go new file mode 100644 index 000000000..797e6c536 --- /dev/null +++ b/api/v1beta2/groupversion_info.go @@ -0,0 +1,33 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "source.toolkit.fluxcd.io", Version: "v1beta2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1beta2/helmchart_types.go b/api/v1beta2/helmchart_types.go new file mode 100644 index 000000000..033fd1a35 --- /dev/null +++ b/api/v1beta2/helmchart_types.go @@ -0,0 +1,250 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta2 + +import ( + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" +) + +// HelmChartKind is the string representation of a HelmChart. +const HelmChartKind = "HelmChart" + +// HelmChartSpec defines the desired state of a Helm chart. +type HelmChartSpec struct { + // The name or path the Helm chart is available at in the SourceRef. + // +required + Chart string `json:"chart"` + + // The chart version semver expression, ignored for charts from GitRepository + // and Bucket sources. Defaults to latest when omitted. + // +kubebuilder:default:=* + // +optional + Version string `json:"version,omitempty"` + + // The reference to the Source the chart is available at. + // +required + SourceRef LocalHelmChartSourceReference `json:"sourceRef"` + + // The interval at which to check the Source for updates. + // +required + Interval metav1.Duration `json:"interval"` + + // Determines what enables the creation of a new artifact. Valid values are + // ('ChartVersion', 'Revision'). + // See the documentation of the values for an explanation on their behavior. + // Defaults to ChartVersion when omitted. + // +kubebuilder:validation:Enum=ChartVersion;Revision + // +kubebuilder:default:=ChartVersion + // +optional + ReconcileStrategy string `json:"reconcileStrategy,omitempty"` + + // Alternative list of values files to use as the chart values (values.yaml + // is not included by default), expected to be a relative path in the SourceRef. + // Values files are merged in the order of this list with the last file overriding + // the first. Ignored when omitted. + // +optional + ValuesFiles []string `json:"valuesFiles,omitempty"` + + // Alternative values file to use as the default chart values, expected to + // be a relative path in the SourceRef. Deprecated in favor of ValuesFiles, + // for backwards compatibility the file defined here is merged before the + // ValuesFiles items. Ignored when omitted. + // +optional + // +deprecated + ValuesFile string `json:"valuesFile,omitempty"` + + // This flag tells the controller to suspend the reconciliation of this source. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` +} + +const ( + // ReconcileStrategyChartVersion reconciles when the version of the Helm chart is different. + ReconcileStrategyChartVersion string = "ChartVersion" + + // ReconcileStrategyRevision reconciles when the Revision of the source is different. + ReconcileStrategyRevision string = "Revision" +) + +// LocalHelmChartSourceReference contains enough information to let you locate +// the typed referenced object at namespace level. +type LocalHelmChartSourceReference struct { + // APIVersion of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + + // Kind of the referent, valid values are ('HelmRepository', 'GitRepository', + // 'Bucket'). + // +kubebuilder:validation:Enum=HelmRepository;GitRepository;Bucket + // +required + Kind string `json:"kind"` + + // Name of the referent. + // +required + Name string `json:"name"` +} + +// HelmChartStatus defines the observed state of the HelmChart. +type HelmChartStatus struct { + // ObservedGeneration is the last observed generation. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the HelmChart. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the download link for the last chart pulled. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful chart sync. + // +optional + Artifact *Artifact `json:"artifact,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // ChartPullFailedReason represents the fact that the pull of the Helm chart + // failed. + ChartPullFailedReason string = "ChartPullFailed" + + // ChartPullSucceededReason represents the fact that the pull of the Helm chart + // succeeded. + ChartPullSucceededReason string = "ChartPullSucceeded" + + // ChartPackageFailedReason represent the fact that the package of the Helm + // chart failed. + ChartPackageFailedReason string = "ChartPackageFailed" + + // ChartPackageSucceededReason represents the fact that the package of the Helm + // chart succeeded. + ChartPackageSucceededReason string = "ChartPackageSucceeded" +) + +// HelmChartProgressing resets the conditions of the HelmChart to meta.Condition +// of type meta.ReadyCondition with status 'Unknown' and meta.ProgressingReason +// reason and message. It returns the modified HelmChart. +func HelmChartProgressing(chart HelmChart) HelmChart { + chart.Status.ObservedGeneration = chart.Generation + chart.Status.URL = "" + chart.Status.Conditions = []metav1.Condition{} + meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + return chart +} + +// HelmChartReady sets the given Artifact and URL on the HelmChart and sets the +// meta.ReadyCondition to 'True', with the given reason and message. It returns +// the modified HelmChart. +func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message string) HelmChart { + chart.Status.Artifact = &artifact + chart.Status.URL = url + meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + return chart +} + +// HelmChartNotReady sets the meta.ReadyCondition on the given HelmChart to +// 'False', with the given reason and message. It returns the modified +// HelmChart. +func HelmChartNotReady(chart HelmChart, reason, message string) HelmChart { + meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + return chart +} + +// HelmChartReadyMessage returns the message of the meta.ReadyCondition with +// status 'True', or an empty string. +func HelmChartReadyMessage(chart HelmChart) string { + if c := apimeta.FindStatusCondition(chart.Status.Conditions, meta.ReadyCondition); c != nil { + if c.Status == metav1.ConditionTrue { + return c.Message + } + } + return "" +} + +// GetArtifact returns the latest artifact from the source if present in the +// status sub-resource. +func (in *HelmChart) GetArtifact() *Artifact { + return in.Status.Artifact +} + +// GetStatusConditions returns a pointer to the Status.Conditions slice +func (in *HelmChart) GetStatusConditions() *[]metav1.Condition { + return &in.Status.Conditions +} + +// GetInterval returns the interval at which the source is updated. +func (in *HelmChart) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetValuesFiles returns a merged list of ValuesFiles. 
+func (in *HelmChart) GetValuesFiles() []string { + valuesFiles := in.Spec.ValuesFiles + + // Prepend the deprecated ValuesFile to the list + if in.Spec.ValuesFile != "" { + valuesFiles = append([]string{in.Spec.ValuesFile}, valuesFiles...) + } + return valuesFiles +} + +// +genclient +// +genclient:Namespaced +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=hc +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart` +// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version` +// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind` +// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// HelmChart is the Schema for the helmcharts API +type HelmChart struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HelmChartSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status HelmChartStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HelmChartList contains a list of HelmChart +type HelmChartList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HelmChart `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HelmChart{}, &HelmChartList{}) +} diff --git a/api/v1beta2/helmrepository_types.go b/api/v1beta2/helmrepository_types.go new file mode 100644 index 000000000..afd5bdbb4 --- /dev/null +++ b/api/v1beta2/helmrepository_types.go @@ -0,0 +1,196 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // HelmRepositoryKind is the string representation of a HelmRepository. + HelmRepositoryKind = "HelmRepository" + // HelmRepositoryURLIndexKey is the key to use for indexing HelmRepository + // resources by their HelmRepositorySpec.URL. + HelmRepositoryURLIndexKey = ".metadata.helmRepositoryURL" +) + +// HelmRepositorySpec defines the reference to a Helm repository. +type HelmRepositorySpec struct { + // The Helm repository URL, a valid URL contains at least a protocol and host. + // +required + URL string `json:"url"` + + // The name of the secret containing authentication credentials for the Helm + // repository. + // For HTTP/S basic auth the secret must contain username and + // password fields. 
+ // For TLS the secret must contain a certFile and keyFile, and/or + // caCert fields. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // PassCredentials allows the credentials from the SecretRef to be passed on to + // a host that does not match the host as defined in URL. + // This may be required if the host of the advertised chart URLs in the index + // differ from the defined URL. + // Enabling this should be done with caution, as it can potentially result in + // credentials getting stolen in a MITM-attack. + // +optional + PassCredentials bool `json:"passCredentials,omitempty"` + + // The interval at which to check the upstream for updates. + // +required + Interval metav1.Duration `json:"interval"` + + // The timeout of index downloading, defaults to 60s. + // +kubebuilder:default:="60s" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // This flag tells the controller to suspend the reconciliation of this source. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` +} + +// HelmRepositoryStatus defines the observed state of the HelmRepository. +type HelmRepositoryStatus struct { + // ObservedGeneration is the last observed generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the HelmRepository. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the download link for the last index fetched. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful repository sync. + // +optional + Artifact *Artifact `json:"artifact,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // IndexationFailedReason represents the fact that the indexation of the given + // Helm repository failed. + IndexationFailedReason string = "IndexationFailed" + + // IndexationSucceededReason represents the fact that the indexation of the + // given Helm repository succeeded. + IndexationSucceededReason string = "IndexationSucceed" +) + +// HelmRepositoryProgressing resets the conditions of the HelmRepository to +// metav1.Condition of type meta.ReadyCondition with status 'Unknown' and +// meta.ProgressingReason reason and message. It returns the modified +// HelmRepository. +func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { + repository.Status.ObservedGeneration = repository.Generation + repository.Status.URL = "" + repository.Status.Conditions = []metav1.Condition{} + meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + return repository +} + +// HelmRepositoryReady sets the given Artifact and URL on the HelmRepository and +// sets the meta.ReadyCondition to 'True', with the given reason and message. It +// returns the modified HelmRepository. 
+func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reason, message string) HelmRepository { + repository.Status.Artifact = &artifact + repository.Status.URL = url + meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + return repository +} + +// HelmRepositoryNotReady sets the meta.ReadyCondition on the given +// HelmRepository to 'False', with the given reason and message. It returns the +// modified HelmRepository. +func HelmRepositoryNotReady(repository HelmRepository, reason, message string) HelmRepository { + meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + return repository +} + +// HelmRepositoryReadyMessage returns the message of the metav1.Condition of type +// meta.ReadyCondition with status 'True' if present, or an empty string. +func HelmRepositoryReadyMessage(repository HelmRepository) string { + if c := apimeta.FindStatusCondition(repository.Status.Conditions, meta.ReadyCondition); c != nil { + if c.Status == metav1.ConditionTrue { + return c.Message + } + } + return "" +} + +// GetArtifact returns the latest artifact from the source if present in the +// status sub-resource. +func (in *HelmRepository) GetArtifact() *Artifact { + return in.Status.Artifact +} + +// GetStatusConditions returns a pointer to the Status.Conditions slice +func (in *HelmRepository) GetStatusConditions() *[]metav1.Condition { + return &in.Status.Conditions +} + +// GetInterval returns the interval at which the source is updated. +func (in *HelmRepository) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// +genclient +// +genclient:Namespaced +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=helmrepo +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// HelmRepository is the Schema for the helmrepositories API +type HelmRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HelmRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status HelmRepositoryStatus `json:"status,omitempty"` +} + +// HelmRepositoryList contains a list of HelmRepository +// +kubebuilder:object:root=true +type HelmRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HelmRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HelmRepository{}, &HelmRepositoryList{}) +} diff --git a/api/v1beta2/source.go b/api/v1beta2/source.go new file mode 100644 index 000000000..b1fde1f15 --- /dev/null +++ b/api/v1beta2/source.go @@ -0,0 +1,37 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // SourceIndexKey is the key used for indexing resources + // resources based on their Source. + SourceIndexKey string = ".metadata.source" +) + +// Source interface must be supported by all API types. +// +k8s:deepcopy-gen=false +type Source interface { + // GetArtifact returns the latest artifact from the source if present in the + // status sub-resource. + GetArtifact() *Artifact + // GetInterval returns the interval at which the source is updated. + GetInterval() metav1.Duration +} diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..bec40f3a4 --- /dev/null +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,611 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Artifact) DeepCopyInto(out *Artifact) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact. +func (in *Artifact) DeepCopy() *Artifact { + if in == nil { + return nil + } + out := new(Artifact) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Bucket) DeepCopyInto(out *Bucket) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket. +func (in *Bucket) DeepCopy() *Bucket { + if in == nil { + return nil + } + out := new(Bucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Bucket) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketList) DeepCopyInto(out *BucketList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Bucket, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList. +func (in *BucketList) DeepCopy() *BucketList { + if in == nil { + return nil + } + out := new(BucketList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec. +func (in *BucketSpec) DeepCopy() *BucketSpec { + if in == nil { + return nil + } + out := new(BucketSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(Artifact) + (*in).DeepCopyInto(*out) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus. +func (in *BucketStatus) DeepCopy() *BucketStatus { + if in == nil { + return nil + } + out := new(BucketStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepository) DeepCopyInto(out *GitRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepository. +func (in *GitRepository) DeepCopy() *GitRepository { + if in == nil { + return nil + } + out := new(GitRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositoryInclude) DeepCopyInto(out *GitRepositoryInclude) { + *out = *in + out.GitRepositoryRef = in.GitRepositoryRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryInclude. +func (in *GitRepositoryInclude) DeepCopy() *GitRepositoryInclude { + if in == nil { + return nil + } + out := new(GitRepositoryInclude) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryList) DeepCopyInto(out *GitRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GitRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryList. +func (in *GitRepositoryList) DeepCopy() *GitRepositoryList { + if in == nil { + return nil + } + out := new(GitRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryRef) DeepCopyInto(out *GitRepositoryRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryRef. +func (in *GitRepositoryRef) DeepCopy() *GitRepositoryRef { + if in == nil { + return nil + } + out := new(GitRepositoryRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositorySpec) DeepCopyInto(out *GitRepositorySpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + if in.Reference != nil { + in, out := &in.Reference, &out.Reference + *out = new(GitRepositoryRef) + **out = **in + } + if in.Verification != nil { + in, out := &in.Verification, &out.Verification + *out = new(GitRepositoryVerification) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]GitRepositoryInclude, len(*in)) + copy(*out, *in) + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositorySpec. +func (in *GitRepositorySpec) DeepCopy() *GitRepositorySpec { + if in == nil { + return nil + } + out := new(GitRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(Artifact) + (*in).DeepCopyInto(*out) + } + if in.IncludedArtifacts != nil { + in, out := &in.IncludedArtifacts, &out.IncludedArtifacts + *out = make([]*Artifact, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Artifact) + (*in).DeepCopyInto(*out) + } + } + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryStatus. +func (in *GitRepositoryStatus) DeepCopy() *GitRepositoryStatus { + if in == nil { + return nil + } + out := new(GitRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryVerification) DeepCopyInto(out *GitRepositoryVerification) { + *out = *in + out.SecretRef = in.SecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryVerification. +func (in *GitRepositoryVerification) DeepCopy() *GitRepositoryVerification { + if in == nil { + return nil + } + out := new(GitRepositoryVerification) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChart) DeepCopyInto(out *HelmChart) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChart. +func (in *HelmChart) DeepCopy() *HelmChart { + if in == nil { + return nil + } + out := new(HelmChart) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmChart) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartList) DeepCopyInto(out *HelmChartList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmChart, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartList. +func (in *HelmChartList) DeepCopy() *HelmChartList { + if in == nil { + return nil + } + out := new(HelmChartList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmChartList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) { + *out = *in + out.SourceRef = in.SourceRef + out.Interval = in.Interval + if in.ValuesFiles != nil { + in, out := &in.ValuesFiles, &out.ValuesFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec. +func (in *HelmChartSpec) DeepCopy() *HelmChartSpec { + if in == nil { + return nil + } + out := new(HelmChartSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(Artifact) + (*in).DeepCopyInto(*out) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartStatus. +func (in *HelmChartStatus) DeepCopy() *HelmChartStatus { + if in == nil { + return nil + } + out := new(HelmChartStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepository) DeepCopyInto(out *HelmRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepository. +func (in *HelmRepository) DeepCopy() *HelmRepository { + if in == nil { + return nil + } + out := new(HelmRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepositoryList) DeepCopyInto(out *HelmRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryList. +func (in *HelmRepositoryList) DeepCopy() *HelmRepositoryList { + if in == nil { + return nil + } + out := new(HelmRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositorySpec. +func (in *HelmRepositorySpec) DeepCopy() *HelmRepositorySpec { + if in == nil { + return nil + } + out := new(HelmRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(Artifact) + (*in).DeepCopyInto(*out) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryStatus. +func (in *HelmRepositoryStatus) DeepCopy() *HelmRepositoryStatus { + if in == nil { + return nil + } + out := new(HelmRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalHelmChartSourceReference) DeepCopyInto(out *LocalHelmChartSourceReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalHelmChartSourceReference. +func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReference { + if in == nil { + return nil + } + out := new(LocalHelmChartSourceReference) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index 61bbaf32d..d5de513bd 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -246,6 +246,239 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.endpoint + name: Endpoint + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Bucket is the Schema for the buckets API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BucketSpec defines the desired state of an S3 compatible + bucket + properties: + accessFrom: + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. + properties: + namespaceSelectors: + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. + items: + description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. + properties: + matchLabels: + additionalProperties: + type: string + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + type: array + required: + - namespaceSelectors + type: object + bucketName: + description: The bucket name. + type: string + endpoint: + description: The bucket endpoint address. + type: string + ignore: + description: Ignore overrides the set of excluded patterns in the + .sourceignore format (which is the same as .gitignore). If not provided, + a default will be used, consult the documentation for your version + to find out what those are. + type: string + insecure: + description: Insecure allows connecting to a non-TLS S3 HTTP endpoint. + type: boolean + interval: + description: The interval at which to check for bucket updates. + type: string + provider: + default: generic + description: The S3 compatible storage provider name, default ('generic'). + enum: + - generic + - aws + - gcp + type: string + region: + description: The bucket region. + type: string + secretRef: + description: The name of the secret containing authentication credentials + for the Bucket. + properties: + name: + description: Name of the referent + type: string + required: + - name + type: object + suspend: + description: This flag tells the controller to suspend the reconciliation + of this source. + type: boolean + timeout: + default: 60s + description: The timeout for download operations, defaults to 60s. + type: string + required: + - bucketName + - endpoint + - interval + type: object + status: + default: + observedGeneration: -1 + description: BucketStatus defines the observed state of a bucket + properties: + artifact: + description: Artifact represents the output of the last successful + Bucket sync. + properties: + checksum: + description: Checksum is the SHA256 checksum of the artifact. + type: string + lastUpdateTime: + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. + format: date-time + type: string + path: + description: Path is the relative file path of this artifact. + type: string + revision: + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. + type: string + url: + description: URL is the HTTP address of this artifact. + type: string + required: + - path + - url + type: object + conditions: + description: Conditions holds the conditions for the Bucket. 
+ items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastHandledReconcileAt: + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change can be detected. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer + url: + description: URL is the download link for the artifact output of the + last Bucket sync. 
+ type: string + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index f2ea9662c..622ac5ad0 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -345,6 +345,336 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.url + name: URL + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: GitRepository is the Schema for the gitrepositories API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GitRepositorySpec defines the desired state of a Git repository. + properties: + accessFrom: + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. + properties: + namespaceSelectors: + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. + items: + description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. + properties: + matchLabels: + additionalProperties: + type: string + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + type: array + required: + - namespaceSelectors + type: object + gitImplementation: + default: go-git + description: Determines which git client library to use. Defaults + to go-git, valid values are ('go-git', 'libgit2'). + enum: + - go-git + - libgit2 + type: string + ignore: + description: Ignore overrides the set of excluded patterns in the + .sourceignore format (which is the same as .gitignore). If not provided, + a default will be used, consult the documentation for your version + to find out what those are. + type: string + include: + description: Extra git repositories to map into the repository + items: + description: GitRepositoryInclude defines a source with a from and + to path. + properties: + fromPath: + description: The path to copy contents from, defaults to the + root directory. + type: string + repository: + description: Reference to a GitRepository to include. 
+ properties: + name: + description: Name of the referent + type: string + required: + - name + type: object + toPath: + description: The path to copy contents to, defaults to the name + of the source ref. + type: string + required: + - repository + type: object + type: array + interval: + description: The interval at which to check for repository updates. + type: string + recurseSubmodules: + description: When enabled, after the clone is created, initializes + all submodules within, using their default settings. This option + is available only when using the 'go-git' GitImplementation. + type: boolean + ref: + description: The Git reference to checkout and monitor for changes, + defaults to master branch. + properties: + branch: + description: The Git branch to checkout, defaults to master. + type: string + commit: + description: The Git commit SHA to checkout, if specified Tag + filters will be ignored. + type: string + semver: + description: The Git tag semver expression, takes precedence over + Tag. + type: string + tag: + description: The Git tag to checkout, takes precedence over Branch. + type: string + type: object + secretRef: + description: The secret name containing the Git credentials. For HTTPS + repositories the secret must contain username and password fields. + For SSH repositories the secret must contain identity, identity.pub + and known_hosts fields. + properties: + name: + description: Name of the referent + type: string + required: + - name + type: object + suspend: + description: This flag tells the controller to suspend the reconciliation + of this source. + type: boolean + timeout: + default: 60s + description: The timeout for remote Git operations like cloning, defaults + to 60s. + type: string + url: + description: The repository URL, can be a HTTP/S or SSH address. + pattern: ^(http|https|ssh):// + type: string + verify: + description: Verify OpenPGP signature for the Git commit HEAD points + to. + properties: + mode: + description: Mode describes what git object should be verified, + currently ('head'). + enum: + - head + type: string + secretRef: + description: The secret name containing the public keys of all + trusted Git authors. + properties: + name: + description: Name of the referent + type: string + required: + - name + type: object + required: + - mode + type: object + required: + - interval + - url + type: object + status: + default: + observedGeneration: -1 + description: GitRepositoryStatus defines the observed state of a Git repository. + properties: + artifact: + description: Artifact represents the output of the last successful + repository sync. + properties: + checksum: + description: Checksum is the SHA256 checksum of the artifact. + type: string + lastUpdateTime: + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. + format: date-time + type: string + path: + description: Path is the relative file path of this artifact. + type: string + revision: + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. + type: string + url: + description: URL is the HTTP address of this artifact. + type: string + required: + - path + - url + type: object + conditions: + description: Conditions holds the conditions for the GitRepository. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. 
--- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + includedArtifacts: + description: IncludedArtifacts represents the included artifacts from + the last successful repository sync. + items: + description: Artifact represents the output of a source synchronisation. + properties: + checksum: + description: Checksum is the SHA256 checksum of the artifact. + type: string + lastUpdateTime: + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. + format: date-time + type: string + path: + description: Path is the relative file path of this artifact. + type: string + revision: + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. + type: string + url: + description: URL is the HTTP address of this artifact. 
+ type: string + required: + - path + - url + type: object + type: array + lastHandledReconcileAt: + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change can be detected. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer + url: + description: URL is the download link for the artifact output of the + last repository sync. + type: string + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index 6594bca5b..b32ea6184 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -270,6 +270,261 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.chart + name: Chart + type: string + - jsonPath: .spec.version + name: Version + type: string + - jsonPath: .spec.sourceRef.kind + name: Source Kind + type: string + - jsonPath: .spec.sourceRef.name + name: Source Name + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: HelmChart is the Schema for the helmcharts API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HelmChartSpec defines the desired state of a Helm chart. + properties: + accessFrom: + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. + properties: + namespaceSelectors: + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. + items: + description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. + properties: + matchLabels: + additionalProperties: + type: string + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + type: array + required: + - namespaceSelectors + type: object + chart: + description: The name or path the Helm chart is available at in the + SourceRef. + type: string + interval: + description: The interval at which to check the Source for updates. 
+ type: string + reconcileStrategy: + default: ChartVersion + description: Determines what enables the creation of a new artifact. + Valid values are ('ChartVersion', 'Revision'). See the documentation + of the values for an explanation on their behavior. Defaults to + ChartVersion when omitted. + enum: + - ChartVersion + - Revision + type: string + sourceRef: + description: The reference to the Source the chart is available at. + properties: + apiVersion: + description: APIVersion of the referent. + type: string + kind: + description: Kind of the referent, valid values are ('HelmRepository', + 'GitRepository', 'Bucket'). + enum: + - HelmRepository + - GitRepository + - Bucket + type: string + name: + description: Name of the referent. + type: string + required: + - kind + - name + type: object + suspend: + description: This flag tells the controller to suspend the reconciliation + of this source. + type: boolean + valuesFile: + description: Alternative values file to use as the default chart values, + expected to be a relative path in the SourceRef. Deprecated in favor + of ValuesFiles, for backwards compatibility the file defined here + is merged before the ValuesFiles items. Ignored when omitted. + type: string + valuesFiles: + description: Alternative list of values files to use as the chart + values (values.yaml is not included by default), expected to be + a relative path in the SourceRef. Values files are merged in the + order of this list with the last file overriding the first. Ignored + when omitted. + items: + type: string + type: array + version: + default: '*' + description: The chart version semver expression, ignored for charts + from GitRepository and Bucket sources. Defaults to latest when omitted. + type: string + required: + - chart + - interval + - sourceRef + type: object + status: + default: + observedGeneration: -1 + description: HelmChartStatus defines the observed state of the HelmChart. + properties: + artifact: + description: Artifact represents the output of the last successful + chart sync. + properties: + checksum: + description: Checksum is the SHA256 checksum of the artifact. + type: string + lastUpdateTime: + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. + format: date-time + type: string + path: + description: Path is the relative file path of this artifact. + type: string + revision: + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. + type: string + url: + description: URL is the HTTP address of this artifact. + type: string + required: + - path + - url + type: object + conditions: + description: Conditions holds the conditions for the HelmChart. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastHandledReconcileAt: + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change can be detected. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer + url: + description: URL is the download link for the last chart pulled. 
+ type: string + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml index e9595d5c0..328f33d14 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml @@ -233,6 +233,224 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.url + name: URL + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: HelmRepository is the Schema for the helmrepositories API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HelmRepositorySpec defines the reference to a Helm repository. + properties: + accessFrom: + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. + properties: + namespaceSelectors: + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. + items: + description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. + properties: + matchLabels: + additionalProperties: + type: string + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + type: array + required: + - namespaceSelectors + type: object + interval: + description: The interval at which to check the upstream for updates. + type: string + passCredentials: + description: PassCredentials allows the credentials from the SecretRef + to be passed on to a host that does not match the host as defined + in URL. This may be required if the host of the advertised chart + URLs in the index differ from the defined URL. Enabling this should + be done with caution, as it can potentially result in credentials + getting stolen in a MITM-attack. + type: boolean + secretRef: + description: The name of the secret containing authentication credentials + for the Helm repository. For HTTP/S basic auth the secret must contain + username and password fields. For TLS the secret must contain a + certFile and keyFile, and/or caCert fields. 
+ properties: + name: + description: Name of the referent + type: string + required: + - name + type: object + suspend: + description: This flag tells the controller to suspend the reconciliation + of this source. + type: boolean + timeout: + default: 60s + description: The timeout of index downloading, defaults to 60s. + type: string + url: + description: The Helm repository URL, a valid URL contains at least + a protocol and host. + type: string + required: + - interval + - url + type: object + status: + default: + observedGeneration: -1 + description: HelmRepositoryStatus defines the observed state of the HelmRepository. + properties: + artifact: + description: Artifact represents the output of the last successful + repository sync. + properties: + checksum: + description: Checksum is the SHA256 checksum of the artifact. + type: string + lastUpdateTime: + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. + format: date-time + type: string + path: + description: Path is the relative file path of this artifact. + type: string + revision: + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. + type: string + url: + description: URL is the HTTP address of this artifact. + type: string + required: + - path + - url + type: object + conditions: + description: Conditions holds the conditions for the HelmRepository. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastHandledReconcileAt: + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change can be detected. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer + url: + description: URL is the download link for the last index fetched. + type: string + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/docs/api/source.md b/docs/api/source.md index 9aabeee23..d1144a3ed 100644 --- a/docs/api/source.md +++ b/docs/api/source.md @@ -2,22 +2,22 @@

Packages:

-

source.toolkit.fluxcd.io/v1beta1

-

Package v1beta1 contains API Schema definitions for the source v1beta1 API group

+

source.toolkit.fluxcd.io/v1beta2

+

Package v1beta2 contains API Schema definitions for the source v1beta2 API group

Resource Types: -

Bucket +

Bucket

Bucket is the Schema for the buckets API

@@ -35,7 +35,7 @@ Resource Types: apiVersion
string -source.toolkit.fluxcd.io/v1beta1 +source.toolkit.fluxcd.io/v1beta2 @@ -65,7 +65,7 @@ Refer to the Kubernetes API documentation for the fields of the spec
- + BucketSpec @@ -221,7 +221,7 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom status
- + BucketStatus @@ -233,7 +233,7 @@ BucketStatus
-

GitRepository +

GitRepository

GitRepository is the Schema for the gitrepositories API

@@ -251,7 +251,7 @@ BucketStatus apiVersion
string -source.toolkit.fluxcd.io/v1beta1 +source.toolkit.fluxcd.io/v1beta2 @@ -281,7 +281,7 @@ Refer to the Kubernetes API documentation for the fields of the spec
- + GitRepositorySpec @@ -350,7 +350,7 @@ Kubernetes meta/v1.Duration ref
- + GitRepositoryRef @@ -365,7 +365,7 @@ master branch.

verify
- + GitRepositoryVerification @@ -432,7 +432,7 @@ This option is available only when using the ‘go-git’ GitImplementat include
- + []GitRepositoryInclude @@ -462,7 +462,7 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom status
- + GitRepositoryStatus @@ -474,7 +474,7 @@ GitRepositoryStatus
-

HelmChart +

HelmChart

HelmChart is the Schema for the helmcharts API

@@ -492,7 +492,7 @@ GitRepositoryStatus apiVersion
string -source.toolkit.fluxcd.io/v1beta1 +source.toolkit.fluxcd.io/v1beta2 @@ -522,7 +522,7 @@ Refer to the Kubernetes API documentation for the fields of the spec
- + HelmChartSpec @@ -559,7 +559,7 @@ and Bucket sources. Defaults to latest when omitted.

sourceRef
- + LocalHelmChartSourceReference @@ -659,7 +659,7 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom status
- + HelmChartStatus @@ -671,7 +671,7 @@ HelmChartStatus
-

HelmRepository +

HelmRepository

HelmRepository is the Schema for the helmrepositories API

@@ -689,7 +689,7 @@ HelmChartStatus apiVersion
string -source.toolkit.fluxcd.io/v1beta1 +source.toolkit.fluxcd.io/v1beta2 @@ -719,7 +719,7 @@ Refer to the Kubernetes API documentation for the fields of the spec
- + HelmRepositorySpec @@ -835,7 +835,7 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom status
- + HelmRepositoryStatus @@ -847,14 +847,14 @@ HelmRepositoryStatus
-

Artifact +

Artifact

(Appears on: -BucketStatus, -GitRepositoryStatus, -HelmChartStatus, -HelmRepositoryStatus) +BucketStatus, +GitRepositoryStatus, +HelmChartStatus, +HelmRepositoryStatus)

Artifact represents the output of a source synchronisation.

@@ -933,11 +933,11 @@ artifact.

-

BucketSpec +

BucketSpec

(Appears on: -Bucket) +Bucket)

BucketSpec defines the desired state of an S3 compatible bucket

@@ -1094,11 +1094,11 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom
-

BucketStatus +

BucketStatus

(Appears on: -Bucket) +Bucket)

BucketStatus defines the observed state of a bucket

@@ -1153,7 +1153,7 @@ string artifact
- + Artifact @@ -1182,11 +1182,11 @@ github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
-

GitRepositoryInclude +

GitRepositoryInclude

(Appears on: -GitRepositorySpec) +GitRepositorySpec)

GitRepositoryInclude defines a source with a from and to path.

@@ -1240,11 +1240,11 @@ string
-

GitRepositoryRef +

GitRepositoryRef

(Appears on: -GitRepositorySpec) +GitRepositorySpec)

GitRepositoryRef defines the Git ref used for pull and checkout operations.

@@ -1309,11 +1309,11 @@ string
-

GitRepositorySpec +

GitRepositorySpec

(Appears on: -GitRepository) +GitRepository)

GitRepositorySpec defines the desired state of a Git repository.

@@ -1386,7 +1386,7 @@ Kubernetes meta/v1.Duration ref
- + GitRepositoryRef @@ -1401,7 +1401,7 @@ master branch.

verify
- + GitRepositoryVerification @@ -1468,7 +1468,7 @@ This option is available only when using the ‘go-git’ GitImplementat include
- + []GitRepositoryInclude @@ -1495,11 +1495,11 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom
-

GitRepositoryStatus +

GitRepositoryStatus

(Appears on: -GitRepository) +GitRepository)

GitRepositoryStatus defines the observed state of a Git repository.

@@ -1555,7 +1555,7 @@ sync.

artifact
- + Artifact @@ -1569,8 +1569,8 @@ Artifact includedArtifacts
- -[]*github.com/fluxcd/source-controller/api/v1beta1.Artifact + +[]*github.com/fluxcd/source-controller/api/v1beta2.Artifact @@ -1598,11 +1598,11 @@ github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
-

GitRepositoryVerification +

GitRepositoryVerification

(Appears on: -GitRepositorySpec) +GitRepositorySpec)

GitRepositoryVerification defines the OpenPGP signature verification process.

@@ -1643,11 +1643,11 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference
-

HelmChartSpec +

HelmChartSpec

(Appears on: -HelmChart) +HelmChart)

HelmChartSpec defines the desired state of a Helm chart.

@@ -1688,7 +1688,7 @@ and Bucket sources. Defaults to latest when omitted.

sourceRef
- + LocalHelmChartSourceReference @@ -1785,11 +1785,11 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom
-

HelmChartStatus +

HelmChartStatus

(Appears on: -HelmChart) +HelmChart)

HelmChartStatus defines the observed state of the HelmChart.

@@ -1844,7 +1844,7 @@ string artifact
- + Artifact @@ -1873,11 +1873,11 @@ github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
-

HelmRepositorySpec +

HelmRepositorySpec

(Appears on: -HelmRepository) +HelmRepository)

HelmRepositorySpec defines the reference to a Helm repository.

@@ -1994,11 +1994,11 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom
-

HelmRepositoryStatus +

HelmRepositoryStatus

(Appears on: -HelmRepository) +HelmRepository)

HelmRepositoryStatus defines the observed state of the HelmRepository.

@@ -2053,7 +2053,7 @@ string artifact
- + Artifact @@ -2082,11 +2082,11 @@ github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
-

LocalHelmChartSourceReference +

LocalHelmChartSourceReference

(Appears on: -HelmChartSpec) +HelmChartSpec)

LocalHelmChartSourceReference contains enough information to let you locate the typed referenced object at namespace level.

@@ -2139,7 +2139,7 @@ string -

Source +

Source

Source interface must be supported by all API types.

diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index ae5141121..74dbebc30 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,5 +1,5 @@ /* -Copyright 2020 The Flux authors +Copyright 2022 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. From 7c3c14997e63faa7321c1a7b8d1b4ac00db7480a Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Thu, 27 Jan 2022 15:06:17 +0100 Subject: [PATCH 02/63] Switch to v1beta2 API package Signed-off-by: Hidde Beydals --- api/v1beta1/zz_generated.deepcopy.go | 2 +- controllers/artifact.go | 2 +- controllers/artifact_test.go | 2 +- controllers/bucket_controller.go | 2 +- controllers/gitrepository_controller.go | 2 +- controllers/gitrepository_controller_test.go | 2 +- controllers/helmchart_controller.go | 2 +- controllers/helmchart_controller_test.go | 2 +- controllers/helmrepository_controller.go | 2 +- controllers/helmrepository_controller_test.go | 2 +- controllers/source_predicate.go | 2 +- controllers/storage.go | 2 +- controllers/storage_test.go | 2 +- controllers/suite_test.go | 2 +- main.go | 2 +- 15 files changed, 15 insertions(+), 15 deletions(-) diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index d5e4f4892..3fd54793d 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* -Copyright 2020 The Flux authors +Copyright 2022 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/controllers/artifact.go b/controllers/artifact.go index 0e16fd03c..2eff57747 100644 --- a/controllers/artifact.go +++ b/controllers/artifact.go @@ -1,6 +1,6 @@ package controllers -import sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" +import sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" // hasArtifactUpdated returns true if any of the revisions in the current artifacts // does not match any of the artifacts in the updated artifacts diff --git a/controllers/artifact_test.go b/controllers/artifact_test.go index 959661615..9746dfd8e 100644 --- a/controllers/artifact_test.go +++ b/controllers/artifact_test.go @@ -3,7 +3,7 @@ package controllers import ( "testing" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" ) func TestHasUpdated(t *testing.T) { diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index a25587d1a..66f1487ff 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -48,7 +48,7 @@ import ( "github.com/fluxcd/pkg/runtime/predicates" "github.com/fluxcd/source-controller/pkg/gcp" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/pkg/sourceignore" ) diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 7642a1614..0a3f02a29 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -44,7 +44,7 @@ import ( "github.com/fluxcd/pkg/runtime/metrics" "github.com/fluxcd/pkg/runtime/predicates" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/pkg/git" 
"github.com/fluxcd/source-controller/pkg/git/strategy" "github.com/fluxcd/source-controller/pkg/sourceignore" diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index a8691c26c..15910248f 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -50,7 +50,7 @@ import ( "github.com/fluxcd/pkg/gittestserver" "github.com/fluxcd/pkg/untar" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" ) var _ = Describe("GitRepositoryReconciler", func() { diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index e63f8e458..4d5d4244c 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -53,7 +53,7 @@ import ( "github.com/fluxcd/pkg/runtime/predicates" "github.com/fluxcd/pkg/untar" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/internal/helm/chart" "github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/repository" diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index cb9838b15..b2f469b36 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -46,7 +46,7 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/yaml" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" ) var _ = Describe("HelmChartReconciler", func() { diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index d82bdad69..34723a92d 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -42,7 +42,7 @@ import ( "github.com/fluxcd/pkg/runtime/metrics" "github.com/fluxcd/pkg/runtime/predicates" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/repository" ) diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index e7d945a60..171ce8424 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -33,7 +33,7 @@ import ( "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/helmtestserver" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" ) var _ = Describe("HelmRepositoryReconciler", func() { diff --git a/controllers/source_predicate.go b/controllers/source_predicate.go index 47dc73c28..60786b87e 100644 --- a/controllers/source_predicate.go +++ b/controllers/source_predicate.go @@ -20,7 +20,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" ) type SourceRevisionChangePredicate struct { diff --git a/controllers/storage.go b/controllers/storage.go index a70150513..57cee19c1 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -36,7 +36,7 @@ import ( "github.com/fluxcd/pkg/lockedfile" "github.com/fluxcd/pkg/untar" - sourcev1 
"github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/internal/fs" "github.com/fluxcd/source-controller/pkg/sourceignore" ) diff --git a/controllers/storage_test.go b/controllers/storage_test.go index 4af3a3418..b93ed69af 100644 --- a/controllers/storage_test.go +++ b/controllers/storage_test.go @@ -29,7 +29,7 @@ import ( "github.com/go-git/go-git/v5/plumbing/format/gitignore" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" ) func createStoragePath() (string, error) { diff --git a/controllers/suite_test.go b/controllers/suite_test.go index fae4c3b05..545872f80 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -37,7 +37,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" // +kubebuilder:scaffold:imports ) diff --git a/main.go b/main.go index 67f00a920..67bbdecd1 100644 --- a/main.go +++ b/main.go @@ -43,7 +43,7 @@ import ( "github.com/fluxcd/pkg/runtime/pprof" "github.com/fluxcd/pkg/runtime/probes" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/controllers" "github.com/fluxcd/source-controller/internal/helm" // +kubebuilder:scaffold:imports From e42eedd3921cbb58abc9b69bd3de017ceaccc05c Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Fri, 30 Jul 2021 11:17:22 +0200 Subject: [PATCH 03/63] Introduce more explicit Condition types This commit introduces new Condition types to the v1beta1 API, facilitating easier observation of (potentially) problematic state for end-users. - `ArtifactUnavailableCondition`: indicates there is no artifact available for the resource. This Condition should be set by the reconciler as soon as it observes the absence of an artifact for a source. - `CheckoutFailedCondition`: indicates a transient or persistent checkout failure. This Condition should be set by the reconciler as soon as it observes a Git checkout failure, including any prerequisites like the unavailability of the referenced Secret used for authentication. It should be deleted as soon as a successful checkout has been observed again. - `SourceVerifiedCondition`: indicates the integrity of the source has been verified. The Condition should be set to True or False by the reconciler based on the result of the integrity check. If there is no verification mode and/or secret configured, the Condition should be removed. - `IncludeUnavailableCondition`: indicates one of the referenced includes is not available. This Condition should for example be set by the reconciler when the include does not exist, or does not have an artifact. If the includes become available, it should be deleted. - `ArtifactOutdatedCondition`: indicates the current artifact of the source is outdated. This Condition should for example be set by the reconciler when it notices there is a newer revision for an artifact, or the previously included artifacts differ from the current available ones. The Condition should be removed after writing a new artifact to the storage. 
Signed-off-by: Hidde Beydals --- api/v1beta2/gitrepository_types.go | 57 ++++++++++++------- ...rce.toolkit.fluxcd.io_gitrepositories.yaml | 20 ++++--- 2 files changed, 49 insertions(+), 28 deletions(-) diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go index ac4f683ba..e92700664 100644 --- a/api/v1beta2/gitrepository_types.go +++ b/api/v1beta2/gitrepository_types.go @@ -34,6 +34,30 @@ const ( LibGit2Implementation = "libgit2" ) +const ( + // ArtifactUnavailableCondition indicates there is no Artifact available for the Source. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactUnavailableCondition string = "ArtifactUnavailable" + + // CheckoutFailedCondition indicates a transient or persistent checkout failure. If True, observations on the + // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + CheckoutFailedCondition string = "CheckoutFailed" + + // SourceVerifiedCondition indicates the integrity of the Source has been verified. If True, the integrity check + // succeeded. If False, it failed. The Condition is only present on the resource if the integrity has been verified. + SourceVerifiedCondition string = "SourceVerified" + + // IncludeUnavailableCondition indicates one of the includes is not available. For example, because it does not + // exist, or does not have an Artifact. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + IncludeUnavailableCondition string = "IncludeUnavailable" + + // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactOutdatedCondition string = "ArtifactOutdated" +) + // GitRepositorySpec defines the desired state of a Git repository. type GitRepositorySpec struct { // The repository URL, can be a HTTP/S or SSH address. @@ -42,10 +66,8 @@ type GitRepositorySpec struct { URL string `json:"url"` // The secret name containing the Git credentials. - // For HTTPS repositories the secret must contain username and password - // fields. - // For SSH repositories the secret must contain identity, identity.pub and - // known_hosts fields. + // For HTTPS repositories the secret must contain username and password fields. + // For SSH repositories the secret must contain 'identity', 'identity.pub' and 'known_hosts' fields. // +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` @@ -63,16 +85,16 @@ type GitRepositorySpec struct { // +optional Reference *GitRepositoryRef `json:"ref,omitempty"` - // Verify OpenPGP signature for the Git commit HEAD points to. + // Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to. // +optional Verification *GitRepositoryVerification `json:"verify,omitempty"` - // Ignore overrides the set of excluded patterns in the .sourceignore format - // (which is the same as .gitignore). If not provided, a default will be used, - // consult the documentation for your version to find out what those are. + // Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). 
+ // If not provided, a default will be used, consult the documentation for your version to find out what those are. // +optional Ignore *string `json:"ignore,omitempty"` + // Suspend tells the controller to suspend the reconciliation of this source. // This flag tells the controller to suspend the reconciliation of this source. // +optional Suspend bool `json:"suspend,omitempty"` @@ -84,13 +106,13 @@ type GitRepositorySpec struct { // +optional GitImplementation string `json:"gitImplementation,omitempty"` - // When enabled, after the clone is created, initializes all submodules within, - // using their default settings. + // When enabled, after the clone is created, initializes all submodules within, using their default settings. // This option is available only when using the 'go-git' GitImplementation. // +optional RecurseSubmodules bool `json:"recurseSubmodules,omitempty"` - // Extra git repositories to map into the repository + // Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for + // this resource. Include []GitRepositoryInclude `json:"include,omitempty"` // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. @@ -144,11 +166,11 @@ type GitRepositoryRef struct { // GitRepositoryVerification defines the OpenPGP signature verification process. type GitRepositoryVerification struct { - // Mode describes what git object should be verified, currently ('head'). + // Mode describes what Git object should be verified, currently ('head'). // +kubebuilder:validation:Enum=head Mode string `json:"mode"` - // The secret name containing the public keys of all trusted Git authors. + // SecretRef containing the public keys of all trusted Git authors. SecretRef meta.LocalObjectReference `json:"secretRef,omitempty"` } @@ -162,8 +184,7 @@ type GitRepositoryStatus struct { // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` - // URL is the download link for the artifact output of the last repository - // sync. + // URL is the download link for the artifact output of the last repository sync. // +optional URL string `json:"url,omitempty"` @@ -179,12 +200,10 @@ type GitRepositoryStatus struct { } const ( - // GitOperationSucceedReason represents the fact that the git clone, pull - // and checkout operations succeeded. + // GitOperationSucceedReason represents the fact that the git clone, pull and checkout operations succeeded. GitOperationSucceedReason string = "GitOperationSucceed" - // GitOperationFailedReason represents the fact that the git clone, pull or - // checkout operations failed. + // GitOperationFailedReason represents the fact that the git clone, pull or checkout operations failed. GitOperationFailedReason string = "GitOperationFailed" ) diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index 622ac5ad0..5a9d809bf 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -423,7 +423,8 @@ spec: to find out what those are. type: string include: - description: Extra git repositories to map into the repository + description: Include defines a list of GitRepository resources which + artifacts should be included in the artifact produced for this resource. items: description: GitRepositoryInclude defines a source with a from and to path. 
@@ -479,8 +480,8 @@ spec: secretRef: description: The secret name containing the Git credentials. For HTTPS repositories the secret must contain username and password fields. - For SSH repositories the secret must contain identity, identity.pub - and known_hosts fields. + For SSH repositories the secret must contain 'identity', 'identity.pub' + and 'known_hosts' fields. properties: name: description: Name of the referent @@ -489,7 +490,8 @@ spec: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation + description: Suspend tells the controller to suspend the reconciliation + of this source. This flag tells the controller to suspend the reconciliation of this source. type: boolean timeout: @@ -502,18 +504,18 @@ spec: pattern: ^(http|https|ssh):// type: string verify: - description: Verify OpenPGP signature for the Git commit HEAD points - to. + description: Verification defines the configuration to verify the + OpenPGP signature for the Git commit HEAD points to. properties: mode: - description: Mode describes what git object should be verified, + description: Mode describes what Git object should be verified, currently ('head'). enum: - head type: string secretRef: - description: The secret name containing the public keys of all - trusted Git authors. + description: SecretRef containing the public keys of all trusted + Git authors. properties: name: description: Name of the referent From 349739b7e44b6139b742d3ef85fa107b424531ed Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Fri, 30 Jul 2021 12:33:18 +0200 Subject: [PATCH 04/63] Implement new runtime interfaces, prepare testenv This commit ensures all API objects implement the interfaces used by the runtime package to work with conditions, etc., and prepares the test suite to work with the `pkg/runtime/testenv` wrapper. Changes are made in a backwards compatible way (that being: the existing code can still be built and works as expected), but without proper dependency boundaries. The result of this is that the API package temporarily depends on the runtime package, which is resolved when all reconcilers have been refactored and the API package no longer contains condition modifying functions. 
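[Editor's note — illustrative sketch, not part of the patch series.] What the interface work buys a reconciler: once a kind exposes the GetConditions/SetConditions pair, the helpers in github.com/fluxcd/pkg/runtime/conditions (used throughout the diffs that follow) can operate on it generically. The markNotReady helper below is hypothetical, and the conditions.Setter interface name is an assumption about the runtime package.

package example

import (
	"errors"

	"github.com/fluxcd/pkg/apis/meta"
	"github.com/fluxcd/pkg/runtime/conditions"

	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
)

// markNotReady accepts any object satisfying the setter contract, which after
// this commit includes every v1beta2 source kind (GitRepository, HelmRepository,
// HelmChart and Bucket).
func markNotReady(obj conditions.Setter, reason string, err error) {
	conditions.MarkFalse(obj, meta.ReadyCondition, reason, err.Error())
}

// usage sketch: one helper serves different source kinds.
func usage() {
	var repo sourcev1.GitRepository
	var bucket sourcev1.Bucket

	markNotReady(&repo, sourcev1.GitOperationFailedReason, errors.New("unable to clone repository"))
	markNotReady(&bucket, "BucketOperationFailed", errors.New("unable to download bucket objects"))
}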
Signed-off-by: Hidde Beydals --- api/go.mod | 23 +- api/go.sum | 34 ++- api/v1beta1/bucket_types.go | 24 +- api/v1beta1/gitrepository_types.go | 24 +- api/v1beta1/helmchart_types.go | 24 +- api/v1beta1/helmrepository_types.go | 24 +- api/v1beta2/bucket_types.go | 33 ++- api/v1beta2/gitrepository_types.go | 33 ++- api/v1beta2/helmchart_types.go | 35 ++- api/v1beta2/helmrepository_types.go | 33 ++- .../source.toolkit.fluxcd.io_buckets.yaml | 10 +- ...rce.toolkit.fluxcd.io_gitrepositories.yaml | 18 +- .../source.toolkit.fluxcd.io_helmcharts.yaml | 6 +- ...ce.toolkit.fluxcd.io_helmrepositories.yaml | 10 +- controllers/bucket_controller.go | 14 +- controllers/gitrepository_controller.go | 23 +- controllers/helmchart_controller.go | 14 +- controllers/helmchart_controller_test.go | 116 ++++----- controllers/helmrepository_controller.go | 14 +- controllers/helmrepository_controller_test.go | 8 +- controllers/legacy_suite_test.go | 197 +++++++++++++++ controllers/storage.go | 8 +- controllers/suite_test.go | 226 +++++++----------- docs/api/source.md | 51 ++-- go.mod | 6 +- go.sum | 13 +- main.go | 24 +- 27 files changed, 660 insertions(+), 385 deletions(-) create mode 100644 controllers/legacy_suite_test.go diff --git a/api/go.mod b/api/go.mod index ce4aef76c..2af43091b 100644 --- a/api/go.mod +++ b/api/go.mod @@ -4,25 +4,46 @@ go 1.17 require ( github.com/fluxcd/pkg/apis/acl v0.0.3 - github.com/fluxcd/pkg/apis/meta v0.10.2 + github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 + // TODO(hidde): introduction of the runtime package is temporary, and the dependency should be removed as soon as + // all APIs have been updated to the runtime standards (more specifically; have dropped their condition modifying + // functions). + github.com/fluxcd/pkg/runtime v0.13.0-rc.6 k8s.io/apimachinery v0.23.1 sigs.k8s.io/controller-runtime v0.11.0 ) require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-logr/logr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.6 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/googleapis/gnostic v0.5.5 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/onsi/gomega v1.17.0 // indirect + github.com/pkg/errors v0.9.1 // indirect golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9 // indirect + golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect + golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect + golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect golang.org/x/text v0.3.7 // indirect + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.27.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + k8s.io/api v0.23.0 // indirect + k8s.io/client-go v0.23.0 // indirect k8s.io/klog/v2 v2.30.0 // indirect + k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect k8s.io/utils v0.0.0-20211208161948-7d6a63dca704 // indirect sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/api/go.sum b/api/go.sum index 
ffe31a8af..927fd8a67 100644 --- a/api/go.sum +++ b/api/go.sum @@ -68,6 +68,7 @@ github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiU github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= @@ -76,7 +77,9 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -118,13 +121,16 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc= github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU= -github.com/fluxcd/pkg/apis/meta v0.10.2 h1:pnDBBEvfs4HaKiVAYgz+e/AQ8dLvcgmVfSeBroZ/KKI= -github.com/fluxcd/pkg/apis/meta v0.10.2/go.mod h1:KQ2er9xa6koy7uoPMZjIjNudB5p4tXs+w0GO6fRcy7I= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 h1:YY6RlaHIMXawgEOJhJbSrm4NpD9fJTCWFGKgtNfQ0/g= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3/go.mod h1:ki5wJE4nuFOZt78q0RSYkrKwINgIBPynuswZhnTOSoI= +github.com/fluxcd/pkg/runtime v0.13.0-rc.6 h1:MsxiKYGsuRzEvyreQG5ocNaIZDwKhqvQ711/w4rTkCo= +github.com/fluxcd/pkg/runtime v0.13.0-rc.6/go.mod h1:4oKUO19TeudXrnCRnxCfMSS7EQTYpYlgfXwlQuDJ/Eg= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go 
v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -148,6 +154,7 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -194,6 +201,7 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -236,6 +244,7 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= @@ -250,9 +259,11 @@ github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBt github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -270,6 +281,7 @@ 
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -309,6 +321,7 @@ github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -359,6 +372,7 @@ github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -369,21 +383,25 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod 
h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -466,15 +484,18 @@ go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4 go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -584,6 +605,7 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f 
h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -663,6 +685,7 @@ golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 h1:M69LAlWZCshgp0QSzyDcSsSIejIEeuaCVpmwcKwyLMk= golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -678,6 +701,7 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -743,6 +767,7 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -772,6 +797,7 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -852,6 +878,7 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -895,11 +922,13 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= +k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY= k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4= k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= k8s.io/apimachinery v0.23.1 h1:sfBjlDFwj2onG0Ijx5C+SrAoeUscPrmghm7wHP+uXlo= k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno= k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= +k8s.io/client-go v0.23.0 h1:vcsOqyPq7XV3QmQRCBH/t9BICJM9Q1M18qahjv+rebY= k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= @@ -908,6 +937,7 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 33e28bb49..0d5f3de81 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -126,7 +126,13 @@ func BucketProgressing(bucket Bucket) Bucket { bucket.Status.ObservedGeneration = bucket.Generation bucket.Status.URL = "" bucket.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionUnknown, + Reason: meta.ProgressingReason, + Message: "reconciliation in progress", + } + apimeta.SetStatusCondition(bucket.GetStatusConditions(), newCondition) return bucket } @@ -136,14 +142,26 @@ func BucketProgressing(bucket Bucket) Bucket { func 
BucketReady(bucket Bucket, artifact Artifact, url, reason, message string) Bucket { bucket.Status.Artifact = &artifact bucket.Status.URL = url - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionTrue, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(bucket.GetStatusConditions(), newCondition) return bucket } // BucketNotReady sets the meta.ReadyCondition on the Bucket to 'False', with // the given reason and message. It returns the modified Bucket. func BucketNotReady(bucket Bucket, reason, message string) Bucket { - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(bucket.GetStatusConditions(), newCondition) return bucket } diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index d38a6873c..3cdfab6b9 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -196,7 +196,13 @@ func GitRepositoryProgressing(repository GitRepository) GitRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionUnknown, + Reason: meta.ProgressingReason, + Message: "reconciliation in progress", + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } @@ -207,7 +213,13 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt repository.Status.Artifact = &artifact repository.Status.IncludedArtifacts = includedArtifacts repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionTrue, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } @@ -215,7 +227,13 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt // to 'False', with the given reason and message. It returns the modified // GitRepository. 
func GitRepositoryNotReady(repository GitRepository, reason, message string) GitRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } diff --git a/api/v1beta1/helmchart_types.go b/api/v1beta1/helmchart_types.go index 51c04781d..8d4c0a02d 100644 --- a/api/v1beta1/helmchart_types.go +++ b/api/v1beta1/helmchart_types.go @@ -152,7 +152,13 @@ func HelmChartProgressing(chart HelmChart) HelmChart { chart.Status.ObservedGeneration = chart.Generation chart.Status.URL = "" chart.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionUnknown, + Reason: meta.ProgressingReason, + Message: "reconciliation in progress", + } + apimeta.SetStatusCondition(chart.GetStatusConditions(), newCondition) return chart } @@ -162,7 +168,13 @@ func HelmChartProgressing(chart HelmChart) HelmChart { func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message string) HelmChart { chart.Status.Artifact = &artifact chart.Status.URL = url - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionTrue, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(chart.GetStatusConditions(), newCondition) return chart } @@ -170,7 +182,13 @@ func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message str // 'False', with the given reason and message. It returns the modified // HelmChart. 
func HelmChartNotReady(chart HelmChart, reason, message string) HelmChart { - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(chart.GetStatusConditions(), newCondition) return chart } diff --git a/api/v1beta1/helmrepository_types.go b/api/v1beta1/helmrepository_types.go index 0af0d4cf6..62b0e9a6d 100644 --- a/api/v1beta1/helmrepository_types.go +++ b/api/v1beta1/helmrepository_types.go @@ -113,7 +113,13 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionUnknown, + Reason: meta.ProgressingReason, + Message: "reconciliation in progress", + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } @@ -123,7 +129,13 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reason, message string) HelmRepository { repository.Status.Artifact = &artifact repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionTrue, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } @@ -131,7 +143,13 @@ func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reas // HelmRepository to 'False', with the given reason and message. It returns the // modified HelmRepository. 
func HelmRepositoryNotReady(repository HelmRepository, reason, message string) HelmRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go index 057b59b41..1ce3f1c1a 100644 --- a/api/v1beta2/bucket_types.go +++ b/api/v1beta2/bucket_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -126,7 +127,7 @@ func BucketProgressing(bucket Bucket) Bucket { bucket.Status.ObservedGeneration = bucket.Generation bucket.Status.URL = "" bucket.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&bucket, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return bucket } @@ -136,14 +137,14 @@ func BucketProgressing(bucket Bucket) Bucket { func BucketReady(bucket Bucket, artifact Artifact, url, reason, message string) Bucket { bucket.Status.Artifact = &artifact bucket.Status.URL = url - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&bucket, meta.ReadyCondition, reason, message) return bucket } // BucketNotReady sets the meta.ReadyCondition on the Bucket to 'False', with // the given reason and message. It returns the modified Bucket. func BucketNotReady(bucket Bucket, reason, message string) Bucket { - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&bucket, meta.ReadyCondition, reason, message) return bucket } @@ -158,22 +159,32 @@ func BucketReadyMessage(bucket Bucket) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetConditions returns the status conditions of the object. +func (in Bucket) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *Bucket) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetInterval returns the interval at which the source is reconciled. +func (in Bucket) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *Bucket) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. func (in *Bucket) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. 
-func (in *Bucket) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:storageversion diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go index e92700664..0fe30440b 100644 --- a/api/v1beta2/gitrepository_types.go +++ b/api/v1beta2/gitrepository_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -215,7 +216,7 @@ func GitRepositoryProgressing(repository GitRepository) GitRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&repository, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return repository } @@ -226,7 +227,7 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt repository.Status.Artifact = &artifact repository.Status.IncludedArtifacts = includedArtifacts repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&repository, meta.ReadyCondition, reason, message) return repository } @@ -234,7 +235,7 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt // to 'False', with the given reason and message. It returns the modified // GitRepository. func GitRepositoryNotReady(repository GitRepository, reason, message string) GitRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&repository, meta.ReadyCondition, reason, message) return repository } @@ -249,22 +250,32 @@ func GitRepositoryReadyMessage(repository GitRepository) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetConditions returns the status conditions of the object. +func (in GitRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *GitRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetInterval returns the interval at which the source is reconciled. +func (in GitRepository) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *GitRepository) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. func (in *GitRepository) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. 
-func (in *GitRepository) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:storageversion diff --git a/api/v1beta2/helmchart_types.go b/api/v1beta2/helmchart_types.go index 033fd1a35..1c3172b06 100644 --- a/api/v1beta2/helmchart_types.go +++ b/api/v1beta2/helmchart_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) // HelmChartKind is the string representation of a HelmChart. @@ -152,7 +153,7 @@ func HelmChartProgressing(chart HelmChart) HelmChart { chart.Status.ObservedGeneration = chart.Generation chart.Status.URL = "" chart.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&chart, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return chart } @@ -162,7 +163,7 @@ func HelmChartProgressing(chart HelmChart) HelmChart { func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message string) HelmChart { chart.Status.Artifact = &artifact chart.Status.URL = url - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&chart, meta.ReadyCondition, reason, message) return chart } @@ -170,7 +171,7 @@ func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message str // 'False', with the given reason and message. It returns the modified // HelmChart. func HelmChartNotReady(chart HelmChart, reason, message string) HelmChart { - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&chart, meta.ReadyCondition, reason, message) return chart } @@ -185,22 +186,26 @@ func HelmChartReadyMessage(chart HelmChart) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. -func (in *HelmChart) GetArtifact() *Artifact { - return in.Status.Artifact +// GetConditions returns the status conditions of the object. +func (in HelmChart) GetConditions() []metav1.Condition { + return in.Status.Conditions } -// GetStatusConditions returns a pointer to the Status.Conditions slice -func (in *HelmChart) GetStatusConditions() *[]metav1.Condition { - return &in.Status.Conditions +// SetConditions sets the status conditions on the object. +func (in *HelmChart) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions } -// GetInterval returns the interval at which the source is updated. -func (in *HelmChart) GetInterval() metav1.Duration { +// GetInterval returns the interval at which the source is reconciled. +func (in HelmChart) GetInterval() metav1.Duration { return in.Spec.Interval } +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. +func (in *HelmChart) GetArtifact() *Artifact { + return in.Status.Artifact +} + // GetValuesFiles returns a merged list of ValuesFiles. func (in *HelmChart) GetValuesFiles() []string { valuesFiles := in.Spec.ValuesFiles @@ -212,6 +217,12 @@ func (in *HelmChart) GetValuesFiles() []string { return valuesFiles } +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. 
+func (in *HelmChart) GetStatusConditions() *[]metav1.Condition { + return &in.Status.Conditions +} + // +genclient // +genclient:Namespaced // +kubebuilder:storageversion diff --git a/api/v1beta2/helmrepository_types.go b/api/v1beta2/helmrepository_types.go index afd5bdbb4..d421a30f6 100644 --- a/api/v1beta2/helmrepository_types.go +++ b/api/v1beta2/helmrepository_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -113,7 +114,7 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&repository, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return repository } @@ -123,7 +124,7 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reason, message string) HelmRepository { repository.Status.Artifact = &artifact repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&repository, meta.ReadyCondition, reason, message) return repository } @@ -131,7 +132,7 @@ func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reas // HelmRepository to 'False', with the given reason and message. It returns the // modified HelmRepository. func HelmRepositoryNotReady(repository HelmRepository, reason, message string) HelmRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&repository, meta.ReadyCondition, reason, message) return repository } @@ -146,22 +147,32 @@ func HelmRepositoryReadyMessage(repository HelmRepository) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetConditions returns the status conditions of the object. +func (in HelmRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetInterval returns the interval at which the source is reconciled. +func (in HelmRepository) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *HelmRepository) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. func (in *HelmRepository) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. 
-func (in *HelmRepository) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:storageversion diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index d5de513bd..55fd59894 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -111,7 +111,7 @@ spec: for the Bucket. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -233,7 +233,8 @@ spec: type: array lastHandledReconcileAt: description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. @@ -344,7 +345,7 @@ spec: for the Bucket. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -466,7 +467,8 @@ spec: type: array lastHandledReconcileAt: description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index 5a9d809bf..47c99328e 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -106,7 +106,7 @@ spec: description: Reference to a GitRepository to include. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -153,7 +153,7 @@ spec: and known_hosts fields. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -186,7 +186,7 @@ spec: trusted Git authors. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -332,7 +332,8 @@ spec: type: array lastHandledReconcileAt: description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. @@ -437,7 +438,7 @@ spec: description: Reference to a GitRepository to include. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -484,7 +485,7 @@ spec: and 'known_hosts' fields. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -518,7 +519,7 @@ spec: Git authors. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -664,7 +665,8 @@ spec: type: array lastHandledReconcileAt: description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. + reconcile request value, so a change of the annotation value can + be detected. 
type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index b32ea6184..06d6773ab 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -258,7 +258,8 @@ spec: type: array lastHandledReconcileAt: description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. @@ -513,7 +514,8 @@ spec: type: array lastHandledReconcileAt: description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml index 328f33d14..cb8f6c411 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml @@ -96,7 +96,7 @@ spec: certFile and keyFile, and/or caCert fields. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -221,7 +221,8 @@ spec: type: array lastHandledReconcileAt: description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. @@ -314,7 +315,7 @@ spec: certFile and keyFile, and/or caCert fields. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -439,7 +440,8 @@ spec: type: array lastHandledReconcileAt: description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. 
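The helmrepository_types.go changes above swap meta.SetResourceCondition for the
fluxcd/pkg/runtime/conditions helpers, which operate on any API object that exposes
the GetConditions/SetConditions accessors added in this patch. A minimal sketch of
that pattern (the repo variable and the reason/message strings are illustrative
placeholders, not values taken from this patch):

    // repo is a v1beta2.HelmRepository; the helpers accept it because it
    // implements GetConditions and SetConditions.
    conditions.MarkUnknown(&repo, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress")
    conditions.MarkTrue(&repo, meta.ReadyCondition, "FetchSucceeded", "fetched Helm repository index")
    conditions.MarkFalse(&repo, meta.ReadyCondition, "FetchFailed", "failed to fetch Helm repository index")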
diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 66f1487ff..a819f09e4 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -541,21 +541,11 @@ func (r *BucketReconciler) gc(bucket sourcev1.Bucket) error { // event emits a Kubernetes event and forwards the event to notification controller if configured func (r *BucketReconciler) event(ctx context.Context, bucket sourcev1.Bucket, severity, msg string) { - log := ctrl.LoggerFrom(ctx) if r.EventRecorder != nil { - r.EventRecorder.Eventf(&bucket, "Normal", severity, msg) + r.EventRecorder.Eventf(&bucket, corev1.EventTypeNormal, severity, msg) } if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } + r.ExternalEventRecorder.Eventf(&bucket, corev1.EventTypeNormal, severity, msg) } } diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 0a3f02a29..976b24c0a 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -122,7 +122,7 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques // check dependencies if len(repository.Spec.Include) > 0 { if err := r.checkDependencies(repository); err != nil { - repository = sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()) + repository = sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()) if err := r.updateStatus(ctx, req, repository.Status); err != nil { log.Error(err, "unable to update status for dependency not ready") return ctrl.Result{Requeue: true}, err @@ -284,7 +284,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sour var gr sourcev1.GitRepository err := r.Get(context.Background(), dName, &gr) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err } includedArtifacts = append(includedArtifacts, gr.GetArtifact()) } @@ -329,11 +329,11 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sour for i, incl := range repository.Spec.Include { toPath, err := securejoin.SecureJoin(tmpGit, incl.GetToPath()) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err } err = r.Storage.CopyToPath(includedArtifacts[i], incl.GetFromPath(), toPath) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err } } @@ -423,22 +423,11 @@ func (r *GitRepositoryReconciler) gc(repository sourcev1.GitRepository) error { // event emits a Kubernetes event and forwards the event to notification controller if configured func (r *GitRepositoryReconciler) event(ctx context.Context, repository sourcev1.GitRepository, severity, msg string) { - log := ctrl.LoggerFrom(ctx) - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&repository, "Normal", severity, msg) + r.EventRecorder.Eventf(&repository, corev1.EventTypeNormal, 
severity, msg) } if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } + r.ExternalEventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) } } diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index 4d5d4244c..06255be25 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -634,21 +634,11 @@ func (r *HelmChartReconciler) gc(chart sourcev1.HelmChart) error { // event emits a Kubernetes event and forwards the event to notification // controller if configured. func (r *HelmChartReconciler) event(ctx context.Context, chart sourcev1.HelmChart, severity, msg string) { - log := ctrl.LoggerFrom(ctx) if r.EventRecorder != nil { - r.EventRecorder.Eventf(&chart, "Normal", severity, msg) + r.EventRecorder.Eventf(&chart, corev1.EventTypeNormal, severity, msg) } if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &chart) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } + r.ExternalEventRecorder.Eventf(&chart, corev1.EventTypeNormal, severity, msg) } } diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index b2f469b36..d53afff0c 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -129,9 +129,9 @@ var _ = Describe("HelmChartReconciler", func() { got := &sourcev1.HelmChart{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && storage.ArtifactExist(*got.Status.Artifact) + return got.Status.Artifact != nil && ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -146,7 +146,7 @@ var _ = Describe("HelmChartReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) When("Setting valid valuesFiles attribute", func() { @@ -161,12 +161,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := 
loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -184,12 +184,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -207,12 +207,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -228,12 +228,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -250,12 +250,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) 
Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -271,12 +271,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -682,7 +682,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Committing a new version in the chart metadata") @@ -727,9 +727,9 @@ var _ = Describe("HelmChartReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*now.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*now.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values).ToNot(BeNil()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) @@ -744,7 +744,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Revision != updated.Status.Artifact.Revision && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) Expect(got.Status.Artifact.Revision).To(ContainSubstring(updated.Status.Artifact.Revision)) Expect(got.Status.Artifact.Revision).To(ContainSubstring(commit.String()[0:12])) @@ -762,12 +762,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -785,12 +785,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), 
key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -808,12 +808,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -834,16 +834,16 @@ var _ = Describe("HelmChartReconciler", func() { // Use status condition to be sure. for _, condn := range got.Status.Conditions { if strings.Contains(condn.Message, "with merged values files [./testdata/charts/helmchart/override.yaml]") && - storage.ArtifactExist(*got.Status.Artifact) { + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) { return true } } return false }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -860,12 +860,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -970,7 +970,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - 
storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) }) }) @@ -1213,9 +1213,9 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -1232,12 +1232,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1255,12 +1255,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1278,12 +1278,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1299,12 +1299,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return 
got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -1321,12 +1321,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index 34723a92d..42050368b 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -335,21 +335,11 @@ func (r *HelmRepositoryReconciler) gc(repository sourcev1.HelmRepository) error // event emits a Kubernetes event and forwards the event to notification controller if configured func (r *HelmRepositoryReconciler) event(ctx context.Context, repository sourcev1.HelmRepository, severity, msg string) { - log := ctrl.LoggerFrom(ctx) if r.EventRecorder != nil { - r.EventRecorder.Eventf(&repository, "Normal", severity, msg) + r.EventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) } if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } + r.ExternalEventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) } } diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index 171ce8424..bd7172fca 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -99,7 +99,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && storage.ArtifactExist(*got.Status.Artifact) + return got.Status.Artifact != nil && ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Updating the chart index") @@ -112,7 +112,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return 
now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) updated := &sourcev1.HelmRepository{} @@ -291,7 +291,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Expecting missing secret error") @@ -385,7 +385,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Expecting missing secret error") diff --git a/controllers/legacy_suite_test.go b/controllers/legacy_suite_test.go new file mode 100644 index 000000000..911f735b0 --- /dev/null +++ b/controllers/legacy_suite_test.go @@ -0,0 +1,197 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "math/rand" + "net/http" + "os" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "helm.sh/helm/v3/pkg/getter" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var cfg *rest.Config +var k8sClient client.Client +var k8sManager ctrl.Manager +var ginkgoTestEnv *envtest.Environment +var ginkgoTestStorage *Storage + +var examplePublicKey []byte +var examplePrivateKey []byte +var exampleCA []byte +var lctx context.Context +var cancel context.CancelFunc + +const ginkgoTimeout = time.Second * 30 + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecsWithDefaultAndCustomReporters(t, + "Controller Suite", + []Reporter{printer.NewlineReporter{}}) +} + +var _ = BeforeSuite(func() { + done := make(chan interface{}) + go func() { + close(done) + }() + + logf.SetLogger( + zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)), + ) + lctx, cancel = context.WithCancel(ctx) + + By("bootstrapping test environment") + t := true + if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { + ginkgoTestEnv = &envtest.Environment{ + UseExistingCluster: &t, + } + } else { + ginkgoTestEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + } + } + + var err error + cfg, err = ginkgoTestEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + err = sourcev1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + Expect(loadExampleKeys()).To(Succeed()) + + tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-") + Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage dir") + + ginkgoTestStorage, err = NewStorage(tmpStoragePath, "localhost:5050", time.Second*30) + Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage") + // serve artifacts from the filesystem, as done in main.go + fs := http.FileServer(http.Dir(tmpStoragePath)) + http.Handle("/", fs) + go http.ListenAndServe(":5050", nil) + + k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ + MetricsBindAddress: "0", + Scheme: scheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + + err = (&GitRepositoryReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup GtRepositoryReconciler") + + err = (&HelmRepositoryReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + Getters: getter.Providers{getter.Provider{ + Schemes: []string{"http", "https"}, + New: getter.NewHTTPGetter, + }}, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler") + + err = (&HelmChartReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + Getters: getter.Providers{getter.Provider{ + Schemes: []string{"http", "https"}, + New: getter.NewHTTPGetter, + }}, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup HelmChartReconciler") + + go func() { + err = k8sManager.Start(lctx) + Expect(err).ToNot(HaveOccurred()) + }() + + k8sClient = k8sManager.GetClient() + Expect(k8sClient).ToNot(BeNil()) + + Eventually(done, ginkgoTimeout).Should(BeClosed()) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + defer GinkgoRecover() + cancel() + if ginkgoTestStorage != nil { + err := os.RemoveAll(ginkgoTestStorage.BasePath) + Expect(err).NotTo(HaveOccurred()) + } + err := ginkgoTestEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func loadExampleKeys() (err error) { + examplePublicKey, err = 
os.ReadFile("testdata/certs/server.pem") + if err != nil { + return err + } + examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") + if err != nil { + return err + } + exampleCA, err = os.ReadFile("testdata/certs/ca.pem") + return err +} + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") + +func randStringRunes(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} diff --git a/controllers/storage.go b/controllers/storage.go index 57cee19c1..8f892da6d 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -53,7 +53,7 @@ type Storage struct { Timeout time.Duration `json:"timeout"` } -// NewStorage creates the storage helper for a given path and hostname +// NewStorage creates the storage helper for a given path and hostname. func NewStorage(basePath string, hostname string, timeout time.Duration) (*Storage, error) { if f, err := os.Stat(basePath); os.IsNotExist(err) || !f.IsDir() { return nil, fmt.Errorf("invalid dir path: %s", basePath) @@ -81,7 +81,11 @@ func (s Storage) SetArtifactURL(artifact *sourcev1.Artifact) { if artifact.Path == "" { return } - artifact.URL = fmt.Sprintf("http://%s/%s", s.Hostname, artifact.Path) + format := "http://%s/%s" + if strings.HasPrefix(s.Hostname, "http://") || strings.HasPrefix(s.Hostname, "https://") { + format = "%s/%s" + } + artifact.URL = fmt.Sprintf(format, s.Hostname, strings.TrimLeft(artifact.Path, "/")) } // SetHostname sets the hostname of the given URL string to the current Storage.Hostname and returns the result. diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 545872f80..2710f6f77 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -17,186 +17,128 @@ limitations under the License. package controllers import ( - "context" + "fmt" "math/rand" - "net/http" "os" "path/filepath" "testing" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "helm.sh/helm/v3/pkg/getter" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" + + "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/testenv" + "github.com/fluxcd/pkg/testserver" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" // +kubebuilder:scaffold:imports ) -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. - -var cfg *rest.Config -var k8sClient client.Client -var k8sManager ctrl.Manager -var testEnv *envtest.Environment -var storage *Storage +// These tests make use of plain Go using Gomega for assertions. +// At the beginning of every (sub)test Gomega can be initialized +// using gomega.NewWithT. +// Refer to http://onsi.github.io/gomega/ to learn more about +// Gomega. 
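// Illustrative sketch only (not part of the patch above): a plain Go test that
// initializes Gomega per (sub)test with gomega.NewWithT, as the comment above
// describes. The test name and asserted values are hypothetical; it assumes
// `import "github.com/onsi/gomega"` next to the standard "testing" import.
func TestGomegaSketch(t *testing.T) {
	g := gomega.NewWithT(t)
	// Trivial assertion, just to show the g.Expect(...).To(...) flow.
	g.Expect(1 + 1).To(gomega.Equal(2))
}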
-var examplePublicKey []byte -var examplePrivateKey []byte -var exampleCA []byte -var ctx context.Context -var cancel context.CancelFunc +const ( + timeout = 10 * time.Second + interval = 1 * time.Second +) -const timeout = time.Second * 60 +var ( + testEnv *testenv.Environment + testStorage *Storage + testServer *testserver.ArtifactServer + testMetricsH controller.Metrics + ctx = ctrl.SetupSignalHandler() +) -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) +var ( + tlsPublicKey []byte + tlsPrivateKey []byte + tlsCA []byte +) - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) +func init() { + rand.Seed(time.Now().UnixNano()) } -var _ = BeforeSuite(func() { - done := make(chan interface{}) - go func() { - close(done) - }() +func TestMain(m *testing.M) { + initTestTLS() - logf.SetLogger( - zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)), - ) - ctx, cancel = context.WithCancel(context.TODO()) + utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme)) - By("bootstrapping test environment") - t := true - if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { - testEnv = &envtest.Environment{ - UseExistingCluster: &t, - } - } else { - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, - } - } + testEnv = testenv.New(testenv.WithCRDPath(filepath.Join("..", "config", "crd", "bases"))) var err error - cfg, err = testEnv.Start() - Expect(err).ToNot(HaveOccurred()) - Expect(cfg).ToNot(BeNil()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:scheme - - Expect(loadExampleKeys()).To(Succeed()) - - tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-") - Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage dir") - - storage, err = NewStorage(tmpStoragePath, "localhost:5050", time.Second*30) - Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage") - // serve artifacts from the filesystem, as done in main.go - fs := http.FileServer(http.Dir(tmpStoragePath)) - http.Handle("/", fs) - go http.ListenAndServe(":5050", nil) - - k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - }) - Expect(err).ToNot(HaveOccurred()) - - err = (&GitRepositoryReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup GtRepositoryReconciler") - - err = (&HelmRepositoryReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - Getters: getter.Providers{getter.Provider{ - Schemes: []string{"http", "https"}, - New: getter.NewHTTPGetter, - }}, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler") - - err = (&HelmChartReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - Getters: getter.Providers{getter.Provider{ - Schemes: []string{"http", "https"}, - New: getter.NewHTTPGetter, - }}, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup HelmChartReconciler") + testServer, err = testserver.NewTempArtifactServer() + if err != nil { + panic(fmt.Sprintf("Failed to create a temporary storage server: %v", err)) + } + fmt.Println("Starting the test 
storage server") + testServer.Start() + + testStorage, err = newTestStorage(testServer.HTTPServer) + if err != nil { + panic(fmt.Sprintf("Failed to create a test storage: %v", err)) + } + + testMetricsH = controller.MustMakeMetrics(testEnv) + + //if err := (&GitRepositoryReconciler{ + // Client: testEnv, + // Metrics: testMetricsH, + // Storage: testStorage, + //}).SetupWithManager(testEnv); err != nil { + // panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err)) + //} go func() { - defer GinkgoRecover() - err = k8sManager.Start(ctx) - Expect(err).ToNot(HaveOccurred()) + fmt.Println("Starting the test environment") + if err := testEnv.Start(ctx); err != nil { + panic(fmt.Sprintf("Failed to start the test environment manager: %v", err)) + } }() + <-testEnv.Manager.Elected() - k8sClient = k8sManager.GetClient() - Expect(k8sClient).ToNot(BeNil()) + code := m.Run() - Eventually(done, timeout).Should(BeClosed()) -}, timeout.Seconds()) + fmt.Println("Stopping the test environment") + if err := testEnv.Stop(); err != nil { + panic(fmt.Sprintf("Failed to stop the test environment: %v", err)) + } -var _ = AfterSuite(func() { - cancel() - By("tearing down the test environment") - if storage != nil { - err := os.RemoveAll(storage.BasePath) - Expect(err).NotTo(HaveOccurred()) + fmt.Println("Stopping the storage server") + testServer.Stop() + if err := os.RemoveAll(testServer.Root()); err != nil { + panic(fmt.Sprintf("Failed to remove storage server dir: %v", err)) } - err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) -}) -func init() { - rand.Seed(time.Now().UnixNano()) + os.Exit(code) } -func loadExampleKeys() (err error) { - examplePublicKey, err = os.ReadFile("testdata/certs/server.pem") +func initTestTLS() { + var err error + tlsPublicKey, err = os.ReadFile("testdata/certs/server.pem") if err != nil { - return err + panic(err) } - examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") + tlsPrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") if err != nil { - return err + panic(err) + } + tlsCA, err = os.ReadFile("testdata/certs/ca.pem") + if err != nil { + panic(err) } - exampleCA, err = os.ReadFile("testdata/certs/ca.pem") - return err } -var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") - -func randStringRunes(n int) string { - b := make([]rune, n) - for i := range b { - b[i] = letterRunes[rand.Intn(len(letterRunes))] +func newTestStorage(s *testserver.HTTPServer) (*Storage, error) { + storage, err := NewStorage(s.Root(), s.URL(), timeout) + if err != nil { + return nil, err } - return string(b) + return storage, nil } diff --git a/docs/api/source.md b/docs/api/source.md index d1144a3ed..597eae693 100644 --- a/docs/api/source.md +++ b/docs/api/source.md @@ -313,10 +313,8 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference (Optional)

 The secret name containing the Git credentials.
-For HTTPS repositories the secret must contain username and password
-fields.
-For SSH repositories the secret must contain identity, identity.pub and
-known_hosts fields.
+For HTTPS repositories the secret must contain username and password fields.
+For SSH repositories the secret must contain ‘identity’, ‘identity.pub’ and ‘known_hosts’ fields.
@@ -372,7 +370,7 @@ GitRepositoryVerification (Optional)
-Verify OpenPGP signature for the Git commit HEAD points to.
+Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to.
@@ -384,9 +382,8 @@ string (Optional)
-Ignore overrides the set of excluded patterns in the .sourceignore format
-(which is the same as .gitignore). If not provided, a default will be used,
-consult the documentation for your version to find out what those are.
+Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore).
+If not provided, a default will be used, consult the documentation for your version to find out what those are.
@@ -398,7 +395,8 @@ bool (Optional)
-This flag tells the controller to suspend the reconciliation of this source.
+Suspend tells the controller to suspend the reconciliation of this source.
+This flag tells the controller to suspend the reconciliation of this source.
@@ -423,8 +421,7 @@ bool (Optional)
-When enabled, after the clone is created, initializes all submodules within,
-using their default settings.
+When enabled, after the clone is created, initializes all submodules within, using their default settings.
 This option is available only when using the ‘go-git’ GitImplementation.
@@ -438,7 +435,8 @@ This option is available only when using the ‘go-git’ GitImplementat
-Extra git repositories to map into the repository
+Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for
+this resource.
@@ -1349,10 +1347,8 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference (Optional)
 The secret name containing the Git credentials.
-For HTTPS repositories the secret must contain username and password
-fields.
-For SSH repositories the secret must contain identity, identity.pub and
-known_hosts fields.
+For HTTPS repositories the secret must contain username and password fields.
+For SSH repositories the secret must contain ‘identity’, ‘identity.pub’ and ‘known_hosts’ fields.
@@ -1408,7 +1404,7 @@ GitRepositoryVerification (Optional)
-Verify OpenPGP signature for the Git commit HEAD points to.
+Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to.
@@ -1420,9 +1416,8 @@ string (Optional)
-Ignore overrides the set of excluded patterns in the .sourceignore format
-(which is the same as .gitignore). If not provided, a default will be used,
-consult the documentation for your version to find out what those are.
+Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore).
+If not provided, a default will be used, consult the documentation for your version to find out what those are.
@@ -1434,7 +1429,8 @@ bool (Optional)
-This flag tells the controller to suspend the reconciliation of this source.
+Suspend tells the controller to suspend the reconciliation of this source.
+This flag tells the controller to suspend the reconciliation of this source.
@@ -1459,8 +1455,7 @@ bool (Optional)
-When enabled, after the clone is created, initializes all submodules within,
-using their default settings.
+When enabled, after the clone is created, initializes all submodules within, using their default settings.
 This option is available only when using the ‘go-git’ GitImplementation.
@@ -1474,7 +1469,8 @@ This option is available only when using the ‘go-git’ GitImplementat
-Extra git repositories to map into the repository
+Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for
+this resource.
@@ -1547,8 +1543,7 @@ string (Optional)
-URL is the download link for the artifact output of the last repository
-sync.
+URL is the download link for the artifact output of the last repository sync.
@@ -1623,7 +1618,7 @@ string
-Mode describes what git object should be verified, currently (‘head’).
+Mode describes what Git object should be verified, currently (‘head’).
@@ -1636,7 +1631,7 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference
-The secret name containing the public keys of all trusted Git authors.
+SecretRef containing the public keys of all trusted Git authors.

diff --git a/go.mod b/go.mod index 16b1fd8e1..538fdb57b 100644 --- a/go.mod +++ b/go.mod @@ -10,13 +10,14 @@ require ( github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 github.com/cyphar/filepath-securejoin v0.2.2 github.com/elazarl/goproxy v0.0.0-20211114080932-d06c3be7c11b - github.com/fluxcd/pkg/apis/meta v0.10.2 + github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 github.com/fluxcd/pkg/gittestserver v0.5.0 github.com/fluxcd/pkg/gitutil v0.1.0 github.com/fluxcd/pkg/helmtestserver v0.4.0 github.com/fluxcd/pkg/lockedfile v0.1.0 - github.com/fluxcd/pkg/runtime v0.12.3 + github.com/fluxcd/pkg/runtime v0.13.0-rc.7 github.com/fluxcd/pkg/ssh v0.2.0 + github.com/fluxcd/pkg/testserver v0.2.0 github.com/fluxcd/pkg/untar v0.1.0 github.com/fluxcd/pkg/version v0.1.0 github.com/fluxcd/source-controller/api v0.21.2 @@ -77,7 +78,6 @@ require ( github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/color v1.7.0 // indirect github.com/fluxcd/pkg/apis/acl v0.0.3 // indirect - github.com/fluxcd/pkg/testserver v0.1.0 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-errors/errors v1.0.1 // indirect github.com/go-git/gcfg v1.5.0 // indirect diff --git a/go.sum b/go.sum index 0b78f866f..c297abfa3 100644 --- a/go.sum +++ b/go.sum @@ -301,8 +301,8 @@ github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8S github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc= github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU= -github.com/fluxcd/pkg/apis/meta v0.10.2 h1:pnDBBEvfs4HaKiVAYgz+e/AQ8dLvcgmVfSeBroZ/KKI= -github.com/fluxcd/pkg/apis/meta v0.10.2/go.mod h1:KQ2er9xa6koy7uoPMZjIjNudB5p4tXs+w0GO6fRcy7I= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 h1:YY6RlaHIMXawgEOJhJbSrm4NpD9fJTCWFGKgtNfQ0/g= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3/go.mod h1:ki5wJE4nuFOZt78q0RSYkrKwINgIBPynuswZhnTOSoI= github.com/fluxcd/pkg/gittestserver v0.5.0 h1:pPdaz7pUsukt4eQ+xQeNwoypOXGGOHFHnPjIHQAv0tE= github.com/fluxcd/pkg/gittestserver v0.5.0/go.mod h1:mFEF/Xrg+CjQH4VFCRCou2qZmhWKo7EYcjr7MIoX6+s= github.com/fluxcd/pkg/gitutil v0.1.0 h1:VO3kJY/CKOCO4ysDNqfdpTg04icAKBOSb3lbR5uE/IE= @@ -311,12 +311,14 @@ github.com/fluxcd/pkg/helmtestserver v0.4.0 h1:RT0G5buw5qrzEfIIH0fklppIvPAaQF//p github.com/fluxcd/pkg/helmtestserver v0.4.0/go.mod h1:JOI9f3oXUFIWmMKWMBan7FjglAU+fRTO/sPPV/Kj3gQ= github.com/fluxcd/pkg/lockedfile v0.1.0 h1:YsYFAkd6wawMCcD74ikadAKXA4s2sukdxrn7w8RB5eo= github.com/fluxcd/pkg/lockedfile v0.1.0/go.mod h1:EJLan8t9MiOcgTs8+puDjbE6I/KAfHbdvIy9VUgIjm8= -github.com/fluxcd/pkg/runtime v0.12.3 h1:h21AZ3YG5MAP7DxFF9hfKrP+vFzys2L7CkUbPFjbP/0= -github.com/fluxcd/pkg/runtime v0.12.3/go.mod h1:imJ2xYy/d4PbSinX2IefmZk+iS2c1P5fY0js8mCE4SM= +github.com/fluxcd/pkg/runtime v0.13.0-rc.6/go.mod h1:4oKUO19TeudXrnCRnxCfMSS7EQTYpYlgfXwlQuDJ/Eg= +github.com/fluxcd/pkg/runtime v0.13.0-rc.7 h1:snESiRwjrmNchIBautlxnXn8HzmeDEnS3PsMbP2fyeg= +github.com/fluxcd/pkg/runtime v0.13.0-rc.7/go.mod h1:uGPudgMUNC3wu7Zoh6AgJM8WSH3VpmnzjrwkVb86d3Y= github.com/fluxcd/pkg/ssh v0.2.0 h1:e9V+HReOL7czm7edVzYS1e+CnFKz1/kHiUNfLRpBdH8= github.com/fluxcd/pkg/ssh v0.2.0/go.mod h1:EpQC7Ztdlbi8S/dlYXqVDZtHtLpN3FNl3N6zWujVzbA= -github.com/fluxcd/pkg/testserver v0.1.0 h1:nOYgM1HYFZNNSUFykuWDmrsxj4jQxUCvmLHWOQeqmyA= github.com/fluxcd/pkg/testserver v0.1.0/go.mod h1:fvt8BHhXw6c1+CLw1QFZxcQprlcXzsrL4rzXaiGM+Iw= 
+github.com/fluxcd/pkg/testserver v0.2.0 h1:Mj0TapmKaywI6Fi5wvt1LAZpakUHmtzWQpJNKQ0Krt4= +github.com/fluxcd/pkg/testserver v0.2.0/go.mod h1:bgjjydkXsZTeFzjz9Cr4heGANr41uTB1Aj1Q5qzuYVk= github.com/fluxcd/pkg/untar v0.1.0 h1:k97V/xV5hFrAkIkVPuv5AVhyxh1ZzzAKba/lbDfGo6o= github.com/fluxcd/pkg/untar v0.1.0/go.mod h1:aGswNyzB1mlz/T/kpOS58mITBMxMKc9tlJBH037A2HY= github.com/fluxcd/pkg/version v0.1.0 h1:v+SmCanmCB5Tj2Cx9TXlj+kNRfPGbAvirkeqsp7ZEAQ= @@ -1224,6 +1226,7 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c= diff --git a/main.go b/main.go index 67bbdecd1..5f94d6cdf 100644 --- a/main.go +++ b/main.go @@ -123,18 +123,6 @@ func main() { helm.MaxChartSize = helmChartLimit helm.MaxChartFileSize = helmChartFileLimit - var eventRecorder *events.Recorder - if eventsAddr != "" { - var err error - if eventRecorder, err = events.NewRecorder(eventsAddr, controllerName); err != nil { - setupLog.Error(err, "unable to create event recorder") - os.Exit(1) - } - } - - metricsRecorder := metrics.NewRecorder() - crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...) - watchNamespace := "" if !watchAllNamespaces { watchNamespace = os.Getenv("RUNTIME_NAMESPACE") @@ -163,6 +151,18 @@ func main() { probes.SetupChecks(mgr, setupLog) pprof.SetupHandlers(mgr, setupLog) + var eventRecorder *events.Recorder + if eventsAddr != "" { + var err error + if eventRecorder, err = events.NewRecorder(mgr, ctrl.Log, eventsAddr, controllerName); err != nil { + setupLog.Error(err, "unable to create event recorder") + os.Exit(1) + } + } + + metricsRecorder := metrics.NewRecorder() + crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...) + if storageAdvAddr == "" { storageAdvAddr = determineAdvStorageAddr(storageAddr, setupLog) } From e190059cc7bc25ab2fbd99951f2fdca1f6fb2a9e Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Fri, 30 Jul 2021 12:54:45 +0200 Subject: [PATCH 05/63] Introduce `artifactSet` to replace `hasArtifactUpdated` NOTE: Remove `hasArtifactUpdated` in the future once it's no longer used. Signed-off-by: Hidde Beydals --- controllers/artifact.go | 38 ++++++++++++++++++++++++ controllers/artifact_test.go | 56 ++++++++++++++++++++++-------------- 2 files changed, 73 insertions(+), 21 deletions(-) diff --git a/controllers/artifact.go b/controllers/artifact.go index 2eff57747..8d034f075 100644 --- a/controllers/artifact.go +++ b/controllers/artifact.go @@ -1,9 +1,47 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package controllers import sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" +type artifactSet []*sourcev1.Artifact + +// Diff returns true if any of the revisions in the artifactSet does not match any of the given artifacts. +func (s artifactSet) Diff(set artifactSet) bool { + if len(s) != len(set) { + return true + } + +outer: + for _, j := range s { + for _, k := range set { + if k.HasRevision(j.Revision) { + continue outer + } + } + return true + } + return false +} + // hasArtifactUpdated returns true if any of the revisions in the current artifacts // does not match any of the artifacts in the updated artifacts +// NOTE: artifactSet is a replacement for this. Remove this once it's not used +// anywhere. func hasArtifactUpdated(current []*sourcev1.Artifact, updated []*sourcev1.Artifact) bool { if len(current) != len(updated) { return true diff --git a/controllers/artifact_test.go b/controllers/artifact_test.go index 9746dfd8e..935c93bf7 100644 --- a/controllers/artifact_test.go +++ b/controllers/artifact_test.go @@ -1,26 +1,40 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package controllers import ( "testing" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" ) -func TestHasUpdated(t *testing.T) { +func Test_artifactSet_Diff(t *testing.T) { tests := []struct { name string - current []*sourcev1.Artifact - updated []*sourcev1.Artifact + current artifactSet + updated artifactSet expected bool }{ { - name: "not updated single", - current: []*sourcev1.Artifact{ + name: "one artifact, no diff", + current: artifactSet{ { Revision: "foo", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -28,13 +42,13 @@ func TestHasUpdated(t *testing.T) { expected: false, }, { - name: "updated single", - current: []*sourcev1.Artifact{ + name: "one artifact, diff", + current: artifactSet{ { Revision: "foo", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "bar", }, @@ -42,8 +56,8 @@ func TestHasUpdated(t *testing.T) { expected: true, }, { - name: "not updated multiple", - current: []*sourcev1.Artifact{ + name: "multiple artifacts, no diff", + current: artifactSet{ { Revision: "foo", }, @@ -51,7 +65,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -62,8 +76,8 @@ func TestHasUpdated(t *testing.T) { expected: false, }, { - name: "updated multiple", - current: []*sourcev1.Artifact{ + name: "multiple artifacts, diff", + current: artifactSet{ { Revision: "foo", }, @@ -71,7 +85,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -82,8 +96,8 @@ func TestHasUpdated(t *testing.T) { expected: true, }, { - name: "updated different artifact count", - current: []*sourcev1.Artifact{ + name: "different artifact count", + current: artifactSet{ { Revision: "foo", }, @@ -91,7 +105,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -101,7 +115,7 @@ func TestHasUpdated(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := hasArtifactUpdated(tt.current, tt.updated) + result := tt.current.Diff(tt.updated) if result != tt.expected { t.Errorf("Archive() result = %v, wantResult %v", result, tt.expected) } From 9eb6833d4da42fb71cc528157aa37ff7934653f8 Mon Sep 17 00:00:00 2001 From: Sunny Date: Wed, 24 Nov 2021 21:49:03 +0530 Subject: [PATCH 06/63] source: Add `GetRequeueAfter` The problem with `GetInterval()` was that the returned type was of `metav1.Duration`, while almost anywhere it was used, a type of `time.Duration` was requested. The result of this was that we had to call `GetInterval().Duration` all the time, which would become a bit cumbersome after awhile. To prevent this, we introduce a new `GetRequeueAfter() time.Duration` method, which both results the right type, and bears a name that is easier to remember where the value is used most; while setting the `Result.RequeueAfter` during reconcile operations. The introduction of this method deprecates `GetInterval()`, which should be removed in a future MINOR release. 
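A rough illustration of the intended call site (a hypothetical reconciler
snippet, not part of this patch): the returned value can be assigned straight
to the controller-runtime result, without unwrapping a metav1.Duration first.

    // Before: ctrl.Result{RequeueAfter: obj.GetInterval().Duration}
    // After:
    return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil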
Signed-off-by: Sunny Co-authored-by: Hidde Beydals --- api/v1beta2/bucket_types.go | 8 ++++++++ api/v1beta2/gitrepository_types.go | 8 ++++++++ api/v1beta2/helmchart_types.go | 8 ++++++++ api/v1beta2/helmrepository_types.go | 8 ++++++++ api/v1beta2/source.go | 5 +++++ 5 files changed, 37 insertions(+) diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go index 1ce3f1c1a..4626f1693 100644 --- a/api/v1beta2/bucket_types.go +++ b/api/v1beta2/bucket_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta2 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -169,7 +171,13 @@ func (in *Bucket) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in Bucket) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. func (in Bucket) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go index 0fe30440b..effbd5a6c 100644 --- a/api/v1beta2/gitrepository_types.go +++ b/api/v1beta2/gitrepository_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta2 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -260,7 +262,13 @@ func (in *GitRepository) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in GitRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. func (in GitRepository) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta2/helmchart_types.go b/api/v1beta2/helmchart_types.go index 1c3172b06..8b237da0e 100644 --- a/api/v1beta2/helmchart_types.go +++ b/api/v1beta2/helmchart_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta2 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -196,7 +198,13 @@ func (in *HelmChart) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in HelmChart) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. func (in HelmChart) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta2/helmrepository_types.go b/api/v1beta2/helmrepository_types.go index d421a30f6..492ece868 100644 --- a/api/v1beta2/helmrepository_types.go +++ b/api/v1beta2/helmrepository_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta2 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -157,7 +159,13 @@ func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. 
+func (in HelmRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. func (in HelmRepository) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta2/source.go b/api/v1beta2/source.go index b1fde1f15..ed60d96fe 100644 --- a/api/v1beta2/source.go +++ b/api/v1beta2/source.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta2 import ( + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -29,9 +31,12 @@ const ( // Source interface must be supported by all API types. // +k8s:deepcopy-gen=false type Source interface { + // GetRequeueAfter returns the duration after which the source must be reconciled again. + GetRequeueAfter() time.Duration // GetArtifact returns the latest artifact from the source if present in the // status sub-resource. GetArtifact() *Artifact // GetInterval returns the interval at which the source is updated. + // Deprecated: use GetRequeueAfter instead. GetInterval() metav1.Duration } From a1efbad15a85d4f0d2a14c2c2a17035809418f34 Mon Sep 17 00:00:00 2001 From: Sunny Date: Wed, 24 Nov 2021 22:30:20 +0530 Subject: [PATCH 07/63] Use new events and metrics helpers in main.go Signed-off-by: Sunny --- main.go | 64 +++++++++++++++++++++++++-------------------------------- 1 file changed, 28 insertions(+), 36 deletions(-) diff --git a/main.go b/main.go index 5f94d6cdf..bbced344b 100644 --- a/main.go +++ b/main.go @@ -33,13 +33,12 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" - crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" "github.com/fluxcd/pkg/runtime/client" + helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" "github.com/fluxcd/pkg/runtime/leaderelection" "github.com/fluxcd/pkg/runtime/logger" - "github.com/fluxcd/pkg/runtime/metrics" "github.com/fluxcd/pkg/runtime/pprof" "github.com/fluxcd/pkg/runtime/probes" @@ -114,6 +113,7 @@ func main() { clientOptions.BindFlags(flag.CommandLine) logOptions.BindFlags(flag.CommandLine) leaderElectionOptions.BindFlags(flag.CommandLine) + flag.Parse() ctrl.SetLogger(logger.NewLogger(logOptions)) @@ -152,16 +152,12 @@ func main() { pprof.SetupHandlers(mgr, setupLog) var eventRecorder *events.Recorder - if eventsAddr != "" { - var err error - if eventRecorder, err = events.NewRecorder(mgr, ctrl.Log, eventsAddr, controllerName); err != nil { - setupLog.Error(err, "unable to create event recorder") - os.Exit(1) - } + if eventRecorder, err = events.NewRecorder(mgr, ctrl.Log, eventsAddr, controllerName); err != nil { + setupLog.Error(err, "unable to create event recorder") + os.Exit(1) } - metricsRecorder := metrics.NewRecorder() - crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...) 
+ metricsH := helper.MustMakeMetrics(mgr) if storageAdvAddr == "" { storageAdvAddr = determineAdvStorageAddr(storageAddr, setupLog) @@ -169,12 +165,11 @@ func main() { storage := mustInitStorage(storagePath, storageAdvAddr, setupLog) if err = (&controllers.GitRepositoryReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - EventRecorder: mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Storage: storage, + EventRecorder: eventRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.GitRepositoryReconcilerOptions{ MaxConcurrentReconciles: concurrent, DependencyRequeueInterval: requeueDependency, @@ -183,13 +178,12 @@ func main() { os.Exit(1) } if err = (&controllers.HelmRepositoryReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - Getters: getters, - EventRecorder: mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Storage: storage, + Getters: getters, + EventRecorder: eventRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.HelmRepositoryReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { @@ -197,13 +191,12 @@ func main() { os.Exit(1) } if err = (&controllers.HelmChartReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - Getters: getters, - EventRecorder: mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Storage: storage, + Getters: getters, + EventRecorder: eventRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.HelmChartReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { @@ -211,12 +204,11 @@ func main() { os.Exit(1) } if err = (&controllers.BucketReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - EventRecorder: mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Storage: storage, + EventRecorder: eventRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.BucketReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { From b1233dc24f7afc2fc2c60dc51fb6bd4338d888d7 Mon Sep 17 00:00:00 2001 From: Sunny Date: Thu, 25 Nov 2021 00:03:11 +0530 Subject: [PATCH 08/63] Move Artifact conditions to conditions Also, introduce FetchFailedCondition for generic fetch failures. Signed-off-by: Sunny Co-authored-by: Hidde Beydals --- api/v1beta2/condition_types.go | 16 ++++++++++++++++ api/v1beta2/gitrepository_types.go | 8 -------- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/api/v1beta2/condition_types.go b/api/v1beta2/condition_types.go index 7f40af6c0..13c14498e 100644 --- a/api/v1beta2/condition_types.go +++ b/api/v1beta2/condition_types.go @@ -18,6 +18,22 @@ package v1beta2 const SourceFinalizer = "finalizers.fluxcd.io" +const ( + // ArtifactUnavailableCondition indicates there is no Artifact available for the Source. 
+ // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactUnavailableCondition string = "ArtifactUnavailable" + + // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactOutdatedCondition string = "ArtifactOutdated" + + // FetchFailedCondition indicates a transient or persistent fetch failure of an upstream Source. + // If True, observations on the upstream Source revision may be impossible, and the Artifact available for the + // Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + FetchFailedCondition string = "FetchFailed" +) + const ( // URLInvalidReason represents the fact that a given source has an invalid URL. URLInvalidReason string = "URLInvalid" diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go index effbd5a6c..70a74d5bf 100644 --- a/api/v1beta2/gitrepository_types.go +++ b/api/v1beta2/gitrepository_types.go @@ -38,10 +38,6 @@ const ( ) const ( - // ArtifactUnavailableCondition indicates there is no Artifact available for the Source. - // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. - ArtifactUnavailableCondition string = "ArtifactUnavailable" - // CheckoutFailedCondition indicates a transient or persistent checkout failure. If True, observations on the // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. @@ -55,10 +51,6 @@ const ( // exist, or does not have an Artifact. // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. IncludeUnavailableCondition string = "IncludeUnavailable" - - // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. - // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. - ArtifactOutdatedCondition string = "ArtifactOutdated" ) // GitRepositorySpec defines the desired state of a Git repository. From dc9d8b7f6684154b19c0d09ffb03c0a017d11bfb Mon Sep 17 00:00:00 2001 From: Sunny Date: Fri, 26 Nov 2021 00:18:08 +0530 Subject: [PATCH 09/63] Add gomega matcher for artifact Signed-off-by: Sunny Co-authored-by: Hidde Beydals --- controllers/artifact_matchers_test.go | 67 +++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 controllers/artifact_matchers_test.go diff --git a/controllers/artifact_matchers_test.go b/controllers/artifact_matchers_test.go new file mode 100644 index 000000000..06ab529de --- /dev/null +++ b/controllers/artifact_matchers_test.go @@ -0,0 +1,67 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "fmt" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" +) + +// MatchArtifact returns a custom matcher to check equality of a v1beta1.Artifact, the timestamp and URL are ignored. +func MatchArtifact(expected *sourcev1.Artifact) types.GomegaMatcher { + return &matchArtifact{ + expected: expected, + } +} + +type matchArtifact struct { + expected *sourcev1.Artifact +} + +func (m matchArtifact) Match(actual interface{}) (success bool, err error) { + actualArtifact, ok := actual.(*sourcev1.Artifact) + if !ok { + return false, fmt.Errorf("actual should be a pointer to an Artifact") + } + + if ok, _ := BeNil().Match(m.expected); ok { + return BeNil().Match(actual) + } + + if ok, err = Equal(m.expected.Path).Match(actualArtifact.Path); !ok { + return ok, err + } + if ok, err = Equal(m.expected.Revision).Match(actualArtifact.Revision); !ok { + return ok, err + } + if ok, err = Equal(m.expected.Checksum).Match(actualArtifact.Checksum); !ok { + return ok, err + } + + return ok, err +} + +func (m matchArtifact) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%#v\nto match\n\t%#v\n", actual, m.expected) +} + +func (m matchArtifact) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%#v\nto not match\n\t%#v\n", actual, m.expected) +} From 6789aaf14741beb8cb7031ef115429cf2c86d9e0 Mon Sep 17 00:00:00 2001 From: Sunny Date: Thu, 20 Jan 2022 01:09:39 +0530 Subject: [PATCH 10/63] api: Embed runtime.Object in Source interface Embedding runtime.Object in Source interface makes the Source type more useful to interact with k8s API machinery. Signed-off-by: Sunny --- api/v1beta2/source.go | 2 ++ api/v1beta2/zz_generated.deepcopy.go | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/api/v1beta2/source.go b/api/v1beta2/source.go index ed60d96fe..a8db640d9 100644 --- a/api/v1beta2/source.go +++ b/api/v1beta2/source.go @@ -20,6 +20,7 @@ import ( "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) const ( @@ -31,6 +32,7 @@ const ( // Source interface must be supported by all API types. // +k8s:deepcopy-gen=false type Source interface { + runtime.Object // GetRequeueAfter returns the duration after which the source must be reconciled again. GetRequeueAfter() time.Duration // GetArtifact returns the latest artifact from the source if present in the diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index bec40f3a4..53c86a93a 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -25,7 +25,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. From 5df4acb710fca06437b09f5d2243d76d0793e797 Mon Sep 17 00:00:00 2001 From: Sunny Date: Sun, 19 Dec 2021 19:41:32 +0530 Subject: [PATCH 11/63] Add internal packages error and reconcile - internal/error - Contains internal error type used across the source-controller reconcilers. - internal/reconcile - Contains helper abstractions for the controller-runtime reconcile Result type and functions to interact with the abstractions. 
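As a rough sketch of how these packages are intended to compose (not part of the diff below; reconcileExample and fetchSource are illustrative names only), a sub-reconciler returns an abstract result together with a typed error, and the caller converts both into the controller-runtime return values:

    package controllers

    import (
        "context"
        "errors"

        kuberecorder "k8s.io/client-go/tools/record"
        ctrl "sigs.k8s.io/controller-runtime"

        sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
        serror "github.com/fluxcd/source-controller/internal/error"
        "github.com/fluxcd/source-controller/internal/reconcile"
    )

    // reconcileExample converts the sub-reconciler's abstract result and typed
    // error into the values returned to controller-runtime. BuildRuntimeResult
    // also records an event for *serror.Event and *serror.Stalling values.
    func reconcileExample(ctx context.Context, recorder kuberecorder.EventRecorder, obj sourcev1.Source) (ctrl.Result, error) {
        res, err := fetchSource(ctx, obj)
        return reconcile.BuildRuntimeResult(ctx, recorder, obj, res, err)
    }

    // fetchSource is a placeholder sub-reconciler: a permanent misconfiguration
    // is surfaced as a Stalling error, so the object is not requeued forever.
    func fetchSource(_ context.Context, _ sourcev1.Source) (reconcile.Result, error) {
        return reconcile.ResultEmpty, &serror.Stalling{
            Reason: sourcev1.URLInvalidReason,
            Err:    errors.New("unsupported URL scheme"),
        }
    }
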
Signed-off-by: Sunny --- internal/error/error.go | 56 ++++++++++ internal/reconcile/reconcile.go | 148 +++++++++++++++++++++++++++ internal/reconcile/reconcile_test.go | 47 +++++++++ 3 files changed, 251 insertions(+) create mode 100644 internal/error/error.go create mode 100644 internal/reconcile/reconcile.go create mode 100644 internal/reconcile/reconcile_test.go diff --git a/internal/error/error.go b/internal/error/error.go new file mode 100644 index 000000000..df20ccc49 --- /dev/null +++ b/internal/error/error.go @@ -0,0 +1,56 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package error + +// Stalling is the reconciliation stalled state error. It contains an error +// and a reason for the stalled condition. +type Stalling struct { + // Reason is the stalled condition reason string. + Reason string + // Err is the error that caused stalling. This can be used as the message in + // stalled condition. + Err error +} + +// Error implements error interface. +func (se *Stalling) Error() string { + return se.Err.Error() +} + +// Unwrap returns the underlying error. +func (se *Stalling) Unwrap() error { + return se.Err +} + +// Event is an error event. It can be used to construct an event to be +// recorded. +type Event struct { + // Reason is the reason for the event error. + Reason string + // Error is the actual error for the event. + Err error +} + +// Error implements error interface. +func (ee *Event) Error() string { + return ee.Err.Error() +} + +// Unwrap returns the underlying error. +func (ee *Event) Unwrap() error { + return ee.Err +} diff --git a/internal/reconcile/reconcile.go b/internal/reconcile/reconcile.go new file mode 100644 index 000000000..2da1f8096 --- /dev/null +++ b/internal/reconcile/reconcile.go @@ -0,0 +1,148 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + kuberecorder "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" +) + +// Result is a type for creating an abstraction for the controller-runtime +// reconcile Result to simplify the Result values. +type Result int + +const ( + // ResultEmpty indicates a reconcile result which does not requeue. 
+ ResultEmpty Result = iota + // ResultRequeue indicates a reconcile result which should immediately + // requeue. + ResultRequeue + // ResultSuccess indicates a reconcile result which should be + // requeued on the interval as defined on the reconciled object. + ResultSuccess +) + +// BuildRuntimeResult converts a given Result and error into the +// return values of a controller's Reconcile function. +func BuildRuntimeResult(ctx context.Context, recorder kuberecorder.EventRecorder, obj sourcev1.Source, rr Result, err error) (ctrl.Result, error) { + // NOTE: The return values can be modified based on the error type. + // For example, if an error signifies a short requeue period that's + // not equal to the requeue period of the object, the error can be checked + // and an appropriate result with the period can be returned. + // + // Example: + // if e, ok := err.(*waitError); ok { + // return ctrl.Result{RequeueAfter: e.RequeueAfter}, err + // } + + // Log and record event based on the error. + switch e := err.(type) { + case *serror.Event: + recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error()) + case *serror.Stalling: + // Stalling errors are not returned to the runtime. Log it explicitly. + ctrl.LoggerFrom(ctx).Error(e, "reconciliation stalled") + recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error()) + } + + switch rr { + case ResultRequeue: + return ctrl.Result{Requeue: true}, err + case ResultSuccess: + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, err + default: + return ctrl.Result{}, err + } +} + +// ComputeReconcileResult analyzes the reconcile results (result + error), +// updates the status conditions of the object with any corrections and returns +// result patch configuration and any error to the caller. The caller is +// responsible for using the patch option to patch the object in the API server. +func ComputeReconcileResult(obj conditions.Setter, res Result, recErr error, ownedConditions []string) ([]patch.Option, error) { + // Remove reconciling condition on successful reconciliation. + if recErr == nil && res == ResultSuccess { + conditions.Delete(obj, meta.ReconcilingCondition) + } + + // Patch the object, ignoring conflicts on the conditions owned by this controller. + pOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: ownedConditions, + }, + } + + // Analyze the reconcile error. + switch t := recErr.(type) { + case *serror.Stalling: + if res == ResultEmpty { + // The current generation has been reconciled successfully and it + // has resulted in a stalled state. Return no error to stop further + // requeuing. + pOpts = append(pOpts, patch.WithStatusObservedGeneration{}) + conditions.MarkStalled(obj, t.Reason, t.Error()) + return pOpts, nil + } + // NOTE: Non-empty result with stalling error indicates that the + // returned result is incorrect. + case nil: + // The reconcile didn't result in any error, we are not in stalled + // state. If a requeue is requested, the current generation has not been + // reconciled successfully. + if res != ResultRequeue { + pOpts = append(pOpts, patch.WithStatusObservedGeneration{}) + } + conditions.Delete(obj, meta.StalledCondition) + default: + // The reconcile resulted in some error, but we are not in stalled + // state. + conditions.Delete(obj, meta.StalledCondition) + } + + return pOpts, recErr +} + +// LowestRequeuingResult returns the ReconcileResult with the lowest requeue +// period. 
+// Weightage: +// ResultRequeue - immediate requeue (lowest) +// ResultSuccess - requeue at an interval +// ResultEmpty - no requeue +func LowestRequeuingResult(i, j Result) Result { + switch { + case i == ResultEmpty: + return j + case j == ResultEmpty: + return i + case i == ResultRequeue: + return i + case j == ResultRequeue: + return j + default: + return j + } +} diff --git a/internal/reconcile/reconcile_test.go b/internal/reconcile/reconcile_test.go new file mode 100644 index 000000000..bb0cf4c44 --- /dev/null +++ b/internal/reconcile/reconcile_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile + +import ( + "testing" + + . "github.com/onsi/gomega" +) + +func TestLowestRequeuingResult(t *testing.T) { + tests := []struct { + name string + i Result + j Result + wantResult Result + }{ + {"bail,requeue", ResultEmpty, ResultRequeue, ResultRequeue}, + {"bail,requeueInterval", ResultEmpty, ResultSuccess, ResultSuccess}, + {"requeue,bail", ResultRequeue, ResultEmpty, ResultRequeue}, + {"requeue,requeueInterval", ResultRequeue, ResultSuccess, ResultRequeue}, + {"requeueInterval,requeue", ResultSuccess, ResultRequeue, ResultRequeue}, + {"requeueInterval,requeueInterval", ResultSuccess, ResultSuccess, ResultSuccess}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + g.Expect(LowestRequeuingResult(tt.i, tt.j)).To(Equal(tt.wantResult)) + }) + } +} From 5ab2f6219b687ba8e43f7df0bbb974cc0c9f66b3 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Fri, 21 Jan 2022 23:37:55 +0100 Subject: [PATCH 12/63] internal/util: introduce temp dir/path helpers In most of the reconcilers we have a repetative pattern of using part of the object metadata to construct a temporary file path. This commit introduces helpers as an abstraction, for both the creation of a temporary directory based on `client.Object` type and object metadata, and the generation of an arbitrary random temporary path string. Signed-off-by: Hidde Beydals --- internal/util/temp.go | 52 +++++++++++++++++++++++ internal/util/temp_test.go | 85 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 137 insertions(+) create mode 100644 internal/util/temp.go create mode 100644 internal/util/temp_test.go diff --git a/internal/util/temp.go b/internal/util/temp.go new file mode 100644 index 000000000..054b12801 --- /dev/null +++ b/internal/util/temp.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "os" + "path/filepath" + "strings" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// TempDirForObj creates a new temporary directory in the directory dir +// in the format of 'Kind-Namespace-Name-*', and returns the +// pathname of the new directory. +func TempDirForObj(dir string, obj client.Object) (string, error) { + return os.MkdirTemp(dir, pattern(obj)) +} + +// TempPathForObj creates a temporary file path in the format of +// '/Kind-Namespace-Name-'. +// If the given dir is empty, os.TempDir is used as a default. +func TempPathForObj(dir, suffix string, obj client.Object) string { + if dir == "" { + dir = os.TempDir() + } + randBytes := make([]byte, 16) + rand.Read(randBytes) + return filepath.Join(dir, pattern(obj)+hex.EncodeToString(randBytes)+suffix) +} + +func pattern(obj client.Object) (p string) { + kind := strings.ToLower(obj.GetObjectKind().GroupVersionKind().Kind) + return fmt.Sprintf("%s-%s-%s-", kind, obj.GetNamespace(), obj.GetName()) +} diff --git a/internal/util/temp_test.go b/internal/util/temp_test.go new file mode 100644 index 000000000..7db873e2d --- /dev/null +++ b/internal/util/temp_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "os" + "testing" + + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestTempDirForObj(t *testing.T) { + g := NewWithT(t) + + got, err := TempDirForObj("", mockObj()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(BeADirectory()) + defer os.RemoveAll(got) + + got2, err := TempDirForObj(got, mockObj()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got2).To(BeADirectory()) + defer os.RemoveAll(got2) + g.Expect(got2).To(ContainSubstring(got)) +} + +func TestTempPathForObj(t *testing.T) { + tests := []struct { + name string + dir string + suffix string + want string + }{ + { + name: "default", + want: os.TempDir() + "/secret-default-foo-", + }, + { + name: "with directory", + dir: "/foo", + want: "/foo/secret-default-foo-", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + got := TempPathForObj(tt.dir, tt.suffix, mockObj()) + g.Expect(got[:len(got)-32]).To(Equal(tt.want)) + }) + } +} + +func Test_pattern(t *testing.T) { + g := NewWithT(t) + g.Expect(pattern(mockObj())).To(Equal("secret-default-foo-")) +} + +func mockObj() client.Object { + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + } +} From 31d2e6d65ce1ec6609da31ceb13340bfb4c4ed18 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Fri, 30 Jul 2021 14:19:39 +0200 Subject: [PATCH 13/63] Rewrite `GitRepositoryReconciler` to new standards This commit rewrites the `GitRepositoryReconciler` to new standards, while implementing the newly introduced Condition types, and trying to adhere better to Kubernetes API conventions. More specifically it introduces: - Implementation of more explicit Condition types to highlight abnormalities. - Extensive usage of the `conditions` subpackage from `runtime`. - Better and more conflict-resilient (status)patching of reconciled objects using the `patch` subpackage from runtime. - Proper implementation of kstatus' `Reconciling` and `Stalled` conditions. - First (integration) tests that solely rely on `testenv` and do not use Ginkgo. There are a couple of TODOs marked in-code, these are suggestions for the future and should be non-blocking. In addition to the TODOs, more complex and/or edge-case test scenarios may be added as well. 
Signed-off-by: Hidde Beydals --- api/v1beta2/gitrepository_types.go | 44 - controllers/gitrepository_controller.go | 708 ++++--- controllers/gitrepository_controller_test.go | 1692 ++++++++++------- controllers/legacy_suite_test.go | 9 +- controllers/suite_test.go | 16 +- .../testdata/git/repository/.sourceignore | 1 + controllers/testdata/git/repository/foo.txt | 0 .../testdata/git/repository/manifest.yaml | 5 + go.mod | 2 +- main.go | 9 +- 10 files changed, 1458 insertions(+), 1028 deletions(-) create mode 100644 controllers/testdata/git/repository/.sourceignore create mode 100644 controllers/testdata/git/repository/foo.txt create mode 100644 controllers/testdata/git/repository/manifest.yaml diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go index 70a74d5bf..010ddac12 100644 --- a/api/v1beta2/gitrepository_types.go +++ b/api/v1beta2/gitrepository_types.go @@ -19,12 +19,10 @@ package v1beta2 import ( "time" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -202,48 +200,6 @@ const ( GitOperationFailedReason string = "GitOperationFailed" ) -// GitRepositoryProgressing resets the conditions of the GitRepository to -// metav1.Condition of type meta.ReadyCondition with status 'Unknown' and -// meta.ProgressingReason reason and message. It returns the modified -// GitRepository. -func GitRepositoryProgressing(repository GitRepository) GitRepository { - repository.Status.ObservedGeneration = repository.Generation - repository.Status.URL = "" - repository.Status.Conditions = []metav1.Condition{} - conditions.MarkUnknown(&repository, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") - return repository -} - -// GitRepositoryReady sets the given Artifact and URL on the GitRepository and -// sets the meta.ReadyCondition to 'True', with the given reason and message. It -// returns the modified GitRepository. -func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArtifacts []*Artifact, url, reason, message string) GitRepository { - repository.Status.Artifact = &artifact - repository.Status.IncludedArtifacts = includedArtifacts - repository.Status.URL = url - conditions.MarkTrue(&repository, meta.ReadyCondition, reason, message) - return repository -} - -// GitRepositoryNotReady sets the meta.ReadyCondition on the given GitRepository -// to 'False', with the given reason and message. It returns the modified -// GitRepository. -func GitRepositoryNotReady(repository GitRepository, reason, message string) GitRepository { - conditions.MarkFalse(&repository, meta.ReadyCondition, reason, message) - return repository -} - -// GitRepositoryReadyMessage returns the message of the metav1.Condition of type -// meta.ReadyCondition with status 'True' if present, or an empty string. -func GitRepositoryReadyMessage(repository GitRepository) string { - if c := apimeta.FindStatusCondition(repository.Status.Conditions, meta.ReadyCondition); c != nil { - if c.Status == metav1.ConditionTrue { - return c.Message - } - } - return "" -} - // GetConditions returns the status conditions of the object. 
func (in GitRepository) GetConditions() []metav1.Condition { return in.Status.Conditions diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 976b24c0a..9b88bc017 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -20,18 +20,15 @@ import ( "context" "fmt" "os" - "path/filepath" "strings" "time" securejoin "github.com/cyphar/filepath-securejoin" corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -40,14 +37,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" + "github.com/fluxcd/source-controller/pkg/sourceignore" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/pkg/git" "github.com/fluxcd/source-controller/pkg/git/strategy" - "github.com/fluxcd/source-controller/pkg/sourceignore" ) // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories,verbs=get;list;watch;create;update;patch;delete @@ -58,12 +57,12 @@ import ( // GitRepositoryReconciler reconciles a GitRepository object type GitRepositoryReconciler struct { client.Client - requeueDependency time.Duration - Scheme *runtime.Scheme - Storage *Storage - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder + kuberecorder.EventRecorder + helper.Metrics + + Storage *Storage + + requeueDependency time.Duration } type GitRepositoryReconcilerOptions struct { @@ -86,398 +85,503 @@ func (r *GitRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, o Complete(r) } -func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := ctrl.LoggerFrom(ctx) - var repository sourcev1.GitRepository - if err := r.Get(ctx, req.NamespacedName, &repository); err != nil { + // Fetch the GitRepository + obj := &sourcev1.GitRepository{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } // Record suspended status metric - defer r.recordSuspension(ctx, repository) - - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&repository, sourcev1.SourceFinalizer) { - patch := client.MergeFrom(repository.DeepCopy()) - controllerutil.AddFinalizer(&repository, sourcev1.SourceFinalizer) - if err := r.Patch(ctx, &repository, patch); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } - - // Examine if the object is under deletion - if !repository.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, repository) - } + r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - // Return early if the object is suspended. 
- if repository.Spec.Suspend { + // Return early if the object is suspended + if obj.Spec.Suspend { log.Info("Reconciliation is suspended for this object") return ctrl.Result{}, nil } - // check dependencies - if len(repository.Spec.Include) > 0 { - if err := r.checkDependencies(repository); err != nil { - repository = sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()) - if err := r.updateStatus(ctx, req, repository.Status); err != nil { - log.Error(err, "unable to update status for dependency not ready") - return ctrl.Result{Requeue: true}, err - } - // we can't rely on exponential backoff because it will prolong the execution too much, - // instead we requeue on a fix interval. - msg := fmt.Sprintf("Dependencies do not meet ready condition, retrying in %s", r.requeueDependency.String()) - log.Info(msg) - r.event(ctx, repository, events.EventSeverityInfo, msg) - r.recordReadiness(ctx, repository) - return ctrl.Result{RequeueAfter: r.requeueDependency}, nil - } - log.Info("All dependencies area ready, proceeding with reconciliation") + // Initialize the patch helper + patchHelper, err := patch.NewHelper(obj, r.Client) + if err != nil { + return ctrl.Result{}, err } - // record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - return ctrl.Result{}, err + // Always attempt to patch the object and status after each reconciliation + defer func() { + // Record the value of the reconciliation request, if any + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + obj.Status.SetLastHandledReconcileRequest(v) } - defer r.MetricsRecorder.RecordDuration(*objRef, start) - } - // set initial status - if resetRepository, ok := r.resetStatus(repository); ok { - repository = resetRepository - if err := r.updateStatus(ctx, req, repository.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err + // Summarize the Ready condition based on abnormalities that may have been observed. + conditions.SetSummary(obj, + meta.ReadyCondition, + conditions.WithConditions( + sourcev1.IncludeUnavailableCondition, + sourcev1.SourceVerifiedCondition, + sourcev1.CheckoutFailedCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactUnavailableCondition, + ), + conditions.WithNegativePolarityConditions( + sourcev1.ArtifactUnavailableCondition, + sourcev1.CheckoutFailedCondition, + sourcev1.SourceVerifiedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.ArtifactOutdatedCondition, + ), + ) + + // Patch the object, ignoring conflicts on the conditions owned by this controller + patchOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: []string{ + sourcev1.ArtifactUnavailableCondition, + sourcev1.CheckoutFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.ArtifactOutdatedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + }, } - r.recordReadiness(ctx, repository) - } - - // record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. 
- if v, ok := meta.ReconcileAnnotationValue(repository.GetAnnotations()); ok { - repository.Status.SetLastHandledReconcileRequest(v) - } - // purge old artifacts from storage - if err := r.gc(repository); err != nil { - log.Error(err, "unable to purge old artifacts") - } + // Determine if the resource is still being reconciled, or if it has stalled, and record this observation + if retErr == nil && (result.IsZero() || !result.Requeue) { + // We are no longer reconciling + conditions.Delete(obj, meta.ReconcilingCondition) + + // We have now observed this generation + patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) + + readyCondition := conditions.Get(obj, meta.ReadyCondition) + switch readyCondition.Status { + case metav1.ConditionFalse: + // As we are no longer reconciling and the end-state is not ready, the reconciliation has stalled + conditions.MarkStalled(obj, readyCondition.Reason, readyCondition.Message) + case metav1.ConditionTrue: + // As we are no longer reconciling and the end-state is ready, the reconciliation is no longer stalled + conditions.Delete(obj, meta.StalledCondition) + } + } - // reconcile repository by pulling the latest Git commit - reconciledRepository, reconcileErr := r.reconcile(ctx, *repository.DeepCopy()) + // Finally, patch the resource + if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + retErr = kerrors.NewAggregate([]error{retErr, err}) + } - // update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledRepository.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } + // Always record readiness and duration metrics + r.Metrics.RecordReadiness(ctx, obj) + r.Metrics.RecordDuration(ctx, obj, start) + }() - // if reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledRepository, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledRepository) - return ctrl.Result{Requeue: true}, reconcileErr + // Add finalizer first if not exist to avoid the race condition + // between init and delete + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + return ctrl.Result{Requeue: true}, nil } - // emit revision change event - if repository.Status.Artifact == nil || reconciledRepository.Status.Artifact.Revision != repository.Status.Artifact.Revision { - r.event(ctx, reconciledRepository, events.EventSeverityInfo, sourcev1.GitRepositoryReadyMessage(reconciledRepository)) + // Examine if the object is under deletion + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, obj) } - r.recordReadiness(ctx, reconciledRepository) - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Since(start).String(), - repository.GetInterval().Duration.String(), - )) - - return ctrl.Result{RequeueAfter: repository.GetInterval().Duration}, nil + // Reconcile actual object + return r.reconcile(ctx, obj) } -func (r *GitRepositoryReconciler) checkDependencies(repository sourcev1.GitRepository) error { - for _, d := range repository.Spec.Include { - dName := types.NamespacedName{Name: d.GitRepositoryRef.Name, Namespace: repository.Namespace} - var gr sourcev1.GitRepository - err := r.Get(context.Background(), dName, &gr) - if err != nil { - return fmt.Errorf("unable to get '%s' dependency: %w", dName, err) - } +// reconcile steps through the actual 
reconciliation tasks for the object, it returns early on the first step that +// produces an error. +func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.GitRepository) (ctrl.Result, error) { + // Mark the resource as under reconciliation + conditions.MarkReconciling(obj, meta.ProgressingReason, "") - if len(gr.Status.Conditions) == 0 || gr.Generation != gr.Status.ObservedGeneration { - return fmt.Errorf("dependency '%s' is not ready", dName) - } + // Reconcile the storage data + if result, err := r.reconcileStorage(ctx, obj); err != nil || result.IsZero() { + return result, err + } - if !apimeta.IsStatusConditionTrue(gr.Status.Conditions, meta.ReadyCondition) { - return fmt.Errorf("dependency '%s' is not ready", dName) - } + // Create temp dir for Git clone + tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name)) + if err != nil { + r.Eventf(obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, "Failed to create temporary directory: %s", err) + return ctrl.Result{}, err } + defer os.RemoveAll(tmpDir) - return nil -} + // Reconcile the source from upstream + var artifact sourcev1.Artifact + if result, err := r.reconcileSource(ctx, obj, &artifact, tmpDir); err != nil || result.IsZero() { + return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, err + } -func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sourcev1.GitRepository) (sourcev1.GitRepository, error) { - log := ctrl.LoggerFrom(ctx) + // Reconcile includes from the storage + var includes artifactSet + if result, err := r.reconcileInclude(ctx, obj, includes, tmpDir); err != nil || result.IsZero() { + return ctrl.Result{RequeueAfter: r.requeueDependency}, err + } - // create tmp dir for the Git clone - tmpGit, err := os.MkdirTemp("", repository.Name) - if err != nil { - err = fmt.Errorf("tmp dir error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err + // Reconcile the artifact to storage + if result, err := r.reconcileArtifact(ctx, obj, artifact, includes, tmpDir); err != nil || result.IsZero() { + return result, err } - defer func() { - if err := os.RemoveAll(tmpGit); err != nil { - log.Error(err, "failed to remove working directory", "path", tmpGit) - } - }() - // Configure auth options using secret - var authOpts *git.AuthOptions - if repository.Spec.SecretRef != nil { + return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil +} + +// reconcileStorage ensures the current state of the storage matches the desired and previously observed state. +// +// All artifacts for the resource except for the current one are garbage collected from the storage. +// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. +// If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. +// If the hostname of any of the URLs on the object do not match the current storage server hostname, they are updated. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.GitRepository) (ctrl.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) + + // Determine if the advertised artifact is still in storage + if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { + obj.Status.Artifact = nil + obj.Status.URL = "" + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage") + return ctrl.Result{Requeue: true}, nil + } + conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) + + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) + + return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil +} + +// reconcileSource ensures the upstream Git repository can be reached and checked out using the declared configuration, +// and observes its state. +// +// The repository is checked out to the given dir using the defined configuration, and in case of an error during the +// checkout process (including transient errors), it records v1beta1.CheckoutFailedCondition=True and returns early. +// On a successful checkout it removes v1beta1.CheckoutFailedCondition, and compares the current revision of HEAD to the +// artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. +// If instructed, the signature of the commit is verified if and recorded as v1beta1.SourceVerifiedCondition. If the +// signature can not be verified or the verification fails, the Condition=False and it returns early. +// If both the checkout and signature verification are successful, the given artifact pointer is set to a new artifact +// with the available metadata. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, + obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { + // Configure authentication strategy to access the source + authOpts := &git.AuthOptions{} + if obj.Spec.SecretRef != nil { + // Attempt to retrieve secret name := types.NamespacedName{ - Namespace: repository.GetNamespace(), - Name: repository.Spec.SecretRef.Name, + Namespace: obj.GetNamespace(), + Name: obj.Spec.SecretRef.Name, } - - secret := &corev1.Secret{} - err = r.Client.Get(ctx, name, secret) - if err != nil { - err = fmt.Errorf("auth secret error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.AuthenticationFailedReason, err.Error()), err + var secret corev1.Secret + if err := r.Client.Get(ctx, name, &secret); err != nil { + conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, sourcev1.AuthenticationFailedReason, + "Failed to get secret '%s': %s", name.String(), err.Error()) + r.Eventf(obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, + "Failed to get secret '%s': %s", name.String(), err.Error()) + // Return error as the world as observed may change + return ctrl.Result{}, err } - authOpts, err = git.AuthOptionsFromSecret(repository.Spec.URL, secret) + // Configure strategy with secret + var err error + authOpts, err = git.AuthOptionsFromSecret(obj.Spec.URL, &secret) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, sourcev1.AuthenticationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, sourcev1.AuthenticationFailedReason, + "Failed to configure auth strategy for Git implementation %q: %s", obj.Spec.GitImplementation, err) + r.Eventf(obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, + "Failed to configure auth strategy for Git implementation %q: %s", obj.Spec.GitImplementation, err) + // Return error as the contents of the secret may change + return ctrl.Result{}, err } } - checkoutOpts := git.CheckoutOptions{RecurseSubmodules: repository.Spec.RecurseSubmodules} - if ref := repository.Spec.Reference; ref != nil { + + // Configure checkout strategy + checkoutOpts := git.CheckoutOptions{RecurseSubmodules: obj.Spec.RecurseSubmodules} + if ref := obj.Spec.Reference; ref != nil { checkoutOpts.Branch = ref.Branch checkoutOpts.Commit = ref.Commit checkoutOpts.Tag = ref.Tag checkoutOpts.SemVer = ref.SemVer } checkoutStrategy, err := strategy.CheckoutStrategyForImplementation(ctx, - git.Implementation(repository.Spec.GitImplementation), checkoutOpts) + git.Implementation(obj.Spec.GitImplementation), checkoutOpts) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, sourcev1.GitOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, sourcev1.GitOperationFailedReason, + "Failed to configure checkout strategy for Git implementation %q: %s", obj.Spec.GitImplementation, err) + // Do not return err as recovery without changes is impossible + return ctrl.Result{}, nil } - gitCtx, cancel := context.WithTimeout(ctx, repository.Spec.Timeout.Duration) + // Checkout HEAD of reference in object + gitCtx, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() - - commit, err := checkoutStrategy.Checkout(gitCtx, tmpGit, repository.Spec.URL, authOpts) + commit, err := checkoutStrategy.Checkout(gitCtx, dir, obj.Spec.URL, authOpts) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, 
sourcev1.GitOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, sourcev1.GitOperationFailedReason, + "Failed to checkout and determine revision: %s", err) + r.Eventf(obj, events.EventSeverityError, sourcev1.GitOperationFailedReason, + "Failed to checkout and determine revision: %s", err) + // Coin flip on transient or persistent error, return error and hope for the best + return ctrl.Result{}, err } - artifact := r.Storage.NewArtifactFor(repository.Kind, repository.GetObjectMeta(), commit.String(), fmt.Sprintf("%s.tar.gz", commit.Hash.String())) + r.Eventf(obj, events.EventSeverityInfo, sourcev1.GitOperationSucceedReason, + "Cloned repository '%s' and checked out revision '%s'", obj.Spec.URL, commit.String()) + conditions.Delete(obj, sourcev1.CheckoutFailedCondition) - // copy all included repository into the artifact - includedArtifacts := []*sourcev1.Artifact{} - for _, incl := range repository.Spec.Include { - dName := types.NamespacedName{Name: incl.GitRepositoryRef.Name, Namespace: repository.Namespace} - var gr sourcev1.GitRepository - err := r.Get(context.Background(), dName, &gr) - if err != nil { - return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err - } - includedArtifacts = append(includedArtifacts, gr.GetArtifact()) + // Verify commit signature + if result, err := r.verifyCommitSignature(ctx, obj, *commit); err != nil || result.IsZero() { + return result, err } - // return early on unchanged revision and unchanged included repositories - if apimeta.IsStatusConditionTrue(repository.Status.Conditions, meta.ReadyCondition) && repository.GetArtifact().HasRevision(artifact.Revision) && !hasArtifactUpdated(repository.Status.IncludedArtifacts, includedArtifacts) { - if artifact.URL != repository.GetArtifact().URL { - r.Storage.SetArtifactURL(repository.GetArtifact()) - repository.Status.URL = r.Storage.SetHostname(repository.Status.URL) - } - return repository, nil + // Create potential new artifact with current available metadata + *artifact = r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), commit.String(), fmt.Sprintf("%s.tar.gz", commit.Hash.String())) + + // Mark observations about the revision on the object + if !obj.GetArtifact().HasRevision(commit.String()) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '%s'", commit.String()) } + return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil +} - // verify PGP signature - if repository.Spec.Verification != nil { - publicKeySecret := types.NamespacedName{ - Namespace: repository.Namespace, - Name: repository.Spec.Verification.SecretRef.Name, +// reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match the +// given data. +// +// The inspection of the given data to the object is differed, ensuring any stale observations as +// v1beta1.ArtifactUnavailableCondition and v1beta1.ArtifactOutdatedCondition are always deleted. +// If the given artifact and/or includes do not differ from the object's current, it returns early. +// Source ignore patterns are loaded, and the given directory is archived. +// On a successful archive, the artifact and includes in the status of the given object are set, and the symlink in the +// storage is updated to its path. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.GitRepository, artifact sourcev1.Artifact, includes artifactSet, dir string) (ctrl.Result, error) { + // Always restore the Ready condition in case it got removed due to a transient error + defer func() { + if obj.GetArtifact() != nil { + conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) } - secret := &corev1.Secret{} - if err := r.Client.Get(ctx, publicKeySecret, secret); err != nil { - err = fmt.Errorf("PGP public keys secret error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.VerificationFailedReason, err.Error()), err + if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, + "Stored artifact for revision '%s'", artifact.Revision) } + }() - var keyRings []string - for _, v := range secret.Data { - keyRings = append(keyRings, string(v)) - } - if _, err = commit.Verify(keyRings...); err != nil { - return sourcev1.GitRepositoryNotReady(repository, sourcev1.VerificationFailedReason, err.Error()), err - } + // The artifact is up-to-date + if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) { + ctrl.LoggerFrom(ctx).Info("Artifact is up-to-date") + return ctrl.Result{RequeueAfter: obj.GetInterval().Duration}, nil } - // create artifact dir - err = r.Storage.MkdirAll(artifact) - if err != nil { - err = fmt.Errorf("mkdir dir error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err + // Ensure target path exists and is a directory + if f, err := os.Stat(dir); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to stat source path") + return ctrl.Result{}, err + } else if !f.IsDir() { + ctrl.LoggerFrom(ctx).Error(err, fmt.Sprintf("source path '%s' is not a directory", dir)) + return ctrl.Result{}, err } - for i, incl := range repository.Spec.Include { - toPath, err := securejoin.SecureJoin(tmpGit, incl.GetToPath()) - if err != nil { - return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err - } - err = r.Storage.CopyToPath(includedArtifacts[i], incl.GetFromPath(), toPath) - if err != nil { - return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err - } + // Ensure artifact directory exists and acquire lock + if err := r.Storage.MkdirAll(artifact); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to create artifact directory") + return ctrl.Result{}, err } - - // acquire lock unlock, err := r.Storage.Lock(artifact) if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err + ctrl.LoggerFrom(ctx).Error(err, "failed to acquire lock for artifact") + return ctrl.Result{}, err } defer unlock() - // archive artifact and check integrity - ignoreDomain := strings.Split(tmpGit, string(filepath.Separator)) - ps, err := sourceignore.LoadIgnorePatterns(tmpGit, ignoreDomain) + // Load ignore rules for archiving + ps, err := sourceignore.LoadIgnorePatterns(dir, nil) if err != nil { - err = fmt.Errorf(".sourceignore error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err + r.Eventf(obj, events.EventSeverityError, + "SourceIgnoreError", 
"Failed to load source ignore patterns from repository: %s", err) + return ctrl.Result{}, err } - if repository.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*repository.Spec.Ignore), ignoreDomain)...) + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) } - if err := r.Storage.Archive(&artifact, tmpGit, SourceIgnoreFilter(ps, ignoreDomain)); err != nil { - err = fmt.Errorf("storage archive error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err + + // Archive directory to storage + if err := r.Storage.Archive(&artifact, dir, SourceIgnoreFilter(ps, nil)); err != nil { + r.Eventf(obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, + "Unable to archive artifact to storage: %s", err) + return ctrl.Result{}, err } + r.AnnotatedEventf(obj, map[string]string{ + "revision": artifact.Revision, + "checksum": artifact.Checksum, + }, events.EventSeverityInfo, "NewArtifact", "Stored artifact for revision '%s'", artifact.Revision) + + // Record it on the object + obj.Status.Artifact = artifact.DeepCopy() + obj.Status.IncludedArtifacts = includes - // update latest symlink + // Update symlink on a "best effort" basis url, err := r.Storage.Symlink(artifact, "latest.tar.gz") if err != nil { - err = fmt.Errorf("storage symlink error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err + r.Eventf(obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, + "Failed to update status URL symlink: %s", err) } - - message := fmt.Sprintf("Fetched revision: %s", artifact.Revision) - return sourcev1.GitRepositoryReady(repository, artifact, includedArtifacts, url, sourcev1.GitOperationSucceedReason, message), nil + if url != "" { + obj.Status.URL = url + } + return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil } -func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, repository sourcev1.GitRepository) (ctrl.Result, error) { - if err := r.gc(repository); err != nil { - r.event(ctx, repository, events.EventSeverityError, - fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) - // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err - } +// reconcileInclude reconciles the declared includes from the object by copying their artifact (sub)contents to the +// declared paths in the given directory. +// +// If an include is unavailable, it marks the object with v1beta1.IncludeUnavailableCondition and returns early. +// If the copy operations are successful, it deletes the v1beta1.IncludeUnavailableCondition from the object. +// If the artifactSet differs from the current set, it marks the object with v1beta1.ArtifactOutdatedCondition. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sourcev1.GitRepository, artifacts artifactSet, dir string) (ctrl.Result, error) { + artifacts = make(artifactSet, len(obj.Spec.Include)) + for i, incl := range obj.Spec.Include { + // Do this first as it is much cheaper than copy operations + toPath, err := securejoin.SecureJoin(dir, incl.GetToPath()) + if err != nil { + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "IllegalPath", + "Path calculation for include %q failed: %s", incl.GitRepositoryRef.Name, err.Error()) + return ctrl.Result{}, err + } - // Record deleted status - r.recordReadiness(ctx, repository) + // Retrieve the included GitRepository + dep := &sourcev1.GitRepository{} + if err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: incl.GitRepositoryRef.Name}, dep); err != nil { + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NotFound", + "Could not get resource for include %q: %s", incl.GitRepositoryRef.Name, err.Error()) + return ctrl.Result{}, err + } - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&repository, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &repository); err != nil { - return ctrl.Result{}, err + // Confirm include has an artifact + if dep.GetArtifact() == nil { + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", + "No artifact available for include %q", incl.GitRepositoryRef.Name) + return ctrl.Result{}, nil + } + + // Copy artifact (sub)contents to configured directory + if err := r.Storage.CopyToPath(dep.GetArtifact(), incl.GetFromPath(), toPath); err != nil { + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "CopyFailure", + "Failed to copy %q include from %s to %s: %s", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err.Error()) + r.Eventf(obj, events.EventSeverityError, sourcev1.IncludeUnavailableCondition, + "Failed to copy %q include from %s to %s: %s", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err.Error()) + return ctrl.Result{}, err + } + artifacts[i] = dep.GetArtifact().DeepCopy() } - // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil -} + // We now know all includes are available + conditions.Delete(obj, sourcev1.IncludeUnavailableCondition) -// resetStatus returns a modified v1beta1.GitRepository and a boolean indicating -// if the status field has been reset. -func (r *GitRepositoryReconciler) resetStatus(repository sourcev1.GitRepository) (sourcev1.GitRepository, bool) { - // We do not have an artifact, or it does no longer exist - if repository.GetArtifact() == nil || !r.Storage.ArtifactExist(*repository.GetArtifact()) { - repository = sourcev1.GitRepositoryProgressing(repository) - repository.Status.Artifact = nil - return repository, true - } - if repository.Generation != repository.Status.ObservedGeneration { - return sourcev1.GitRepositoryProgressing(repository), true + // Observe if the artifacts still match the previous included ones + if artifacts.Diff(obj.Status.IncludedArtifacts) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", "Included artifacts differ from last observed includes") } - return repository, false + return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil } -// gc performs a garbage collection for the given v1beta1.GitRepository. 
-// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. -func (r *GitRepositoryReconciler) gc(repository sourcev1.GitRepository) error { - if !repository.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(repository.Kind, repository.GetObjectMeta(), "", "*")) - } - if repository.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*repository.GetArtifact()) +// reconcileDelete handles the delete of an object. It first garbage collects all artifacts for the object from the +// artifact storage, if successful, the finalizer is removed from the object. +func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.GitRepository) (ctrl.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return ctrl.Result{}, err } - return nil -} -// event emits a Kubernetes event and forwards the event to notification controller if configured -func (r *GitRepositoryReconciler) event(ctx context.Context, repository sourcev1.GitRepository, severity, msg string) { - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) - } - if r.ExternalEventRecorder != nil { - r.ExternalEventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) - } + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + + // Stop reconciliation as the object is being deleted + return ctrl.Result{}, nil } -func (r *GitRepositoryReconciler) recordReadiness(ctx context.Context, repository sourcev1.GitRepository) { - log := ctrl.LoggerFrom(ctx) - if r.MetricsRecorder == nil { - return +// verifyCommitSignature verifies the signature of the given commit if a verification mode is configured on the object. 
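// Illustration (not part of this patch): the function below expects the object to
// declare a verification mode and to reference a Secret whose data values are
// ASCII-armored PGP keyrings. The names "pgp-public-keys" and "author.asc" are
// made up for this example; the field names match those used in the code and in
// the tests further down.
func exampleVerificationSetup(obj *sourcev1.GitRepository) corev1.Secret {
	obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
		Mode:      "head",
		SecretRef: meta.LocalObjectReference{Name: "pgp-public-keys"},
	}
	return corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "pgp-public-keys", Namespace: obj.Namespace},
		Data: map[string][]byte{
			// Every value is appended to the keyring list passed to commit.Verify.
			"author.asc": []byte("-----BEGIN PGP PUBLIC KEY BLOCK-----\n...\n-----END PGP PUBLIC KEY BLOCK-----"),
		},
	}
}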
+func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj *sourcev1.GitRepository, commit git.Commit) (ctrl.Result, error) {
+	// Check if commit verification is configured, and remove any old observations if it is not
+	if obj.Spec.Verification == nil || obj.Spec.Verification.Mode == "" {
+		conditions.Delete(obj, sourcev1.SourceVerifiedCondition)
+		return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil
 	}
-	objRef, err := reference.GetReference(r.Scheme, &repository)
-	if err != nil {
-		log.Error(err, "unable to record readiness metric")
-		return
+
+	// Get secret with GPG data
+	publicKeySecret := types.NamespacedName{
+		Namespace: obj.Namespace,
+		Name:      obj.Spec.Verification.SecretRef.Name,
 	}
-	if rc := apimeta.FindStatusCondition(repository.Status.Conditions, meta.ReadyCondition); rc != nil {
-		r.MetricsRecorder.RecordCondition(*objRef, *rc, !repository.DeletionTimestamp.IsZero())
-	} else {
-		r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{
-			Type:   meta.ReadyCondition,
-			Status: metav1.ConditionUnknown,
-		}, !repository.DeletionTimestamp.IsZero())
+	secret := &corev1.Secret{}
+	if err := r.Client.Get(ctx, publicKeySecret, secret); err != nil {
+		conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, meta.FailedReason, "PGP public keys secret error: %s", err.Error())
+		r.Eventf(obj, events.EventSeverityError, "VerificationError", "PGP public keys secret error: %s", err.Error())
+		return ctrl.Result{}, err
 	}
-}
-
-func (r *GitRepositoryReconciler) recordSuspension(ctx context.Context, gitrepository sourcev1.GitRepository) {
-	if r.MetricsRecorder == nil {
-		return
+	var keyRings []string
+	for _, v := range secret.Data {
+		keyRings = append(keyRings, string(v))
 	}
-	log := ctrl.LoggerFrom(ctx)
-
-	objRef, err := reference.GetReference(r.Scheme, &gitrepository)
-	if err != nil {
-		log.Error(err, "unable to record suspended metric")
-		return
+	// Verify commit with GPG data from secret
+	if _, err := commit.Verify(keyRings...); err != nil {
+		conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, meta.FailedReason, "Signature verification of commit %q failed: %s", commit.Hash.String(), err)
+		r.Eventf(obj, events.EventSeverityError, "InvalidCommitSignature", "Signature verification of commit %q failed: %s", commit.Hash.String(), err)
+		// Return error in the hope the secret changes
+		return ctrl.Result{}, err
 	}
-	if !gitrepository.DeletionTimestamp.IsZero() {
-		r.MetricsRecorder.RecordSuspend(*objRef, false)
-	} else {
-		r.MetricsRecorder.RecordSuspend(*objRef, gitrepository.Spec.Suspend)
-	}
+	conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "Verified signature of commit %q", commit.Hash.String())
+	r.Eventf(obj, events.EventSeverityInfo, "VerifiedCommit", "Verified signature of commit %q", commit.Hash.String())
+	return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil
 }

-func (r *GitRepositoryReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.GitRepositoryStatus) error {
-	var repository sourcev1.GitRepository
-	if err := r.Get(ctx, req.NamespacedName, &repository); err != nil {
-		return err
+// garbageCollect performs a garbage collection for the given v1beta1.GitRepository. It removes all but the current
+// artifact except for when the deletion timestamp is set, which will result in the removal of all artifacts for the
+// resource.
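// Illustration (not part of this patch): a condensed sketch of the two garbage
// collection modes described above, using only calls that appear in this patch.
// It illustrates intent and is not a replacement for the implementation below.
func gcModes(r *GitRepositoryReconciler, obj *sourcev1.GitRepository) error {
	if !obj.DeletionTimestamp.IsZero() {
		// Object is being deleted: the wildcard filename addresses everything
		// stored for the object, so all of its artifacts are removed.
		return r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*"))
	}
	if obj.GetArtifact() != nil {
		// Routine reconcile: keep the currently advertised artifact, prune the rest.
		return r.Storage.RemoveAllButCurrent(*obj.GetArtifact())
	}
	return nil
}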
+func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.GitRepository) error { + if !obj.DeletionTimestamp.IsZero() { + if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + r.Eventf(obj, events.EventSeverityError, "GarbageCollectionFailed", + "Garbage collection for deleted resource failed: %s", err) + return err + } + obj.Status.Artifact = nil + // TODO(hidde): we should only push this event if we actually garbage collected something + r.Eventf(obj, events.EventSeverityInfo, "GarbageCollectionSucceeded", + "Garbage collected artifacts for deleted resource") + return nil + } + if obj.GetArtifact() != nil { + if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + r.Eventf(obj, events.EventSeverityError, "GarbageCollectionFailed", "Garbage collection of old artifacts failed: %s", err) + return err + } + // TODO(hidde): we should only push this event if we actually garbage collected something + r.Eventf(obj, events.EventSeverityInfo, "GarbageCollectionSucceeded", "Garbage collected old artifacts") } - - patch := client.MergeFrom(repository.DeepCopy()) - repository.Status = newStatus - - return r.Status().Patch(ctx, &repository, patch) + return nil } diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index 15910248f..27f78a25c 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -17,754 +17,1116 @@ limitations under the License. package controllers import ( - "context" - "crypto/tls" "fmt" - "net/http" "net/url" "os" - "os/exec" - "path" "path/filepath" "strings" + "testing" "time" "github.com/go-git/go-billy/v5/memfs" - "github.com/go-git/go-git/v5" + gogit "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/plumbing/transport/client" - httptransport "github.com/go-git/go-git/v5/plumbing/transport/http" "github.com/go-git/go-git/v5/storage/memory" - . "github.com/onsi/ginkgo" - - . "github.com/onsi/ginkgo/extensions/table" + "github.com/go-logr/logr" . 
"github.com/onsi/gomega" + sshtestdata "golang.org/x/crypto/ssh/testdata" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "k8s.io/utils/pointer" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/gittestserver" - "github.com/fluxcd/pkg/untar" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/ssh" + "github.com/fluxcd/pkg/testserver" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + "github.com/fluxcd/source-controller/pkg/git" ) -var _ = Describe("GitRepositoryReconciler", func() { +var ( + testGitImplementations = []string{sourcev1.GoGitImplementation, sourcev1.LibGit2Implementation} +) - const ( - timeout = time.Second * 30 - interval = time.Second * 1 - indexInterval = time.Second * 1 - ) +func TestGitRepositoryReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) - Context("GitRepository", func() { - var ( - namespace *corev1.Namespace - gitServer *gittestserver.GitServer - err error - ) + server, err := gittestserver.NewTempGitServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "git-repository-test" + randStringRunes(5)}, - } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") + repoPath := "/test.git" + _, err = initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) + g.Expect(err).NotTo(HaveOccurred()) + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "gitrepository-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: server.HTTPAddress() + repoPath, + }, + } + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for GitRepository to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) || obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) - cert := corev1.Secret{ + // Wait for GitRepository to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { + type options struct { + username string + password string + publicKey []byte + privateKey []byte + ca []byte + } + + tests := 
[]struct { + name string + skipForImplementation string + protocol string + server options + secret *corev1.Secret + beforeFunc func(obj *sourcev1.GitRepository) + want ctrl.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "HTTP without secretRef makes ArtifactOutdated=True", + protocol: "http", + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'master/'"), + }, + }, + { + name: "HTTP with Basic Auth secret makes ArtifactOutdated=True", + protocol: "http", + server: options{ + username: "git", + password: "1234", + }, + secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "cert", - Namespace: namespace.Name, + Name: "basic-auth", }, Data: map[string][]byte{ - "caFile": exampleCA, + "username": []byte("git"), + "password": []byte("1234"), }, - } - err = k8sClient.Create(context.Background(), &cert) - Expect(err).NotTo(HaveOccurred()) + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'master/'"), + }, + }, + { + name: "HTTPS with CAFile secret makes ArtifactOutdated=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "caFile": tlsCA, + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'master/'"), + }, + }, + { + name: "HTTPS with invalid CAFile secret makes CheckoutFailed=True and returns error", + skipForImplementation: sourcev1.LibGit2Implementation, + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-ca", + }, + Data: map[string][]byte{ + "caFile": []byte("invalid"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.CheckoutFailedCondition, sourcev1.GitOperationFailedReason, "x509: certificate signed by unknown authority"), + }, + }, + { + name: "HTTPS with invalid CAFile secret makes CheckoutFailed=True and returns error", + skipForImplementation: sourcev1.GoGitImplementation, + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-ca", + }, + Data: map[string][]byte{ + "caFile": []byte("invalid"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.CheckoutFailedCondition, sourcev1.GitOperationFailedReason, "Failed to checkout and determine revision: unable to clone '', error: Certificate"), + }, 
+ }, + { + name: "SSH with private key secret makes ArtifactOutdated=True", + protocol: "ssh", + server: options{ + username: "git", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "private-key", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "identity": sshtestdata.PEMBytes["rsa"], + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "private-key"} + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'master/'"), + }, + }, + { + name: "SSH with password protected private key secret makes ArtifactOutdated=True", + protocol: "ssh", + server: options{ + username: "git", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "private-key", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "identity": sshtestdata.PEMEncryptedKeys[2].PEMBytes, + "password": []byte("password"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "private-key"} + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'master/'"), + }, + }, + { + name: "Include get failure makes CheckoutFailed=True and returns error", + protocol: "http", + server: options{ + username: "git", + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"} + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.CheckoutFailedCondition, "AuthenticationFailed", "Failed to get secret '/non-existing': secrets \"non-existing\" not found"), + }, + }, + } + + for _, tt := range tests { + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "auth-strategy-", + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: interval}, + }, + } - gitServer, err = gittestserver.NewTempGitServer() - Expect(err).NotTo(HaveOccurred()) - gitServer.AutoCreate() - }) + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) - AfterEach(func() { - os.RemoveAll(gitServer.Root()) + server, err := gittestserver.NewTempGitServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() - err = k8sClient.Delete(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace") - }) + repoPath := "/test.git" + localRepo, err := initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) + g.Expect(err).NotTo(HaveOccurred()) - type refTestCase struct { - reference *sourcev1.GitRepositoryRef - createRefs []string + if len(tt.server.username+tt.server.password) > 0 { + server.Auth(tt.server.username, tt.server.password) + } - waitForReason string + secret := tt.secret.DeepCopy() + switch tt.protocol { + case "http": + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + obj.Spec.URL = server.HTTPAddress() + repoPath + case "https": + g.Expect(server.StartHTTPS(tt.server.publicKey, tt.server.privateKey, tt.server.ca, "example.com")).To(Succeed()) + obj.Spec.URL = server.HTTPAddress() + repoPath + case "ssh": + server.KeyDir(filepath.Join(server.Root(), "keys")) + + 
g.Expect(server.ListenSSH()).To(Succeed()) + obj.Spec.URL = server.SSHAddress() + repoPath + + go func() { + server.StartSSH() + }() + defer server.StopSSH() + + if secret != nil && len(secret.Data["known_hosts"]) == 0 { + u, err := url.Parse(obj.Spec.URL) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(u.Host).ToNot(BeEmpty()) + knownHosts, err := ssh.ScanHostKey(u.Host, timeout) + g.Expect(err).NotTo(HaveOccurred()) + secret.Data["known_hosts"] = knownHosts + } + default: + t.Fatalf("unsupported protocol %q", tt.protocol) + } - expectStatus metav1.ConditionStatus - expectMessage string - expectRevision string + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } - secretRef *meta.LocalObjectReference - gitImplementation string - } + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) + if secret != nil { + builder.WithObjects(secret.DeepCopy()) + } - DescribeTable("Git references tests", func(t refTestCase) { - err = gitServer.StartHTTP() - defer gitServer.StopHTTP() - Expect(err).NotTo(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := gitrepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - ff, _ := fs.Create("fixture") - _ = ff.Close() - _, err = wt.Add(fs.Join("fixture")) - Expect(err).NotTo(HaveOccurred()) - - commit, err := wt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - for _, ref := range t.createRefs { - hRef := plumbing.NewHashReference(plumbing.ReferenceName(ref), commit) - err = gitrepo.Storer.SetReference(hRef) - Expect(err).NotTo(HaveOccurred()) + r := &GitRepositoryReconciler{ + Client: builder.Build(), + Storage: testStorage, } - remote, err := gitrepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{u.String()}, - }) - Expect(err).NotTo(HaveOccurred()) + for _, i := range testGitImplementations { + t.Run(i, func(t *testing.T) { + g := NewWithT(t) + + if tt.skipForImplementation == i { + t.Skipf("Skipped for Git implementation %q", i) + } - err = remote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, - }) - Expect(err).NotTo(HaveOccurred()) + tmpDir, err := os.MkdirTemp("", "auth-strategy-") + g.Expect(err).To(BeNil()) + defer os.RemoveAll(tmpDir) - t.reference.Commit = strings.Replace(t.reference.Commit, "", commit.String(), 1) + obj := obj.DeepCopy() + obj.Spec.GitImplementation = i + + head, _ := localRepo.Head() + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", head.Hash().String()) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", obj.Spec.URL) + } - key := types.NamespacedName{ - Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)), - Namespace: namespace.Name, + var artifact sourcev1.Artifact + dlog := log.NewDelegatingLogSink(log.NullLogSink{}) + nullLogger := logr.New(dlog) + got, err := r.reconcileSource(logr.NewContext(ctx, nullLogger), obj, &artifact, tmpDir) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + 
g.Expect(artifact).ToNot(BeNil()) + }) } - created := &sourcev1.GitRepository{ + }) + } +} + +func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) { + g := NewWithT(t) + + branches := []string{"staging"} + tags := []string{"non-semver-tag", "v0.1.0", "0.2.0", "v0.2.1", "v1.0.0-alpha", "v1.1.0", "v2.0.0"} + + tests := []struct { + name string + reference *sourcev1.GitRepositoryRef + want ctrl.Result + wantErr bool + wantRevision string + }{ + { + name: "Nil reference (default branch)", + want: ctrl.Result{RequeueAfter: interval}, + wantRevision: "master/", + }, + { + name: "Branch", + reference: &sourcev1.GitRepositoryRef{ + Branch: "staging", + }, + want: ctrl.Result{RequeueAfter: interval}, + wantRevision: "staging/", + }, + { + name: "Tag", + reference: &sourcev1.GitRepositoryRef{ + Tag: "v0.1.0", + }, + want: ctrl.Result{RequeueAfter: interval}, + wantRevision: "v0.1.0/", + }, + { + name: "Branch commit", + reference: &sourcev1.GitRepositoryRef{ + Branch: "staging", + Commit: "", + }, + want: ctrl.Result{RequeueAfter: interval}, + wantRevision: "staging/", + }, + { + name: "SemVer", + reference: &sourcev1.GitRepositoryRef{ + SemVer: "*", + }, + want: ctrl.Result{RequeueAfter: interval}, + wantRevision: "v2.0.0/", + }, + { + name: "SemVer range", + reference: &sourcev1.GitRepositoryRef{ + SemVer: "", + }, + { + name: "SemVer prerelease", + reference: &sourcev1.GitRepositoryRef{ + SemVer: ">=1.0.0-0 <1.1.0-0", + }, + wantRevision: "v1.0.0-alpha/", + want: ctrl.Result{RequeueAfter: interval}, + }, + } + + server, err := gittestserver.NewTempGitServer() + g.Expect(err).To(BeNil()) + server.AutoCreate() + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + + repoPath := "/test.git" + localRepo, err := initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) + g.Expect(err).NotTo(HaveOccurred()) + + headRef, err := localRepo.Head() + g.Expect(err).NotTo(HaveOccurred()) + + for _, branch := range branches { + g.Expect(remoteBranchForHead(localRepo, headRef, branch)).To(Succeed()) + } + for _, tag := range tags { + g.Expect(remoteTagForHead(localRepo, headRef, tag)).To(Succeed()) + } + + r := &GitRepositoryReconciler{ + Client: fakeclient.NewClientBuilder().WithScheme(runtime.NewScheme()).Build(), + Storage: testStorage, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + obj := &sourcev1.GitRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, + GenerateName: "checkout-strategy-", }, Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - Interval: metav1.Duration{Duration: indexInterval}, - Reference: t.reference, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: interval}, + URL: server.HTTPAddress() + repoPath, + Reference: tt.reference, }, } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - - got := &sourcev1.GitRepository{} - var cond metav1.Condition - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == t.waitForReason { - cond = c - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - Expect(cond.Status).To(Equal(t.expectStatus)) - Expect(cond.Message).To(ContainSubstring(t.expectMessage)) - Expect(got.Status.Artifact == nil).To(Equal(t.expectRevision == "")) - if t.expectRevision != "" { - 
Expect(got.Status.Artifact.Revision).To(Equal(t.expectRevision + "/" + commit.String())) - } - }, - Entry("branch", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "some-branch"}, - createRefs: []string{"refs/heads/some-branch"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "some-branch", - }), - Entry("branch non existing", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "invalid-branch"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "couldn't find remote ref", - }), - Entry("tag", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Tag: "some-tag"}, - createRefs: []string{"refs/tags/some-tag"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "some-tag", - }), - Entry("tag non existing", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Tag: "invalid-tag"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "couldn't find remote ref", - }), - Entry("semver", refTestCase{ - reference: &sourcev1.GitRepositoryRef{SemVer: "1.0.0"}, - createRefs: []string{"refs/tags/v1.0.0"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "v1.0.0", - }), - Entry("semver range", refTestCase{ - reference: &sourcev1.GitRepositoryRef{SemVer: ">=0.1.0 <1.0.0"}, - createRefs: []string{"refs/tags/0.1.0", "refs/tags/0.1.1", "refs/tags/0.2.0", "refs/tags/1.0.0"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "0.2.0", - }), - Entry("mixed semver range", refTestCase{ - reference: &sourcev1.GitRepositoryRef{SemVer: ">=0.1.0 <1.0.0"}, - createRefs: []string{"refs/tags/0.1.0", "refs/tags/v0.1.1", "refs/tags/v0.2.0", "refs/tags/1.0.0"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "v0.2.0", - }), - Entry("semver invalid", refTestCase{ - reference: &sourcev1.GitRepositoryRef{SemVer: "1.2.3.4"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "semver parse error: improper constraint: 1.2.3.4", - }), - Entry("semver no match", refTestCase{ - reference: &sourcev1.GitRepositoryRef{SemVer: "1.0.0"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "no match found for semver: 1.0.0", - }), - Entry("commit", refTestCase{ - reference: &sourcev1.GitRepositoryRef{ - Commit: "", - }, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "HEAD", - }), - Entry("commit in branch", refTestCase{ - reference: &sourcev1.GitRepositoryRef{ - Branch: "some-branch", - Commit: "", - }, - createRefs: []string{"refs/heads/some-branch"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "some-branch", - }), - Entry("invalid commit", refTestCase{ - reference: &sourcev1.GitRepositoryRef{ - Branch: "master", - Commit: "invalid", - }, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "failed to resolve commit object for 'invalid': object not found", - }), - ) - - DescribeTable("Git self signed cert tests", func(t refTestCase) { - err = gitServer.StartHTTPS(examplePublicKey, examplePrivateKey, exampleCA, 
"example.com") - defer gitServer.StopHTTP() - Expect(err).NotTo(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - - var transport = httptransport.NewClient(&http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - }, - }) - client.InstallProtocol("https", transport) - - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := gitrepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - ff, _ := fs.Create("fixture") - _ = ff.Close() - _, err = wt.Add(fs.Join("fixture")) - Expect(err).NotTo(HaveOccurred()) - - commit, err := wt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - for _, ref := range t.createRefs { - hRef := plumbing.NewHashReference(plumbing.ReferenceName(ref), commit) - err = gitrepo.Storer.SetReference(hRef) - Expect(err).NotTo(HaveOccurred()) + + if obj.Spec.Reference != nil && obj.Spec.Reference.Commit == "" { + obj.Spec.Reference.Commit = headRef.Hash().String() } - remote, err := gitrepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{u.String()}, - }) - Expect(err).NotTo(HaveOccurred()) + for _, i := range testGitImplementations { + t.Run(i, func(t *testing.T) { + g := NewWithT(t) - err = remote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, - }) - Expect(err).NotTo(HaveOccurred()) + tmpDir, err := os.MkdirTemp("", "checkout-strategy-") + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(tmpDir) - t.reference.Commit = strings.Replace(t.reference.Commit, "", commit.String(), 1) + obj := obj.DeepCopy() + obj.Spec.GitImplementation = i - client.InstallProtocol("https", httptransport.DefaultClient) + var artifact sourcev1.Artifact + got, err := r.reconcileSource(ctx, obj, &artifact, tmpDir) + if err != nil { + println(err.Error()) + } + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + if tt.wantRevision != "" { + revision := strings.ReplaceAll(tt.wantRevision, "", headRef.Hash().String()) + g.Expect(artifact.Revision).To(Equal(revision)) + g.Expect(conditions.IsTrue(obj, sourcev1.ArtifactOutdatedCondition)).To(BeTrue()) + } + }) + } + }) + } +} + +func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + dir string + beforeFunc func(obj *sourcev1.GitRepository) + afterFunc func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) + want ctrl.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "Archiving artifact to storage makes Ready=True", + dir: "testdata/git/repository", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Checksum).To(Equal("f9955588f6aeed7be9b1ef15cd2ddac47bb53291")) + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), + }, + }, + { + name: "Spec ignore overwrite is taken into account", + dir: 
"testdata/git/repository", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Ignore = pointer.StringPtr("!**.txt\n") + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Checksum).To(Equal("542a8ad0171118a3249e8c531c598b898defd742")) + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), + }, + }, + } - key := types.NamespacedName{ - Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)), - Namespace: namespace.Name, + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &GitRepositoryReconciler{ + Storage: testStorage, } - created := &sourcev1.GitRepository{ + + obj := &sourcev1.GitRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - Interval: metav1.Duration{Duration: indexInterval}, - Reference: t.reference, - GitImplementation: t.gitImplementation, - SecretRef: t.secretRef, + GenerateName: "reconcile-artifact-", + Generation: 1, }, + Status: sourcev1.GitRepositoryStatus{}, } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - - got := &sourcev1.GitRepository{} - var cond metav1.Condition - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == t.waitForReason { - cond = c - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - Expect(cond.Status).To(Equal(t.expectStatus)) - Expect(cond.Message).To(ContainSubstring(t.expectMessage)) - Expect(got.Status.Artifact == nil).To(Equal(t.expectRevision == "")) - }, - Entry("self signed libgit2 without CA", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "main"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "user rejected certificate", - gitImplementation: sourcev1.LibGit2Implementation, - }), - Entry("self signed libgit2 with CA", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "some-branch"}, - createRefs: []string{"refs/heads/some-branch"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "some-branch", - secretRef: &meta.LocalObjectReference{Name: "cert"}, - gitImplementation: sourcev1.LibGit2Implementation, - }), - Entry("self signed go-git without CA", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "main"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "x509: certificate signed by unknown authority", - }), - Entry("self signed go-git with CA", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "some-branch"}, - createRefs: []string{"refs/heads/some-branch"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "some-branch", - secretRef: &meta.LocalObjectReference{Name: "cert"}, - gitImplementation: sourcev1.GoGitImplementation, - }), - ) - - Context("recurse submodules", func() { - It("downloads submodules when asked", func() { - Expect(gitServer.StartHTTP()).To(Succeed()) - defer 
gitServer.StopHTTP() - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - - subRepoURL := *u - subRepoURL.Path = path.Join(u.Path, fmt.Sprintf("subrepository-%s.git", randStringRunes(5))) - - // create the git repo to use as a submodule - fs := memfs.New() - subRepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := subRepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - ff, _ := fs.Create("fixture") - _ = ff.Close() - _, err = wt.Add(fs.Join("fixture")) - Expect(err).NotTo(HaveOccurred()) - - _, err = wt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - remote, err := subRepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{subRepoURL.String()}, - }) - Expect(err).NotTo(HaveOccurred()) + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } - err = remote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, - }) - Expect(err).NotTo(HaveOccurred()) - - // this one is linked to a real directory, so that I can - // exec `git submodule add` later - tmp, err := os.MkdirTemp("", "flux-test") - Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(tmp) - - repoDir := filepath.Join(tmp, "git") - repo, err := git.PlainInit(repoDir, false) - Expect(err).NotTo(HaveOccurred()) - - wt, err = repo.Worktree() - Expect(err).NotTo(HaveOccurred()) - _, err = wt.Commit("Initial revision", &git.CommitOptions{ - Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - submodAdd := exec.Command("git", "submodule", "add", "-b", "master", subRepoURL.String(), "sub") - submodAdd.Dir = repoDir - out, err := submodAdd.CombinedOutput() - os.Stdout.Write(out) - Expect(err).NotTo(HaveOccurred()) - - _, err = wt.Commit("Add submodule", &git.CommitOptions{ - Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - mainRepoURL := *u - mainRepoURL.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - remote, err = repo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{mainRepoURL.String()}, - }) - Expect(err).NotTo(HaveOccurred()) + artifact := testStorage.NewArtifactFor(obj.Kind, obj, "main/revision", "checksum.tar.gz") - err = remote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, - }) - Expect(err).NotTo(HaveOccurred()) + got, err := r.reconcileArtifact(ctx, obj, artifact, nil, tt.dir) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) - key := types.NamespacedName{ - Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)), - Namespace: namespace.Name, - } - created := &sourcev1.GitRepository{ + if tt.afterFunc != nil { + tt.afterFunc(g, obj, artifact) + } + }) + } +} + +func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { + g := NewWithT(t) + + server, err := testserver.NewTempArtifactServer() + g.Expect(err).NotTo(HaveOccurred()) + storage, err := newTestStorage(server.HTTPServer) + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(testStorage.BasePath) + + dependencyInterval := 5 * time.Second + + type dependency struct { + name 
string + withArtifact bool + conditions []metav1.Condition + } + + type include struct { + name string + fromPath string + toPath string + shouldExist bool + } + + tests := []struct { + name string + dependencies []dependency + includes []include + beforeFunc func(obj *sourcev1.GitRepository) + want ctrl.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "New includes make ArtifactOutdated=True", + dependencies: []dependency{ + { + name: "a", + withArtifact: true, + conditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Foo", "foo ready"), + }, + }, + { + name: "b", + withArtifact: true, + conditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Bar", "bar ready"), + }, + }, + }, + includes: []include{ + {name: "a", toPath: "a/"}, + {name: "b", toPath: "b/"}, + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "IncludeChange", "Included artifacts differ from last observed includes"), + }, + }, + { + name: "Include get failure makes IncludeUnavailable=True and returns error", + includes: []include{ + {name: "a", toPath: "a/"}, + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NotFound", "Could not get resource for include \"a\": gitrepositories.source.toolkit.fluxcd.io \"a\" not found"), + }, + }, + { + name: "Include without an artifact makes IncludeUnavailable=True", + dependencies: []dependency{ + { + name: "a", + withArtifact: false, + conditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "Foo", "foo unavailable"), + }, + }, + }, + includes: []include{ + {name: "a", toPath: "a/"}, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NoArtifact", "No artifact available for include \"a\""), + }, + }, + { + name: "Invalid FromPath makes IncludeUnavailable=True and returns error", + dependencies: []dependency{ + { + name: "a", + withArtifact: true, + }, + }, + includes: []include{ + {name: "a", fromPath: "../../../path", shouldExist: false}, + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "CopyFailure", "unpack/path: no such file or directory"), + }, + }, + { + name: "Outdated IncludeUnavailable is removed", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + var depObjs []client.Object + for _, d := range tt.dependencies { + obj := &sourcev1.GitRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, + Name: d.name, }, - Spec: sourcev1.GitRepositorySpec{ - URL: mainRepoURL.String(), - Interval: metav1.Duration{Duration: indexInterval}, - Reference: &sourcev1.GitRepositoryRef{Branch: "master"}, - GitImplementation: sourcev1.GoGitImplementation, // only works with go-git - RecurseSubmodules: true, + Status: sourcev1.GitRepositoryStatus{ + Conditions: d.conditions, }, } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - - got := &sourcev1.GitRepository{} - Eventually(func() bool 
{ - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.GitOperationSucceedReason { - return true - } + if d.withArtifact { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: d.name + ".tar.gz", + Revision: d.name, + LastUpdateTime: metav1.Now(), } - return false - }, timeout, interval).Should(BeTrue()) - - // check that the downloaded artifact includes the - // file from the submodule - res, err := http.Get(got.Status.URL) - Expect(err).NotTo(HaveOccurred()) - Expect(res.StatusCode).To(Equal(http.StatusOK)) - - _, err = untar.Untar(res.Body, filepath.Join(tmp, "tar")) - Expect(err).NotTo(HaveOccurred()) - Expect(filepath.Join(tmp, "tar", "sub", "fixture")).To(BeAnExistingFile()) - }) - }) - - type includeTestCase struct { - fromPath string - toPath string - createFiles []string - checkFiles []string - } - - DescribeTable("Include git repositories", func(t includeTestCase) { - Expect(gitServer.StartHTTP()).To(Succeed()) - defer gitServer.StopHTTP() - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - - // create the main git repository - mainRepoURL := *u - mainRepoURL.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - - mainFs := memfs.New() - mainRepo, err := git.Init(memory.NewStorage(), mainFs) - Expect(err).NotTo(HaveOccurred()) - - mainWt, err := mainRepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - ff, _ := mainFs.Create("fixture") - _ = ff.Close() - _, err = mainWt.Add(mainFs.Join("fixture")) - Expect(err).NotTo(HaveOccurred()) - - _, err = mainWt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - mainRemote, err := mainRepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{mainRepoURL.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - err = mainRemote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, - }) - Expect(err).NotTo(HaveOccurred()) - - // create the sub git repository - subRepoURL := *u - subRepoURL.Path = path.Join(u.Path, fmt.Sprintf("subrepository-%s.git", randStringRunes(5))) - - subFs := memfs.New() - subRepo, err := git.Init(memory.NewStorage(), subFs) - Expect(err).NotTo(HaveOccurred()) - - subWt, err := subRepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - for _, v := range t.createFiles { - if dir := filepath.Base(v); dir != v { - err := subFs.MkdirAll(dir, 0700) - Expect(err).NotTo(HaveOccurred()) + g.Expect(storage.Archive(obj.GetArtifact(), "testdata/git/repository", nil)).To(Succeed()) } - ff, err := subFs.Create(v) - Expect(err).NotTo(HaveOccurred()) - _ = ff.Close() - _, err = subWt.Add(subFs.Join(v)) - Expect(err).NotTo(HaveOccurred()) + depObjs = append(depObjs, obj) } - _, err = subWt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - subRemote, err := subRepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{subRepoURL.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - err = subRemote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, - }) - Expect(err).NotTo(HaveOccurred()) - - // create main and sub resetRepositories - subKey := types.NamespacedName{ - Name: fmt.Sprintf("git-ref-test-%s", 
randStringRunes(5)), - Namespace: namespace.Name, - } - subCreated := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: subKey.Name, - Namespace: subKey.Namespace, - }, - Spec: sourcev1.GitRepositorySpec{ - URL: subRepoURL.String(), - Interval: metav1.Duration{Duration: indexInterval}, - Reference: &sourcev1.GitRepositoryRef{Branch: "master"}, - }, + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) + if len(tt.dependencies) > 0 { + builder.WithObjects(depObjs...) } - Expect(k8sClient.Create(context.Background(), subCreated)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), subCreated) - mainKey := types.NamespacedName{ - Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)), - Namespace: namespace.Name, + r := &GitRepositoryReconciler{ + Client: builder.Build(), + EventRecorder: record.NewFakeRecorder(32), + // Events: helper.Events{ + // Scheme: testEnv.GetScheme(), + // EventRecorder: record.NewFakeRecorder(32), + // }, + Storage: storage, + requeueDependency: dependencyInterval, } - mainCreated := &sourcev1.GitRepository{ + + obj := &sourcev1.GitRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: mainKey.Name, - Namespace: mainKey.Namespace, + Name: "reconcile-include", }, Spec: sourcev1.GitRepositorySpec{ - URL: mainRepoURL.String(), - Interval: metav1.Duration{Duration: indexInterval}, - Reference: &sourcev1.GitRepositoryRef{Branch: "master"}, - Include: []sourcev1.GitRepositoryInclude{ - { - GitRepositoryRef: meta.LocalObjectReference{ - Name: subKey.Name, - }, - FromPath: t.fromPath, - ToPath: t.toPath, - }, - }, + Interval: metav1.Duration{Duration: interval}, }, } - Expect(k8sClient.Create(context.Background(), mainCreated)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), mainCreated) - - got := &sourcev1.GitRepository{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), mainKey, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.GitOperationSucceedReason { - return true - } + + for i, incl := range tt.includes { + incl := sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: incl.name}, + FromPath: incl.fromPath, + ToPath: incl.toPath, } - return false - }, timeout, interval).Should(BeTrue()) - - // check the contents of the repository - res, err := http.Get(got.Status.URL) - Expect(err).NotTo(HaveOccurred()) - Expect(res.StatusCode).To(Equal(http.StatusOK)) - tmp, err := os.MkdirTemp("", "flux-test") - Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(tmp) - _, err = untar.Untar(res.Body, filepath.Join(tmp, "tar")) - Expect(err).NotTo(HaveOccurred()) - for _, v := range t.checkFiles { - Expect(filepath.Join(tmp, "tar", v)).To(BeAnExistingFile()) + tt.includes[i].fromPath = incl.GetFromPath() + tt.includes[i].toPath = incl.GetToPath() + obj.Spec.Include = append(obj.Spec.Include, incl) } - // add new file to check that the change is reconciled - ff, err = subFs.Create(subFs.Join(t.fromPath, "test")) - Expect(err).NotTo(HaveOccurred()) - err = ff.Close() - Expect(err).NotTo(HaveOccurred()) - _, err = subWt.Add(subFs.Join(t.fromPath, "test")) - Expect(err).NotTo(HaveOccurred()) - - hash, err := subWt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - err = subRemote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, - }) - 
Expect(err).NotTo(HaveOccurred()) - - got = &sourcev1.GitRepository{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), mainKey, got) - if got.Status.IncludedArtifacts[0].Revision == fmt.Sprintf("master/%s", hash.String()) { - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.GitOperationSucceedReason { - return true - } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + tmpDir, err := os.MkdirTemp("", "include-") + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + var artifacts artifactSet + got, err := r.reconcileInclude(ctx, obj, artifacts, tmpDir) + g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + for _, i := range tt.includes { + if i.toPath != "" { + expect := g.Expect(filepath.Join(testStorage.BasePath, i.toPath)) + if i.shouldExist { + expect.To(BeADirectory()) + } else { + expect.NotTo(BeADirectory()) } } - return false - }, timeout, interval).Should(BeTrue()) - - // get the main repository artifact - res, err = http.Get(got.Status.URL) - Expect(err).NotTo(HaveOccurred()) - Expect(res.StatusCode).To(Equal(http.StatusOK)) - tmp, err = os.MkdirTemp("", "flux-test") - Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(tmp) - _, err = untar.Untar(res.Body, filepath.Join(tmp, "tar")) - Expect(err).NotTo(HaveOccurred()) - Expect(filepath.Join(tmp, "tar", t.toPath, "test")).To(BeAnExistingFile()) + if i.shouldExist { + g.Expect(filepath.Join(testStorage.BasePath, i.toPath)).Should(BeADirectory()) + } else { + g.Expect(filepath.Join(testStorage.BasePath, i.toPath)).ShouldNot(BeADirectory()) + } + } + }) + } +} + +func TestGitRepositoryReconciler_reconcileDelete(t *testing.T) { + g := NewWithT(t) + + r := &GitRepositoryReconciler{ + Storage: testStorage, + } + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-delete-", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{ + sourcev1.SourceFinalizer, + }, + }, + Status: sourcev1.GitRepositoryStatus{}, + } + + artifact := testStorage.NewArtifactFor(sourcev1.GitRepositoryKind, obj.GetObjectMeta(), "revision", "foo.txt") + obj.Status.Artifact = &artifact + + got, err := r.reconcileDelete(ctx, obj) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(got).To(Equal(ctrl.Result{})) + g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse()) + g.Expect(obj.Status.Artifact).To(BeNil()) +} + +// func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) { +// tests := []struct { +// name string +// secret *corev1.Secret +// commit git.Commit +// beforeFunc func(obj *sourcev1.GitRepository) +// want ctrl.Result +// wantErr bool +// assertConditions []metav1.Condition +// }{ +// { +// name: "Valid commit makes SourceVerifiedCondition=True", +// secret: &corev1.Secret{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "existing", +// }, +// }, +// commit: fake.NewCommit(true, "shasum"), +// beforeFunc: func(obj *sourcev1.GitRepository) { +// obj.Spec.Interval = metav1.Duration{Duration: interval} +// obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ +// Mode: "head", +// SecretRef: meta.LocalObjectReference{ +// Name: "existing", +// }, +// } +// }, +// want: ctrl.Result{RequeueAfter: interval}, +// assertConditions: []metav1.Condition{ +// *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "Verified signature of commit \"shasum\""), +// }, +// 
}, +// { +// name: "Invalid commit makes SourceVerifiedCondition=False and returns error", +// secret: &corev1.Secret{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "existing", +// }, +// }, +// commit: fake.NewCommit(false, "shasum"), +// beforeFunc: func(obj *sourcev1.GitRepository) { +// obj.Spec.Interval = metav1.Duration{Duration: interval} +// obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ +// Mode: "head", +// SecretRef: meta.LocalObjectReference{ +// Name: "existing", +// }, +// } +// }, +// wantErr: true, +// assertConditions: []metav1.Condition{ +// *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, meta.FailedReason, "Signature verification of commit \"shasum\" failed: invalid signature"), +// }, +// }, +// { +// name: "Secret get failure makes SourceVerified=False and returns error", +// beforeFunc: func(obj *sourcev1.GitRepository) { +// obj.Spec.Interval = metav1.Duration{Duration: interval} +// obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ +// Mode: "head", +// SecretRef: meta.LocalObjectReference{ +// Name: "none-existing", +// }, +// } +// }, +// wantErr: true, +// assertConditions: []metav1.Condition{ +// *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, meta.FailedReason, "PGP public keys secret error: secrets \"none-existing\" not found"), +// }, +// }, +// { +// name: "Nil verification in spec deletes SourceVerified condition", +// beforeFunc: func(obj *sourcev1.GitRepository) { +// obj.Spec.Interval = metav1.Duration{Duration: interval} +// conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") +// }, +// want: ctrl.Result{RequeueAfter: interval}, +// assertConditions: []metav1.Condition{}, +// }, +// { +// name: "Empty verification mode in spec deletes SourceVerified condition", +// beforeFunc: func(obj *sourcev1.GitRepository) { +// obj.Spec.Interval = metav1.Duration{Duration: interval} +// obj.Spec.Verification = &sourcev1.GitRepositoryVerification{} +// conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") +// }, +// want: ctrl.Result{RequeueAfter: interval}, +// assertConditions: []metav1.Condition{}, +// }, +// } + +// for _, tt := range tests { +// t.Run(tt.name, func(t *testing.T) { +// g := NewWithT(t) + +// builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) +// if tt.secret != nil { +// builder.WithObjects(tt.secret) +// } + +// r := &GitRepositoryReconciler{ +// Client: builder.Build(), +// } + +// obj := &sourcev1.GitRepository{ +// ObjectMeta: metav1.ObjectMeta{ +// GenerateName: "verify-commit-", +// Generation: 1, +// }, +// Status: sourcev1.GitRepositoryStatus{}, +// } + +// if tt.beforeFunc != nil { +// tt.beforeFunc(obj) +// } + +// got, err := r.verifyCommitSignature(logr.NewContext(ctx, log.NullLogger{}), obj, tt.commit) +// g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) +// g.Expect(err != nil).To(Equal(tt.wantErr)) +// g.Expect(got).To(Equal(tt.want)) +// }) +// } +// } + +// helpers + +func initGitRepo(server *gittestserver.GitServer, fixture, branch, repositoryPath string) (*gogit.Repository, error) { + fs := memfs.New() + repo, err := gogit.Init(memory.NewStorage(), fs) + if err != nil { + return nil, err + } + + branchRef := plumbing.NewBranchReferenceName(branch) + if err = repo.CreateBranch(&config.Branch{ + Name: branch, + Remote: gogit.DefaultRemoteName, + Merge: branchRef, + }); err != nil { + return nil, err + } + + err = commitFromFixture(repo, fixture) + if err != nil { + return nil, err + } + + 
if server.HTTPAddress() == "" { + if err = server.StartHTTP(); err != nil { + return nil, err + } + defer server.StopHTTP() + } + if _, err = repo.CreateRemote(&config.RemoteConfig{ + Name: gogit.DefaultRemoteName, + URLs: []string{server.HTTPAddressWithCredentials() + repositoryPath}, + }); err != nil { + return nil, err + } + + if err = repo.Push(&gogit.PushOptions{ + RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*"}, + }); err != nil { + return nil, err + } + + return repo, nil +} + +func Test_commitFromFixture(t *testing.T) { + g := NewWithT(t) + + repo, err := gogit.Init(memory.NewStorage(), memfs.New()) + g.Expect(err).ToNot(HaveOccurred()) + + err = commitFromFixture(repo, "testdata/git/repository") + g.Expect(err).ToNot(HaveOccurred()) +} + +func commitFromFixture(repo *gogit.Repository, fixture string) error { + working, err := repo.Worktree() + if err != nil { + return err + } + fs := working.Filesystem + + if err = filepath.Walk(fixture, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return fs.MkdirAll(fs.Join(path[len(fixture):]), info.Mode()) + } + + fileBytes, err := os.ReadFile(path) + if err != nil { + return err + } + + ff, err := fs.Create(path[len(fixture):]) + if err != nil { + return err + } + defer ff.Close() + + _, err = ff.Write(fileBytes) + return err + }); err != nil { + return err + } + + _, err = working.Add(".") + if err != nil { + return err + } + + if _, err = working.Commit("Fixtures from "+fixture, &gogit.CommitOptions{ + Author: &object.Signature{ + Name: "Jane Doe", + Email: "jane@example.com", + When: time.Now(), + }, + }); err != nil { + return err + } + + return nil +} + +func remoteBranchForHead(repo *gogit.Repository, head *plumbing.Reference, branch string) error { + refSpec := fmt.Sprintf("%s:refs/heads/%s", head.Name(), branch) + return repo.Push(&gogit.PushOptions{ + RemoteName: "origin", + RefSpecs: []config.RefSpec{config.RefSpec(refSpec)}, + Force: true, + }) +} + +func remoteTagForHead(repo *gogit.Repository, head *plumbing.Reference, tag string) error { + if _, err := repo.CreateTag(tag, head.Hash(), &gogit.CreateTagOptions{ + // Not setting this seems to make things flaky + // Expected success, but got an error: + // <*errors.errorString | 0xc0000f6350>: { + // s: "tagger field is required", + // } + // tagger field is required + Tagger: &object.Signature{ + Name: "Jane Doe", + Email: "jane@example.com", + When: time.Now(), }, - Entry("only to path", includeTestCase{ - fromPath: "", - toPath: "sub", - createFiles: []string{"dir1", "dir2"}, - checkFiles: []string{"sub/dir1", "sub/dir2"}, - }), - Entry("to nested path", includeTestCase{ - fromPath: "", - toPath: "sub/nested", - createFiles: []string{"dir1", "dir2"}, - checkFiles: []string{"sub/nested/dir1", "sub/nested/dir2"}, - }), - Entry("from and to path", includeTestCase{ - fromPath: "nested", - toPath: "sub", - createFiles: []string{"dir1", "nested/dir2", "nested/dir3", "nested/foo/bar"}, - checkFiles: []string{"sub/dir2", "sub/dir3", "sub/foo/bar"}, - }), - ) + Message: tag, + }); err != nil { + return err + } + refSpec := fmt.Sprintf("refs/tags/%[1]s:refs/tags/%[1]s", tag) + return repo.Push(&gogit.PushOptions{ + RefSpecs: []config.RefSpec{config.RefSpec(refSpec)}, }) -}) +} diff --git a/controllers/legacy_suite_test.go b/controllers/legacy_suite_test.go index 911f735b0..748145feb 100644 --- a/controllers/legacy_suite_test.go +++ b/controllers/legacy_suite_test.go @@ -30,6 +30,7 @@ import ( 
"helm.sh/helm/v3/pkg/getter" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" @@ -118,11 +119,11 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) err = (&GitRepositoryReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: ginkgoTestStorage, + Client: k8sManager.GetClient(), + EventRecorder: record.NewFakeRecorder(32), + Storage: ginkgoTestStorage, }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup GtRepositoryReconciler") + Expect(err).ToNot(HaveOccurred(), "failed to setup GitRepositoryReconciler") err = (&HelmRepositoryReconciler{ Client: k8sManager.GetClient(), diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 2710f6f77..a33108dc2 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -26,6 +26,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "github.com/fluxcd/pkg/runtime/controller" @@ -87,13 +88,14 @@ func TestMain(m *testing.M) { testMetricsH = controller.MustMakeMetrics(testEnv) - //if err := (&GitRepositoryReconciler{ - // Client: testEnv, - // Metrics: testMetricsH, - // Storage: testStorage, - //}).SetupWithManager(testEnv); err != nil { - // panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err)) - //} + if err := (&GitRepositoryReconciler{ + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Storage: testStorage, + }).SetupWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err)) + } go func() { fmt.Println("Starting the test environment") diff --git a/controllers/testdata/git/repository/.sourceignore b/controllers/testdata/git/repository/.sourceignore new file mode 100644 index 000000000..989478d13 --- /dev/null +++ b/controllers/testdata/git/repository/.sourceignore @@ -0,0 +1 @@ +**.txt diff --git a/controllers/testdata/git/repository/foo.txt b/controllers/testdata/git/repository/foo.txt new file mode 100644 index 000000000..e69de29bb diff --git a/controllers/testdata/git/repository/manifest.yaml b/controllers/testdata/git/repository/manifest.yaml new file mode 100644 index 000000000..220e1b33e --- /dev/null +++ b/controllers/testdata/git/repository/manifest.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: dummy diff --git a/go.mod b/go.mod index 538fdb57b..262953542 100644 --- a/go.mod +++ b/go.mod @@ -38,6 +38,7 @@ require ( k8s.io/api v0.23.3 k8s.io/apimachinery v0.23.3 k8s.io/client-go v0.23.3 + k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 sigs.k8s.io/controller-runtime v0.11.1 sigs.k8s.io/yaml v1.3.0 ) @@ -200,7 +201,6 @@ require ( k8s.io/klog/v2 v2.40.1 // indirect k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect k8s.io/kubectl v0.22.4 // indirect - k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect oras.land/oras-go v0.4.0 // indirect sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect sigs.k8s.io/kustomize/api v0.10.1 // indirect diff --git a/main.go b/main.go index bbced344b..3c0f2791a 100644 --- a/main.go +++ b/main.go @@ -165,11 +165,10 @@ func main() { storage := mustInitStorage(storagePath, storageAdvAddr, setupLog) if err = (&controllers.GitRepositoryReconciler{ - Client: 
mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - EventRecorder: eventRecorder, - MetricsRecorder: metricsH.MetricsRecorder, + Client: mgr.GetClient(), + EventRecorder: eventRecorder, + Metrics: metricsH, + Storage: storage, }).SetupWithManagerAndOptions(mgr, controllers.GitRepositoryReconcilerOptions{ MaxConcurrentReconciles: concurrent, DependencyRequeueInterval: requeueDependency, From b814070bc2e7683f3528d1efff154807745dd062 Mon Sep 17 00:00:00 2001 From: Sunny Date: Wed, 24 Nov 2021 16:35:51 +0530 Subject: [PATCH 14/63] Fixes to gitrepo reconciler tests NOTE: This should be amended with the previous commit which has commented out tests. Update reconcileSource() to work with the test case where no secret is set. A minimal auth options is created and used for git checkout. Update TestGitRepositoryReconciler_verifyCommitSignature() to use the new git.Commit type. Update TestGitRepositoryReconciler_reconcileSource_checkoutStrategy to add skipForImplementation for branch commit test case. Signed-off-by: Sunny --- controllers/gitrepository_controller.go | 23 +- controllers/gitrepository_controller_test.go | 390 ++++++++++++------- pkg/git/options.go | 21 + 3 files changed, 286 insertions(+), 148 deletions(-) diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 9b88bc017..34c2a4350 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -284,7 +284,8 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sou func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { // Configure authentication strategy to access the source - authOpts := &git.AuthOptions{} + var authOpts *git.AuthOptions + var err error if obj.Spec.SecretRef != nil { // Attempt to retrieve secret name := types.NamespacedName{ @@ -302,16 +303,18 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, } // Configure strategy with secret - var err error authOpts, err = git.AuthOptionsFromSecret(obj.Spec.URL, &secret) - if err != nil { - conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, sourcev1.AuthenticationFailedReason, - "Failed to configure auth strategy for Git implementation %q: %s", obj.Spec.GitImplementation, err) - r.Eventf(obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, - "Failed to configure auth strategy for Git implementation %q: %s", obj.Spec.GitImplementation, err) - // Return error as the contents of the secret may change - return ctrl.Result{}, err - } + } else { + // Set the minimal auth options for valid transport. 
+ authOpts, err = git.AuthOptionsWithoutSecret(obj.Spec.URL) + } + if err != nil { + conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, sourcev1.AuthenticationFailedReason, + "Failed to configure auth strategy for Git implementation %q: %s", obj.Spec.GitImplementation, err) + r.Eventf(obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, + "Failed to configure auth strategy for Git implementation %q: %s", obj.Spec.GitImplementation, err) + // Return error as the contents of the secret may change + return ctrl.Result{}, err } // Configure checkout strategy diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index 27f78a25c..f9570f085 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -56,6 +56,86 @@ import ( "github.com/fluxcd/source-controller/pkg/git" ) +const ( + encodedCommitFixture = `tree f0c522d8cc4c90b73e2bc719305a896e7e3c108a +parent eb167bc68d0a11530923b1f24b4978535d10b879 +author Stefan Prodan 1633681364 +0300 +committer Stefan Prodan 1633681364 +0300 + +Update containerd and runc to fix CVEs + +Signed-off-by: Stefan Prodan +` + malformedEncodedCommitFixture = `parent eb167bc68d0a11530923b1f24b4978535d10b879 +author Stefan Prodan 1633681364 +0300 +committer Stefan Prodan 1633681364 +0300 + +Update containerd and runc to fix CVEs + +Signed-off-by: Stefan Prodan +` + signatureCommitFixture = `-----BEGIN PGP SIGNATURE----- + +iHUEABEIAB0WIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCYV//1AAKCRAyma6w5Ahb +r7nJAQCQU4zEJu04/Q0ac/UaL6htjhq/wTDNMeUM+aWG/LcBogEAqFUea1oR2BJQ +JCJmEtERFh39zNWSazQmxPAFhEE0kbc= +=+Wlj +-----END PGP SIGNATURE-----` + armoredKeyRingFixture = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQSuBF9+HgMRDADKT8UBcSzpTi4JXt/ohhVW3x81AGFPrQvs6MYrcnNJfIkPTJD8 +mY5T7j1fkaN5wcf1wnxM9qTcW8BodkWNGEoEYOtVuigLSxPFqIncxK0PHvdU8ths +TEInBrgZv9t6xIVa4QngOEUd2D/aYni7M+75z7ntgj6eU1xLZ60upRFn05862OvJ +rZFUvzjsZXMAO3enCu2VhG/2axCY/5uI8PgWjyiKV2TH4LBJgzlb0v6SyI+fYf5K +Bg2WzDuLKvQBi9tFSwnUbQoFFlOeiGW8G/bdkoJDWeS1oYgSD3nkmvXvrVESCrbT +C05OtQOiDXjSpkLim81vNVPtI2XEug+9fEA+jeJakyGwwB+K8xqV3QILKCoWHKGx +yWcMHSR6cP9tdXCk2JHZBm1PLSJ8hIgMH/YwBJLYg90u8lLAs9WtpVBKkLplzzgm +B4Z4VxCC+xI1kt+3ZgYvYC+oUXJXrjyAzy+J1f+aWl2+S/79glWgl/xz2VibWMz6 +nZUE+wLMxOQqyOsBALsoE6z81y/7gfn4R/BziBASi1jq/r/wdboFYowmqd39DACX ++i+V0OplP2TN/F5JajzRgkrlq5cwZHinnw+IFwj9RTfOkdGb3YwhBt/h2PP38969 +ZG+y8muNtaIqih1pXj1fz9HRtsiCABN0j+JYpvV2D2xuLL7P1O0dt5BpJ3KqNCRw +mGgO2GLxbwvlulsLidCPxdK/M8g9Eeb/xwA5LVwvjVchHkzHuUT7durn7AT0RWiK +BT8iDfeBB9RKienAbWyybEqRaR6/Tv+mghFIalsDiBPbfm4rsNzsq3ohfByqECiy +yUvs2O3NDwkoaBDkA3GFyKv8/SVpcuL5OkVxAHNCIMhNzSgotQ3KLcQc0IREfFCa +3CsBAC7CsE2bJZ9IA9sbBa3jimVhWUQVudRWiLFeYHUF/hjhqS8IHyFwprjEOLaV +EG0kBO6ELypD/bOsmN9XZLPYyI3y9DM6Vo0KMomE+yK/By/ZMxVfex8/TZreUdhP +VdCLL95Rc4w9io8qFb2qGtYBij2wm0RWLcM0IhXWAtjI3B17IN+6hmv+JpiZccsM +AMNR5/RVdXIl0hzr8LROD0Xe4sTyZ+fm3mvpczoDPQNRrWpmI/9OT58itnVmZ5jM +7djV5y/NjBk63mlqYYfkfWto97wkhg0MnTnOhzdtzSiZQRzj+vf+ilLfIlLnuRr1 +JRV9Skv6xQltcFArx4JyfZCo7JB1ZXcbdFAvIXXS11RTErO0XVrXNm2RenpW/yZA +9f+ESQ/uUB6XNuyqVUnJDAFJFLdzx8sO3DXo7dhIlgpFqgQobUl+APpbU5LT95sm +89UrV0Lt9vh7k6zQtKOjEUhm+dErmuBnJo8MvchAuXLagHjvb58vYBCUxVxzt1KG +2IePwJ/oXIfawNEGad9Lmdo1FYG1u53AKWZmpYOTouu92O50FG2+7dBh0V2vO253 +aIGFRT1r14B1pkCIun7z7B/JELqOkmwmlRrUnxlADZEcQT3z/S8/4+2P7P6kXO7X +/TAX5xBhSqUbKe3DhJSOvf05/RVL5ULc2U2JFGLAtmBOFmnD/u0qoo5UvWliI+v/ +47QnU3RlZmFuIFByb2RhbiA8c3RlZmFuLnByb2RhbkBnbWFpbC5jb20+iJAEExEI +ADgWIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCX34eAwIbAwULCQgHAgYVCgkICwIE 
+FgIDAQIeAQIXgAAKCRAyma6w5Ahbrzu/AP9l2YpRaWZr6wSQuEn0gMN8DRzsWJPx +pn0akdY7SRP3ngD9GoKgu41FAItnHAJ2KiHv/fHFyHMndNP3kPGPNW4BF+65Aw0E +X34eAxAMAMdYFCHmVA8TZxSTMBDpKYave8RiDCMMMjk26Gl0EPN9f2Y+s5++DhiQ +hojNH9VmJkFwZX1xppxe1y1aLa/U6fBAqMP/IdNH8270iv+A9YIxdsWLmpm99BDO +3suRfsHcOe9T0x/CwRfDNdGM/enGMhYGTgF4VD58DRDE6WntaBhl4JJa300NG6X0 +GM4Gh59DKWDnez/Shulj8demlWmakP5imCVoY+omOEc2k3nH02U+foqaGG5WxZZ+ +GwEPswm2sBxvn8nwjy9gbQwEtzNI7lWYiz36wCj2VS56Udqt+0eNg8WzocUT0XyI +moe1qm8YJQ6fxIzaC431DYi/mCDzgx4EV9ww33SXX3Yp2NL6PsdWJWw2QnoqSMpM +z5otw2KlMgUHkkXEKs0apmK4Hu2b6KD7/ydoQRFUqR38Gb0IZL1tOL6PnbCRUcig +Aypy016W/WMCjBfQ8qxIGTaj5agX2t28hbiURbxZkCkz+Z3OWkO0Rq3Y2hNAYM5s +eTn94JIGGwADBgv/dbSZ9LrBvdMwg8pAtdlLtQdjPiT1i9w5NZuQd7OuKhOxYTEB +NRDTgy4/DgeNThCeOkMB/UQQPtJ3Et45S2YRtnnuvfxgnlz7xlUn765/grtnRk4t +ONjMmb6tZos1FjIJecB/6h4RsvUd2egvtlpD/Z3YKr6MpNjWg4ji7m27e9pcJfP6 +YpTDrq9GamiHy9FS2F2pZlQxriPpVhjCLVn9tFGBIsXNxxn7SP4so6rJBmyHEAlq +iym9wl933e0FIgAw5C1vvprYu2amk+jmVBsJjjCmInW5q/kWAFnFaHBvk+v+/7tX +hywWUI7BqseikgUlkgJ6eU7E9z1DEyuS08x/cViDoNh2ntVUhpnluDu48pdqBvvY +a4uL/D+KI84THUAJ/vZy+q6G3BEb4hI9pFjgrdJpUKubxyZolmkCFZHjV34uOcTc +LQr28P8xW8vQbg5DpIsivxYLqDGXt3OyiItxvLMtw/ypt6PkoeP9A4KDST4StITE +1hrOrPtJ/VRmS2o0iHgEGBEIACAWIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCX34e +AwIbDAAKCRAyma6w5Ahbr6QWAP9/pl2R6r1nuCnXzewSbnH1OLsXf32hFQAjaQ5o +Oomb3gD/TRf/nAdVED+k81GdLzciYdUGtI71/qI47G0nMBluLRE= +=/4e+ +-----END PGP PUBLIC KEY BLOCK----- +` +) + var ( testGitImplementations = []string{sourcev1.GoGitImplementation, sourcev1.LibGit2Implementation} ) @@ -243,7 +323,7 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.CheckoutFailedCondition, sourcev1.GitOperationFailedReason, "Failed to checkout and determine revision: unable to clone '', error: Certificate"), + *conditions.TrueCondition(sourcev1.CheckoutFailedCondition, sourcev1.GitOperationFailedReason, "Failed to checkout and determine revision: unable to clone: Certificate"), }, }, { @@ -378,8 +458,9 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { } r := &GitRepositoryReconciler{ - Client: builder.Build(), - Storage: testStorage, + Client: builder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, } for _, i := range testGitImplementations { @@ -425,11 +506,12 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) tags := []string{"non-semver-tag", "v0.1.0", "0.2.0", "v0.2.1", "v1.0.0-alpha", "v1.1.0", "v2.0.0"} tests := []struct { - name string - reference *sourcev1.GitRepositoryRef - want ctrl.Result - wantErr bool - wantRevision string + name string + skipForImplementation string + reference *sourcev1.GitRepositoryRef + want ctrl.Result + wantErr bool + wantRevision string }{ { name: "Nil reference (default branch)", @@ -453,7 +535,8 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) wantRevision: "v0.1.0/", }, { - name: "Branch commit", + name: "Branch commit", + skipForImplementation: sourcev1.LibGit2Implementation, reference: &sourcev1.GitRepositoryRef{ Branch: "staging", Commit: "", @@ -461,6 +544,16 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) want: ctrl.Result{RequeueAfter: interval}, wantRevision: "staging/", }, + { + name: "Branch commit", + skipForImplementation: sourcev1.GoGitImplementation, + reference: &sourcev1.GitRepositoryRef{ + Branch: "staging", + Commit: "", + }, + want: 
ctrl.Result{RequeueAfter: interval}, + wantRevision: "HEAD/", + }, { name: "SemVer", reference: &sourcev1.GitRepositoryRef{ @@ -508,8 +601,9 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) } r := &GitRepositoryReconciler{ - Client: fakeclient.NewClientBuilder().WithScheme(runtime.NewScheme()).Build(), - Storage: testStorage, + Client: fakeclient.NewClientBuilder().WithScheme(runtime.NewScheme()).Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, } for _, tt := range tests { @@ -534,6 +628,10 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) t.Run(i, func(t *testing.T) { g := NewWithT(t) + if tt.skipForImplementation == i { + t.Skipf("Skipped for Git implementation %q", i) + } + tmpDir, err := os.MkdirTemp("", "checkout-strategy-") g.Expect(err).NotTo(HaveOccurred()) defer os.RemoveAll(tmpDir) @@ -577,7 +675,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { }, afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) - t.Expect(obj.GetArtifact().Checksum).To(Equal("f9955588f6aeed7be9b1ef15cd2ddac47bb53291")) + t.Expect(obj.GetArtifact().Checksum).To(Equal("ef9c34eab0584035ac8b8a4070876954ea46f270250d60648672feef3e943426")) }, want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ @@ -593,7 +691,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { }, afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) - t.Expect(obj.GetArtifact().Checksum).To(Equal("542a8ad0171118a3249e8c531c598b898defd742")) + t.Expect(obj.GetArtifact().Checksum).To(Equal("dc95ae14c19d335b693bbba58ae2a562242b0cf33893baffd1b7605ba578e0d6")) }, want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ @@ -607,7 +705,8 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { g := NewWithT(t) r := &GitRepositoryReconciler{ - Storage: testStorage, + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, } obj := &sourcev1.GitRepository{ @@ -846,7 +945,8 @@ func TestGitRepositoryReconciler_reconcileDelete(t *testing.T) { g := NewWithT(t) r := &GitRepositoryReconciler{ - Storage: testStorage, + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, } obj := &sourcev1.GitRepository{ @@ -870,129 +970,143 @@ func TestGitRepositoryReconciler_reconcileDelete(t *testing.T) { g.Expect(obj.Status.Artifact).To(BeNil()) } -// func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) { -// tests := []struct { -// name string -// secret *corev1.Secret -// commit git.Commit -// beforeFunc func(obj *sourcev1.GitRepository) -// want ctrl.Result -// wantErr bool -// assertConditions []metav1.Condition -// }{ -// { -// name: "Valid commit makes SourceVerifiedCondition=True", -// secret: &corev1.Secret{ -// ObjectMeta: metav1.ObjectMeta{ -// Name: "existing", -// }, -// }, -// commit: fake.NewCommit(true, "shasum"), -// beforeFunc: func(obj *sourcev1.GitRepository) { -// obj.Spec.Interval = metav1.Duration{Duration: interval} -// obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ -// Mode: "head", -// SecretRef: meta.LocalObjectReference{ -// Name: "existing", -// }, -// } -// }, -// want: ctrl.Result{RequeueAfter: interval}, -// assertConditions: []metav1.Condition{ -// *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "Verified 
signature of commit \"shasum\""), -// }, -// }, -// { -// name: "Invalid commit makes SourceVerifiedCondition=False and returns error", -// secret: &corev1.Secret{ -// ObjectMeta: metav1.ObjectMeta{ -// Name: "existing", -// }, -// }, -// commit: fake.NewCommit(false, "shasum"), -// beforeFunc: func(obj *sourcev1.GitRepository) { -// obj.Spec.Interval = metav1.Duration{Duration: interval} -// obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ -// Mode: "head", -// SecretRef: meta.LocalObjectReference{ -// Name: "existing", -// }, -// } -// }, -// wantErr: true, -// assertConditions: []metav1.Condition{ -// *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, meta.FailedReason, "Signature verification of commit \"shasum\" failed: invalid signature"), -// }, -// }, -// { -// name: "Secret get failure makes SourceVerified=False and returns error", -// beforeFunc: func(obj *sourcev1.GitRepository) { -// obj.Spec.Interval = metav1.Duration{Duration: interval} -// obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ -// Mode: "head", -// SecretRef: meta.LocalObjectReference{ -// Name: "none-existing", -// }, -// } -// }, -// wantErr: true, -// assertConditions: []metav1.Condition{ -// *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, meta.FailedReason, "PGP public keys secret error: secrets \"none-existing\" not found"), -// }, -// }, -// { -// name: "Nil verification in spec deletes SourceVerified condition", -// beforeFunc: func(obj *sourcev1.GitRepository) { -// obj.Spec.Interval = metav1.Duration{Duration: interval} -// conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") -// }, -// want: ctrl.Result{RequeueAfter: interval}, -// assertConditions: []metav1.Condition{}, -// }, -// { -// name: "Empty verification mode in spec deletes SourceVerified condition", -// beforeFunc: func(obj *sourcev1.GitRepository) { -// obj.Spec.Interval = metav1.Duration{Duration: interval} -// obj.Spec.Verification = &sourcev1.GitRepositoryVerification{} -// conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") -// }, -// want: ctrl.Result{RequeueAfter: interval}, -// assertConditions: []metav1.Condition{}, -// }, -// } - -// for _, tt := range tests { -// t.Run(tt.name, func(t *testing.T) { -// g := NewWithT(t) - -// builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) -// if tt.secret != nil { -// builder.WithObjects(tt.secret) -// } - -// r := &GitRepositoryReconciler{ -// Client: builder.Build(), -// } - -// obj := &sourcev1.GitRepository{ -// ObjectMeta: metav1.ObjectMeta{ -// GenerateName: "verify-commit-", -// Generation: 1, -// }, -// Status: sourcev1.GitRepositoryStatus{}, -// } - -// if tt.beforeFunc != nil { -// tt.beforeFunc(obj) -// } - -// got, err := r.verifyCommitSignature(logr.NewContext(ctx, log.NullLogger{}), obj, tt.commit) -// g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) -// g.Expect(err != nil).To(Equal(tt.wantErr)) -// g.Expect(got).To(Equal(tt.want)) -// }) -// } -// } +func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) { + tests := []struct { + name string + secret *corev1.Secret + commit git.Commit + beforeFunc func(obj *sourcev1.GitRepository) + want ctrl.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "Valid commit makes SourceVerifiedCondition=True", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing", + }, + Data: map[string][]byte{ + "foo": []byte(armoredKeyRingFixture), + }, + }, + 
commit: git.Commit{ + Hash: []byte("shasum"), + Encoded: []byte(encodedCommitFixture), + Signature: signatureCommitFixture, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: "head", + SecretRef: meta.LocalObjectReference{ + Name: "existing", + }, + } + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "Verified signature of commit \"shasum\""), + }, + }, + { + name: "Invalid commit makes SourceVerifiedCondition=False and returns error", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing", + }, + }, + commit: git.Commit{ + Hash: []byte("shasum"), + Encoded: []byte(malformedEncodedCommitFixture), + Signature: signatureCommitFixture, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: "head", + SecretRef: meta.LocalObjectReference{ + Name: "existing", + }, + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, meta.FailedReason, "Signature verification of commit \"shasum\" failed: failed to verify commit with any of the given key rings"), + }, + }, + { + name: "Secret get failure makes SourceVerified=False and returns error", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: "head", + SecretRef: meta.LocalObjectReference{ + Name: "none-existing", + }, + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, meta.FailedReason, "PGP public keys secret error: secrets \"none-existing\" not found"), + }, + }, + { + name: "Nil verification in spec deletes SourceVerified condition", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{}, + }, + { + name: "Empty verification mode in spec deletes SourceVerified condition", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{} + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) + if tt.secret != nil { + builder.WithObjects(tt.secret) + } + + r := &GitRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: builder.Build(), + } + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "verify-commit-", + Generation: 1, + }, + Status: sourcev1.GitRepositoryStatus{}, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + dlog := log.NewDelegatingLogSink(log.NullLogSink{}) + nullLogger := logr.New(dlog) + got, err := r.verifyCommitSignature(logr.NewContext(ctx, nullLogger), obj, tt.commit) + 
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + }) + } +} // helpers diff --git a/pkg/git/options.go b/pkg/git/options.go index 64458f5ec..9b186b391 100644 --- a/pkg/git/options.go +++ b/pkg/git/options.go @@ -129,3 +129,24 @@ func AuthOptionsFromSecret(URL string, secret *v1.Secret) (*AuthOptions, error) return opts, nil } + +// AuthOptionsWithoutSecret constructs a minimal AuthOptions object from the +// given URL and then validates the result. It returns the AuthOptions, or an +// error. +func AuthOptionsWithoutSecret(URL string) (*AuthOptions, error) { + u, err := url.Parse(URL) + if err != nil { + return nil, fmt.Errorf("failed to parse URL to determine auth strategy: %w", err) + } + + opts := &AuthOptions{ + Transport: TransportType(u.Scheme), + Host: u.Host, + } + + if err = opts.Validate(); err != nil { + return nil, err + } + + return opts, nil +} From 18180776e7829f7296e3382cb5cd2cc0defe8842 Mon Sep 17 00:00:00 2001 From: Sunny Date: Mon, 2 Aug 2021 05:42:32 +0530 Subject: [PATCH 15/63] controllers: Add more tests for reconcileArtifact Fixes error returned from target path validation check and adds more test cases for TestGitRepositoryReconciler_reconcileArtifact. Signed-off-by: Sunny --- controllers/gitrepository_controller.go | 3 +- controllers/gitrepository_controller_test.go | 48 +++++++++++++++++++- 2 files changed, 49 insertions(+), 2 deletions(-) diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 34c2a4350..18307b695 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -400,7 +400,8 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *so ctrl.LoggerFrom(ctx).Error(err, "failed to stat source path") return ctrl.Result{}, err } else if !f.IsDir() { - ctrl.LoggerFrom(ctx).Error(err, fmt.Sprintf("source path '%s' is not a directory", dir)) + err := fmt.Errorf("source path '%s' is not a directory", dir) + ctrl.LoggerFrom(ctx).Error(err, "invalid target path") return ctrl.Result{}, err } diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index f9570f085..08f7dd314 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -661,6 +661,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { tests := []struct { name string dir string + includes artifactSet beforeFunc func(obj *sourcev1.GitRepository) afterFunc func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) want ctrl.Result @@ -673,9 +674,44 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), + }, + }, + { + name: "Archiving artifact to storage with includes makes Ready=True", + dir: "testdata/git/repository", + includes: artifactSet{&sourcev1.Artifact{Revision: "main/revision"}}, + beforeFunc: func(obj *sourcev1.GitRepository) { + 
obj.Spec.Interval = metav1.Duration{Duration: interval} + }, afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) t.Expect(obj.GetArtifact().Checksum).To(Equal("ef9c34eab0584035ac8b8a4070876954ea46f270250d60648672feef3e943426")) + t.Expect(obj.Status.IncludedArtifacts).ToNot(BeEmpty()) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), + }, + }, + { + name: "Up-to-date artifact should not update status", + dir: "testdata/git/repository", + includes: artifactSet{&sourcev1.Artifact{Revision: "main/revision"}}, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Status.Artifact = &sourcev1.Artifact{Revision: "main/revision"} + obj.Status.IncludedArtifacts = []*sourcev1.Artifact{{Revision: "main/revision"}} + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { + t.Expect(obj.Status.URL).To(BeEmpty()) }, want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ @@ -698,6 +734,16 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), }, }, + { + name: "Target path does not exists", + dir: "testdata/git/foo", + wantErr: true, + }, + { + name: "Target path is not a directory", + dir: "testdata/git/repository/foo.txt", + wantErr: true, + }, } for _, tt := range tests { @@ -723,7 +769,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { artifact := testStorage.NewArtifactFor(obj.Kind, obj, "main/revision", "checksum.tar.gz") - got, err := r.reconcileArtifact(ctx, obj, artifact, nil, tt.dir) + got, err := r.reconcileArtifact(ctx, obj, artifact, tt.includes, tt.dir) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) From 67bd2cb81cbd2d44351e1f35ac4544d2d8c87e70 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Tue, 3 Aug 2021 12:20:19 +0200 Subject: [PATCH 16/63] Replace %q in messages with '%s' Signed-off-by: Hidde Beydals --- controllers/gitrepository_controller.go | 24 ++++++++++---------- controllers/gitrepository_controller_test.go | 8 +++---- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 18307b695..d8d49f8e1 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -310,9 +310,9 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, } if err != nil { conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, sourcev1.AuthenticationFailedReason, - "Failed to configure auth strategy for Git implementation %q: %s", obj.Spec.GitImplementation, err) + "Failed to configure auth strategy for Git implementation '%s': %s", obj.Spec.GitImplementation, err) r.Eventf(obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, - "Failed to configure auth strategy for Git implementation %q: %s", obj.Spec.GitImplementation, err) + "Failed to configure auth strategy for Git implementation '%s': %s", obj.Spec.GitImplementation, err) // Return error as the contents of the secret may 
change return ctrl.Result{}, err } @@ -329,7 +329,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, git.Implementation(obj.Spec.GitImplementation), checkoutOpts) if err != nil { conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, sourcev1.GitOperationFailedReason, - "Failed to configure checkout strategy for Git implementation %q: %s", obj.Spec.GitImplementation, err) + "Failed to configure checkout strategy for Git implementation '%s': %s", obj.Spec.GitImplementation, err) // Do not return err as recovery without changes is impossible return ctrl.Result{}, nil } @@ -470,7 +470,7 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sou toPath, err := securejoin.SecureJoin(dir, incl.GetToPath()) if err != nil { conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "IllegalPath", - "Path calculation for include %q failed: %s", incl.GitRepositoryRef.Name, err.Error()) + "Path calculation for include '%s' failed: %s", incl.GitRepositoryRef.Name, err.Error()) return ctrl.Result{}, err } @@ -478,23 +478,23 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sou dep := &sourcev1.GitRepository{} if err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: incl.GitRepositoryRef.Name}, dep); err != nil { conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NotFound", - "Could not get resource for include %q: %s", incl.GitRepositoryRef.Name, err.Error()) + "Could not get resource for include '%s': %s", incl.GitRepositoryRef.Name, err.Error()) return ctrl.Result{}, err } // Confirm include has an artifact if dep.GetArtifact() == nil { conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", - "No artifact available for include %q", incl.GitRepositoryRef.Name) + "No artifact available for include '%s'", incl.GitRepositoryRef.Name) return ctrl.Result{}, nil } // Copy artifact (sub)contents to configured directory if err := r.Storage.CopyToPath(dep.GetArtifact(), incl.GetFromPath(), toPath); err != nil { conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "CopyFailure", - "Failed to copy %q include from %s to %s: %s", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err.Error()) + "Failed to copy '%s' include from %s to %s: %s", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err.Error()) r.Eventf(obj, events.EventSeverityError, sourcev1.IncludeUnavailableCondition, - "Failed to copy %q include from %s to %s: %s", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err.Error()) + "Failed to copy '%s' include from %s to %s: %s", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err.Error()) return ctrl.Result{}, err } artifacts[i] = dep.GetArtifact().DeepCopy() @@ -552,14 +552,14 @@ func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj } // Verify commit with GPG data from secret if _, err := commit.Verify(keyRings...); err != nil { - conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, meta.FailedReason, "Signature verification of commit %q failed: %s", commit.Hash.String(), err) - r.Eventf(obj, events.EventSeverityError, "InvalidCommitSignature", "Signature verification of commit %q failed: %s", commit.Hash.String(), err) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, meta.FailedReason, "Signature verification of commit '%s' failed: %s", commit.Hash.String(), err) + r.Eventf(obj, events.EventSeverityError, "InvalidCommitSignature", "Signature 
verification of commit '%s' failed: %s", commit.Hash.String(), err) // Return error in the hope the secret changes return ctrl.Result{}, err } - conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "Verified signature of commit %q", commit.Hash.String()) - r.Eventf(obj, events.EventSeverityInfo, "VerifiedCommit", "Verified signature of commit %q", commit.Hash.String()) + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "Verified signature of commit '%s'", commit.Hash.String()) + r.Eventf(obj, events.EventSeverityInfo, "VerifiedCommit", "Verified signature of commit '%s'", commit.Hash.String()) return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil } diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index 08f7dd314..4b710f655 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -848,7 +848,7 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NotFound", "Could not get resource for include \"a\": gitrepositories.source.toolkit.fluxcd.io \"a\" not found"), + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NotFound", "Could not get resource for include 'a': gitrepositories.source.toolkit.fluxcd.io \"a\" not found"), }, }, { @@ -866,7 +866,7 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { {name: "a", toPath: "a/"}, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NoArtifact", "No artifact available for include \"a\""), + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NoArtifact", "No artifact available for include 'a'"), }, }, { @@ -1052,7 +1052,7 @@ func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) { }, want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "Verified signature of commit \"shasum\""), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "Verified signature of commit 'shasum'"), }, }, { @@ -1078,7 +1078,7 @@ func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, meta.FailedReason, "Signature verification of commit \"shasum\" failed: failed to verify commit with any of the given key rings"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, meta.FailedReason, "Signature verification of commit 'shasum' failed: failed to verify commit with any of the given key rings"), }, }, { From 1e326e8f1c7fe566bee125c89d14fcf35beb80b4 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Tue, 3 Aug 2021 13:23:50 +0200 Subject: [PATCH 17/63] source: `GetRequeueAfter` in place of `GetInterval` The problem with `GetInterval()` was that the returned type was of `metav1.Duration`, while almost anywhere it was used, a type of `time.Duration` was requested. The result of this was that we had to call `GetInterval().Duration` all the time, which would become a bit cumbersome after awhile. 
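For illustration, the accessor that the calls below switch to presumably boils down to the following; this sketch is not part of the patch, and the exact API-side definition may differ:

    // GetRequeueAfter returns the duration after which the GitRepository must
    // be reconciled again, unwrapping Spec.Interval so callers receive a plain
    // time.Duration rather than a metav1.Duration.
    func (in GitRepository) GetRequeueAfter() time.Duration {
        return in.Spec.Interval.Duration
    }
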
To prevent this, we introduce a new `GetRequeueAfter() time.Duration` method, which both results the right type, and bears a name that is easier to remember where the value is used most; while setting the `Result.RequeueAfter` during reconcile operations. The introduced of this method deprecates `GetInterval()`, which should be removed in a future MINOR release. Signed-off-by: Hidde Beydals --- controllers/gitrepository_controller.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index d8d49f8e1..c171ea7bf 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -218,7 +218,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.G // Reconcile the source from upstream var artifact sourcev1.Artifact if result, err := r.reconcileSource(ctx, obj, &artifact, tmpDir); err != nil || result.IsZero() { - return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, err + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, err } // Reconcile includes from the storage @@ -232,7 +232,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.G return result, err } - return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } // reconcileStorage ensures the current state of the storage matches the desired and previously observed state. @@ -265,7 +265,7 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sou r.Storage.SetArtifactURL(obj.GetArtifact()) obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) - return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } // reconcileSource ensures the upstream Git repository can be reached and checked out using the declared configuration, @@ -362,7 +362,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, if !obj.GetArtifact().HasRevision(commit.String()) { conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '%s'", commit.String()) } - return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } // reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match the @@ -392,7 +392,7 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *so // The artifact is up-to-date if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) { ctrl.LoggerFrom(ctx).Info("Artifact is up-to-date") - return ctrl.Result{RequeueAfter: obj.GetInterval().Duration}, nil + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } // Ensure target path exists and is a directory @@ -452,7 +452,7 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *so if url != "" { obj.Status.URL = url } - return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } // reconcileInclude reconciles the declared includes from the object by copying their artifact (sub)contents to the @@ -507,7 +507,7 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sou if artifacts.Diff(obj.Status.IncludedArtifacts) { conditions.MarkTrue(obj, 
sourcev1.ArtifactOutdatedCondition, "IncludeChange", "Included artifacts differ from last observed includes") } - return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } // reconcileDelete handles the delete of an object. It first garbage collects all artifacts for the object from the @@ -531,7 +531,7 @@ func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj // Check if there is a commit verification is configured and remove any old observations if there is none if obj.Spec.Verification == nil || obj.Spec.Verification.Mode == "" { conditions.Delete(obj, sourcev1.SourceVerifiedCondition) - return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } // Get secret with GPG data From c2e6875284329cb6ea05b362832e5d5d3148c906 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Tue, 3 Aug 2021 14:01:18 +0200 Subject: [PATCH 18/63] Tweak logged messages - Mention the current revision in the up-to-date log message. - Ensure any error that is "swallowed" (not returned) is logged to ensure they are visible within the logs, and not just by inspecting the object. Signed-off-by: Hidde Beydals --- controllers/gitrepository_controller.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index c171ea7bf..8aae01dda 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -328,6 +328,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, checkoutStrategy, err := strategy.CheckoutStrategyForImplementation(ctx, git.Implementation(obj.Spec.GitImplementation), checkoutOpts) if err != nil { + ctrl.LoggerFrom(ctx).Error(err, fmt.Sprintf("Failed to configure checkout strategy for Git implementation '%s'", obj.Spec.GitImplementation)) conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, sourcev1.GitOperationFailedReason, "Failed to configure checkout strategy for Git implementation '%s': %s", obj.Spec.GitImplementation, err) // Do not return err as recovery without changes is impossible @@ -391,7 +392,7 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *so // The artifact is up-to-date if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) { - ctrl.LoggerFrom(ctx).Info("Artifact is up-to-date") + ctrl.LoggerFrom(ctx).Info(fmt.Sprintf("Already up to date, current revision '%s'", artifact.Revision)) return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } @@ -484,6 +485,7 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sou // Confirm include has an artifact if dep.GetArtifact() == nil { + ctrl.LoggerFrom(ctx).Error(nil, fmt.Sprintf("No artifact available for include '%s'", incl.GitRepositoryRef.Name)) conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", "No artifact available for include '%s'", incl.GitRepositoryRef.Name) return ctrl.Result{}, nil From 7e71185594c00b6d7f660af8807e13ddc9a03d2c Mon Sep 17 00:00:00 2001 From: Sunny Date: Fri, 6 Aug 2021 19:04:45 +0530 Subject: [PATCH 19/63] gitrepo: reconcileInclude test assertion fixes Use the created artifact server test storage in reconcileInclude test's GitRepositoryReconciler and cleanup the created storage. Fix the test assertions to check the copied artifact directories in the correct path. 
Also, update the tests to expect artifacts in the include `toPath` to exist. Signed-off-by: Sunny --- controllers/gitrepository_controller_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index 4b710f655..b4005f62a 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -788,7 +788,7 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { g.Expect(err).NotTo(HaveOccurred()) storage, err := newTestStorage(server.HTTPServer) g.Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(testStorage.BasePath) + defer os.RemoveAll(storage.BasePath) dependencyInterval := 5 * time.Second @@ -833,8 +833,8 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { }, }, includes: []include{ - {name: "a", toPath: "a/"}, - {name: "b", toPath: "b/"}, + {name: "a", toPath: "a/", shouldExist: true}, + {name: "b", toPath: "b/", shouldExist: true}, }, want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ @@ -970,7 +970,7 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { g.Expect(got).To(Equal(tt.want)) for _, i := range tt.includes { if i.toPath != "" { - expect := g.Expect(filepath.Join(testStorage.BasePath, i.toPath)) + expect := g.Expect(filepath.Join(tmpDir, i.toPath)) if i.shouldExist { expect.To(BeADirectory()) } else { @@ -978,9 +978,9 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { } } if i.shouldExist { - g.Expect(filepath.Join(testStorage.BasePath, i.toPath)).Should(BeADirectory()) + g.Expect(filepath.Join(tmpDir, i.toPath)).Should(BeADirectory()) } else { - g.Expect(filepath.Join(testStorage.BasePath, i.toPath)).ShouldNot(BeADirectory()) + g.Expect(filepath.Join(tmpDir, i.toPath)).ShouldNot(BeADirectory()) } } }) From 59b3e5da5d0ff7efaf31a5fd8bf611ad0a182d14 Mon Sep 17 00:00:00 2001 From: Sunny Date: Sun, 8 Aug 2021 03:59:42 +0530 Subject: [PATCH 20/63] gitrepo: Add tests for old conditions update This tests the status conditions update in the gitrepository reconciler. Given a mix of old status conditions, on a successful reconciliation, the status condition is set to Ready=True. 
Signed-off-by: Sunny --- controllers/gitrepository_controller_test.go | 128 +++++++++++++++++++ 1 file changed, 128 insertions(+) diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index b4005f62a..ee68e1160 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -1154,6 +1154,134 @@ func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) { } } +func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { + g := NewWithT(t) + + server, err := gittestserver.NewTempGitServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + + repoPath := "/test.git" + _, err = initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) + g.Expect(err).NotTo(HaveOccurred()) + + tests := []struct { + name string + beforeFunc func(obj *sourcev1.GitRepository) + want ctrl.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "no condition", + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "Stored artifact for revision"), + }, + }, + { + name: "reconciling condition", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, meta.ReconcilingCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "Stored artifact for revision"), + }, + }, + { + name: "stalled condition", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, meta.StalledCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "Stored artifact for revision"), + }, + }, + { + name: "mixed failed conditions", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "Stored artifact for revision"), + }, + }, + { + name: "reconciling and failed conditions", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, meta.ReconcilingCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "Stored artifact for revision"), + }, + }, + { + name: "stalled and failed conditions", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, meta.StalledCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "Stored artifact for revision"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, 
func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "condition-update", + Namespace: "default", + Finalizers: []string{sourcev1.SourceFinalizer}, + }, + Spec: sourcev1.GitRepositorySpec{ + URL: server.HTTPAddress() + repoPath, + GitImplementation: sourcev1.GoGitImplementation, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: interval}, + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()).WithObjects(obj) + + r := &GitRepositoryReconciler{ + Client: builder.Build(), + Storage: testStorage, + } + + key := client.ObjectKeyFromObject(obj) + res, err := r.Reconcile(logr.NewContext(ctx, log.NullLogger{}), ctrl.Request{NamespacedName: key}) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(res).To(Equal(tt.want)) + + updatedObj := &sourcev1.GitRepository{} + err = r.Get(ctx, key, updatedObj) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(updatedObj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + // helpers func initGitRepo(server *gittestserver.GitServer, fixture, branch, repositoryPath string) (*gogit.Repository, error) { From 3d5698856a226f3a6b9c37c5443e16c894a4d181 Mon Sep 17 00:00:00 2001 From: Sunny Date: Mon, 9 Aug 2021 04:09:51 +0530 Subject: [PATCH 21/63] gitrepo: test reconcileArtifact condtns & symlink Adds test cases for reconcileArtifact to check if old status conditions are removed after new artifact is created. Adds a test case to verify that the latest artifact symlink points to the created artifact. Signed-off-by: Sunny --- controllers/gitrepository_controller_test.go | 54 ++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index ee68e1160..f356e3afe 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -734,6 +734,60 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), }, }, + { + name: "Removes ArtifactUnavailableCondition after creating artifact", + dir: "testdata/git/repository", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Checksum).To(Equal("ef9c34eab0584035ac8b8a4070876954ea46f270250d60648672feef3e943426")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating new artifact", + dir: "testdata/git/repository", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + 
t.Expect(obj.GetArtifact().Checksum).To(Equal("ef9c34eab0584035ac8b8a4070876954ea46f270250d60648672feef3e943426")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + dir: "testdata/git/repository", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), + }, + }, { name: "Target path does not exists", dir: "testdata/git/foo", From a437ed69bd0e8d7e45fa280c85558268d12a6ee1 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Mon, 9 Aug 2021 13:24:49 +0200 Subject: [PATCH 22/63] Consolidate condition types into `FetchFailed` This commit consolidates the `DownloadFailed` and `CheckoutFailed` Condition types into a new more generic `FetchFailed` type to simplify the API and observations by consumers. Signed-off-by: Hidde Beydals --- api/v1beta2/gitrepository_types.go | 5 ----- controllers/gitrepository_controller.go | 20 +++++++++---------- controllers/gitrepository_controller_test.go | 21 +++++++++++--------- 3 files changed, 22 insertions(+), 24 deletions(-) diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go index 010ddac12..76c048659 100644 --- a/api/v1beta2/gitrepository_types.go +++ b/api/v1beta2/gitrepository_types.go @@ -36,11 +36,6 @@ const ( ) const ( - // CheckoutFailedCondition indicates a transient or persistent checkout failure. If True, observations on the - // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. - // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. - CheckoutFailedCondition string = "CheckoutFailed" - // SourceVerifiedCondition indicates the integrity of the Source has been verified. If True, the integrity check // succeeded. If False, it failed. The Condition is only present on the resource if the integrity has been verified. 
SourceVerifiedCondition string = "SourceVerified" diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 8aae01dda..88d662f38 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -123,13 +123,13 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques conditions.WithConditions( sourcev1.IncludeUnavailableCondition, sourcev1.SourceVerifiedCondition, - sourcev1.CheckoutFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.ArtifactOutdatedCondition, sourcev1.ArtifactUnavailableCondition, ), conditions.WithNegativePolarityConditions( sourcev1.ArtifactUnavailableCondition, - sourcev1.CheckoutFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.SourceVerifiedCondition, sourcev1.IncludeUnavailableCondition, sourcev1.ArtifactOutdatedCondition, @@ -141,7 +141,7 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques patch.WithOwnedConditions{ Conditions: []string{ sourcev1.ArtifactUnavailableCondition, - sourcev1.CheckoutFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.IncludeUnavailableCondition, sourcev1.ArtifactOutdatedCondition, meta.ReadyCondition, @@ -272,8 +272,8 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sou // and observes its state. // // The repository is checked out to the given dir using the defined configuration, and in case of an error during the -// checkout process (including transient errors), it records v1beta1.CheckoutFailedCondition=True and returns early. -// On a successful checkout it removes v1beta1.CheckoutFailedCondition, and compares the current revision of HEAD to the +// checkout process (including transient errors), it records v1beta1.FetchFailedCondition=True and returns early. +// On a successful checkout it removes v1beta1.FetchFailedCondition, and compares the current revision of HEAD to the // artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. // If instructed, the signature of the commit is verified if and recorded as v1beta1.SourceVerifiedCondition. If the // signature can not be verified or the verification fails, the Condition=False and it returns early. 
@@ -294,7 +294,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, } var secret corev1.Secret if err := r.Client.Get(ctx, name, &secret); err != nil { - conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, sourcev1.AuthenticationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", name.String(), err.Error()) r.Eventf(obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", name.String(), err.Error()) @@ -309,7 +309,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, authOpts, err = git.AuthOptionsWithoutSecret(obj.Spec.URL) } if err != nil { - conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, sourcev1.AuthenticationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to configure auth strategy for Git implementation '%s': %s", obj.Spec.GitImplementation, err) r.Eventf(obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, "Failed to configure auth strategy for Git implementation '%s': %s", obj.Spec.GitImplementation, err) @@ -329,7 +329,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, git.Implementation(obj.Spec.GitImplementation), checkoutOpts) if err != nil { ctrl.LoggerFrom(ctx).Error(err, fmt.Sprintf("Failed to configure checkout strategy for Git implementation '%s'", obj.Spec.GitImplementation)) - conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, sourcev1.GitOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "Failed to configure checkout strategy for Git implementation '%s': %s", obj.Spec.GitImplementation, err) // Do not return err as recovery without changes is impossible return ctrl.Result{}, nil @@ -340,7 +340,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, defer cancel() commit, err := checkoutStrategy.Checkout(gitCtx, dir, obj.Spec.URL, authOpts) if err != nil { - conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, sourcev1.GitOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "Failed to checkout and determine revision: %s", err) r.Eventf(obj, events.EventSeverityError, sourcev1.GitOperationFailedReason, "Failed to checkout and determine revision: %s", err) @@ -349,7 +349,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, } r.Eventf(obj, events.EventSeverityInfo, sourcev1.GitOperationSucceedReason, "Cloned repository '%s' and checked out revision '%s'", obj.Spec.URL, commit.String()) - conditions.Delete(obj, sourcev1.CheckoutFailedCondition) + conditions.Delete(obj, sourcev1.FetchFailedCondition) // Verify commit signature if result, err := r.verifyCommitSignature(ctx, obj, *commit); err != nil || result.IsZero() { diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index f356e3afe..96b1b9ce4 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -298,7 +298,7 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.CheckoutFailedCondition, sourcev1.GitOperationFailedReason, "x509: certificate signed by unknown authority"), + 
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "x509: certificate signed by unknown authority"), }, }, { @@ -323,7 +323,7 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.CheckoutFailedCondition, sourcev1.GitOperationFailedReason, "Failed to checkout and determine revision: unable to clone: Certificate"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "Failed to checkout and determine revision: unable to clone: Certificate"), }, }, { @@ -384,7 +384,7 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.CheckoutFailedCondition, "AuthenticationFailed", "Failed to get secret '/non-existing': secrets \"non-existing\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/non-existing': secrets \"non-existing\" not found"), }, }, } @@ -1259,7 +1259,7 @@ func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { { name: "mixed failed conditions", beforeFunc: func(obj *sourcev1.GitRepository) { - conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "") conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "Foo", "") conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") @@ -1274,7 +1274,7 @@ func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { name: "reconciling and failed conditions", beforeFunc: func(obj *sourcev1.GitRepository) { conditions.MarkTrue(obj, meta.ReconcilingCondition, "Foo", "") - conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "") }, want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ @@ -1285,7 +1285,7 @@ func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { name: "stalled and failed conditions", beforeFunc: func(obj *sourcev1.GitRepository) { conditions.MarkTrue(obj, meta.StalledCondition, "Foo", "") - conditions.MarkTrue(obj, sourcev1.CheckoutFailedCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "") }, want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ @@ -1319,12 +1319,15 @@ func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()).WithObjects(obj) r := &GitRepositoryReconciler{ - Client: builder.Build(), - Storage: testStorage, + Client: builder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, } key := client.ObjectKeyFromObject(obj) - res, err := r.Reconcile(logr.NewContext(ctx, log.NullLogger{}), ctrl.Request{NamespacedName: key}) + dlog := log.NewDelegatingLogSink(log.NullLogSink{}) + nullLogger := logr.New(dlog) + res, err := r.Reconcile(logr.NewContext(ctx, nullLogger), ctrl.Request{NamespacedName: key}) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(res).To(Equal(tt.want)) From dbaf21b974ef26401657698f8fd325a58c335626 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Tue, 10 Aug 2021 13:59:28 +0200 Subject: [PATCH 23/63] Wrap err with context instead of 
logging twice This wraps the errors which are returned instead of logging them, as the returned error is logged at the end of the reconcile run. Signed-off-by: Hidde Beydals --- controllers/gitrepository_controller.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 88d662f38..4f24265ed 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -398,22 +398,21 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *so // Ensure target path exists and is a directory if f, err := os.Stat(dir); err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to stat source path") + err = fmt.Errorf("failed to stat target path: %w", err) return ctrl.Result{}, err } else if !f.IsDir() { - err := fmt.Errorf("source path '%s' is not a directory", dir) - ctrl.LoggerFrom(ctx).Error(err, "invalid target path") + err = fmt.Errorf("invalid target path: '%s' is not a directory", dir) return ctrl.Result{}, err } // Ensure artifact directory exists and acquire lock if err := r.Storage.MkdirAll(artifact); err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to create artifact directory") + err = fmt.Errorf("failed to create artifact directory: %w", err) return ctrl.Result{}, err } unlock, err := r.Storage.Lock(artifact) if err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to acquire lock for artifact") + err = fmt.Errorf("failed to acquire lock for artifact: %w", err) return ctrl.Result{}, err } defer unlock() From 88dc2e6ed605dca6156d0772b700821859ed55b0 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Wed, 11 Aug 2021 10:41:43 +0200 Subject: [PATCH 24/63] chore: ensure Git server dir is removed after test Signed-off-by: Hidde Beydals --- controllers/gitrepository_controller_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index 96b1b9ce4..54ad3ea5a 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -582,6 +582,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) server, err := gittestserver.NewTempGitServer() g.Expect(err).To(BeNil()) + defer os.RemoveAll(server.Root()) server.AutoCreate() g.Expect(server.StartHTTP()).To(Succeed()) defer server.StopHTTP() From 25ae83ad7d70ac4e50e3c11ca2dfe45df9863a52 Mon Sep 17 00:00:00 2001 From: Sunny Date: Mon, 6 Dec 2021 16:13:24 +0530 Subject: [PATCH 25/63] gitrepo: Fix SourceVerifiedCondition condition type SourceVerifiedCondition is a normal condition, remove it from negative polarity conditions. Add SourceVerifiedCondition in patch option WithOwnedConditions. Also, Update the signature of reconcileInclude() to remove include being passed and overwritten in the first line. Include is available as part of the passed source object. 
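To illustrate the polarity change, a minimal sketch assuming the summary
helpers shown earlier in this series (the reason and message strings are
illustrative): with SourceVerified treated as a normal, positive-polarity
condition, a verified source no longer counts against the Ready summary, while
FetchFailed keeps its negative polarity.

    package main

    import (
        "fmt"

        "github.com/fluxcd/pkg/apis/meta"
        "github.com/fluxcd/pkg/runtime/conditions"

        sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
    )

    func main() {
        obj := &sourcev1.GitRepository{}
        conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified signature of commit")

        // SourceVerified is listed as a normal condition; only FetchFailed
        // keeps negative polarity and counts against readiness.
        conditions.SetSummary(obj, meta.ReadyCondition,
            conditions.WithConditions(
                sourcev1.SourceVerifiedCondition,
                sourcev1.FetchFailedCondition,
            ),
            conditions.WithNegativePolarityConditions(
                sourcev1.FetchFailedCondition,
            ),
        )

        fmt.Println(conditions.Get(obj, meta.ReadyCondition).Status) // expected: True
    }
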
Signed-off-by: Sunny --- controllers/gitrepository_controller.go | 8 ++++---- controllers/gitrepository_controller_test.go | 3 +-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 4f24265ed..a78bdb6d9 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -130,7 +130,6 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques conditions.WithNegativePolarityConditions( sourcev1.ArtifactUnavailableCondition, sourcev1.FetchFailedCondition, - sourcev1.SourceVerifiedCondition, sourcev1.IncludeUnavailableCondition, sourcev1.ArtifactOutdatedCondition, ), @@ -141,6 +140,7 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques patch.WithOwnedConditions{ Conditions: []string{ sourcev1.ArtifactUnavailableCondition, + sourcev1.SourceVerifiedCondition, sourcev1.FetchFailedCondition, sourcev1.IncludeUnavailableCondition, sourcev1.ArtifactOutdatedCondition, @@ -223,7 +223,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.G // Reconcile includes from the storage var includes artifactSet - if result, err := r.reconcileInclude(ctx, obj, includes, tmpDir); err != nil || result.IsZero() { + if result, err := r.reconcileInclude(ctx, obj, tmpDir); err != nil || result.IsZero() { return ctrl.Result{RequeueAfter: r.requeueDependency}, err } @@ -463,8 +463,8 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *so // If the artifactSet differs from the current set, it marks the object with v1beta1.ArtifactOutdatedCondition. // // The caller should assume a failure if an error is returned, or the Result is zero. -func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sourcev1.GitRepository, artifacts artifactSet, dir string) (ctrl.Result, error) { - artifacts = make(artifactSet, len(obj.Spec.Include)) +func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sourcev1.GitRepository, dir string) (ctrl.Result, error) { + artifacts := make(artifactSet, len(obj.Spec.Include)) for i, incl := range obj.Spec.Include { // Do this first as it is much cheaper than copy operations toPath, err := securejoin.SecureJoin(dir, incl.GetToPath()) diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index 54ad3ea5a..e90c84989 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -1018,8 +1018,7 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { g.Expect(err).NotTo(HaveOccurred()) defer os.RemoveAll(tmpDir) - var artifacts artifactSet - got, err := r.reconcileInclude(ctx, obj, artifacts, tmpDir) + got, err := r.reconcileInclude(ctx, obj, tmpDir) g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) From d9a947c909c3826345b2e3dbf03110ed987ee8e7 Mon Sep 17 00:00:00 2001 From: Sunny Date: Mon, 6 Dec 2021 21:12:55 +0530 Subject: [PATCH 26/63] gitrepo: Ignore patch error not found on delete While deleting, patching an object with new status results in "not found" error because the object is already deleted. The patching operation first patches the status conditions, the rest of the object and, at the very end, the rest of the status. 
When an object is deleted, the garbage collection results in the artifact in the status to be updated, resulting in a diff that is attempted to be patched when the deferred patch runs. Since the status patching runs at the very end, the object gets deleted before it can be patched. Ignore "not found" error while patching when the delete timestamp is set. Signed-off-by: Sunny --- controllers/gitrepository_controller.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index a78bdb6d9..c5937c0f7 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -25,6 +25,7 @@ import ( securejoin "github.com/cyphar/filepath-securejoin" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" @@ -172,6 +173,10 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Finally, patch the resource if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) + } retErr = kerrors.NewAggregate([]error{retErr, err}) } From 5767291b582efb8a0d3f471248907e578d8cc37e Mon Sep 17 00:00:00 2001 From: Sunny Date: Tue, 21 Dec 2021 01:57:06 +0530 Subject: [PATCH 27/63] gitrepo: Add more reconciler design improvements - Remove ArtifactUnavailable condition and use Reconciling condition to convey the same. - Make Reconciling condition affect the ready condition. - Introduce summarizeAndPatch() to calculate the final status conditions and patch them. - Introduce reconcile() to iterate through the sub-reconcilers and execute them. Signed-off-by: Sunny --- controllers/gitrepository_controller.go | 480 +++++++++++-------- controllers/gitrepository_controller_test.go | 171 +++---- go.mod | 2 + go.sum | 4 + 4 files changed, 371 insertions(+), 286 deletions(-) diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index c5937c0f7..ba7532a11 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -18,6 +18,7 @@ package controllers import ( "context" + "errors" "fmt" "os" "strings" @@ -26,7 +27,7 @@ import ( securejoin "github.com/cyphar/filepath-securejoin" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" @@ -40,16 +41,48 @@ import ( "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/runtime/conditions" helper "github.com/fluxcd/pkg/runtime/controller" - "github.com/fluxcd/pkg/runtime/events" "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" "github.com/fluxcd/source-controller/pkg/sourceignore" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/pkg/git" "github.com/fluxcd/source-controller/pkg/git/strategy" ) +// Status conditions owned by the GitRepository reconciler. 
+var gitRepoOwnedConditions = []string{ + sourcev1.SourceVerifiedCondition, + sourcev1.FetchFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.ArtifactOutdatedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, +} + +// Conditions that Ready condition is influenced by in descending order of their +// priority. +var gitRepoReadyDeps = []string{ + sourcev1.IncludeUnavailableCondition, + sourcev1.SourceVerifiedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, +} + +// Negative conditions that Ready condition is influenced by. +var gitRepoReadyDepsNegative = []string{ + sourcev1.FetchFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, +} + // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/finalizers,verbs=get;create;update;patch;delete @@ -71,6 +104,10 @@ type GitRepositoryReconcilerOptions struct { DependencyRequeueInterval time.Duration } +// gitRepoReconcilerFunc is the function type for all the Git repository +// reconciler functions. +type gitRepoReconcilerFunc func(ctx context.Context, obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, includes *artifactSet, dir string) (sreconcile.Result, error) + func (r *GitRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, GitRepositoryReconcilerOptions{}) } @@ -111,74 +148,14 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, err } + var recResult sreconcile.Result + // Always attempt to patch the object and status after each reconciliation + // NOTE: This deferred block only modifies the named return error. The + // result from the reconciliation remains the same. Any requeue attributes + // set in the result will continue to be effective. defer func() { - // Record the value of the reconciliation request, if any - if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { - obj.Status.SetLastHandledReconcileRequest(v) - } - - // Summarize the Ready condition based on abnormalities that may have been observed. 
- conditions.SetSummary(obj, - meta.ReadyCondition, - conditions.WithConditions( - sourcev1.IncludeUnavailableCondition, - sourcev1.SourceVerifiedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactOutdatedCondition, - sourcev1.ArtifactUnavailableCondition, - ), - conditions.WithNegativePolarityConditions( - sourcev1.ArtifactUnavailableCondition, - sourcev1.FetchFailedCondition, - sourcev1.IncludeUnavailableCondition, - sourcev1.ArtifactOutdatedCondition, - ), - ) - - // Patch the object, ignoring conflicts on the conditions owned by this controller - patchOpts := []patch.Option{ - patch.WithOwnedConditions{ - Conditions: []string{ - sourcev1.ArtifactUnavailableCondition, - sourcev1.SourceVerifiedCondition, - sourcev1.FetchFailedCondition, - sourcev1.IncludeUnavailableCondition, - sourcev1.ArtifactOutdatedCondition, - meta.ReadyCondition, - meta.ReconcilingCondition, - meta.StalledCondition, - }, - }, - } - - // Determine if the resource is still being reconciled, or if it has stalled, and record this observation - if retErr == nil && (result.IsZero() || !result.Requeue) { - // We are no longer reconciling - conditions.Delete(obj, meta.ReconcilingCondition) - - // We have now observed this generation - patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) - - readyCondition := conditions.Get(obj, meta.ReadyCondition) - switch readyCondition.Status { - case metav1.ConditionFalse: - // As we are no longer reconciling and the end-state is not ready, the reconciliation has stalled - conditions.MarkStalled(obj, readyCondition.Reason, readyCondition.Message) - case metav1.ConditionTrue: - // As we are no longer reconciling and the end-state is ready, the reconciliation is no longer stalled - conditions.Delete(obj, meta.StalledCondition) - } - } - - // Finally, patch the resource - if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { - // Ignore patch error "not found" when the object is being deleted. - if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) - } - retErr = kerrors.NewAggregate([]error{retErr, err}) - } + retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) // Always record readiness and duration metrics r.Metrics.RecordReadiness(ctx, obj) @@ -189,55 +166,103 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques // between init and delete if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue return ctrl.Result{Requeue: true}, nil } // Examine if the object is under deletion if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, obj) + res, err := r.reconcileDelete(ctx, obj) + return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, res, err) } // Reconcile actual object - return r.reconcile(ctx, obj) + reconcilers := []gitRepoReconcilerFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileInclude, + r.reconcileArtifact, + } + recResult, err = r.reconcile(ctx, obj, reconcilers) + return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, recResult, err) } -// reconcile steps through the actual reconciliation tasks for the object, it returns early on the first step that -// produces an error. 
-func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.GitRepository) (ctrl.Result, error) { - // Mark the resource as under reconciliation - conditions.MarkReconciling(obj, meta.ProgressingReason, "") +// summarizeAndPatch analyzes the object conditions to create a summary of the +// status conditions and patches the object with the calculated summary. +func (r *GitRepositoryReconciler) summarizeAndPatch(ctx context.Context, obj *sourcev1.GitRepository, patchHelper *patch.Helper, res sreconcile.Result, recErr error) error { + // Record the value of the reconciliation request if any. + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + obj.Status.SetLastHandledReconcileRequest(v) + } + + // Compute the reconcile results, obtain patch options and reconcile error. + var patchOpts []patch.Option + patchOpts, recErr = sreconcile.ComputeReconcileResult(obj, res, recErr, gitRepoOwnedConditions) + + // Summarize the Ready condition based on abnormalities that may have been observed. + conditions.SetSummary(obj, + meta.ReadyCondition, + conditions.WithConditions( + gitRepoReadyDeps..., + ), + conditions.WithNegativePolarityConditions( + gitRepoReadyDepsNegative..., + ), + ) + + // Finally, patch the resource. + if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) + } + recErr = kerrors.NewAggregate([]error{recErr, err}) + } - // Reconcile the storage data - if result, err := r.reconcileStorage(ctx, obj); err != nil || result.IsZero() { - return result, err + return recErr +} + +// reconcile steps iterates through the actual reconciliation tasks for objec, +// it returns early on the first step that returns ResultRequeue or produces an +// error. +func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.GitRepository, reconcilers []gitRepoReconcilerFunc) (sreconcile.Result, error) { + if obj.Generation != obj.Status.ObservedGeneration { + conditions.MarkReconciling(obj, "NewGeneration", "reconciling new generation %d", obj.Generation) } + var artifact sourcev1.Artifact + var includes artifactSet + // Create temp dir for Git clone tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name)) if err != nil { - r.Eventf(obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, "Failed to create temporary directory: %s", err) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create temporary directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } defer os.RemoveAll(tmpDir) - // Reconcile the source from upstream - var artifact sourcev1.Artifact - if result, err := r.reconcileSource(ctx, obj, &artifact, tmpDir); err != nil || result.IsZero() { - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, err - } - - // Reconcile includes from the storage - var includes artifactSet - if result, err := r.reconcileInclude(ctx, obj, tmpDir); err != nil || result.IsZero() { - return ctrl.Result{RequeueAfter: r.requeueDependency}, err - } - - // Reconcile the artifact to storage - if result, err := r.reconcileArtifact(ctx, obj, artifact, includes, tmpDir); err != nil || result.IsZero() { - return result, err + // Run the sub-reconcilers and build the result of reconciliation. 
+ var res sreconcile.Result + var resErr error + for _, rec := range reconcilers { + recResult, err := rec(ctx, obj, &artifact, &includes, tmpDir) + // Exit immediately on ResultRequeue. + if recResult == sreconcile.ResultRequeue { + return sreconcile.ResultRequeue, nil + } + // If an error is received, prioritize the returned results because an + // error also means immediate requeue. + if err != nil { + resErr = err + res = recResult + break + } + // Prioritize requeue request in the result. + res = sreconcile.LowestRequeuingResult(res, recResult) } - - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return res, resErr } // reconcileStorage ensures the current state of the storage matches the desired and previously observed state. @@ -246,9 +271,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.G // If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. // If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. // If the hostname of any of the URLs on the object do not match the current storage server hostname, they are updated. -// -// The caller should assume a failure if an error is returned, or the Result is zero. -func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.GitRepository) (ctrl.Result, error) { +func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, includes *artifactSet, dir string) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -260,17 +283,16 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sou // Record that we do not have an artifact if obj.GetArtifact() == nil { - conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage") - return ctrl.Result{Requeue: true}, nil + conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage") + return sreconcile.ResultSuccess, nil } - conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) // Always update URLs to ensure hostname is up-to-date // TODO(hidde): we may want to send out an event only if we notice the URL has changed r.Storage.SetArtifactURL(obj.GetArtifact()) obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileSource ensures the upstream Git repository can be reached and checked out using the declared configuration, @@ -284,10 +306,8 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sou // signature can not be verified or the verification fails, the Condition=False and it returns early. // If both the checkout and signature verification are successful, the given artifact pointer is set to a new artifact // with the available metadata. -// -// The caller should assume a failure if an error is returned, or the Result is zero. 
func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, - obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { + obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, includes *artifactSet, dir string) (sreconcile.Result, error) { // Configure authentication strategy to access the source var authOpts *git.AuthOptions var err error @@ -299,12 +319,13 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, } var secret corev1.Secret if err := r.Client.Get(ctx, name, &secret); err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, - "Failed to get secret '%s': %s", name.String(), err.Error()) - r.Eventf(obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, - "Failed to get secret '%s': %s", name.String(), err.Error()) + e := &serror.Event{ + Err: fmt.Errorf("failed to get secret '%s': %w", name.String(), err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) // Return error as the world as observed may change - return ctrl.Result{}, err + return sreconcile.ResultEmpty, e } // Configure strategy with secret @@ -314,12 +335,13 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, authOpts, err = git.AuthOptionsWithoutSecret(obj.Spec.URL) } if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, - "Failed to configure auth strategy for Git implementation '%s': %s", obj.Spec.GitImplementation, err) - r.Eventf(obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, - "Failed to configure auth strategy for Git implementation '%s': %s", obj.Spec.GitImplementation, err) + e := &serror.Event{ + Err: fmt.Errorf("failed to configure auth strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) // Return error as the contents of the secret may change - return ctrl.Result{}, err + return sreconcile.ResultEmpty, e } // Configure checkout strategy @@ -333,11 +355,13 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, checkoutStrategy, err := strategy.CheckoutStrategyForImplementation(ctx, git.Implementation(obj.Spec.GitImplementation), checkoutOpts) if err != nil { - ctrl.LoggerFrom(ctx).Error(err, fmt.Sprintf("Failed to configure checkout strategy for Git implementation '%s'", obj.Spec.GitImplementation)) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, - "Failed to configure checkout strategy for Git implementation '%s': %s", obj.Spec.GitImplementation, err) + e := &serror.Stalling{ + Err: fmt.Errorf("failed to configure checkout strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err), + Reason: sourcev1.GitOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, e.Err.Error()) // Do not return err as recovery without changes is impossible - return ctrl.Result{}, nil + return sreconcile.ResultEmpty, e } // Checkout HEAD of reference in object @@ -345,19 +369,20 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, defer cancel() commit, err := checkoutStrategy.Checkout(gitCtx, dir, obj.Spec.URL, authOpts) if err != nil { - 
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, - "Failed to checkout and determine revision: %s", err) - r.Eventf(obj, events.EventSeverityError, sourcev1.GitOperationFailedReason, - "Failed to checkout and determine revision: %s", err) + e := &serror.Event{ + Err: fmt.Errorf("failed to checkout and determine revision: %w", err), + Reason: sourcev1.GitOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, e.Err.Error()) // Coin flip on transient or persistent error, return error and hope for the best - return ctrl.Result{}, err + return sreconcile.ResultEmpty, e } - r.Eventf(obj, events.EventSeverityInfo, sourcev1.GitOperationSucceedReason, - "Cloned repository '%s' and checked out revision '%s'", obj.Spec.URL, commit.String()) + r.eventLogf(ctx, obj, corev1.EventTypeNormal, sourcev1.GitOperationSucceedReason, + "cloned repository '%s' and checked out revision '%s'", obj.Spec.URL, commit.String()) conditions.Delete(obj, sourcev1.FetchFailedCondition) // Verify commit signature - if result, err := r.verifyCommitSignature(ctx, obj, *commit); err != nil || result.IsZero() { + if result, err := r.verifyCommitSignature(ctx, obj, *commit); err != nil || result == sreconcile.ResultEmpty { return result, err } @@ -366,9 +391,11 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, // Mark observations about the revision on the object if !obj.GetArtifact().HasRevision(commit.String()) { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '%s'", commit.String()) + message := fmt.Sprintf("new upstream revision '%s'", commit.String()) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkReconciling(obj, "NewRevision", message) } - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match the @@ -380,84 +407,96 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, // Source ignore patterns are loaded, and the given directory is archived. // On a successful archive, the artifact and includes in the status of the given object are set, and the symlink in the // storage is updated to its path. -// -// The caller should assume a failure if an error is returned, or the Result is zero. 
-func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.GitRepository, artifact sourcev1.Artifact, includes artifactSet, dir string) (ctrl.Result, error) { +func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, includes *artifactSet, dir string) (sreconcile.Result, error) { // Always restore the Ready condition in case it got removed due to a transient error defer func() { - if obj.GetArtifact() != nil { - conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) - } if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) { conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, - "Stored artifact for revision '%s'", artifact.Revision) + "stored artifact for revision '%s'", artifact.Revision) } }() // The artifact is up-to-date if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) { - ctrl.LoggerFrom(ctx).Info(fmt.Sprintf("Already up to date, current revision '%s'", artifact.Revision)) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + r.eventLogf(ctx, obj, corev1.EventTypeNormal, meta.SucceededReason, "already up to date, current revision '%s'", artifact.Revision) + return sreconcile.ResultSuccess, nil } + // Mark reconciling because the artifact and remote source are different. + // and they have to be reconciled. + conditions.MarkReconciling(obj, "NewRevision", "new upstream revision '%s'", artifact.Revision) + // Ensure target path exists and is a directory if f, err := os.Stat(dir); err != nil { - err = fmt.Errorf("failed to stat target path: %w", err) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to stat target path: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + return sreconcile.ResultEmpty, e } else if !f.IsDir() { - err = fmt.Errorf("invalid target path: '%s' is not a directory", dir) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("invalid target path: '%s' is not a directory", dir), + Reason: sourcev1.StorageOperationFailedReason, + } + return sreconcile.ResultEmpty, e } // Ensure artifact directory exists and acquire lock - if err := r.Storage.MkdirAll(artifact); err != nil { - err = fmt.Errorf("failed to create artifact directory: %w", err) - return ctrl.Result{}, err + if err := r.Storage.MkdirAll(*artifact); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to create artifact directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + return sreconcile.ResultEmpty, e } - unlock, err := r.Storage.Lock(artifact) + unlock, err := r.Storage.Lock(*artifact) if err != nil { - err = fmt.Errorf("failed to acquire lock for artifact: %w", err) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to acquire lock for artifact: %w", err), + Reason: meta.FailedReason, + } } defer unlock() // Load ignore rules for archiving ps, err := sourceignore.LoadIgnorePatterns(dir, nil) if err != nil { - r.Eventf(obj, events.EventSeverityError, - "SourceIgnoreError", "Failed to load source ignore patterns from repository: %s", err) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to load source ignore patterns from repository: %w", err), + Reason: "SourceIgnoreError", + } } if obj.Spec.Ignore != nil { ps = append(ps, 
sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) } // Archive directory to storage - if err := r.Storage.Archive(&artifact, dir, SourceIgnoreFilter(ps, nil)); err != nil { - r.Eventf(obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, - "Unable to archive artifact to storage: %s", err) - return ctrl.Result{}, err + if err := r.Storage.Archive(artifact, dir, SourceIgnoreFilter(ps, nil)); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("unable to archive artifact to storage: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } r.AnnotatedEventf(obj, map[string]string{ "revision": artifact.Revision, "checksum": artifact.Checksum, - }, events.EventSeverityInfo, "NewArtifact", "Stored artifact for revision '%s'", artifact.Revision) + }, corev1.EventTypeNormal, "NewArtifact", "stored artifact for revision '%s'", artifact.Revision) // Record it on the object obj.Status.Artifact = artifact.DeepCopy() - obj.Status.IncludedArtifacts = includes + obj.Status.IncludedArtifacts = *includes // Update symlink on a "best effort" basis - url, err := r.Storage.Symlink(artifact, "latest.tar.gz") + url, err := r.Storage.Symlink(*artifact, "latest.tar.gz") if err != nil { - r.Eventf(obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, + r.eventLogf(ctx, obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, "Failed to update status URL symlink: %s", err) } if url != "" { obj.Status.URL = url } - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileInclude reconciles the declared includes from the object by copying their artifact (sub)contents to the @@ -466,42 +505,49 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *so // If an include is unavailable, it marks the object with v1beta1.IncludeUnavailableCondition and returns early. // If the copy operations are successful, it deletes the v1beta1.IncludeUnavailableCondition from the object. // If the artifactSet differs from the current set, it marks the object with v1beta1.ArtifactOutdatedCondition. -// -// The caller should assume a failure if an error is returned, or the Result is zero. 
-func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sourcev1.GitRepository, dir string) (ctrl.Result, error) { +func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, includes *artifactSet, dir string) (sreconcile.Result, error) { artifacts := make(artifactSet, len(obj.Spec.Include)) for i, incl := range obj.Spec.Include { // Do this first as it is much cheaper than copy operations toPath, err := securejoin.SecureJoin(dir, incl.GetToPath()) if err != nil { - conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "IllegalPath", - "Path calculation for include '%s' failed: %s", incl.GitRepositoryRef.Name, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("path calculation for include '%s' failed: %w", incl.GitRepositoryRef.Name, err), + Reason: "IllegalPath", + } + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "IllegalPath", e.Err.Error()) + return sreconcile.ResultEmpty, e } // Retrieve the included GitRepository dep := &sourcev1.GitRepository{} if err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: incl.GitRepositoryRef.Name}, dep); err != nil { - conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NotFound", - "Could not get resource for include '%s': %s", incl.GitRepositoryRef.Name, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("could not get resource for include '%s': %w", incl.GitRepositoryRef.Name, err), + Reason: "NotFound", + } + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NotFound", e.Err.Error()) + return sreconcile.ResultEmpty, err } // Confirm include has an artifact if dep.GetArtifact() == nil { - ctrl.LoggerFrom(ctx).Error(nil, fmt.Sprintf("No artifact available for include '%s'", incl.GitRepositoryRef.Name)) - conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", - "No artifact available for include '%s'", incl.GitRepositoryRef.Name) - return ctrl.Result{}, nil + e := &serror.Stalling{ + Err: fmt.Errorf("no artifact available for include '%s'", incl.GitRepositoryRef.Name), + Reason: "NoArtifact", + } + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", e.Err.Error()) + return sreconcile.ResultEmpty, e } // Copy artifact (sub)contents to configured directory if err := r.Storage.CopyToPath(dep.GetArtifact(), incl.GetFromPath(), toPath); err != nil { - conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "CopyFailure", - "Failed to copy '%s' include from %s to %s: %s", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err.Error()) - r.Eventf(obj, events.EventSeverityError, sourcev1.IncludeUnavailableCondition, - "Failed to copy '%s' include from %s to %s: %s", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("Failed to copy '%s' include from %s to %s: %w", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err), + Reason: "CopyFailure", + } + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "CopyFailure", e.Err.Error()) + return sreconcile.ResultEmpty, e } artifacts[i] = dep.GetArtifact().DeepCopy() } @@ -511,33 +557,34 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sou // Observe if the artifacts still match the previous included ones if artifacts.Diff(obj.Status.IncludedArtifacts) { - 
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", "Included artifacts differ from last observed includes") + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", + "included artifacts differ from last observed includes") } - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileDelete handles the delete of an object. It first garbage collects all artifacts for the object from the // artifact storage, if successful, the finalizer is removed from the object. -func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.GitRepository) (ctrl.Result, error) { +func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.GitRepository) (sreconcile.Result, error) { // Garbage collect the resource's artifacts if err := r.garbageCollect(ctx, obj); err != nil { // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } // Remove our finalizer from the list controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil + return sreconcile.ResultEmpty, nil } // verifyCommitSignature verifies the signature of the given commit if a verification mode is configured on the object. -func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj *sourcev1.GitRepository, commit git.Commit) (ctrl.Result, error) { +func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj *sourcev1.GitRepository, commit git.Commit) (sreconcile.Result, error) { // Check if there is a commit verification is configured and remove any old observations if there is none if obj.Spec.Verification == nil || obj.Spec.Verification.Mode == "" { conditions.Delete(obj, sourcev1.SourceVerifiedCondition) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // Get secret with GPG data @@ -547,9 +594,12 @@ func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj } secret := &corev1.Secret{} if err := r.Client.Get(ctx, publicKeySecret, secret); err != nil { - conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, meta.FailedReason, "PGP public keys secret error: %s", err.Error()) - r.Eventf(obj, events.EventSeverityError, "VerificationError", "PGP public keys secret error: %s", err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("PGP public keys secret error: %w", err), + Reason: "VerificationError", + } + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, meta.FailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } var keyRings []string @@ -558,15 +608,20 @@ func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj } // Verify commit with GPG data from secret if _, err := commit.Verify(keyRings...); err != nil { - conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, meta.FailedReason, "Signature verification of commit '%s' failed: %s", commit.Hash.String(), err) - r.Eventf(obj, events.EventSeverityError, "InvalidCommitSignature", "Signature verification of commit '%s' failed: %s", commit.Hash.String(), err) + e := &serror.Event{ + Err: fmt.Errorf("signature verification of commit '%s' failed: %w", commit.Hash.String(), err), + Reason: "InvalidCommitSignature", + } + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, meta.FailedReason, 
e.Err.Error()) // Return error in the hope the secret changes - return ctrl.Result{}, err + return sreconcile.ResultEmpty, e } - conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "Verified signature of commit '%s'", commit.Hash.String()) - r.Eventf(obj, events.EventSeverityInfo, "VerifiedCommit", "Verified signature of commit '%s'", commit.Hash.String()) - return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, + "verified signature of commit '%s'", commit.Hash.String()) + r.eventLogf(ctx, obj, corev1.EventTypeNormal, "VerifiedCommit", + "verified signature of commit '%s'", commit.Hash.String()) + return sreconcile.ResultSuccess, nil } // garbageCollect performs a garbage collection for the given v1beta1.GitRepository. It removes all but the current @@ -575,23 +630,40 @@ func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.GitRepository) error { if !obj.DeletionTimestamp.IsZero() { if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { - r.Eventf(obj, events.EventSeverityError, "GarbageCollectionFailed", - "Garbage collection for deleted resource failed: %s", err) - return err + return &serror.Event{ + Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err), + Reason: "GarbageCollectionFailed", + } } obj.Status.Artifact = nil // TODO(hidde): we should only push this event if we actually garbage collected something - r.Eventf(obj, events.EventSeverityInfo, "GarbageCollectionSucceeded", - "Garbage collected artifacts for deleted resource") + r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") return nil } if obj.GetArtifact() != nil { if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { - r.Eventf(obj, events.EventSeverityError, "GarbageCollectionFailed", "Garbage collection of old artifacts failed: %s", err) - return err + return &serror.Event{ + Err: fmt.Errorf("garbage collection of old artifacts failed: %w", err), + } } // TODO(hidde): we should only push this event if we actually garbage collected something - r.Eventf(obj, events.EventSeverityInfo, "GarbageCollectionSucceeded", "Garbage collected old artifacts") + r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + "garbage collected old artifacts") } return nil } + +// eventLog records event and logs at the same time. This log is different from +// the debug log in the event recorder in the sense that this is a simple log, +// the event recorder debug log contains complete details about the event. +func (r *GitRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. 
+ if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) + } else { + ctrl.LoggerFrom(ctx).Info(msg) + } + r.Eventf(obj, eventType, reason, msg) +} diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index e90c84989..f29f32aed 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -25,6 +25,7 @@ import ( "testing" "time" + "github.com/darkowlzz/controller-check/status" "github.com/go-git/go-billy/v5/memfs" gogit "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/config" @@ -53,6 +54,7 @@ import ( "github.com/fluxcd/pkg/testserver" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/pkg/git" ) @@ -189,6 +191,11 @@ func TestGitRepositoryReconciler_Reconcile(t *testing.T) { obj.Generation == obj.Status.ObservedGeneration }, timeout).Should(BeTrue()) + // Check if the object status is valid. + condns := &status.Conditions{NegativePolarity: gitRepoReadyDepsNegative} + checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) + checker.CheckErr(ctx, obj) + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) // Wait for GitRepository to be deleted @@ -216,16 +223,17 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { server options secret *corev1.Secret beforeFunc func(obj *sourcev1.GitRepository) - want ctrl.Result + want sreconcile.Result wantErr bool assertConditions []metav1.Condition }{ { name: "HTTP without secretRef makes ArtifactOutdated=True", protocol: "http", - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'master/'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), }, }, { @@ -247,9 +255,10 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'master/'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), }, }, { @@ -271,9 +280,10 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'master/'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), }, }, { @@ -323,7 +333,7 @@ func 
TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "Failed to checkout and determine revision: unable to clone: Certificate"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "failed to checkout and determine revision: unable to clone '': PEM CA bundle could not be appended to x509 certificate pool"), }, }, { @@ -344,9 +354,10 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "private-key"} }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'master/'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), }, }, { @@ -368,9 +379,10 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "private-key"} }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'master/'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), }, }, { @@ -384,7 +396,7 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/non-existing': secrets \"non-existing\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/non-existing': secrets \"non-existing\" not found"), }, }, } @@ -486,9 +498,10 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { } var artifact sourcev1.Artifact + var includes artifactSet dlog := log.NewDelegatingLogSink(log.NullLogSink{}) nullLogger := logr.New(dlog) - got, err := r.reconcileSource(logr.NewContext(ctx, nullLogger), obj, &artifact, tmpDir) + got, err := r.reconcileSource(logr.NewContext(ctx, nullLogger), obj, &artifact, &includes, tmpDir) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) @@ -509,13 +522,13 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) name string skipForImplementation string reference *sourcev1.GitRepositoryRef - want ctrl.Result + want sreconcile.Result wantErr bool wantRevision string }{ { name: "Nil reference (default branch)", - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, wantRevision: "master/", }, { @@ -523,7 +536,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) reference: &sourcev1.GitRepositoryRef{ Branch: 
"staging", }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, wantRevision: "staging/", }, { @@ -531,7 +544,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) reference: &sourcev1.GitRepositoryRef{ Tag: "v0.1.0", }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, wantRevision: "v0.1.0/", }, { @@ -541,7 +554,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) Branch: "staging", Commit: "", }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, wantRevision: "staging/", }, { @@ -551,7 +564,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) Branch: "staging", Commit: "", }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, wantRevision: "HEAD/", }, { @@ -559,7 +572,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) reference: &sourcev1.GitRepositoryRef{ SemVer: "*", }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, wantRevision: "v2.0.0/", }, { @@ -567,7 +580,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) reference: &sourcev1.GitRepositoryRef{ SemVer: "", }, { @@ -576,7 +589,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) SemVer: ">=1.0.0-0 <1.1.0-0", }, wantRevision: "v1.0.0-alpha/", - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, }, } @@ -641,7 +654,8 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) obj.Spec.GitImplementation = i var artifact sourcev1.Artifact - got, err := r.reconcileSource(ctx, obj, &artifact, tmpDir) + var includes artifactSet + got, err := r.reconcileSource(ctx, obj, &artifact, &includes, tmpDir) if err != nil { println(err.Error()) } @@ -665,7 +679,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { includes artifactSet beforeFunc func(obj *sourcev1.GitRepository) afterFunc func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) - want ctrl.Result + want sreconcile.Result wantErr bool assertConditions []metav1.Condition }{ @@ -679,9 +693,10 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) t.Expect(obj.Status.URL).ToNot(BeEmpty()) }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'main/revision'"), }, }, { @@ -697,9 +712,10 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { t.Expect(obj.Status.IncludedArtifacts).ToNot(BeEmpty()) t.Expect(obj.Status.URL).ToNot(BeEmpty()) }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'main/revision'"), }, }, { 
@@ -714,9 +730,9 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { t.Expect(obj.Status.URL).To(BeEmpty()) }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), }, }, { @@ -730,26 +746,10 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) t.Expect(obj.GetArtifact().Checksum).To(Equal("dc95ae14c19d335b693bbba58ae2a562242b0cf33893baffd1b7605ba578e0d6")) }, - want: ctrl.Result{RequeueAfter: interval}, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), - }, - }, - { - name: "Removes ArtifactUnavailableCondition after creating artifact", - dir: "testdata/git/repository", - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "Foo", "") - }, - afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { - t.Expect(obj.GetArtifact()).ToNot(BeNil()) - t.Expect(obj.GetArtifact().Checksum).To(Equal("ef9c34eab0584035ac8b8a4070876954ea46f270250d60648672feef3e943426")) - t.Expect(obj.Status.URL).ToNot(BeEmpty()) - }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'main/revision'"), }, }, { @@ -764,9 +764,10 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { t.Expect(obj.GetArtifact().Checksum).To(Equal("ef9c34eab0584035ac8b8a4070876954ea46f270250d60648672feef3e943426")) t.Expect(obj.Status.URL).ToNot(BeEmpty()) }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'main/revision'"), }, }, { @@ -784,20 +785,27 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { t.Expect(err).NotTo(HaveOccurred()) t.Expect(localPath).To(Equal(targetFile)) }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'main/revision'"), }, }, { name: "Target path does not exists", dir: "testdata/git/foo", 
wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'main/revision'"), + }, }, { name: "Target path is not a directory", dir: "testdata/git/repository/foo.txt", wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'main/revision'"), + }, }, } @@ -824,7 +832,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { artifact := testStorage.NewArtifactFor(obj.Kind, obj, "main/revision", "checksum.tar.gz") - got, err := r.reconcileArtifact(ctx, obj, artifact, tt.includes, tt.dir) + got, err := r.reconcileArtifact(ctx, obj, &artifact, &tt.includes, tt.dir) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) @@ -865,7 +873,7 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { dependencies []dependency includes []include beforeFunc func(obj *sourcev1.GitRepository) - want ctrl.Result + want sreconcile.Result wantErr bool assertConditions []metav1.Condition }{ @@ -891,9 +899,9 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { {name: "a", toPath: "a/", shouldExist: true}, {name: "b", toPath: "b/", shouldExist: true}, }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "IncludeChange", "Included artifacts differ from last observed includes"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "IncludeChange", "included artifacts differ from last observed includes"), }, }, { @@ -903,7 +911,7 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NotFound", "Could not get resource for include 'a': gitrepositories.source.toolkit.fluxcd.io \"a\" not found"), + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NotFound", "could not get resource for include 'a': gitrepositories.source.toolkit.fluxcd.io \"a\" not found"), }, }, { @@ -920,8 +928,9 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { includes: []include{ {name: "a", toPath: "a/"}, }, + wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NoArtifact", "No artifact available for include 'a'"), + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NoArtifact", "no artifact available for include 'a'"), }, }, { @@ -945,7 +954,7 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { beforeFunc: func(obj *sourcev1.GitRepository) { conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", "") }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{}, }, } @@ -980,12 +989,8 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { } r := &GitRepositoryReconciler{ - Client: builder.Build(), - EventRecorder: record.NewFakeRecorder(32), - // Events: helper.Events{ - // Scheme: testEnv.GetScheme(), - // EventRecorder: record.NewFakeRecorder(32), - // }, + Client: builder.Build(), + EventRecorder: record.NewFakeRecorder(32), Storage: storage, requeueDependency: dependencyInterval, } @@ -1018,7 +1023,10 @@ func 
TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { g.Expect(err).NotTo(HaveOccurred()) defer os.RemoveAll(tmpDir) - got, err := r.reconcileInclude(ctx, obj, tmpDir) + var artifact sourcev1.Artifact + var includes artifactSet + + got, err := r.reconcileInclude(ctx, obj, &artifact, &includes, tmpDir) g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) @@ -1065,7 +1073,7 @@ func TestGitRepositoryReconciler_reconcileDelete(t *testing.T) { got, err := r.reconcileDelete(ctx, obj) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(got).To(Equal(ctrl.Result{})) + g.Expect(got).To(Equal(sreconcile.ResultEmpty)) g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse()) g.Expect(obj.Status.Artifact).To(BeNil()) } @@ -1076,7 +1084,7 @@ func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) { secret *corev1.Secret commit git.Commit beforeFunc func(obj *sourcev1.GitRepository) - want ctrl.Result + want sreconcile.Result wantErr bool assertConditions []metav1.Condition }{ @@ -1104,9 +1112,9 @@ func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) { }, } }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "Verified signature of commit 'shasum'"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of commit 'shasum'"), }, }, { @@ -1132,7 +1140,7 @@ func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, meta.FailedReason, "Signature verification of commit 'shasum' failed: failed to verify commit with any of the given key rings"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, meta.FailedReason, "signature verification of commit 'shasum' failed: failed to verify commit with any of the given key rings"), }, }, { @@ -1157,7 +1165,7 @@ func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) { obj.Spec.Interval = metav1.Duration{Duration: interval} conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{}, }, { @@ -1167,7 +1175,7 @@ func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) { obj.Spec.Verification = &sourcev1.GitRepositoryVerification{} conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{}, }, } @@ -1233,7 +1241,7 @@ func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { name: "no condition", want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "Stored artifact for revision"), + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), }, }, { @@ -1243,7 +1251,7 @@ func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { }, want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "Stored artifact for revision"), + *conditions.TrueCondition(meta.ReadyCondition, 
"Succeeded", "stored artifact for revision"), }, }, { @@ -1253,7 +1261,7 @@ func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { }, want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "Stored artifact for revision"), + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), }, }, { @@ -1263,11 +1271,10 @@ func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "Foo", "") conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") - conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "Foo", "") }, want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "Stored artifact for revision"), + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), }, }, { @@ -1278,7 +1285,7 @@ func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { }, want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "Stored artifact for revision"), + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), }, }, { @@ -1289,7 +1296,7 @@ func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { }, want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "Stored artifact for revision"), + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), }, }, } diff --git a/go.mod b/go.mod index 262953542..6664ab816 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/Masterminds/semver/v3 v3.1.1 github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 github.com/cyphar/filepath-securejoin v0.2.2 + github.com/darkowlzz/controller-check v0.0.0-20220119215126-648356cef22c github.com/elazarl/goproxy v0.0.0-20211114080932-d06c3be7c11b github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 github.com/fluxcd/pkg/gittestserver v0.5.0 @@ -115,6 +116,7 @@ require ( github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect github.com/klauspost/compress v1.13.5 // indirect github.com/klauspost/cpuid v1.3.1 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/lib/pq v1.10.0 // indirect diff --git a/go.sum b/go.sum index c297abfa3..48bb6d3e8 100644 --- a/go.sum +++ b/go.sum @@ -232,6 +232,8 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/darkowlzz/controller-check v0.0.0-20220119215126-648356cef22c h1:pyp/Dvd1gYP/D3z1zs46h0YhYzFp0hjxw0XVIO9+vh4= +github.com/darkowlzz/controller-check v0.0.0-20220119215126-648356cef22c/go.mod h1:haYO9UW76kUUKpIBbv3ydaU5wZ/7r0yqp61PGzVRSYU= github.com/davecgh/go-spew 
v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -622,6 +624,8 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= From 2acb721c35ec506a9b866933f1979e560854b41c Mon Sep 17 00:00:00 2001 From: Sunny Date: Mon, 24 Jan 2022 22:29:10 +0530 Subject: [PATCH 28/63] gitrepo: Use internal/util for creating temp dir Signed-off-by: Sunny --- controllers/gitrepository_controller.go | 3 ++- controllers/gitrepository_controller_test.go | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index ba7532a11..58519306a 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -48,6 +48,7 @@ import ( sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" serror "github.com/fluxcd/source-controller/internal/error" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/util" "github.com/fluxcd/source-controller/pkg/git" "github.com/fluxcd/source-controller/pkg/git/strategy" ) @@ -234,7 +235,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.G var includes artifactSet // Create temp dir for Git clone - tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name)) + tmpDir, err := util.TempDirForObj("", obj) if err != nil { return sreconcile.ResultEmpty, &serror.Event{ Err: fmt.Errorf("failed to create temporary directory: %w", err), diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index f29f32aed..6a0325d54 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -708,7 +708,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { }, afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) - t.Expect(obj.GetArtifact().Checksum).To(Equal("ef9c34eab0584035ac8b8a4070876954ea46f270250d60648672feef3e943426")) + t.Expect(obj.GetArtifact().Checksum).To(Equal("60a3bf69f337cb5ec9ebd00abefbb6e7f2a2cf27158ecf438d52b2035b184172")) t.Expect(obj.Status.IncludedArtifacts).ToNot(BeEmpty()) t.Expect(obj.Status.URL).ToNot(BeEmpty()) }, @@ -744,7 +744,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { }, afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) - 
t.Expect(obj.GetArtifact().Checksum).To(Equal("dc95ae14c19d335b693bbba58ae2a562242b0cf33893baffd1b7605ba578e0d6")) + t.Expect(obj.GetArtifact().Checksum).To(Equal("11f7f007dce5619bd79e6c57688261058d09f5271e802463ac39f2b9ead7cabd")) }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ @@ -761,7 +761,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { }, afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) - t.Expect(obj.GetArtifact().Checksum).To(Equal("ef9c34eab0584035ac8b8a4070876954ea46f270250d60648672feef3e943426")) + t.Expect(obj.GetArtifact().Checksum).To(Equal("60a3bf69f337cb5ec9ebd00abefbb6e7f2a2cf27158ecf438d52b2035b184172")) t.Expect(obj.Status.URL).ToNot(BeEmpty()) }, want: sreconcile.ResultSuccess, From c4fa79c85ee5e349fd61de173992965f93a678e2 Mon Sep 17 00:00:00 2001 From: Sunny Date: Wed, 26 Jan 2022 21:08:13 +0530 Subject: [PATCH 29/63] gitrepo: Fix reconcileInclude() includes The artifacts built in reconcileInclude() should be persisted by writing it to includes. reconcileArtifact() later adds includes to the git repo object status and persists it. Signed-off-by: Sunny --- controllers/gitrepository_controller.go | 3 +++ controllers/gitrepository_controller_test.go | 3 +++ 2 files changed, 6 insertions(+) diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 58519306a..204823b4d 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -561,6 +561,9 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sou conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", "included artifacts differ from last observed includes") } + + // Persist the artifactSet. 
+	*includes = artifacts
 	return sreconcile.ResultSuccess, nil
 }
 
diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go
index 6a0325d54..792495a86 100644
--- a/controllers/gitrepository_controller_test.go
+++ b/controllers/gitrepository_controller_test.go
@@ -1029,6 +1029,9 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) {
 			got, err := r.reconcileInclude(ctx, obj, &artifact, &includes, tmpDir)
 			g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
 			g.Expect(err != nil).To(Equal(tt.wantErr))
+			if err == nil {
+				g.Expect(len(includes)).To(Equal(len(tt.includes)))
+			}
 			g.Expect(got).To(Equal(tt.want))
 			for _, i := range tt.includes {
 				if i.toPath != "" {

From 52f4a2a8005a905c6fe88e82bcf7db00c23538eb Mon Sep 17 00:00:00 2001
From: Sunny
Date: Wed, 24 Nov 2021 22:08:52 +0530
Subject: [PATCH 30/63] bucket: Replace GetInterval() with GetRequeueAfter()

Signed-off-by: Sunny
---
 controllers/bucket_controller.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go
index a819f09e4..44b9ee034 100644
--- a/controllers/bucket_controller.go
+++ b/controllers/bucket_controller.go
@@ -171,10 +171,10 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
 
 	log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s",
 		time.Since(start).String(),
-		bucket.GetInterval().Duration.String(),
+		bucket.GetRequeueAfter().String(),
 	))
 
-	return ctrl.Result{RequeueAfter: bucket.GetInterval().Duration}, nil
+	return ctrl.Result{RequeueAfter: bucket.GetRequeueAfter()}, nil
 }
 
 func (r *BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket) (sourcev1.Bucket, error) {

From 89ba8374b673cd30da9adf242ecab77842026a75 Mon Sep 17 00:00:00 2001
From: Hidde Beydals
Date: Sat, 31 Jul 2021 03:58:43 +0200
Subject: [PATCH 31/63] Rewrite `BucketReconciler` to new standards

This commit rewrites the `BucketReconciler` to new standards, while
implementing the newly introduced Condition types, and trying to adhere
better to Kubernetes API conventions. More specifically it introduces:

- Implementation of more explicit Condition types to highlight
  abnormalities.
- Extensive usage of the `conditions` subpackage from `runtime`.
- Better and more conflict-resilient (status)patching of reconciled
  objects using the `patch` subpackage from runtime.
- Proper implementation of kstatus' `Reconciling` and `Stalled`
  conditions.
- Refactor of reconciler logic, including more efficient detection of
  changes to bucket objects by making use of the etag data available,
  and downloading of object files in parallel with a limited number of
  workers (4).
- Integration tests that solely rely on `testenv` and do not use
  Ginkgo.

There are a couple of TODOs marked in-code; these are suggestions for
the future and should be non-blocking. In addition to the TODOs, more
complex and/or edge-case test scenarios may be added as well.
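A minimal sketch of the bounded-download idea from the last bullet,
using the `errgroup` and `semaphore` packages this rewrite depends on.
The `fetchObjects` helper and its `fetch` callback are hypothetical
stand-ins for the provider-specific client call; this is an
illustration, not code from this patch:

    package sketch

    import (
        "context"
        "path/filepath"

        "golang.org/x/sync/errgroup"
        "golang.org/x/sync/semaphore"
    )

    // fetchObjects downloads the given object keys into dir using at
    // most four concurrent workers. The fetch callback is a placeholder
    // for the provider-specific download call.
    func fetchObjects(ctx context.Context, dir string, keys []string,
        fetch func(ctx context.Context, key, localPath string) error) error {
        group, groupCtx := errgroup.WithContext(ctx)
        sem := semaphore.NewWeighted(4) // at most 4 downloads in flight
        for _, key := range keys {
            // Wait for a free worker slot; Acquire fails once groupCtx is
            // cancelled, e.g. because another download returned an error.
            if err := sem.Acquire(groupCtx, 1); err != nil {
                break
            }
            key := key
            group.Go(func() error {
                defer sem.Release(1)
                return fetch(groupCtx, key, filepath.Join(dir, key))
            })
        }
        // Wait for all scheduled downloads; the first error cancels the rest.
        return group.Wait()
    }

The sketch only covers the concurrency cap; the actual download path in
this change also honours ignore rules and uses the etag data mentioned
above to detect changes.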
Signed-off-by: Hidde Beydals --- api/v1beta2/bucket_types.go | 60 +- controllers/bucket_controller.go | 956 +++++++++++++++----------- controllers/bucket_controller_test.go | 677 +++++++++++++++++- controllers/suite_test.go | 9 + main.go | 9 +- 5 files changed, 1241 insertions(+), 470 deletions(-) diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go index 4626f1693..d074fc60b 100644 --- a/api/v1beta2/bucket_types.go +++ b/api/v1beta2/bucket_types.go @@ -19,12 +19,10 @@ package v1beta2 import ( "time" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -32,6 +30,19 @@ const ( BucketKind = "Bucket" ) +const ( + GenericBucketProvider string = "generic" + AmazonBucketProvider string = "aws" + GoogleBucketProvider string = "gcp" +) + +const ( + // DownloadFailedCondition indicates a transient or persistent download failure. If True, observations on the + // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + DownloadFailedCondition string = "DownloadFailed" +) + // BucketSpec defines the desired state of an S3 compatible bucket type BucketSpec struct { // The S3 compatible storage provider name, default ('generic'). @@ -85,12 +96,6 @@ type BucketSpec struct { AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` } -const ( - GenericBucketProvider string = "generic" - AmazonBucketProvider string = "aws" - GoogleBucketProvider string = "gcp" -) - // BucketStatus defines the observed state of a bucket type BucketStatus struct { // ObservedGeneration is the last observed generation. @@ -122,45 +127,6 @@ const ( BucketOperationFailedReason string = "BucketOperationFailed" ) -// BucketProgressing resets the conditions of the Bucket to metav1.Condition of -// type meta.ReadyCondition with status 'Unknown' and meta.ProgressingReason -// reason and message. It returns the modified Bucket. -func BucketProgressing(bucket Bucket) Bucket { - bucket.Status.ObservedGeneration = bucket.Generation - bucket.Status.URL = "" - bucket.Status.Conditions = []metav1.Condition{} - conditions.MarkUnknown(&bucket, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") - return bucket -} - -// BucketReady sets the given Artifact and URL on the Bucket and sets the -// meta.ReadyCondition to 'True', with the given reason and message. It returns -// the modified Bucket. -func BucketReady(bucket Bucket, artifact Artifact, url, reason, message string) Bucket { - bucket.Status.Artifact = &artifact - bucket.Status.URL = url - conditions.MarkTrue(&bucket, meta.ReadyCondition, reason, message) - return bucket -} - -// BucketNotReady sets the meta.ReadyCondition on the Bucket to 'False', with -// the given reason and message. It returns the modified Bucket. -func BucketNotReady(bucket Bucket, reason, message string) Bucket { - conditions.MarkFalse(&bucket, meta.ReadyCondition, reason, message) - return bucket -} - -// BucketReadyMessage returns the message of the metav1.Condition of type -// meta.ReadyCondition with status 'True' if present, or an empty string. 
-func BucketReadyMessage(bucket Bucket) string { - if c := apimeta.FindStatusCondition(bucket.Status.Conditions, meta.ReadyCondition); c != nil { - if c.Status == metav1.ConditionTrue { - return c.Message - } - } - return "" -} - // GetConditions returns the status conditions of the object. func (in Bucket) GetConditions() []metav1.Condition { return in.Status.Conditions diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 44b9ee034..e97f97b23 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -18,24 +18,27 @@ package controllers import ( "context" - "crypto/sha1" + "crypto/sha256" "fmt" "os" "path/filepath" + "sort" "strings" "time" + gcpstorage "cloud.google.com/go/storage" + "github.com/fluxcd/source-controller/pkg/gcp" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/s3utils" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -43,10 +46,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" - "github.com/fluxcd/source-controller/pkg/gcp" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/pkg/sourceignore" @@ -60,11 +63,10 @@ import ( // BucketReconciler reconciles a Bucket object type BucketReconciler struct { client.Client - Scheme *runtime.Scheme - Storage *Storage - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder + kuberecorder.EventRecorder + helper.Metrics + + Storage *Storage } type BucketReconcilerOptions struct { @@ -83,519 +85,683 @@ func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts Buc Complete(r) } -func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := ctrl.LoggerFrom(ctx) - var bucket sourcev1.Bucket - if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil { + // Fetch the Bucket + obj := &sourcev1.Bucket{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } // Record suspended status metric - defer r.recordSuspension(ctx, bucket) - - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&bucket, sourcev1.SourceFinalizer) { - patch := client.MergeFrom(bucket.DeepCopy()) - controllerutil.AddFinalizer(&bucket, sourcev1.SourceFinalizer) - if err := r.Patch(ctx, &bucket, patch); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } + r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - // Examine if the object is 
under deletion - if !bucket.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, bucket) - } - - // Return early if the object is suspended. - if bucket.Spec.Suspend { + // Return early if the object is suspended + if obj.Spec.Suspend { log.Info("Reconciliation is suspended for this object") return ctrl.Result{}, nil } - // record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - return ctrl.Result{}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, start) - } - - // set initial status - if resetBucket, ok := r.resetStatus(bucket); ok { - bucket = resetBucket - if err := r.updateStatus(ctx, req, bucket.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, bucket) - } - - // record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. - if v, ok := meta.ReconcileAnnotationValue(bucket.GetAnnotations()); ok { - bucket.Status.SetLastHandledReconcileRequest(v) - } - - // purge old artifacts from storage - if err := r.gc(bucket); err != nil { - log.Error(err, "unable to purge old artifacts") + // Initialize the patch helper + patchHelper, err := patch.NewHelper(obj, r.Client) + if err != nil { + return ctrl.Result{}, err } - // reconcile bucket by downloading its content - reconciledBucket, reconcileErr := r.reconcile(ctx, *bucket.DeepCopy()) - - // update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledBucket.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } + // Always attempt to patch the object and status after each reconciliation + defer func() { + // Record the value of the reconciliation request, if any + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + obj.Status.SetLastHandledReconcileRequest(v) + } + + // Summarize the Ready condition based on abnormalities that may have been observed + conditions.SetSummary(obj, + meta.ReadyCondition, + conditions.WithConditions( + sourcev1.ArtifactOutdatedCondition, + sourcev1.DownloadFailedCondition, + sourcev1.ArtifactUnavailableCondition, + ), + conditions.WithNegativePolarityConditions( + sourcev1.ArtifactOutdatedCondition, + sourcev1.DownloadFailedCondition, + sourcev1.ArtifactUnavailableCondition, + ), + ) + + // Patch the object, ignoring conflicts on the conditions owned by this controller + patchOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.DownloadFailedCondition, + sourcev1.ArtifactUnavailableCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + }, + } + + // Determine if the resource is still being reconciled, or if it has stalled, and record this observation + if retErr == nil && (result.IsZero() || !result.Requeue) { + // We are no longer reconciling + conditions.Delete(obj, meta.ReconcilingCondition) + + // We have now observed this generation + patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) + + readyCondition := conditions.Get(obj, meta.ReadyCondition) + switch readyCondition.Status { + case metav1.ConditionFalse: + // As we are no longer reconciling and the end-state is not ready, the reconciliation has stalled + conditions.MarkStalled(obj, 
readyCondition.Reason, readyCondition.Message) + case metav1.ConditionTrue: + // As we are no longer reconciling and the end-state is ready, the reconciliation is no longer stalled + conditions.Delete(obj, meta.StalledCondition) + } + } + + // Finally, patch the resource + if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + retErr = kerrors.NewAggregate([]error{retErr, err}) + } + + // Always record readiness and duration metrics + r.Metrics.RecordReadiness(ctx, obj) + r.Metrics.RecordDuration(ctx, obj, start) + }() - // if reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledBucket, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledBucket) - return ctrl.Result{Requeue: true}, reconcileErr + // Add finalizer first if not exist to avoid the race condition between init and delete + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + return ctrl.Result{Requeue: true}, nil } - // emit revision change event - if bucket.Status.Artifact == nil || reconciledBucket.Status.Artifact.Revision != bucket.Status.Artifact.Revision { - r.event(ctx, reconciledBucket, events.EventSeverityInfo, sourcev1.BucketReadyMessage(reconciledBucket)) + // Examine if the object is under deletion + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, obj) } - r.recordReadiness(ctx, reconciledBucket) - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Since(start).String(), - bucket.GetRequeueAfter().String(), - )) - - return ctrl.Result{RequeueAfter: bucket.GetRequeueAfter()}, nil + // Reconcile actual object + return r.reconcile(ctx, obj) } -func (r *BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket) (sourcev1.Bucket, error) { - log := ctrl.LoggerFrom(ctx) - var err error - var sourceBucket sourcev1.Bucket - - tempDir, err := os.MkdirTemp("", bucket.Name) - if err != nil { - err = fmt.Errorf("tmp dir error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer func() { - if err := os.RemoveAll(tempDir); err != nil { - log.Error(err, "failed to remove working directory", "path", tempDir) - } - }() +// reconcile steps through the actual reconciliation tasks for the object, it returns early on the first step that +// produces an error. 
+func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { + // Mark the resource as under reconciliation + conditions.MarkReconciling(obj, meta.ProgressingReason, "") - if bucket.Spec.Provider == sourcev1.GoogleBucketProvider { - sourceBucket, err = r.reconcileWithGCP(ctx, bucket, tempDir) - if err != nil { - return sourceBucket, err - } - } else { - sourceBucket, err = r.reconcileWithMinio(ctx, bucket, tempDir) - if err != nil { - return sourceBucket, err - } - } - revision, err := r.checksum(tempDir) - if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + // Reconcile the storage data + if result, err := r.reconcileStorage(ctx, obj); err != nil || result.IsZero() { + return result, err } - // return early on unchanged revision - artifact := r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), revision, fmt.Sprintf("%s.tar.gz", revision)) - if apimeta.IsStatusConditionTrue(bucket.Status.Conditions, meta.ReadyCondition) && bucket.GetArtifact().HasRevision(artifact.Revision) { - if artifact.URL != bucket.GetArtifact().URL { - r.Storage.SetArtifactURL(bucket.GetArtifact()) - bucket.Status.URL = r.Storage.SetHostname(bucket.Status.URL) - } - return bucket, nil - } - - // create artifact dir - err = r.Storage.MkdirAll(artifact) + // Create temp working dir + tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name)) if err != nil { - err = fmt.Errorf("mkdir dir error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, "Failed to create temporary directory: %s", err) + return ctrl.Result{}, err } + defer os.RemoveAll(tmpDir) - // acquire lock - unlock, err := r.Storage.Lock(artifact) - if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + // Reconcile the source from upstream + var artifact sourcev1.Artifact + if result, err := r.reconcileSource(ctx, obj, &artifact, tmpDir); err != nil || result.IsZero() { + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, err } - defer unlock() - // archive artifact and check integrity - if err := r.Storage.Archive(&artifact, tempDir, nil); err != nil { - err = fmt.Errorf("storage archive error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + // Reconcile the artifact to storage + if result, err := r.reconcileArtifact(ctx, obj, artifact, tmpDir); err != nil || result.IsZero() { + return result, err } - // update latest symlink - url, err := r.Storage.Symlink(artifact, "latest.tar.gz") - if err != nil { - err = fmt.Errorf("storage symlink error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil +} - message := fmt.Sprintf("Fetched revision: %s", artifact.Revision) - return sourcev1.BucketReady(bucket, artifact, url, sourcev1.BucketOperationSucceedReason, message), nil +// reconcileStorage ensures the current state of the storage matches the desired and previously observed state. +// +// All artifacts for the resource except for the current one are garbage collected from the storage. 
+// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. +// If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. +// If the hostname of the URLs on the object do not match the current storage server hostname, they are updated. +// +// The caller should assume a failure if an error is returned, or the Result is zero. +func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) + + // Determine if the advertised artifact is still in storage + if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { + obj.Status.Artifact = nil + obj.Status.URL = "" + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage") + return ctrl.Result{Requeue: true}, nil + } + conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) + + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) + + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -func (r *BucketReconciler) reconcileDelete(ctx context.Context, bucket sourcev1.Bucket) (ctrl.Result, error) { - if err := r.gc(bucket); err != nil { - r.event(ctx, bucket, events.EventSeverityError, - fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) - // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err +// reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the +// result. +// If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret +// fails, it records v1beta1.DownloadFailedCondition=True and returns early. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { + var secret corev1.Secret + if obj.Spec.SecretRef != nil { + secretName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.SecretRef.Name, + } + if err := r.Get(ctx, secretName, &secret); err != nil { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, + "Failed to get secret '%s': %s", secretName.String(), err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.AuthenticationFailedReason, + "Failed to get secret '%s': %s", secretName.String(), err.Error()) + // Return error as the world as observed may change + return ctrl.Result{}, err + } } - // Record deleted status - r.recordReadiness(ctx, bucket) - - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&bucket, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &bucket); err != nil { - return ctrl.Result{}, err + switch obj.Spec.Provider { + case sourcev1.GoogleBucketProvider: + return r.reconcileGCPSource(ctx, obj, artifact, &secret, dir) + default: + return r.reconcileMinioSource(ctx, obj, artifact, &secret, dir) } - - // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil } -// reconcileWithGCP handles getting objects from a Google Cloud Platform bucket -// using a gcp client -func (r *BucketReconciler) reconcileWithGCP(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) { - log := ctrl.LoggerFrom(ctx) - gcpClient, err := r.authGCP(ctx, bucket) +// reconcileMinioSource ensures the upstream Minio client compatible bucket can be reached and downloaded from using the +// declared configuration, and observes its state. +// +// The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into +// account. In case of an error during the download process (including transient errors), it records +// v1beta1.DownloadFailedCondition=True and returns early. +// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to +// the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. +// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, + secret *corev1.Secret, dir string) (ctrl.Result, error) { + // Build the client with the configuration from the object and secret + s3Client, err := r.buildMinioClient(obj, secret) if err != nil { - err = fmt.Errorf("auth error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to construct S3 client: %s", err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to construct S3 client: %s", err.Error()) + // Return error as the contents of the secret may change + return ctrl.Result{}, err } - defer gcpClient.Close(log) - ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration) + // Confirm bucket exists + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() - - exists, err := gcpClient.BucketExists(ctxTimeout, bucket.Spec.BucketName) + exists, err := s3Client.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } if !exists { - err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + return ctrl.Result{}, fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName) } - // Look for file with ignore rules first. 
- path := filepath.Join(tempDir, sourceignore.IgnoreFile) - if err := gcpClient.FGetObject(ctxTimeout, bucket.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { - if err == gcp.ErrorObjectDoesNotExist && sourceignore.IgnoreFile != ".sourceignore" { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + // Look for file with ignore rules first + path := filepath.Join(dir, sourceignore.IgnoreFile) + if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { + if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } // In-spec patterns take precedence - if bucket.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...) + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) 
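
As the comments above note, ignore rules are layered: patterns read from an optional .sourceignore object at the bucket root come first, and any in-spec .spec.ignore patterns are appended afterwards, so the in-spec rules take precedence when both match a key. A rough sketch of that layering using the same helper calls seen in this hunk; the import path of the sourceignore package is assumed here and may not match the module layout exactly:

package main

import (
	"fmt"
	"strings"

	"github.com/fluxcd/source-controller/pkg/sourceignore"
)

func main() {
	// Rules that would normally be read from a downloaded .sourceignore object
	// (via sourceignore.ReadIgnoreFile in the controller) are inlined here.
	ps := sourceignore.ReadPatterns(strings.NewReader("docs/**\n*.md\n"), nil)

	// In-spec patterns are appended afterwards; gitignore evaluation lets later
	// patterns override earlier ones, which is what gives them precedence.
	ps = append(ps, sourceignore.ReadPatterns(strings.NewReader("!README.md\n"), nil)...)

	matcher := sourceignore.NewMatcher(ps)

	// Object keys are matched as slash-separated path elements with isDir=false,
	// mirroring how the bucket reconcilers call matcher.Match.
	for _, key := range []string{"README.md", "docs/manual.md", "manifests/app.yaml"} {
		fmt.Printf("%s ignored: %v\n", key, matcher.Match(strings.Split(key, "/"), false))
	}
}
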
} matcher := sourceignore.NewMatcher(ps) - objects := gcpClient.ListObjects(ctxTimeout, bucket.Spec.BucketName, nil) - // download bucket content - for { - object, err := objects.Next() - if err == gcp.IteratorDone { - break - } - if err != nil { - err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + + // Build up an index of object keys and their etags + // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to + // detect both structural and file changes + var index = make(etagIndex) + for object := range s3Client.ListObjects(ctxTimeout, obj.Spec.BucketName, minio.ListObjectsOptions{ + Recursive: true, + UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), + }) { + if err = object.Err; err != nil { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } - if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { + // Ignore directories and the .sourceignore file + if strings.HasSuffix(object.Key, "/") || object.Key == sourceignore.IgnoreFile { continue } - - if matcher.Match(strings.Split(object.Name, "/"), false) { + // Ignore matches + if matcher.Match(strings.Split(object.Key, "/"), false) { continue } - localPath := filepath.Join(tempDir, object.Name) - if err = gcpClient.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Name, localPath); err != nil { - err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + index[object.Key] = object.ETag + } + + // Calculate revision checksum from the collected index values + revision, err := index.Revision() + if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") + return ctrl.Result{}, err + } + + if !obj.GetArtifact().HasRevision(revision) { + // Mark observations about the revision on the object + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", + "New upstream revision '%s'", revision) + + // Download the files in parallel, but with a limited number of workers + group, groupCtx := errgroup.WithContext(ctx) + group.Go(func() error { + const workers = 4 + sem := semaphore.NewWeighted(workers) + for key := range index { + k := key + if err := sem.Acquire(groupCtx, 1); err != nil { + return err + } + group.Go(func() error { + defer sem.Release(1) + localPath := filepath.Join(dir, k) + if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, k, localPath, minio.GetObjectOptions{}); err != nil { + return fmt.Errorf("failed to get '%s' file: %w", k, err) + } + return nil + }) + } + return nil + }) + if err = group.Wait(); err != nil { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) + return ctrl.Result{}, err } + r.Eventf(obj, corev1.EventTypeNormal, 
sourcev1.BucketOperationSucceedReason, + "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - return sourcev1.Bucket{}, nil + conditions.Delete(obj, sourcev1.DownloadFailedCondition) + + // Create potential new artifact + *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -// reconcileWithMinio handles getting objects from an S3 compatible bucket -// using a minio client -func (r *BucketReconciler) reconcileWithMinio(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) { - s3Client, err := r.authMinio(ctx, bucket) +// reconcileGCPSource ensures the upstream Google Cloud Storage bucket can be reached and downloaded from using the +// declared configuration, and observes its state. +// +// The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into +// account. In case of an error during the download process (including transient errors), it records +// v1beta1.DownloadFailedCondition=True and returns early. +// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to +// the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. +// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. +// +// The caller should assume a failure if an error is returned, or the Result is zero. +func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, + secret *corev1.Secret, dir string) (ctrl.Result, error) { + gcpClient, err := r.buildGCPClient(ctx, secret) if err != nil { - err = fmt.Errorf("auth error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to construct GCP client: %s", err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to construct GCP client: %s", err.Error()) + // Return error as the contents of the secret may change + return ctrl.Result{}, err } + defer gcpClient.Close(ctrl.LoggerFrom(ctx)) - ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration) + // Confirm bucket exists + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() - - exists, err := s3Client.BucketExists(ctxTimeout, bucket.Spec.BucketName) + exists, err := gcpClient.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } if !exists { - err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + return ctrl.Result{}, 
fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName) } // Look for file with ignore rules first - // NB: S3 has flat filepath keys making it impossible to look - // for files in "subdirectories" without building up a tree first. - path := filepath.Join(tempDir, sourceignore.IgnoreFile) - if err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { - if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + path := filepath.Join(dir, sourceignore.IgnoreFile) + if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { + if err != gcpstorage.ErrObjectNotExist { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } // In-spec patterns take precedence - if bucket.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...) + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) 
} matcher := sourceignore.NewMatcher(ps) - // download bucket content - for object := range s3Client.ListObjects(ctxTimeout, bucket.Spec.BucketName, minio.ListObjectsOptions{ - Recursive: true, - UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), - }) { - if object.Err != nil { - err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, object.Err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + // Build up an index of object keys and their etags + // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to + // detect both structural and file changes + var index = make(etagIndex) + objects := gcpClient.ListObjects(ctxTimeout, obj.Spec.BucketName, nil) + for { + object, err := objects.Next() + if err != nil { + if err == gcp.IteratorDone { + break + } + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } - if strings.HasSuffix(object.Key, "/") || object.Key == sourceignore.IgnoreFile { + if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { continue } - if matcher.Match(strings.Split(object.Key, "/"), false) { + if matcher.Match(strings.Split(object.Name, "/"), false) { continue } - localPath := filepath.Join(tempDir, object.Key) - err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Key, localPath, minio.GetObjectOptions{}) - if err != nil { - err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err - } + index[object.Name] = object.Etag } - return sourcev1.Bucket{}, nil -} -// authGCP creates a new Google Cloud Platform storage client -// to interact with the storage service. 
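
The Minio reconciler above, and the GCP reconciler being rewritten here, both fetch the indexed objects with the same bounded-concurrency pattern: an errgroup propagates the first error, while a weighted semaphore caps the number of in-flight downloads at four workers. A stripped-down, self-contained sketch of that pattern; fetchOne stands in for the provider-specific FGetObject call:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

// downloadAll fetches every key with at most four concurrent fetchOne calls,
// mirroring the errgroup plus semaphore pattern used by the bucket reconcilers.
func downloadAll(ctx context.Context, keys []string, fetchOne func(ctx context.Context, key string) error) error {
	const workers = 4
	sem := semaphore.NewWeighted(workers)
	group, groupCtx := errgroup.WithContext(ctx)
	for _, key := range keys {
		k := key // capture the loop variable for the goroutine below
		if err := sem.Acquire(groupCtx, 1); err != nil {
			return err
		}
		group.Go(func() error {
			defer sem.Release(1)
			return fetchOne(groupCtx, k)
		})
	}
	// Wait blocks until all downloads return and yields the first error, if any.
	return group.Wait()
}

func main() {
	err := downloadAll(context.Background(), []string{"a.yaml", "b.yaml", "c.yaml"},
		func(_ context.Context, key string) error {
			fmt.Println("fetched", key)
			return nil
		})
	fmt.Println("err:", err)
}

Compared to firing one goroutine per key, the semaphore keeps the number of open connections and temporary buffers bounded even for buckets with a very large number of objects.
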
-func (r *BucketReconciler) authGCP(ctx context.Context, bucket sourcev1.Bucket) (*gcp.GCPClient, error) { - var client *gcp.GCPClient - var err error - if bucket.Spec.SecretRef != nil { - secretName := types.NamespacedName{ - Namespace: bucket.GetNamespace(), - Name: bucket.Spec.SecretRef.Name, - } + // Calculate revision checksum from the collected index values + revision, err := index.Revision() + if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") + return ctrl.Result{}, err + } - var secret corev1.Secret - if err := r.Get(ctx, secretName, &secret); err != nil { - return nil, fmt.Errorf("credentials secret error: %w", err) - } - if err := gcp.ValidateSecret(secret.Data, secret.Name); err != nil { - return nil, err - } - client, err = gcp.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"])) - if err != nil { - return nil, err - } - } else { - client, err = gcp.NewClient(ctx) - if err != nil { - return nil, err + if !obj.GetArtifact().HasRevision(revision) { + // Mark observations about the revision on the object + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", + "New upstream revision '%s'", revision) + + // Download the files in parallel, but with a limited number of workers + group, groupCtx := errgroup.WithContext(ctx) + group.Go(func() error { + const workers = 4 + sem := semaphore.NewWeighted(workers) + for key := range index { + k := key + if err := sem.Acquire(groupCtx, 1); err != nil { + return err + } + group.Go(func() error { + defer sem.Release(1) + localPath := filepath.Join(dir, k) + if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, k, localPath); err != nil { + return fmt.Errorf("failed to get '%s' file: %w", k, err) + } + return nil + }) + } + return nil + }) + if err = group.Wait(); err != nil { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) + return ctrl.Result{}, err } + r.Eventf(obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason, + "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - return client, nil + conditions.Delete(obj, sourcev1.DownloadFailedCondition) + // Create potential new artifact + *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -// authMinio creates a new Minio client to interact with S3 -// compatible storage services. -func (r *BucketReconciler) authMinio(ctx context.Context, bucket sourcev1.Bucket) (*minio.Client, error) { - opt := minio.Options{ - Region: bucket.Spec.Region, - Secure: !bucket.Spec.Insecure, - } - - if bucket.Spec.SecretRef != nil { - secretName := types.NamespacedName{ - Namespace: bucket.GetNamespace(), - Name: bucket.Spec.SecretRef.Name, +// reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match the +// given data. +// +// The inspection of the given data to the object is differed, ensuring any stale observations as +// v1beta1.ArtifactUnavailableCondition and v1beta1.ArtifactOutdatedCondition are always deleted. +// If the given artifact does not differ from the object's current, it returns early. 
+// On a successful archive, the artifact in the status of the given object is set, and the symlink in the storage is +// updated to its path. +// +// The caller should assume a failure if an error is returned, or the Result is zero. +func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) (ctrl.Result, error) { + // Always restore the Ready condition in case it got removed due to a transient error + defer func() { + if obj.GetArtifact() != nil { + conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) } - - var secret corev1.Secret - if err := r.Get(ctx, secretName, &secret); err != nil { - return nil, fmt.Errorf("credentials secret error: %w", err) + if obj.GetArtifact().HasRevision(artifact.Revision) { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, + "Stored artifact for revision '%s'", artifact.Revision) } + }() - accesskey := "" - secretkey := "" - if k, ok := secret.Data["accesskey"]; ok { - accesskey = string(k) - } - if k, ok := secret.Data["secretkey"]; ok { - secretkey = string(k) - } - if accesskey == "" || secretkey == "" { - return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) - } - opt.Creds = credentials.NewStaticV4(accesskey, secretkey, "") - } else if bucket.Spec.Provider == sourcev1.AmazonBucketProvider { - opt.Creds = credentials.NewIAM("") + // The artifact is up-to-date + if obj.GetArtifact().HasRevision(artifact.Revision) { + ctrl.LoggerFrom(ctx).Info(fmt.Sprintf("Already up to date, current revision '%s'", artifact.Revision)) + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } - if opt.Creds == nil { - return nil, fmt.Errorf("no bucket credentials found") + // Ensure target path exists and is a directory + if f, err := os.Stat(dir); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to stat source path") + return ctrl.Result{}, err + } else if !f.IsDir() { + err := fmt.Errorf("source path '%s' is not a directory", dir) + ctrl.LoggerFrom(ctx).Error(err, "invalid target path") + return ctrl.Result{}, err } - return minio.New(bucket.Spec.Endpoint, &opt) -} - -// checksum calculates the SHA1 checksum of the given root directory. -// It traverses the given root directory and calculates the checksum for any found file, and returns the SHA1 sum of the -// list with relative file paths and their checksums. -func (r *BucketReconciler) checksum(root string) (string, error) { - sum := sha1.New() - if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.Mode().IsRegular() { - return nil - } - data, err := os.ReadFile(path) - if err != nil { - return err - } - relPath, err := filepath.Rel(root, path) - if err != nil { - return err - } - sum.Write([]byte(fmt.Sprintf("%x %s\n", sha1.Sum(data), relPath))) - return nil - }); err != nil { - return "", err + // Ensure artifact directory exists and acquire lock + if err := r.Storage.MkdirAll(artifact); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to create artifact directory") + return ctrl.Result{}, err } - return fmt.Sprintf("%x", sum.Sum(nil)), nil -} - -// resetStatus returns a modified v1beta1.Bucket and a boolean indicating -// if the status field has been reset. 
-func (r *BucketReconciler) resetStatus(bucket sourcev1.Bucket) (sourcev1.Bucket, bool) { - // We do not have an artifact, or it does no longer exist - if bucket.GetArtifact() == nil || !r.Storage.ArtifactExist(*bucket.GetArtifact()) { - bucket = sourcev1.BucketProgressing(bucket) - bucket.Status.Artifact = nil - return bucket, true + unlock, err := r.Storage.Lock(artifact) + if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to acquire lock for artifact") + return ctrl.Result{}, err } - if bucket.Generation != bucket.Status.ObservedGeneration { - return sourcev1.BucketProgressing(bucket), true + defer unlock() + + // Archive directory to storage + if err := r.Storage.Archive(&artifact, dir, nil); err != nil { + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "Unable to archive artifact to storage: %s", err) + return ctrl.Result{}, err } - return bucket, false -} + r.AnnotatedEventf(obj, map[string]string{ + "revision": artifact.Revision, + "checksum": artifact.Checksum, + }, corev1.EventTypeNormal, "NewArtifact", "Stored artifact for revision '%s'", artifact.Revision) -// gc performs a garbage collection for the given v1beta1.Bucket. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. -func (r *BucketReconciler) gc(bucket sourcev1.Bucket) error { - if !bucket.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), "", "*")) + // Record it on the object + obj.Status.Artifact = artifact.DeepCopy() + + // Update symlink on a "best effort" basis + url, err := r.Storage.Symlink(artifact, "latest.tar.gz") + if err != nil { + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "Failed to update status URL symlink: %s", err) } - if bucket.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*bucket.GetArtifact()) + if url != "" { + obj.Status.URL = url } - return nil + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -// event emits a Kubernetes event and forwards the event to notification controller if configured -func (r *BucketReconciler) event(ctx context.Context, bucket sourcev1.Bucket, severity, msg string) { - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&bucket, corev1.EventTypeNormal, severity, msg) - } - if r.ExternalEventRecorder != nil { - r.ExternalEventRecorder.Eventf(&bucket, corev1.EventTypeNormal, severity, msg) +// reconcileDelete handles the deletion of an object. It first garbage collects all artifacts for the object from the +// artifact storage, if successful, the finalizer is removed from the object. 
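
reconcileDelete below only removes the finalizer once garbage collection has succeeded; the counterpart, which is outside this hunk, is that the reconcile loop has to register that finalizer before any artifact is produced. A hedged sketch of that registration step using controller-runtime's controllerutil helpers; the finalizer string is a placeholder rather than the controller's sourcev1.SourceFinalizer constant:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// ensureFinalizer adds the finalizer if it is missing, so that a later deletion
// is guaranteed to pass through the garbage collecting delete path. It reports
// whether the caller should return early to let the change be persisted first.
func ensureFinalizer(obj client.Object, finalizer string) (ctrl.Result, bool) {
	if !controllerutil.ContainsFinalizer(obj, finalizer) {
		controllerutil.AddFinalizer(obj, finalizer)
		return ctrl.Result{Requeue: true}, true
	}
	return ctrl.Result{}, false
}

func main() {
	obj := &corev1.ConfigMap{} // any client.Object works for the illustration
	_, added := ensureFinalizer(obj, "example.com/finalizer")
	fmt.Println(added, obj.GetFinalizers())
}
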
+func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return ctrl.Result{}, err } + + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + + // Stop reconciliation as the object is being deleted + return ctrl.Result{}, nil } -func (r *BucketReconciler) recordReadiness(ctx context.Context, bucket sourcev1.Bucket) { - log := ctrl.LoggerFrom(ctx) - if r.MetricsRecorder == nil { - return - } - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to record readiness metric") - return +// garbageCollect performs a garbage collection for the given v1beta1.Bucket. It removes all but the current +// artifact except for when the deletion timestamp is set, which will result in the removal of all artifacts for the +// resource. +func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error { + if !obj.DeletionTimestamp.IsZero() { + if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + r.Eventf(obj, corev1.EventTypeWarning, "GarbageCollectionFailed", + "Garbage collection for deleted resource failed: %s", err) + return err + } + obj.Status.Artifact = nil + // TODO(hidde): we should only push this event if we actually garbage collected something + r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + "Garbage collected artifacts for deleted resource") + return nil } - if rc := apimeta.FindStatusCondition(bucket.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !bucket.DeletionTimestamp.IsZero()) - } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !bucket.DeletionTimestamp.IsZero()) + if obj.GetArtifact() != nil { + if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionFailed", "Garbage collection of old artifacts failed: %s", err) + return err + } + // TODO(hidde): we should only push this event if we actually garbage collected something + r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", "Garbage collected old artifacts") } + return nil } -func (r *BucketReconciler) recordSuspension(ctx context.Context, bucket sourcev1.Bucket) { - if r.MetricsRecorder == nil { - return +// buildMinioClient constructs a minio.Client with the data from the given object and Secret. +// It returns an error if the Secret does not have the required fields, or if there is no credential handler +// configured. 
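
buildMinioClient below requires the referenced Secret to carry accesskey and secretkey entries, while buildGCPClient expects a single serviceaccount entry holding a credentials JSON document. A sketch of what such Secrets could look like when built in Go; all names and values are placeholders:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// For the generic and aws providers, buildMinioClient requires both fields;
	// missing either one is rejected as invalid secret data.
	minioSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "minio-credentials", Namespace: "default"},
		Data: map[string][]byte{
			"accesskey": []byte("minio-access-key"),
			"secretkey": []byte("minio-secret-key"),
		},
	}

	// For the gcp provider, buildGCPClient passes the 'serviceaccount' field to
	// option.WithCredentialsJSON after gcp.ValidateSecret has checked it.
	gcpSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "gcp-credentials", Namespace: "default"},
		Data: map[string][]byte{
			"serviceaccount": []byte(`{"type": "service_account", "project_id": "example"}`),
		},
	}

	fmt.Println(minioSecret.Name, gcpSecret.Name)
}
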
+func (r *BucketReconciler) buildMinioClient(obj *sourcev1.Bucket, secret *corev1.Secret) (*minio.Client, error) { + opts := minio.Options{ + Region: obj.Spec.Region, + Secure: !obj.Spec.Insecure, } - log := ctrl.LoggerFrom(ctx) - - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to record suspended metric") - return + if secret != nil { + var accessKey, secretKey string + if k, ok := secret.Data["accesskey"]; ok { + accessKey = string(k) + } + if k, ok := secret.Data["secretkey"]; ok { + secretKey = string(k) + } + if accessKey == "" || secretKey == "" { + return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) + } + opts.Creds = credentials.NewStaticV4(accessKey, secretKey, "") + } else if obj.Spec.Provider == sourcev1.AmazonBucketProvider { + opts.Creds = credentials.NewIAM("") } + return minio.New(obj.Spec.Endpoint, &opts) +} - if !bucket.DeletionTimestamp.IsZero() { - r.MetricsRecorder.RecordSuspend(*objRef, false) +// buildGCPClient constructs a gcp.GCPClient with the data from the given Secret. +// It returns an error if the Secret does not have the required field, or if the client construction fails. +func (r *BucketReconciler) buildGCPClient(ctx context.Context, secret *corev1.Secret) (*gcp.GCPClient, error) { + var client *gcp.GCPClient + var err error + if secret != nil { + if err := gcp.ValidateSecret(secret.Data, secret.Name); err != nil { + return nil, err + } + client, err = gcp.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"])) + if err != nil { + return nil, err + } } else { - r.MetricsRecorder.RecordSuspend(*objRef, bucket.Spec.Suspend) + client, err = gcp.NewClient(ctx) + if err != nil { + return nil, err + } } + return client, nil } -func (r *BucketReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.BucketStatus) error { - var bucket sourcev1.Bucket - if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil { - return err - } - - patch := client.MergeFrom(bucket.DeepCopy()) - bucket.Status = newStatus +// etagIndex is an index of bucket keys and their Etag values. +type etagIndex map[string]string - return r.Status().Patch(ctx, &bucket, patch) +// Revision calculates the SHA256 checksum of the index. +// The keys are sorted to ensure a stable order, and the SHA256 sum is then calculated for the string representations of +// the key/value pairs, each pair written on a newline +// The sum result is returned as a string. +func (i etagIndex) Revision() (string, error) { + keyIndex := make([]string, 0, len(i)) + for k := range i { + keyIndex = append(keyIndex, k) + } + sort.Strings(keyIndex) + sum := sha256.New() + for _, k := range keyIndex { + if _, err := sum.Write([]byte(fmt.Sprintf("%s %s\n", k, i[k]))); err != nil { + return "", err + } + } + return fmt.Sprintf("%x", sum.Sum(nil)), nil } diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 01ff20d87..b5c9debeb 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -17,59 +17,573 @@ limitations under the License. package controllers import ( + "context" + "crypto/md5" + "fmt" + "net/http" + "net/http/httptest" + "net/url" "os" + "path" "path/filepath" + "strings" "testing" + "time" + + "github.com/go-logr/logr" + . 
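
The etagIndex.Revision method defined above produces a deterministic revision: the keys are sorted, each key and etag pair is written on its own line, and the SHA256 of that stream is returned in hex. A small standalone reproduction of the same calculation, kept separate from the controller types for illustration:

package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// revisionOf mirrors etagIndex.Revision: a stable SHA256 over sorted "key etag" lines.
func revisionOf(index map[string]string) string {
	keys := make([]string, 0, len(index))
	for k := range index {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	sum := sha256.New()
	for _, k := range keys {
		fmt.Fprintf(sum, "%s %s\n", k, index[k])
	}
	return fmt.Sprintf("%x", sum.Sum(nil))
}

func main() {
	// Insertion order does not matter; only the sorted key/etag pairs do.
	a := map[string]string{"one": "etag-1", "two": "etag-2"}
	b := map[string]string{"two": "etag-2", "one": "etag-1"}
	fmt.Println(revisionOf(a) == revisionOf(b)) // true

	// An empty (or nil) index hashes to the SHA256 of empty input.
	fmt.Println(revisionOf(nil))
}

The empty-index digest printed at the end is the SHA256 of empty input, which is also the value asserted by Test_etagIndex_Revision further down.
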
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" ) -func TestBucketReconciler_checksum(t *testing.T) { +func TestBucketReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + s3Server := newS3Server("test-bucket") + s3Server.Objects = []*s3MockObject{ + { + Key: "test.yaml", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + } + s3Server.Start() + defer s3Server.Stop() + + g.Expect(s3Server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(s3Server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + }, + } + g.Expect(testEnv.Create(ctx, secret)).To(Succeed()) + defer testEnv.Delete(ctx, secret) + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + Provider: "generic", + BucketName: s3Server.BucketName, + Endpoint: u.Host, + Insecure: true, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + SecretRef: &meta.LocalObjectReference{ + Name: secret.Name, + }, + }, + } + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for Bucket to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) || obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for Bucket to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestBucketReconciler_reconcileStorage(t *testing.T) { tests := []struct { - name string - beforeFunc func(root string) - want string - wantErr bool + name string + beforeFunc func(obj *sourcev1.Bucket, storage *Storage) error + want ctrl.Result + wantErr bool + assertArtifact *sourcev1.Artifact + assertConditions []metav1.Condition + assertPaths []string }{ { - name: "empty root", - want: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + name: "garbage collects", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + revisions := []string{"a", "b", "c"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := 
testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0644); err != nil { + return err + } + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/c.txt", + Revision: "c", + Checksum: "84a516841ba77a5b4648de2cd0dfcb30ea46dbb4", + URL: testStorage.Hostname + "/reconcile-storage/c.txt", + }, + assertPaths: []string{ + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + }, + { + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/invalid.txt"), + Revision: "d", + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: ctrl.Result{Requeue: true}, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/hostname.txt"), + Revision: "f", + Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0644); err != nil { + return err + } + return nil + }, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + }, + } + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } + + got, err := r.reconcileStorage(context.TODO(), obj) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) + } + }) + } +} + +func TestBucketReconciler_reconcileMinioSource(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*s3MockObject + middleware http.Handler + secret *corev1.Secret + beforeFunc func(obj *sourcev1.Bucket) + want ctrl.Result + wantErr bool + assertArtifact sourcev1.Artifact + assertConditions []metav1.Condition + }{ + { + name: "reconciles source", + bucketName: "dummy", + bucketObjects: 
[]*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + }, + }, + // TODO(hidde): middleware for mock server + //{ + // name: "authenticates using secretRef", + // bucketName: "dummy", + //}, + { + name: "observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + }, + }, + { + name: "observes invalid secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: invalid 'dummy' secret data: required fields"), + }, + }, + { + name: "observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + }, + }, + { + name: "transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + }, + }, + { + // TODO(hidde): test the lesser happy paths + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*s3MockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100.tar.gz", + Revision: "94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), + }, }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme()) + if 
tt.secret != nil { + builder.WithObjects(tt.secret) + } + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: builder.Build(), + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-source-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + var server *s3MockServer + if tt.bucketName != "" { + server = newS3Server(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + obj.Spec.BucketName = tt.bucketName + obj.Spec.Endpoint = u.Host + // TODO(hidde): also test TLS + obj.Spec.Insecure = true + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + artifact := &sourcev1.Artifact{} + got, err := r.reconcileSource(context.TODO(), obj, artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestBucketReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + artifact sourcev1.Artifact + beforeFunc func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) + want ctrl.Result + wantErr bool + assertConditions []metav1.Condition + }{ { - name: "with file", - beforeFunc: func(root string) { - mockFile(root, "a/b/c.txt", "a dummy string") + name: "artifact revision up-to-date", + artifact: sourcev1.Artifact{ + Revision: "existing", + }, + beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Status.Artifact = &artifact + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), }, - want: "309a5e6e96b4a7eea0d1cfaabf1be8ec1c063fa0", }, { - name: "with file in different path", - beforeFunc: func(root string) { - mockFile(root, "a/b.txt", "a dummy string") + name: "dir path deleted", + beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + _ = os.RemoveAll(dir) }, - want: "e28c62b5cc488849950c4355dddc5523712616d4", + wantErr: true, }, + //{ + // name: "dir path empty", + //}, + //{ + // name: "success", + // artifact: sourcev1.Artifact{ + // Revision: "existing", + // }, + // beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + // obj.Status.Artifact = &artifact + // }, + // assertConditions: []metav1.Condition{ + // *conditions.TrueCondition(sourcev1.ArtifactAvailableCondition, meta.SucceededReason, "Compressed source to artifact with revision 'existing'"), + // }, + //}, + //{ + // name: "symlink", + //}, } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - root, err := os.MkdirTemp("", "bucket-checksum-") - if err != nil { - t.Fatal(err) + g := NewWithT(t) + + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-artifact-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + 
Timeout: &metav1.Duration{Duration: timeout}, + }, } - defer os.RemoveAll(root) + if tt.beforeFunc != nil { - tt.beforeFunc(root) + tt.beforeFunc(obj, tt.artifact, tmpDir) } - got, err := (&BucketReconciler{}).checksum(root) + + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + dlog := log.NewDelegatingLogSink(log.NullLogSink{}) + nullLogger := logr.New(dlog) + got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, tt.artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + //g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func Test_etagIndex_Revision(t *testing.T) { + tests := []struct { + name string + list etagIndex + want string + wantErr bool + }{ + { + name: "index with items", + list: map[string]string{ + "one": "one", + "two": "two", + "three": "three", + }, + want: "8afaa9c32d7c187e8acaeffe899226011001f67c095519cdd8b4c03487c5b8bc", + }, + { + name: "index with items in different order", + list: map[string]string{ + "three": "three", + "one": "one", + "two": "two", + }, + want: "8afaa9c32d7c187e8acaeffe899226011001f67c095519cdd8b4c03487c5b8bc", + }, + { + name: "empty index", + list: map[string]string{}, + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + { + name: "nil index", + list: nil, + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.list.Revision() if (err != nil) != tt.wantErr { - t.Errorf("checksum() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("revision() error = %v, wantErr %v", err, tt.wantErr) return } if got != tt.want { - t.Errorf("checksum() got = %v, want %v", got, tt.want) + t.Errorf("revision() got = %v, want %v", got, tt.want) } }) } } +// helpers + func mockFile(root, path, content string) error { filePath := filepath.Join(root, path) if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil { @@ -80,3 +594,120 @@ func mockFile(root, path, content string) error { } return nil } + +type s3MockObject struct { + Key string + LastModified time.Time + ContentType string + Content []byte +} + +type s3MockServer struct { + srv *httptest.Server + mux *http.ServeMux + + BucketName string + Objects []*s3MockObject +} + +func newS3Server(bucketName string) *s3MockServer { + s := &s3MockServer{BucketName: bucketName} + s.mux = http.NewServeMux() + s.mux.Handle(fmt.Sprintf("/%s/", s.BucketName), http.HandlerFunc(s.handler)) + + s.srv = httptest.NewUnstartedServer(s.mux) + + return s +} + +func (s *s3MockServer) Start() { + s.srv.Start() +} + +func (s *s3MockServer) Stop() { + s.srv.Close() +} + +func (s *s3MockServer) HTTPAddress() string { + return s.srv.URL +} + +func (s *s3MockServer) handler(w http.ResponseWriter, r *http.Request) { + key := path.Base(r.URL.Path) + + switch key { + case s.BucketName: + w.Header().Add("Content-Type", "application/xml") + + if r.Method == http.MethodHead { + return + } + + q := r.URL.Query() + + if q["location"] != nil { + fmt.Fprint(w, ` + +Europe + `) + return + } + + contents := "" + for _, o := range s.Objects { + etag := md5.Sum(o.Content) + contents += fmt.Sprintf(` + + %s + %s + %d + "%b" + STANDARD + `, o.Key, o.LastModified.UTC().Format(time.RFC3339), len(o.Content), etag) + } + + fmt.Fprintf(w, ` + + + %s + + + %d + 1000 + false + %s 
+ + `, s.BucketName, len(s.Objects), contents) + default: + key, err := filepath.Rel("/"+s.BucketName, r.URL.Path) + if err != nil { + w.WriteHeader(500) + return + } + + var found *s3MockObject + for _, o := range s.Objects { + if key == o.Key { + found = o + } + } + if found == nil { + w.WriteHeader(404) + return + } + + etag := md5.Sum(found.Content) + lastModified := strings.Replace(found.LastModified.UTC().Format(time.RFC1123), "UTC", "GMT", 1) + + w.Header().Add("Content-Type", found.ContentType) + w.Header().Add("Last-Modified", lastModified) + w.Header().Add("ETag", fmt.Sprintf("\"%b\"", etag)) + w.Header().Add("Content-Length", fmt.Sprintf("%d", len(found.Content))) + + if r.Method == http.MethodHead { + return + } + + w.Write(found.Content) + } +} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index a33108dc2..b27bda8fa 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -97,6 +97,15 @@ func TestMain(m *testing.M) { panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err)) } + if err := (&BucketReconciler{ + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Storage: testStorage, + }).SetupWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Failed to start BucketReconciler: %v", err)) + } + go func() { fmt.Println("Starting the test environment") if err := testEnv.Start(ctx); err != nil { diff --git a/main.go b/main.go index 3c0f2791a..4ea5a102a 100644 --- a/main.go +++ b/main.go @@ -203,11 +203,10 @@ func main() { os.Exit(1) } if err = (&controllers.BucketReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - EventRecorder: eventRecorder, - MetricsRecorder: metricsH.MetricsRecorder, + Client: mgr.GetClient(), + EventRecorder: eventRecorder, + Metrics: metricsH, + Storage: storage, }).SetupWithManagerAndOptions(mgr, controllers.BucketReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { From 84301e63703deca2ea38274e742632d8c508c2c4 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Mon, 9 Aug 2021 13:24:49 +0200 Subject: [PATCH 32/63] Consolidate condition types into `FetchFailed` This commit consolidates the `DownloadFailed` and `CheckoutFailed` Condition types into a new more generic `FetchFailed` type to simplify the API and observations by consumers. Signed-off-by: Hidde Beydals --- api/v1beta2/bucket_types.go | 7 ---- controllers/bucket_controller.go | 46 +++++++++++++-------------- controllers/bucket_controller_test.go | 8 ++--- 3 files changed, 27 insertions(+), 34 deletions(-) diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go index d074fc60b..7f403b4b8 100644 --- a/api/v1beta2/bucket_types.go +++ b/api/v1beta2/bucket_types.go @@ -36,13 +36,6 @@ const ( GoogleBucketProvider string = "gcp" ) -const ( - // DownloadFailedCondition indicates a transient or persistent download failure. If True, observations on the - // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. - // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. - DownloadFailedCondition string = "DownloadFailed" -) - // BucketSpec defines the desired state of an S3 compatible bucket type BucketSpec struct { // The S3 compatible storage provider name, default ('generic'). 
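
With this consolidation a consumer only has to watch one negative-polarity condition for fetch problems, whether the source is a Git checkout or a bucket download. A sketch of how such a check could look with the standard apimachinery condition helpers; the condition literal is an example, not output captured from the controller:

package main

import (
	"fmt"

	apimeta "k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// fetchFailed reports whether the consolidated, abnormal-true FetchFailed
// condition is present and True on a source object's status conditions.
func fetchFailed(conditions []metav1.Condition) bool {
	return apimeta.IsStatusConditionTrue(conditions, "FetchFailed")
}

func main() {
	conds := []metav1.Condition{{
		Type:    "FetchFailed",
		Status:  metav1.ConditionTrue,
		Reason:  "BucketOperationFailed",
		Message: "Bucket 'example' does not exist",
	}}
	fmt.Println(fetchFailed(conds)) // true
}
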
diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index e97f97b23..f4bc56150 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -122,12 +122,12 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res meta.ReadyCondition, conditions.WithConditions( sourcev1.ArtifactOutdatedCondition, - sourcev1.DownloadFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.ArtifactUnavailableCondition, ), conditions.WithNegativePolarityConditions( sourcev1.ArtifactOutdatedCondition, - sourcev1.DownloadFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.ArtifactUnavailableCondition, ), ) @@ -137,7 +137,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res patch.WithOwnedConditions{ Conditions: []string{ sourcev1.ArtifactOutdatedCondition, - sourcev1.DownloadFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.ArtifactUnavailableCondition, meta.ReadyCondition, meta.ReconcilingCondition, @@ -259,7 +259,7 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B // reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the // result. // If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret -// fails, it records v1beta1.DownloadFailedCondition=True and returns early. +// fails, it records v1beta1.FetchFailedCondition=True and returns early. // // The caller should assume a failure if an error is returned, or the Result is zero. func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { @@ -270,7 +270,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu Name: obj.Spec.SecretRef.Name, } if err := r.Get(ctx, secretName, &secret); err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", secretName.String(), err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", secretName.String(), err.Error()) @@ -292,8 +292,8 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu // // The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into // account. In case of an error during the download process (including transient errors), it records -// v1beta1.DownloadFailedCondition=True and returns early. -// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to +// v1beta1.FetchFailedCondition=True and returns early. +// On a successful download, it removes v1beta1.FetchFailedCondition, and compares the current revision of HEAD to // the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. // If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. 
// @@ -303,7 +303,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source // Build the client with the configuration from the object and secret s3Client, err := r.buildMinioClient(obj, secret) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: %s", err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: %s", err.Error()) @@ -316,12 +316,12 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source defer cancel() exists, err := s3Client.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) return ctrl.Result{}, err } if !exists { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) @@ -332,7 +332,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source path := filepath.Join(dir, sourceignore.IgnoreFile) if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -341,7 +341,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -362,7 +362,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), }) { if err = object.Err; err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) @@ -415,7 +415,7 @@ func (r *BucketReconciler) 
reconcileMinioSource(ctx context.Context, obj *source return nil }) if err = group.Wait(); err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) @@ -424,7 +424,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source r.Eventf(obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason, "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - conditions.Delete(obj, sourcev1.DownloadFailedCondition) + conditions.Delete(obj, sourcev1.FetchFailedCondition) // Create potential new artifact *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) @@ -446,7 +446,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 secret *corev1.Secret, dir string) (ctrl.Result, error) { gcpClient, err := r.buildGCPClient(ctx, secret) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: %s", err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: %s", err.Error()) @@ -460,12 +460,12 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 defer cancel() exists, err := gcpClient.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) return ctrl.Result{}, err } if !exists { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) @@ -476,7 +476,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 path := filepath.Join(dir, sourceignore.IgnoreFile) if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { if err != gcpstorage.ErrObjectNotExist { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -485,7 +485,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, 
sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -508,7 +508,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 if err == gcp.IteratorDone { break } - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) @@ -560,7 +560,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 return nil }) if err = group.Wait(); err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) @@ -569,7 +569,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 r.Eventf(obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason, "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - conditions.Delete(obj, sourcev1.DownloadFailedCondition) + conditions.Delete(obj, sourcev1.FetchFailedCondition) // Create potential new artifact *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index b5c9debeb..c08f0d1db 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -307,7 +307,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), }, }, { @@ -325,7 +325,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: invalid 'dummy' secret data: required fields"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: invalid 'dummy' secret data: required fields"), }, }, { @@ -336,7 +336,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), }, }, { @@ -347,7 +347,7 
@@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), }, }, { From c79a55baa8520b36a0df5af5f3578342f01fe55f Mon Sep 17 00:00:00 2001 From: Sunny Date: Tue, 10 Aug 2021 03:48:11 +0530 Subject: [PATCH 33/63] BucketReconciler: Add reconcileArtifact tests Add `BucketReconciler.reconcileArtifact` tests based on `GitRepositoryReconciler.reconcileArtifact` test cases. Signed-off-by: Sunny --- controllers/bucket_controller_test.go | 145 +++++++++++++++++++------- 1 file changed, 105 insertions(+), 40 deletions(-) diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index c08f0d1db..4c2060724 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -165,7 +165,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { assertArtifact: &sourcev1.Artifact{ Path: "/reconcile-storage/c.txt", Revision: "c", - Checksum: "84a516841ba77a5b4648de2cd0dfcb30ea46dbb4", + Checksum: "2e7d2c03a9507ae265ecf5b5356885a53393a2029d241394997265a1a25aefc6", URL: testStorage.Hostname + "/reconcile-storage/c.txt", }, assertPaths: []string{ @@ -198,7 +198,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { obj.Status.Artifact = &sourcev1.Artifact{ Path: fmt.Sprintf("/reconcile-storage/hostname.txt"), Revision: "f", - Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", URL: "http://outdated.com/reconcile-storage/hostname.txt", } if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { @@ -215,7 +215,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { assertArtifact: &sourcev1.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", - Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", }, }, @@ -444,57 +444,114 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { } func TestBucketReconciler_reconcileArtifact(t *testing.T) { + // testChecksum is the checksum value of the artifacts created in this + // test. 
+ const testChecksum = "4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1" + tests := []struct { name string - artifact sourcev1.Artifact - beforeFunc func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) + beforeFunc func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) + afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string) want ctrl.Result wantErr bool assertConditions []metav1.Condition }{ { - name: "artifact revision up-to-date", - artifact: sourcev1.Artifact{ - Revision: "existing", + name: "Archiving artifact to storage makes Ready=True", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Up-to-date artifact should not update status", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Status.Artifact = artifact.DeepCopy() + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Removes ArtifactUnavailableCondition after creating artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating a new artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} }, - beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { - obj.Status.Artifact = &artifact + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) }, + want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), }, }, { - name: "dir path deleted", - beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir 
string) { - _ = os.RemoveAll(dir) + name: "Dir path deleted", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + }, + wantErr: true, + }, + { + name: "Dir path is not a directory", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + // Remove the given directory and create a file for the same + // path. + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + f, err := os.Create(dir) + defer f.Close() + t.Expect(err).ToNot(HaveOccurred()) + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) }, wantErr: true, }, - //{ - // name: "dir path empty", - //}, - //{ - // name: "success", - // artifact: sourcev1.Artifact{ - // Revision: "existing", - // }, - // beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { - // obj.Status.Artifact = &artifact - // }, - // assertConditions: []metav1.Condition{ - // *conditions.TrueCondition(sourcev1.ArtifactAvailableCondition, meta.SucceededReason, "Compressed source to artifact with revision 'existing'"), - // }, - //}, - //{ - // name: "symlink", - //}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-artifact-") g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(tmpDir) @@ -504,30 +561,38 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { Kind: sourcev1.BucketKind, }, ObjectMeta: metav1.ObjectMeta{ - Name: "test-bucket", + GenerateName: "test-bucket-", + Generation: 1, + Namespace: "default", }, Spec: sourcev1.BucketSpec{ Timeout: &metav1.Duration{Duration: timeout}, }, } - if tt.beforeFunc != nil { - tt.beforeFunc(obj, tt.artifact, tmpDir) - } + artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz") + artifact.Checksum = testChecksum - r := &BucketReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj, artifact, tmpDir) } dlog := log.NewDelegatingLogSink(log.NullLogSink{}) nullLogger := logr.New(dlog) - got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, tt.artifact, tmpDir) + got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, artifact, tmpDir) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) - //g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + // On error, artifact is empty. Check artifacts only on successful + // reconcile. 
+ if !tt.wantErr { + g.Expect(obj.Status.Artifact).To(MatchArtifact(artifact.DeepCopy())) + } g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj, tmpDir) + } }) } } From 25779241eca134199ee4caa84a8df8f72386ed39 Mon Sep 17 00:00:00 2001 From: Sunny Date: Sun, 28 Nov 2021 00:50:26 +0530 Subject: [PATCH 34/63] Add more reconcileMinioSource test cases Signed-off-by: Sunny --- controllers/bucket_controller.go | 9 +-- controllers/bucket_controller_test.go | 79 +++++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 4 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index f4bc56150..c65feea58 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -263,13 +263,14 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B // // The caller should assume a failure if an error is returned, or the Result is zero. func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { - var secret corev1.Secret + var secret *corev1.Secret if obj.Spec.SecretRef != nil { secretName := types.NamespacedName{ Namespace: obj.GetNamespace(), Name: obj.Spec.SecretRef.Name, } - if err := r.Get(ctx, secretName, &secret); err != nil { + secret = &corev1.Secret{} + if err := r.Get(ctx, secretName, secret); err != nil { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", secretName.String(), err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.AuthenticationFailedReason, @@ -281,9 +282,9 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu switch obj.Spec.Provider { case sourcev1.GoogleBucketProvider: - return r.reconcileGCPSource(ctx, obj, artifact, &secret, dir) + return r.reconcileGCPSource(ctx, obj, artifact, secret, dir) default: - return r.reconcileMinioSource(ctx, obj, artifact, &secret, dir) + return r.reconcileMinioSource(ctx, obj, artifact, secret, dir) } } diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 4c2060724..587bdc116 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -382,6 +382,85 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), }, }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + ignore := "included/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*s3MockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", + Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + }, + }, + { + name: "up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &sourcev1.Artifact{ + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + } + }, + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{}, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read test file") + }, + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From f472cadab41ba76f790308b2835059e21bffcaf2 Mon Sep 17 00:00:00 2001 From: Sunny Date: Mon, 29 Nov 2021 17:42:41 +0530 Subject: [PATCH 35/63] Add bucket controller tests for reconcileGCPSource - Introduce a mock GCP server to test the GCP bucket client against mocked GCP server results. - Add tests for reconcileGCPSource(). - Patch GCPClient.BucketExists() to return no error when the bucket doesn't exist. This keeps the GCP client compatible with the minio client. Signed-off-by: Sunny --- controllers/bucket_controller_test.go | 415 ++++++++++++++++++++++++++ pkg/gcp/gcp.go | 3 +- pkg/gcp/gcp_test.go | 2 +- 3 files changed, 418 insertions(+), 2 deletions(-) diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 587bdc116..40fd9d0ca 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -19,6 +19,7 @@ package controllers import ( "context" "crypto/md5" + "encoding/json" "fmt" "net/http" "net/http/httptest" @@ -32,6 +33,7 @@ import ( "github.com/go-logr/logr" . "github.com/onsi/gomega" + raw "google.golang.org/api/storage/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -47,6 +49,9 @@ import ( sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" ) +// Environment variable to set the GCP Storage host for the GCP client.
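+// The underlying Go storage client honors this environment variable and sends
+// its API calls to the configured host instead of the real GCP Storage service.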
+const ENV_GCP_STORAGE_HOST = "STORAGE_EMULATOR_HOST" + func TestBucketReconciler_Reconcile(t *testing.T) { g := NewWithT(t) @@ -522,6 +527,272 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { } } +func TestBucketReconciler_reconcileGCPSource(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*gcpMockObject + secret *corev1.Secret + beforeFunc func(obj *sourcev1.Bucket) + want ctrl.Result + wantErr bool + assertArtifact sourcev1.Artifact + assertConditions []metav1.Condition + }{ + { + name: "reconciles source", + bucketName: "dummy", + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + }, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + "serviceaccount": []byte("testsa"), + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + }, + }, + { + name: "observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + }, + }, + { + name: "observes invalid secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: invalid 'dummy' secret data: required fields"), + }, + }, + { + name: "observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + }, + }, + { + name: "transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + }, + }, + { + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*gcpMockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "included/file.txt", + Content: 
[]byte("included/file.txt"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4.tar.gz", + Revision: "7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), + }, + }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + ignore := "included/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*gcpMockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", + Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + }, + }, + { + name: "up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &sourcev1.Artifact{ + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + } + }, + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{}, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read test file") + }, + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + }, + }, + // TODO: Middleware for mock server to test authentication using secret. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme()) + if tt.secret != nil { + builder.WithObjects(tt.secret) + } + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: builder.Build(), + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-source-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + // Test bucket object. 
+ obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + BucketName: tt.bucketName, + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GoogleBucketProvider, + }, + } + + // Set up the mock GCP bucket server. + server := newGCPServer(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + + obj.Spec.Endpoint = server.HTTPAddress() + obj.Spec.Insecure = true + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + // Set the GCP storage host to be used by the GCP client. + g.Expect(os.Setenv(ENV_GCP_STORAGE_HOST, obj.Spec.Endpoint)).ToNot(HaveOccurred()) + defer func() { + g.Expect(os.Unsetenv(ENV_GCP_STORAGE_HOST)).ToNot(HaveOccurred()) + }() + + artifact := &sourcev1.Artifact{} + got, err := r.reconcileSource(context.TODO(), obj, artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + func TestBucketReconciler_reconcileArtifact(t *testing.T) { // testChecksum is the checksum value of the artifacts created in this // test. @@ -855,3 +1126,147 @@ func (s *s3MockServer) handler(w http.ResponseWriter, r *http.Request) { w.Write(found.Content) } } + +type gcpMockObject struct { + Key string + ContentType string + Content []byte +} + +type gcpMockServer struct { + srv *httptest.Server + mux *http.ServeMux + + BucketName string + Etag string + Objects []*gcpMockObject + Close func() +} + +func newGCPServer(bucketName string) *gcpMockServer { + s := &gcpMockServer{BucketName: bucketName} + s.mux = http.NewServeMux() + s.mux.Handle("/", http.HandlerFunc(s.handler)) + + s.srv = httptest.NewUnstartedServer(s.mux) + + return s +} + +func (gs *gcpMockServer) Start() { + gs.srv.Start() +} + +func (gs *gcpMockServer) Stop() { + gs.srv.Close() +} + +func (gs *gcpMockServer) HTTPAddress() string { + return gs.srv.URL +} + +func (gs *gcpMockServer) GetAllObjects() *raw.Objects { + objs := &raw.Objects{} + for _, o := range gs.Objects { + objs.Items = append(objs.Items, getGCPObject(gs.BucketName, *o)) + } + return objs +} + +func (gs *gcpMockServer) GetObjectFile(key string) ([]byte, error) { + for _, o := range gs.Objects { + if o.Key == key { + return o.Content, nil + } + } + return nil, fmt.Errorf("not found") +} + +func (gs *gcpMockServer) handler(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.RequestURI, "/b/") { + // Handle the bucket info related queries. + if r.RequestURI == fmt.Sprintf("/b/%s?alt=json&prettyPrint=false&projection=full", gs.BucketName) { + // Return info about the bucket. + response := getGCPBucket(gs.BucketName, gs.Etag) + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } else if strings.Contains(r.RequestURI, "/o/") { + // Return info about object in the bucket. + var obj *gcpMockObject + for _, o := range gs.Objects { + // The object key in the URI is escaped. 
+ // e.g.: /b/dummy/o/included%2Ffile.txt?alt=json&prettyPrint=false&projection=full + if r.RequestURI == fmt.Sprintf("/b/%s/o/%s?alt=json&prettyPrint=false&projection=full", gs.BucketName, url.QueryEscape(o.Key)) { + obj = o + } + } + if obj != nil { + response := getGCPObject(gs.BucketName, *obj) + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } + w.WriteHeader(404) + return + } else if strings.Contains(r.RequestURI, "/o?") { + // Return info about all the objects in the bucket. + response := gs.GetAllObjects() + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } + w.WriteHeader(404) + return + } else { + // Handle object file query. + bucketPrefix := fmt.Sprintf("/%s/", gs.BucketName) + if strings.HasPrefix(r.RequestURI, bucketPrefix) { + // The URL path is of the format //included/file.txt. + // Extract the object key by discarding the bucket prefix. + key := strings.TrimPrefix(r.URL.Path, bucketPrefix) + // Handle returning object file in a bucket. + response, err := gs.GetObjectFile(key) + if err != nil { + w.WriteHeader(404) + return + } + w.WriteHeader(200) + w.Write(response) + return + } + w.WriteHeader(404) + return + } +} + +func getGCPObject(bucket string, obj gcpMockObject) *raw.Object { + return &raw.Object{ + Bucket: bucket, + Name: obj.Key, + ContentType: obj.ContentType, + } +} + +func getGCPBucket(name, eTag string) *raw.Bucket { + return &raw.Bucket{ + Name: name, + Location: "loc", + Etag: eTag, + } +} diff --git a/pkg/gcp/gcp.go b/pkg/gcp/gcp.go index 9127fcde3..f98e498c4 100644 --- a/pkg/gcp/gcp.go +++ b/pkg/gcp/gcp.go @@ -73,7 +73,8 @@ func ValidateSecret(secret map[string][]byte, name string) error { func (c *GCPClient) BucketExists(ctx context.Context, bucketName string) (bool, error) { _, err := c.Client.Bucket(bucketName).Attrs(ctx) if err == gcpstorage.ErrBucketNotExist { - return false, err + // Not returning error to be compatible with minio's API. + return false, nil } if err != nil { return false, err diff --git a/pkg/gcp/gcp_test.go b/pkg/gcp/gcp_test.go index 7f431a44d..6c27accf6 100644 --- a/pkg/gcp/gcp_test.go +++ b/pkg/gcp/gcp_test.go @@ -130,7 +130,7 @@ func TestBucketNotExists(t *testing.T) { Client: client, } exists, err := gcpClient.BucketExists(context.Background(), bucket) - assert.Error(t, err, gcpstorage.ErrBucketNotExist.Error()) + assert.NilError(t, err) assert.Assert(t, !exists) } From 848534a8f1911cc59a5fbd4f55d7816eaff20aa2 Mon Sep 17 00:00:00 2001 From: Sunny Date: Thu, 9 Dec 2021 03:42:54 +0530 Subject: [PATCH 36/63] bucket: Ignore patch error not found on delete Ignore "not found" error while patching when the delete timestamp is set. 
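The change boils down to filtering the patch error before it is aggregated: once the deletion timestamp is set, the object disappearing mid-patch is the expected outcome rather than a failure. A minimal self-contained sketch of the pattern (the standalone helper and its name are illustrative only; the actual change applies the filter inline in the deferred patch block):

    package controllers

    import (
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        kerrors "k8s.io/apimachinery/pkg/util/errors"
    )

    // ignoreNotFoundOnDelete drops NotFound errors returned by a status patch
    // when the object is already being deleted, and leaves all other errors
    // untouched. (Illustrative helper, not part of this patch.)
    func ignoreNotFoundOnDelete(patchErr error, deletionTimestamp *metav1.Time) error {
        if deletionTimestamp != nil && !deletionTimestamp.IsZero() {
            return kerrors.FilterOut(patchErr, func(e error) bool { return apierrors.IsNotFound(e) })
        }
        return patchErr
    }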
Signed-off-by: Sunny --- controllers/bucket_controller.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index c65feea58..9dc8dbff9 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -35,6 +35,7 @@ import ( "golang.org/x/sync/semaphore" "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" @@ -167,6 +168,10 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res // Finally, patch the resource if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) + } retErr = kerrors.NewAggregate([]error{retErr, err}) } From ba7cbd31f18aaad1b4c985ae906fc84da96f50cb Mon Sep 17 00:00:00 2001 From: Sunny Date: Tue, 21 Dec 2021 04:43:16 +0530 Subject: [PATCH 37/63] bucket: Add more reconciler design improvements - Remove ArtifactUnavailable condition and use Reconciling condition to convey the same. - Make Reconciling condition affect the ready condition. - Introduce summarizeAndPatch() to calculate the final status conditions and patch them. - Introduce reconcile() to iterate through the sub-reconcilers and execute them. Signed-off-by: Sunny --- controllers/bucket_controller.go | 524 +++++++++++++++----------- controllers/bucket_controller_test.go | 126 ++++--- 2 files changed, 376 insertions(+), 274 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 9dc8dbff9..ebd2a6daf 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -19,6 +19,7 @@ package controllers import ( "context" "crypto/sha256" + "errors" "fmt" "os" "path/filepath" @@ -36,7 +37,7 @@ import ( "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" @@ -53,9 +54,37 @@ import ( "github.com/fluxcd/pkg/runtime/predicates" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/pkg/sourceignore" ) +// Status conditions owned by Bucket reconciler. +var bucketOwnedConditions = []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, +} + +// Conditions that Ready condition is influenced by in descending order of their +// priority. +var bucketReadyDeps = []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, +} + +// Negative conditions that Ready condition is influenced by. 
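+// These are "negative polarity" (abnormal-true) conditions: when one of them
+// is True it counts against readiness in the summarized Ready condition.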
+var bucketReadyDepsNegative = []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, +} + // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete @@ -74,6 +103,10 @@ type BucketReconcilerOptions struct { MaxConcurrentReconciles int } +// bucketReconcilerFunc is the function type for all the bucket reconciler +// functions. +type bucketReconcilerFunc func(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) + func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{}) } @@ -111,69 +144,14 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res return ctrl.Result{}, err } + var recResult sreconcile.Result + // Always attempt to patch the object and status after each reconciliation + // NOTE: This deferred block only modifies the named return error. The + // result from the reconciliation remains the same. Any requeue attributes + // set in the result will continue to be effective. defer func() { - // Record the value of the reconciliation request, if any - if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { - obj.Status.SetLastHandledReconcileRequest(v) - } - - // Summarize the Ready condition based on abnormalities that may have been observed - conditions.SetSummary(obj, - meta.ReadyCondition, - conditions.WithConditions( - sourcev1.ArtifactOutdatedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactUnavailableCondition, - ), - conditions.WithNegativePolarityConditions( - sourcev1.ArtifactOutdatedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactUnavailableCondition, - ), - ) - - // Patch the object, ignoring conflicts on the conditions owned by this controller - patchOpts := []patch.Option{ - patch.WithOwnedConditions{ - Conditions: []string{ - sourcev1.ArtifactOutdatedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactUnavailableCondition, - meta.ReadyCondition, - meta.ReconcilingCondition, - meta.StalledCondition, - }, - }, - } - - // Determine if the resource is still being reconciled, or if it has stalled, and record this observation - if retErr == nil && (result.IsZero() || !result.Requeue) { - // We are no longer reconciling - conditions.Delete(obj, meta.ReconcilingCondition) - - // We have now observed this generation - patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) - - readyCondition := conditions.Get(obj, meta.ReadyCondition) - switch readyCondition.Status { - case metav1.ConditionFalse: - // As we are no longer reconciling and the end-state is not ready, the reconciliation has stalled - conditions.MarkStalled(obj, readyCondition.Reason, readyCondition.Message) - case metav1.ConditionTrue: - // As we are no longer reconciling and the end-state is ready, the reconciliation is no longer stalled - conditions.Delete(obj, meta.StalledCondition) - } - } - - // Finally, patch the resource - if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { - // Ignore patch error "not found" when the object is being deleted. 
- if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) - } - retErr = kerrors.NewAggregate([]error{retErr, err}) - } + retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) // Always record readiness and duration metrics r.Metrics.RecordReadiness(ctx, obj) @@ -183,60 +161,109 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res // Add finalizer first if not exist to avoid the race condition between init and delete if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue return ctrl.Result{Requeue: true}, nil } // Examine if the object is under deletion if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, obj) + res, err := r.reconcileDelete(ctx, obj) + return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, res, err) } // Reconcile actual object - return r.reconcile(ctx, obj) + reconcilers := []bucketReconcilerFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileArtifact, + } + recResult, err = r.reconcile(ctx, obj, reconcilers) + return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, recResult, err) } -// reconcile steps through the actual reconciliation tasks for the object, it returns early on the first step that -// produces an error. -func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { - // Mark the resource as under reconciliation - conditions.MarkReconciling(obj, meta.ProgressingReason, "") +// summarizeAndPatch analyzes the object conditions to create a summary of the +// status conditions and patches the object with the calculated summary. +func (r *BucketReconciler) summarizeAndPatch(ctx context.Context, obj *sourcev1.Bucket, patchHelper *patch.Helper, res sreconcile.Result, recErr error) error { + // Record the value of the reconciliation request if any. + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + obj.Status.SetLastHandledReconcileRequest(v) + } + + // Compute the reconcile results, obtain patch options and reconcile error. + var patchOpts []patch.Option + patchOpts, recErr = sreconcile.ComputeReconcileResult(obj, res, recErr, bucketOwnedConditions) + + // Summarize the Ready condition based on abnormalities that may have been observed. + conditions.SetSummary(obj, + meta.ReadyCondition, + conditions.WithConditions( + bucketReadyDeps..., + ), + conditions.WithNegativePolarityConditions( + bucketReadyDepsNegative..., + ), + ) + + // Finally, patch the resource. + if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) + } + recErr = kerrors.NewAggregate([]error{recErr, err}) + } + + return recErr +} - // Reconcile the storage data - if result, err := r.reconcileStorage(ctx, obj); err != nil || result.IsZero() { - return result, err +// reconcile steps iterates through the actual reconciliation tasks for objec, +// it returns early on the first step that returns ResultRequeue or produces an +// error. 
+func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, reconcilers []bucketReconcilerFunc) (sreconcile.Result, error) { + if obj.Generation != obj.Status.ObservedGeneration { + conditions.MarkReconciling(obj, "NewGeneration", "Reconciling new generation %d", obj.Generation) } + var artifact sourcev1.Artifact + // Create temp working dir tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name)) if err != nil { - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, "Failed to create temporary directory: %s", err) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create temporary directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } defer os.RemoveAll(tmpDir) - // Reconcile the source from upstream - var artifact sourcev1.Artifact - if result, err := r.reconcileSource(ctx, obj, &artifact, tmpDir); err != nil || result.IsZero() { - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, err - } - - // Reconcile the artifact to storage - if result, err := r.reconcileArtifact(ctx, obj, artifact, tmpDir); err != nil || result.IsZero() { - return result, err + // Run the sub-reconcilers and build the result of reconciliation. + var res sreconcile.Result + var resErr error + for _, rec := range reconcilers { + recResult, err := rec(ctx, obj, &artifact, tmpDir) + // Exit immediately on ResultRequeue. + if recResult == sreconcile.ResultRequeue { + return sreconcile.ResultRequeue, nil + } + // If an error is received, prioritize the returned results because an + // error also means immediate requeue. + if err != nil { + resErr = err + res = recResult + break + } + // Prioritize requeue request in the result. + res = sreconcile.LowestRequeuingResult(res, recResult) } - - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return res, resErr } // reconcileStorage ensures the current state of the storage matches the desired and previously observed state. // // All artifacts for the resource except for the current one are garbage collected from the storage. // If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. -// If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. // If the hostname of the URLs on the object do not match the current storage server hostname, they are updated. -// -// The caller should assume a failure if an error is returned, or the Result is zero. 
-func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { +func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -248,26 +275,23 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B // Record that we do not have an artifact if obj.GetArtifact() == nil { - conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage") - return ctrl.Result{Requeue: true}, nil + conditions.MarkReconciling(obj, "NoArtifact", "No artifact for resource in storage") + return sreconcile.ResultSuccess, nil } - conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) // Always update URLs to ensure hostname is up-to-date // TODO(hidde): we may want to send out an event only if we notice the URL has changed r.Storage.SetArtifactURL(obj.GetArtifact()) obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the // result. // If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret // fails, it records v1beta1.FetchFailedCondition=True and returns early. -// -// The caller should assume a failure if an error is returned, or the Result is zero. -func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { +func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { var secret *corev1.Secret if obj.Spec.SecretRef != nil { secretName := types.NamespacedName{ @@ -276,12 +300,13 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu } secret = &corev1.Secret{} if err := r.Get(ctx, secretName, secret); err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, - "Failed to get secret '%s': %s", secretName.String(), err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.AuthenticationFailedReason, - "Failed to get secret '%s': %s", secretName.String(), err.Error()) + e := &serror.Event{ + Err: fmt.Errorf("failed to get secret '%s': %w", secretName.String(), err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) // Return error as the world as observed may change - return ctrl.Result{}, err + return sreconcile.ResultEmpty, e } } @@ -302,19 +327,18 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu // On a successful download, it removes v1beta1.FetchFailedCondition, and compares the current revision of HEAD to // the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. // If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. -// -// The caller should assume a failure if an error is returned, or the Result is zero. 
func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, - secret *corev1.Secret, dir string) (ctrl.Result, error) { + secret *corev1.Secret, dir string) (sreconcile.Result, error) { // Build the client with the configuration from the object and secret s3Client, err := r.buildMinioClient(obj, secret) if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to construct S3 client: %s", err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed to construct S3 client: %s", err.Error()) + e := &serror.Event{ + Err: fmt.Errorf("failed to construct S3 client: %w", err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) // Return error as the contents of the secret may change - return ctrl.Result{}, err + return sreconcile.ResultEmpty, e } // Confirm bucket exists @@ -322,36 +346,42 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source defer cancel() exists, err := s3Client.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to verify existence of bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } if !exists { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Bucket '%s' does not exist", obj.Spec.BucketName) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Bucket '%s' does not exist", obj.Spec.BucketName) - return ctrl.Result{}, fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName) + e := &serror.Event{ + Err: fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // Look for file with ignore rules first path := filepath.Join(dir, sourceignore.IgnoreFile) if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to get '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, 
sourcev1.BucketOperationFailedReason, - "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to read '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // In-spec patterns take precedence if obj.Spec.Ignore != nil { @@ -368,11 +398,12 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), }) { if err = object.Err; err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to list objects from bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // Ignore directories and the .sourceignore file @@ -391,13 +422,17 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source revision, err := index.Revision() if err != nil { ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to calculate revision: %w", err), + Reason: meta.FailedReason, + } } if !obj.GetArtifact().HasRevision(revision) { // Mark observations about the revision on the object - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", - "New upstream revision '%s'", revision) + message := fmt.Sprintf("new upstream revision '%s'", revision) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkReconciling(obj, "NewRevision", message) // Download the files in parallel, but with a limited number of workers group, groupCtx := errgroup.WithContext(ctx) @@ -421,20 +456,21 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source return nil }) if err = group.Wait(); err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("download from bucket '%s' failed: %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } - r.Eventf(obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason, - "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) + r.eventLogf(ctx, obj, corev1.EventTypeNormal, 
sourcev1.BucketOperationSucceedReason, + "downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } conditions.Delete(obj, sourcev1.FetchFailedCondition) // Create potential new artifact *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileGCPSource ensures the upstream Google Cloud Storage bucket can be reached and downloaded from using the @@ -446,18 +482,17 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source // On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to // the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. // If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. -// -// The caller should assume a failure if an error is returned, or the Result is zero. func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, - secret *corev1.Secret, dir string) (ctrl.Result, error) { + secret *corev1.Secret, dir string) (sreconcile.Result, error) { gcpClient, err := r.buildGCPClient(ctx, secret) if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to construct GCP client: %s", err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed to construct GCP client: %s", err.Error()) + e := &serror.Event{ + Err: fmt.Errorf("failed to construct GCP client: %w", err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) // Return error as the contents of the secret may change - return ctrl.Result{}, err + return sreconcile.ResultEmpty, e } defer gcpClient.Close(ctrl.LoggerFrom(ctx)) @@ -466,36 +501,42 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 defer cancel() exists, err := gcpClient.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to verify existence of bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } if !exists { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Bucket '%s' does not exist", obj.Spec.BucketName) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Bucket '%s' does not exist", obj.Spec.BucketName) - return ctrl.Result{}, fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName) + e := &serror.Event{ + Err: fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // Look for file with ignore rules first path := filepath.Join(dir, 
sourceignore.IgnoreFile) if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { if err != gcpstorage.ErrObjectNotExist { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to get '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to read '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // In-spec patterns take precedence if obj.Spec.Ignore != nil { @@ -514,11 +555,12 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 if err == gcp.IteratorDone { break } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to list objects from bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { @@ -535,14 +577,17 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 // Calculate revision checksum from the collected index values revision, err := index.Revision() if err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to calculate revision: %w", err), + Reason: meta.FailedReason, + } } if !obj.GetArtifact().HasRevision(revision) { // Mark observations about the revision on the object - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", - "New upstream revision '%s'", revision) + message := fmt.Sprintf("new upstream revision '%s'", revision) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkReconciling(obj, "NewRevision", message) // Download the files in parallel, but with a limited number of workers group, groupCtx := errgroup.WithContext(ctx) @@ -566,113 +611,121 @@ func (r 
*BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1
 		return nil
 	})
 	if err = group.Wait(); err != nil {
-		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason,
-			"Download from bucket '%s' failed: %s", obj.Spec.BucketName, err)
-		r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason,
-			"Download from bucket '%s' failed: %s", obj.Spec.BucketName, err)
-		return ctrl.Result{}, err
+		e := &serror.Event{
+			Err:    fmt.Errorf("download from bucket '%s' failed: %w", obj.Spec.BucketName, err),
+			Reason: sourcev1.BucketOperationFailedReason,
+		}
+		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error())
+		return sreconcile.ResultEmpty, e
 	}
-	r.Eventf(obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason,
-		"Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision)
+	r.eventLogf(ctx, obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason,
+		"downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision)
 	}
 	conditions.Delete(obj, sourcev1.FetchFailedCondition)

 	// Create potential new artifact
 	*artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision))

-	return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil
+	return sreconcile.ResultSuccess, nil
 }

 // reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match the
 // given data.
 //
-// The inspection of the given data to the object is differed, ensuring any stale observations as
-// v1beta1.ArtifactUnavailableCondition and v1beta1.ArtifactOutdatedCondition are always deleted.
+// The inspection of the given data to the object is deferred, ensuring stale observations such as
+// v1beta1.ArtifactOutdatedCondition are always deleted.
 // If the given artifact does not differ from the object's current, it returns early.
 // On a successful archive, the artifact in the status of the given object is set, and the symlink in the storage is
 // updated to its path.
-//
-// The caller should assume a failure if an error is returned, or the Result is zero.
-func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) (ctrl.Result, error) {
+func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) {
 	// Always restore the Ready condition in case it got removed due to a transient error
 	defer func() {
-		if obj.GetArtifact() != nil {
-			conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition)
-		}
 		if obj.GetArtifact().HasRevision(artifact.Revision) {
 			conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
 			conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason,
-				"Stored artifact for revision '%s'", artifact.Revision)
+				"stored artifact for revision '%s'", artifact.Revision)
 		}
 	}()

 	// The artifact is up-to-date
 	if obj.GetArtifact().HasRevision(artifact.Revision) {
-		ctrl.LoggerFrom(ctx).Info(fmt.Sprintf("Already up to date, current revision '%s'", artifact.Revision))
-		return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil
+		r.eventLogf(ctx, obj, corev1.EventTypeNormal, meta.SucceededReason, "already up to date, current revision '%s'", artifact.Revision)
+		return sreconcile.ResultSuccess, nil
 	}

+	// Mark reconciling because the artifact and remote source are different
+	// and they have to be reconciled.
+ conditions.MarkReconciling(obj, "NewRevision", "new upstream revision '%s'", artifact.Revision) + // Ensure target path exists and is a directory if f, err := os.Stat(dir); err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to stat source path") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to stat source path: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } else if !f.IsDir() { - err := fmt.Errorf("source path '%s' is not a directory", dir) - ctrl.LoggerFrom(ctx).Error(err, "invalid target path") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("source path '%s' is not a directory", dir), + Reason: sourcev1.StorageOperationFailedReason, + } } // Ensure artifact directory exists and acquire lock - if err := r.Storage.MkdirAll(artifact); err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to create artifact directory") - return ctrl.Result{}, err + if err := r.Storage.MkdirAll(*artifact); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create artifact directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } - unlock, err := r.Storage.Lock(artifact) + unlock, err := r.Storage.Lock(*artifact) if err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to acquire lock for artifact") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to acquire lock for artifact: %w", err), + Reason: meta.FailedReason, + } } defer unlock() // Archive directory to storage - if err := r.Storage.Archive(&artifact, dir, nil); err != nil { - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, - "Unable to archive artifact to storage: %s", err) - return ctrl.Result{}, err + if err := r.Storage.Archive(artifact, dir, nil); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("unable to archive artifact to storage: %s", err), + Reason: sourcev1.StorageOperationFailedReason, + } } r.AnnotatedEventf(obj, map[string]string{ "revision": artifact.Revision, "checksum": artifact.Checksum, - }, corev1.EventTypeNormal, "NewArtifact", "Stored artifact for revision '%s'", artifact.Revision) + }, corev1.EventTypeNormal, "NewArtifact", "stored artifact for revision '%s'", artifact.Revision) // Record it on the object obj.Status.Artifact = artifact.DeepCopy() // Update symlink on a "best effort" basis - url, err := r.Storage.Symlink(artifact, "latest.tar.gz") + url, err := r.Storage.Symlink(*artifact, "latest.tar.gz") if err != nil { - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, - "Failed to update status URL symlink: %s", err) + r.eventLogf(ctx, obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "failed to update status URL symlink: %s", err) } if url != "" { obj.Status.URL = url } - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileDelete handles the deletion of an object. It first garbage collects all artifacts for the object from the // artifact storage, if successful, the finalizer is removed from the object. 
-func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) {
+func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (sreconcile.Result, error) {
 	// Garbage collect the resource's artifacts
 	if err := r.garbageCollect(ctx, obj); err != nil {
 		// Return the error so we retry the failed garbage collection
-		return ctrl.Result{}, err
+		return sreconcile.ResultEmpty, err
 	}

 	// Remove our finalizer from the list
 	controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer)

 	// Stop reconciliation as the object is being deleted
-	return ctrl.Result{}, nil
+	return sreconcile.ResultEmpty, nil
 }

 // garbageCollect performs a garbage collection for the given v1beta1.Bucket. It removes all but the current
@@ -681,23 +734,26 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bu
 func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error {
 	if !obj.DeletionTimestamp.IsZero() {
 		if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
-			r.Eventf(obj, corev1.EventTypeWarning, "GarbageCollectionFailed",
-				"Garbage collection for deleted resource failed: %s", err)
-			return err
+			return &serror.Event{
+				Err:    fmt.Errorf("garbage collection for deleted resource failed: %s", err),
+				Reason: "GarbageCollectionFailed",
+			}
 		}
 		obj.Status.Artifact = nil
 		// TODO(hidde): we should only push this event if we actually garbage collected something
-		r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded",
-			"Garbage collected artifacts for deleted resource")
+		r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded",
+			"garbage collected artifacts for deleted resource")
 		return nil
 	}
 	if obj.GetArtifact() != nil {
 		if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil {
-			r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionFailed", "Garbage collection of old artifacts failed: %s", err)
-			return err
+			return &serror.Event{
+				Err:    fmt.Errorf("garbage collection of old artifacts failed: %s", err),
+				Reason: "GarbageCollectionFailed",
+			}
 		}
 		// TODO(hidde): we should only push this event if we actually garbage collected something
-		r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", "Garbage collected old artifacts")
+		r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", "garbage collected old artifacts")
 	}
 	return nil
 }
@@ -771,3 +827,17 @@ func (i etagIndex) Revision() (string, error) {
 	}
 	return fmt.Sprintf("%x", sum.Sum(nil)), nil
 }
+
+// eventLogf records an event and logs it at the same time. This log is
+// different from the debug log in the event recorder in that it is a simple
+// log, while the event recorder's debug log contains the complete details of
+// the event.
+func (r *BucketReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) {
+	msg := fmt.Sprintf(messageFmt, args...)
+	// Log and emit event.
+ if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) + } else { + ctrl.LoggerFrom(ctx).Info(msg) + } + r.Eventf(obj, eventType, reason, msg) +} diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 40fd9d0ca..c1c9c59c7 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -31,6 +31,7 @@ import ( "testing" "time" + "github.com/darkowlzz/controller-check/status" "github.com/go-logr/logr" . "github.com/onsi/gomega" raw "google.golang.org/api/storage/v1" @@ -38,7 +39,6 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/log" @@ -46,7 +46,8 @@ import ( "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/runtime/conditions" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" ) // Environment variable to set the GCP Storage host for the GCP client. @@ -126,6 +127,11 @@ func TestBucketReconciler_Reconcile(t *testing.T) { obj.Generation == obj.Status.ObservedGeneration }, timeout).Should(BeTrue()) + // Check if the object status is valid. + condns := &status.Conditions{NegativePolarity: bucketReadyDepsNegative} + checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) + checker.CheckErr(ctx, obj) + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) // Wait for Bucket to be deleted @@ -141,7 +147,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { tests := []struct { name string beforeFunc func(obj *sourcev1.Bucket, storage *Storage) error - want ctrl.Result + want sreconcile.Result wantErr bool assertArtifact *sourcev1.Artifact assertConditions []metav1.Condition @@ -167,6 +173,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { testStorage.SetArtifactURL(obj.Status.Artifact) return nil }, + want: sreconcile.ResultSuccess, assertArtifact: &sourcev1.Artifact{ Path: "/reconcile-storage/c.txt", Revision: "c", @@ -189,12 +196,12 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { testStorage.SetArtifactURL(obj.Status.Artifact) return nil }, - want: ctrl.Result{Requeue: true}, + want: sreconcile.ResultSuccess, assertPaths: []string{ "!/reconcile-storage/invalid.txt", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "No artifact for resource in storage"), }, }, { @@ -214,6 +221,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { } return nil }, + want: sreconcile.ResultSuccess, assertPaths: []string{ "/reconcile-storage/hostname.txt", }, @@ -243,7 +251,9 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) } - got, err := r.reconcileStorage(context.TODO(), obj) + var artifact sourcev1.Artifact + + got, err := r.reconcileStorage(context.TODO(), obj, &artifact, "") g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) @@ -273,7 +283,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { middleware http.Handler secret *corev1.Secret 
beforeFunc func(obj *sourcev1.Bucket) - want ctrl.Result + want sreconcile.Result wantErr bool assertArtifact sourcev1.Artifact assertConditions []metav1.Condition @@ -289,12 +299,14 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), }, }, // TODO(hidde): middleware for mock server @@ -312,7 +324,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), }, }, { @@ -330,7 +342,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: invalid 'dummy' secret data: required fields"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to construct S3 client: invalid 'dummy' secret data: required fields"), }, }, { @@ -341,7 +353,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' does not exist"), }, }, { @@ -352,7 +364,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to verify existence of bucket 'unavailable'"), }, }, { @@ -379,12 +391,14 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100.tar.gz", Revision: "94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), + 
*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), }, }, { @@ -414,12 +428,14 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), }, }, { @@ -438,6 +454,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", @@ -448,7 +465,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { name: "Removes FetchFailedCondition after reconciling source", bucketName: "dummy", beforeFunc: func(obj *sourcev1.Bucket) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read test file") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") }, bucketObjects: []*s3MockObject{ { @@ -458,12 +475,14 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), }, }, } @@ -534,7 +553,7 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { bucketObjects []*gcpMockObject secret *corev1.Secret beforeFunc func(obj *sourcev1.Bucket) - want ctrl.Result + want sreconcile.Result wantErr bool assertArtifact sourcev1.Artifact assertConditions []metav1.Condition @@ -564,12 +583,14 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { Name: "dummy", } }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: 
"bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), }, }, { @@ -580,9 +601,10 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { Name: "dummy", } }, + want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), }, }, { @@ -598,9 +620,10 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { Name: "dummy", } }, + want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: invalid 'dummy' secret data: required fields"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to construct GCP client: invalid 'dummy' secret data: required fields"), }, }, { @@ -609,9 +632,10 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.BucketName = "invalid" }, + want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' does not exist"), }, }, { @@ -620,9 +644,10 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { obj.Spec.Endpoint = "transient.example.com" obj.Spec.BucketName = "unavailable" }, + want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to verify existence of bucket 'unavailable'"), }, }, { @@ -645,12 +670,14 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { ContentType: "text/plain", }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4.tar.gz", Revision: "7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 
'7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), }, }, { @@ -677,12 +704,14 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { ContentType: "text/plain", }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), }, }, { @@ -700,6 +729,7 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { ContentType: "text/plain", }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", @@ -710,7 +740,7 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { name: "Removes FetchFailedCondition after reconciling source", bucketName: "dummy", beforeFunc: func(obj *sourcev1.Bucket) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read test file") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") }, bucketObjects: []*gcpMockObject{ { @@ -719,12 +749,14 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { ContentType: "text/plain", }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), }, }, // TODO: Middleware for mock server to test authentication using secret. 
@@ -802,7 +834,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { name string beforeFunc func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string) - want ctrl.Result + want sreconcile.Result wantErr bool assertConditions []metav1.Condition }{ @@ -811,9 +843,10 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { obj.Spec.Interval = metav1.Duration{Duration: interval} }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), }, }, { @@ -825,20 +858,9 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { t.Expect(obj.Status.URL).To(BeEmpty()) }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), - }, - }, - { - name: "Removes ArtifactUnavailableCondition after creating artifact", - beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "Foo", "") - }, - want: ctrl.Result{RequeueAfter: interval}, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), }, }, { @@ -847,9 +869,10 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { obj.Spec.Interval = metav1.Duration{Duration: interval} conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), }, }, { @@ -864,9 +887,10 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { t.Expect(err).NotTo(HaveOccurred()) t.Expect(localPath).To(Equal(targetFile)) }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), }, }, { @@ -874,7 +898,11 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) 
{ t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) }, + want: sreconcile.ResultEmpty, wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), + }, }, { name: "Dir path is not a directory", @@ -889,7 +917,11 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) }, + want: sreconcile.ResultEmpty, wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), + }, }, } @@ -929,7 +961,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { dlog := log.NewDelegatingLogSink(log.NullLogSink{}) nullLogger := logr.New(dlog) - got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, artifact, tmpDir) + got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, &artifact, tmpDir) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) From 5f125ebfcdff5beb48a74074e7cb2f7d9c99d2d6 Mon Sep 17 00:00:00 2001 From: Sunny Date: Wed, 24 Nov 2021 22:15:26 +0530 Subject: [PATCH 38/63] helmrepo: Replace GetInterval() with GetRequeueAfter() Signed-off-by: Sunny --- controllers/helmrepository_controller.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index 42050368b..c81bb264a 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -167,10 +167,10 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", time.Since(start).String(), - repository.GetInterval().Duration.String(), + repository.GetRequeueAfter().String(), )) - return ctrl.Result{RequeueAfter: repository.GetInterval().Duration}, nil + return ctrl.Result{RequeueAfter: repository.GetRequeueAfter()}, nil } func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, repo sourcev1.HelmRepository) (sourcev1.HelmRepository, error) { From dd68cd57b7d9d8f561803f7890714d9895d25689 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Sat, 31 Jul 2021 16:48:03 +0200 Subject: [PATCH 39/63] Rewrite `HelmRepositoryReconciler` to new standards This commit rewrites the `HelmRepositoryReconciler` to new standards, while implementing the newly introduced Condition types, and trying to adhere better to Kubernetes API conventions. More specifically it introduces: - Implementation of more explicit Condition types to highlight abnormalities. - Extensive usage of the `conditions` subpackage from `runtime`. - Better and more conflict-resilient (status)patching of reconciled objects using the `patch` subpackage from runtime. - Proper implementation of kstatus' `Reconciling` and `Stalled` conditions. - Refactoring of some Helm elements to make them easier to use within the new reconciler logic. - Integration tests that solely rely on `testenv` and do not use Ginkgo. There are a couple of TODOs marked in-code, these are suggestions for the future and should be non-blocking. In addition to the TODOs, more complex and/or edge-case test scenarios may be added as well. 
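As a rough sketch only (this is not code added by this patch; the helper name,
its parameters, and the exact set of summarized conditions are illustrative,
and it assumes the fluxcd/pkg/runtime `conditions` and `patch` packages and
`meta` from fluxcd/pkg/apis already used in this change), the deferred
summarize-and-patch flow described above looks roughly like this:

    // summarizeAndPatch sketches the status handling the rewritten reconciler
    // performs in a deferred block: summarize the negative-polarity conditions
    // into Ready, translate the settled end state into kstatus
    // Reconciling/Stalled, and patch only the conditions owned by this
    // controller.
    func summarizeAndPatch(ctx context.Context, helper *patch.Helper,
        obj *sourcev1.HelmRepository, res ctrl.Result, recErr error) error {
        conditions.SetSummary(obj,
            meta.ReadyCondition,
            conditions.WithConditions(sourcev1.FetchFailedCondition),
            conditions.WithNegativePolarityConditions(sourcev1.FetchFailedCondition),
        )
        if recErr == nil && (res.IsZero() || !res.Requeue) {
            // Reconciliation has settled for this generation.
            conditions.Delete(obj, meta.ReconcilingCondition)
            if ready := conditions.Get(obj, meta.ReadyCondition); ready != nil && ready.Status == metav1.ConditionFalse {
                // No longer reconciling and still not ready: the object has stalled.
                conditions.MarkStalled(obj, ready.Reason, ready.Message)
            } else {
                conditions.Delete(obj, meta.StalledCondition)
            }
        }
        return helper.Patch(ctx, obj, patch.WithOwnedConditions{
            Conditions: []string{
                meta.ReadyCondition,
                meta.ReconcilingCondition,
                meta.StalledCondition,
            },
        })
    }

The actual deferred block in the rewritten Reconcile below additionally records
the last handled reconcile request and the readiness and duration metrics.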
Signed-off-by: Hidde Beydals --- api/v1beta2/helmrepository_types.go | 43 - controllers/helmrepository_controller.go | 544 ++++++----- controllers/helmrepository_controller_test.go | 883 +++++++++++------- controllers/legacy_suite_test.go | 6 +- controllers/suite_test.go | 10 + main.go | 11 +- 6 files changed, 872 insertions(+), 625 deletions(-) diff --git a/api/v1beta2/helmrepository_types.go b/api/v1beta2/helmrepository_types.go index 492ece868..4cc6bd260 100644 --- a/api/v1beta2/helmrepository_types.go +++ b/api/v1beta2/helmrepository_types.go @@ -19,12 +19,10 @@ package v1beta2 import ( "time" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -108,47 +106,6 @@ const ( IndexationSucceededReason string = "IndexationSucceed" ) -// HelmRepositoryProgressing resets the conditions of the HelmRepository to -// metav1.Condition of type meta.ReadyCondition with status 'Unknown' and -// meta.ProgressingReason reason and message. It returns the modified -// HelmRepository. -func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { - repository.Status.ObservedGeneration = repository.Generation - repository.Status.URL = "" - repository.Status.Conditions = []metav1.Condition{} - conditions.MarkUnknown(&repository, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") - return repository -} - -// HelmRepositoryReady sets the given Artifact and URL on the HelmRepository and -// sets the meta.ReadyCondition to 'True', with the given reason and message. It -// returns the modified HelmRepository. -func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reason, message string) HelmRepository { - repository.Status.Artifact = &artifact - repository.Status.URL = url - conditions.MarkTrue(&repository, meta.ReadyCondition, reason, message) - return repository -} - -// HelmRepositoryNotReady sets the meta.ReadyCondition on the given -// HelmRepository to 'False', with the given reason and message. It returns the -// modified HelmRepository. -func HelmRepositoryNotReady(repository HelmRepository, reason, message string) HelmRepository { - conditions.MarkFalse(&repository, meta.ReadyCondition, reason, message) - return repository -} - -// HelmRepositoryReadyMessage returns the message of the metav1.Condition of type -// meta.ReadyCondition with status 'True' if present, or an empty string. -func HelmRepositoryReadyMessage(repository HelmRepository) string { - if c := apimeta.FindStatusCondition(repository.Status.Conditions, meta.ReadyCondition); c != nil { - if c.Status == metav1.ConditionTrue { - return c.Message - } - } - return "" -} - // GetConditions returns the status conditions of the object. 
func (in HelmRepository) GetConditions() []metav1.Condition { return in.Status.Conditions diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index c81bb264a..d3944ff70 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -25,12 +25,11 @@ import ( helmgetter "helm.sh/helm/v3/pkg/getter" corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -38,8 +37,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" @@ -55,12 +55,11 @@ import ( // HelmRepositoryReconciler reconciles a HelmRepository object type HelmRepositoryReconciler struct { client.Client - Scheme *runtime.Scheme - Storage *Storage - Getters helmgetter.Providers - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder + kuberecorder.EventRecorder + helper.Metrics + + Getters helmgetter.Providers + Storage *Storage } type HelmRepositoryReconcilerOptions struct { @@ -79,317 +78,388 @@ func (r *HelmRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, Complete(r) } -func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := ctrl.LoggerFrom(ctx) - var repository sourcev1.HelmRepository - if err := r.Get(ctx, req.NamespacedName, &repository); err != nil { + // Fetch the HelmRepository + obj := &sourcev1.HelmRepository{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } // Record suspended status metric - defer r.recordSuspension(ctx, repository) - - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&repository, sourcev1.SourceFinalizer) { - patch := client.MergeFrom(repository.DeepCopy()) - controllerutil.AddFinalizer(&repository, sourcev1.SourceFinalizer) - if err := r.Patch(ctx, &repository, patch); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } - - // Examine if the object is under deletion - if !repository.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, repository) - } + r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - // Return early if the object is suspended. 
- if repository.Spec.Suspend { + // Return early if the object is suspended + if obj.Spec.Suspend { log.Info("Reconciliation is suspended for this object") return ctrl.Result{}, nil } - // record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - return ctrl.Result{}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, start) + // Initialize the patch helper + patchHelper, err := patch.NewHelper(obj, r.Client) + if err != nil { + return ctrl.Result{}, err } - // set initial status - if resetRepository, ok := r.resetStatus(repository); ok { - repository = resetRepository - if err := r.updateStatus(ctx, req, repository.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err + // Always attempt to patch the object and status after each reconciliation + defer func() { + // Record the value of the reconciliation request, if any + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + obj.Status.SetLastHandledReconcileRequest(v) + } + + // Summarize Ready condition + conditions.SetSummary(obj, + meta.ReadyCondition, + conditions.WithConditions( + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactUnavailableCondition, + ), + conditions.WithNegativePolarityConditions( + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactUnavailableCondition, + ), + ) + + // Patch the object, ignoring conflicts on the conditions owned by this controller + patchOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactUnavailableCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + }, + } + + // Determine if the resource is still being reconciled, or if it has stalled, and record this observation + if retErr == nil && (result.IsZero() || !result.Requeue) { + // We are no longer reconciling + conditions.Delete(obj, meta.ReconcilingCondition) + + // We have now observed this generation + patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) + + readyCondition := conditions.Get(obj, meta.ReadyCondition) + switch readyCondition.Status { + case metav1.ConditionFalse: + // As we are no longer reconciling and the end-state + // is not ready, the reconciliation has stalled + conditions.MarkStalled(obj, readyCondition.Reason, readyCondition.Message) + case metav1.ConditionTrue: + // As we are no longer reconciling and the end-state + // is ready, the reconciliation is no longer stalled + conditions.Delete(obj, meta.StalledCondition) + } } - r.recordReadiness(ctx, repository) - } - // record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. - if v, ok := meta.ReconcileAnnotationValue(repository.GetAnnotations()); ok { - repository.Status.SetLastHandledReconcileRequest(v) + // Finally, patch the resource + if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted. 
+ if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) + } + retErr = kerrors.NewAggregate([]error{retErr, err}) + } + + // Always record readiness and duration metrics + r.Metrics.RecordReadiness(ctx, obj) + r.Metrics.RecordDuration(ctx, obj, start) + }() + + // Add finalizer first if not exist to avoid the race condition + // between init and delete + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + return ctrl.Result{Requeue: true}, nil } - // purge old artifacts from storage - if err := r.gc(repository); err != nil { - log.Error(err, "unable to purge old artifacts") + // Examine if the object is under deletion + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, obj) } - // reconcile repository by downloading the index.yaml file - reconciledRepository, reconcileErr := r.reconcile(ctx, *repository.DeepCopy()) + // Reconcile actual object + return r.reconcile(ctx, obj) +} + +// reconcile steps through the actual reconciliation tasks for the object, it returns early on the first step that +// produces an error. +func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.HelmRepository) (ctrl.Result, error) { + // Mark the resource as under reconciliation + conditions.MarkReconciling(obj, meta.ProgressingReason, "") - // update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledRepository.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err + // Reconcile the storage data + if result, err := r.reconcileStorage(ctx, obj); err != nil { + return result, err } - // if reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledRepository, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledRepository) - return ctrl.Result{Requeue: true}, reconcileErr + var chartRepo repository.ChartRepository + var artifact sourcev1.Artifact + // Reconcile the source from upstream + if result, err := r.reconcileSource(ctx, obj, &artifact, &chartRepo); err != nil || result.IsZero() { + return result, err } - // emit revision change event - if repository.Status.Artifact == nil || reconciledRepository.Status.Artifact.Revision != repository.Status.Artifact.Revision { - r.event(ctx, reconciledRepository, events.EventSeverityInfo, sourcev1.HelmRepositoryReadyMessage(reconciledRepository)) + // Reconcile the artifact. + if result, err := r.reconcileArtifact(ctx, obj, artifact, &chartRepo); err != nil || result.IsZero() { + return result, err } - r.recordReadiness(ctx, reconciledRepository) - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Since(start).String(), - repository.GetRequeueAfter().String(), - )) + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil +} - return ctrl.Result{RequeueAfter: repository.GetRequeueAfter()}, nil +// reconcileStorage ensures the current state of the storage matches the desired and previously observed state. +// +// All artifacts for the resource except for the current one are garbage collected from the storage. +// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. +// If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. 
+// If the hostname of any of the URLs on the object do not match the current storage server hostname, they are updated. +// +// The caller should assume a failure if an error is returned, or the Result is zero. +func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.HelmRepository) (ctrl.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) + + // Determine if the advertised artifact is still in storage + if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { + obj.Status.Artifact = nil + obj.Status.URL = "" + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage") + return ctrl.Result{Requeue: true}, nil + } + conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) + + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) + + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, repo sourcev1.HelmRepository) (sourcev1.HelmRepository, error) { - log := ctrl.LoggerFrom(ctx) +// reconcileSource ensures the upstream Helm repository can be reached and downloaded out using the declared +// configuration, and stores a new artifact in the storage. +// +// The Helm repository index is downloaded using the defined configuration, and in case of an error during this process +// (including transient errors), it records v1beta1.FetchFailedCondition=True and returns early. +// On a successful write of a new artifact, the artifact in the status of the given object is set, and the symlink in +// the storage is updated to its path. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
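As a concrete reading of that contract, the sketch below mirrors the call site in reconcile() shown earlier; it illustrates the convention and is not new code in this patch:

	var artifact sourcev1.Artifact
	var chartRepo repository.ChartRepository
	result, err := r.reconcileSource(ctx, obj, &artifact, &chartRepo)
	if err != nil || result.IsZero() {
		// An error, or an explicit zero Result, means the remaining steps
		// (such as reconcileArtifact) must not run.
		return result, err
	}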
+func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (ctrl.Result, error) { + // Configure Helm client to access repository clientOpts := []helmgetter.Option{ - helmgetter.WithURL(repo.Spec.URL), - helmgetter.WithTimeout(repo.Spec.Timeout.Duration), - helmgetter.WithPassCredentialsAll(repo.Spec.PassCredentials), + helmgetter.WithTimeout(obj.Spec.Timeout.Duration), + helmgetter.WithURL(obj.Spec.URL), + helmgetter.WithPassCredentialsAll(obj.Spec.PassCredentials), } - if repo.Spec.SecretRef != nil { + + // Configure any authentication related options + if obj.Spec.SecretRef != nil { + // Attempt to retrieve secret name := types.NamespacedName{ - Namespace: repo.GetNamespace(), - Name: repo.Spec.SecretRef.Name, + Namespace: obj.GetNamespace(), + Name: obj.Spec.SecretRef.Name, } - var secret corev1.Secret - err := r.Client.Get(ctx, name, &secret) - if err != nil { - err = fmt.Errorf("auth secret error: %w", err) - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.AuthenticationFailedReason, err.Error()), err + if err := r.Client.Get(ctx, name, &secret); err != nil { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, + "Failed to get secret '%s': %s", name.String(), err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.AuthenticationFailedReason, + "Failed to get secret '%s': %s", name.String(), err.Error()) + // Return error as the world as observed may change + return ctrl.Result{}, err } - authDir, err := os.MkdirTemp("", repo.Kind+"-"+repo.Namespace+"-"+repo.Name+"-") + // Get client options from secret + tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-auth-", obj.Name, obj.Namespace)) if err != nil { - err = fmt.Errorf("failed to create temporary working directory for credentials: %w", err) - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.AuthenticationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.StorageOperationFailedReason, + "Failed to create temporary directory for credentials: %s", err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "Failed to create temporary directory for credentials: %s", err.Error()) + return ctrl.Result{}, err } - defer func() { - if err := os.RemoveAll(authDir); err != nil { - log.Error(err, "failed to remove working directory", "path", authDir) - } - }() + defer os.RemoveAll(tmpDir) - opts, err := getter.ClientOptionsFromSecret(authDir, secret) + // Construct actual options + opts, err := getter.ClientOptionsFromSecret(tmpDir, secret) if err != nil { - err = fmt.Errorf("auth options error: %w", err) - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.AuthenticationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, + "Failed to configure Helm client with secret data: %s", err) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.AuthenticationFailedReason, + "Failed to configure Helm client with secret data: %s", err) + // Return err as the content of the secret may change + return ctrl.Result{}, err } clientOpts = append(clientOpts, opts...) 
} - chartRepo, err := repository.NewChartRepository(repo.Spec.URL, "", r.Getters, clientOpts) + // Construct Helm chart repository with options and download index + newChartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", r.Getters, clientOpts) if err != nil { switch err.(type) { case *url.Error: - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.URLInvalidReason, err.Error()), err + ctrl.LoggerFrom(ctx).Error(err, "invalid Helm repository URL") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, + "Invalid Helm repository URL: %s", err.Error()) + return ctrl.Result{}, nil default: - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.IndexationFailedReason, err.Error()), err + ctrl.LoggerFrom(ctx).Error(err, "failed to construct Helm client") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FailedReason, + "Failed to construct Helm client: %s", err.Error()) + return ctrl.Result{}, nil } } - checksum, err := chartRepo.CacheIndex() + checksum, err := newChartRepo.CacheIndex() if err != nil { - err = fmt.Errorf("failed to download repository index: %w", err) - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.IndexationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FailedReason, + "Failed to download Helm repository index: %s", err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.FetchFailedCondition, + "Failed to download Helm repository index: %s", err.Error()) + // Coin flip on transient or persistent error, return error and hope for the best + return ctrl.Result{}, err } - defer chartRepo.RemoveCache() + *chartRepo = *newChartRepo + + // Load the cached repository index to ensure it passes validation. + if err := chartRepo.LoadFromCache(); err != nil { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.IndexationFailedReason, + "Failed to load Helm repository from cache: %s", err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.FetchFailedCondition, + "Failed to load Helm repository from cache: %s", err.Error()) + return ctrl.Result{}, err + } + defer chartRepo.Unload() - artifact := r.Storage.NewArtifactFor(repo.Kind, - repo.ObjectMeta.GetObjectMeta(), - "", + // Mark observations about the revision on the object. + if !obj.GetArtifact().HasRevision(checksum) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", + "New index revision '%s'", checksum) + } + + conditions.Delete(obj, sourcev1.FetchFailedCondition) + + // Create potential new artifact. + *artifact = r.Storage.NewArtifactFor(obj.Kind, + obj.ObjectMeta.GetObjectMeta(), + chartRepo.Checksum, fmt.Sprintf("index-%s.yaml", checksum)) - // Return early on unchanged index - if apimeta.IsStatusConditionTrue(repo.Status.Conditions, meta.ReadyCondition) && - (repo.GetArtifact() != nil && repo.GetArtifact().Checksum == checksum) { - if artifact.URL != repo.GetArtifact().URL { - r.Storage.SetArtifactURL(repo.GetArtifact()) - repo.Status.URL = r.Storage.SetHostname(repo.Status.URL) + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil +} + +func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) (ctrl.Result, error) { + // Always restore the Ready condition in case it got removed due to a transient error. 
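	// The deferred block below runs on every return path: once the stored
	// artifact matches the candidate revision, the stale negative conditions
	// are cleared again and Ready is marked True, so a transient failure does
	// not leave the object permanently not-Ready.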
+ defer func() { + if obj.GetArtifact() != nil { + conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) } - return repo, nil - } + if obj.GetArtifact().HasRevision(artifact.Revision) { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, + "Stored artifact for revision '%s'", artifact.Revision) + } + }() - // Load the cached repository index to ensure it passes validation - if err := chartRepo.LoadFromCache(); err != nil { - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.IndexationFailedReason, err.Error()), err + if obj.GetArtifact().HasRevision(artifact.Revision) { + ctrl.LoggerFrom(ctx).Info(fmt.Sprintf("Already up to date, current revision '%s'", artifact.Revision)) + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } - // The repository checksum is the SHA256 of the loaded bytes, after sorting - artifact.Revision = chartRepo.Checksum - chartRepo.Unload() - // Create artifact dir - err = r.Storage.MkdirAll(artifact) - if err != nil { - err = fmt.Errorf("unable to create repository index directory: %w", err) - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.StorageOperationFailedReason, err.Error()), err + // Clear cache at the very end. + defer chartRepo.RemoveCache() + + // Create artifact dir. + if err := r.Storage.MkdirAll(artifact); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to create artifact directory") + return ctrl.Result{}, err } - // Acquire lock + // Acquire lock. unlock, err := r.Storage.Lock(artifact) if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.StorageOperationFailedReason, err.Error()), err + ctrl.LoggerFrom(ctx).Error(err, "failed to acquire lock for artifact") + return ctrl.Result{}, err } defer unlock() - // Save artifact to storage + // Save artifact to storage. if err = r.Storage.CopyFromPath(&artifact, chartRepo.CachePath); err != nil { - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.StorageOperationFailedReason, err.Error()), err + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "Unable to save artifact to storage: %s", err) + return ctrl.Result{}, err } - // Update index symlink + // Record it on the object. + obj.Status.Artifact = artifact.DeepCopy() + + // Update index symlink. indexURL, err := r.Storage.Symlink(artifact, "index.yaml") if err != nil { - err = fmt.Errorf("storage error: %w", err) - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.StorageOperationFailedReason, err.Error()), err + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "Failed to update status URL symlink: %s", err) } - message := fmt.Sprintf("Fetched revision: %s", artifact.Revision) - return sourcev1.HelmRepositoryReady(repo, artifact, indexURL, sourcev1.IndexationSucceededReason, message), nil + if indexURL != "" { + obj.Status.URL = indexURL + } + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, repository sourcev1.HelmRepository) (ctrl.Result, error) { - // Our finalizer is still present, so lets handle garbage collection - if err := r.gc(repository); err != nil { - r.event(ctx, repository, events.EventSeverityError, - fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) +// reconcileDelete handles the delete of an object. 
It first garbage collects all artifacts for the object from the +// artifact storage, if successful, the finalizer is removed from the object. +func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmRepository) (ctrl.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { // Return the error so we retry the failed garbage collection return ctrl.Result{}, err } - // Record deleted status - r.recordReadiness(ctx, repository) - - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&repository, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &repository); err != nil { - return ctrl.Result{}, err - } + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) // Stop reconciliation as the object is being deleted return ctrl.Result{}, nil } -// resetStatus returns a modified v1beta1.HelmRepository and a boolean indicating -// if the status field has been reset. -func (r *HelmRepositoryReconciler) resetStatus(repository sourcev1.HelmRepository) (sourcev1.HelmRepository, bool) { - // We do not have an artifact, or it does no longer exist - if repository.GetArtifact() == nil || !r.Storage.ArtifactExist(*repository.GetArtifact()) { - repository = sourcev1.HelmRepositoryProgressing(repository) - repository.Status.Artifact = nil - return repository, true - } - if repository.Generation != repository.Status.ObservedGeneration { - return sourcev1.HelmRepositoryProgressing(repository), true - } - return repository, false -} - -// gc performs a garbage collection for the given v1beta1.HelmRepository. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. -func (r *HelmRepositoryReconciler) gc(repository sourcev1.HelmRepository) error { - if !repository.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(repository.Kind, repository.GetObjectMeta(), "", "*")) - } - if repository.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*repository.GetArtifact()) +// garbageCollect performs a garbage collection for the given v1beta1.HelmRepository. It removes all but the current +// artifact except for when the deletion timestamp is set, which will result in the removal of all artifacts for the +// resource. 
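// Note that callers treat a garbage collection failure differently:
// reconcileStorage ignores the returned error (cleanup there is best-effort),
// while reconcileDelete returns it so the deletion is retried.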
+func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmRepository) error { + if !obj.DeletionTimestamp.IsZero() { + if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + r.Eventf(obj, corev1.EventTypeWarning, "GarbageCollectionFailed", + "Garbage collection for deleted resource failed: %s", err) + return err + } + obj.Status.Artifact = nil + // TODO(hidde): we should only push this event if we actually garbage collected something + r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + "Garbage collected artifacts for deleted resource") + return nil + } + if obj.GetArtifact() != nil { + if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + r.Eventf(obj, corev1.EventTypeWarning, "GarbageCollectionFailed", + "Garbage collection of old artifacts failed: %s", err) + return err + } + // TODO(hidde): we should only push this event if we actually garbage collected something + r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + "Garbage collected old artifacts") } return nil } - -// event emits a Kubernetes event and forwards the event to notification controller if configured -func (r *HelmRepositoryReconciler) event(ctx context.Context, repository sourcev1.HelmRepository, severity, msg string) { - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) - } - if r.ExternalEventRecorder != nil { - r.ExternalEventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) - } -} - -func (r *HelmRepositoryReconciler) recordReadiness(ctx context.Context, repository sourcev1.HelmRepository) { - log := ctrl.LoggerFrom(ctx) - if r.MetricsRecorder == nil { - return - } - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to record readiness metric") - return - } - if rc := apimeta.FindStatusCondition(repository.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !repository.DeletionTimestamp.IsZero()) - } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !repository.DeletionTimestamp.IsZero()) - } -} - -func (r *HelmRepositoryReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.HelmRepositoryStatus) error { - var repository sourcev1.HelmRepository - if err := r.Get(ctx, req.NamespacedName, &repository); err != nil { - return err - } - - patch := client.MergeFrom(repository.DeepCopy()) - repository.Status = newStatus - - return r.Status().Patch(ctx, &repository, patch) -} - -func (r *HelmRepositoryReconciler) recordSuspension(ctx context.Context, hr sourcev1.HelmRepository) { - if r.MetricsRecorder == nil { - return - } - log := ctrl.LoggerFrom(ctx) - - objRef, err := reference.GetReference(r.Scheme, &hr) - if err != nil { - log.Error(err, "unable to record suspended metric") - return - } - - if !hr.DeletionTimestamp.IsZero() { - r.MetricsRecorder.RecordSuspend(*objRef, false) - } else { - r.MetricsRecorder.RecordSuspend(*objRef, hr.Spec.Suspend) - } -} diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index bd7172fca..55bbe5573 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -18,389 +18,600 @@ package controllers import ( "context" + "fmt" "net/http" "os" - "path" 
+ "path/filepath" "strings" - "time" + "testing" - . "github.com/onsi/ginkgo" + "github.com/go-logr/logr" . "github.com/onsi/gomega" + "helm.sh/helm/v3/pkg/getter" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log" "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/helmtestserver" + "github.com/fluxcd/pkg/runtime/conditions" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + "github.com/fluxcd/source-controller/internal/helm/repository" ) -var _ = Describe("HelmRepositoryReconciler", func() { - - const ( - timeout = time.Second * 30 - interval = time.Second * 1 - indexInterval = time.Second * 2 - repositoryTimeout = time.Second * 5 - ) - - Context("HelmRepository", func() { - var ( - namespace *corev1.Namespace - helmServer *helmtestserver.HelmServer - err error - ) - - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "helm-repository-" + randStringRunes(5)}, - } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") - - helmServer, err = helmtestserver.NewTempHelmServer() - Expect(err).To(Succeed()) - }) - - AfterEach(func() { - helmServer.Stop() - os.RemoveAll(helmServer.Root()) - - Eventually(func() error { - return k8sClient.Delete(context.Background(), namespace) - }, timeout, interval).Should(Succeed(), "failed to delete test namespace") - }) - - It("Creates artifacts for", func() { - helmServer.Start() - - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) +var ( + testGetters = getter.Providers{ + getter.Provider{ + Schemes: []string{"http", "https"}, + New: getter.NewHTTPGetter, + }, + } +) - key := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, +func TestHelmRepositoryReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + testServer, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(testServer.Root()) + + g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed()) + g.Expect(testServer.GenerateIndex()).To(Succeed()) + + testServer.Start() + defer testServer.Stop() + + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: testServer.URL(), + }, + } + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for HelmRepository to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) && obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return readyCondition.Status == metav1.ConditionTrue && + obj.Generation == 
readyCondition.ObservedGeneration + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmRepository to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.HelmRepository, storage *Storage) error + want ctrl.Result + wantErr bool + assertArtifact *sourcev1.Artifact + assertConditions []metav1.Condition + assertPaths []string + }{ + { + name: "garbage collects", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error { + revisions := []string{"a", "b", "c"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0644); err != nil { + return err + } + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/c.txt", + Revision: "c", + Checksum: "2e7d2c03a9507ae265ecf5b5356885a53393a2029d241394997265a1a25aefc6", + URL: testStorage.Hostname + "/reconcile-storage/c.txt", + }, + assertPaths: []string{ + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + }, + { + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: "/reconcile-storage/invalid.txt", + Revision: "d", + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: ctrl.Result{Requeue: true}, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0644); err != nil { + return err + } + return nil + }, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, } - created := &sourcev1.HelmRepository{ + + obj := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - Interval: metav1.Duration{Duration: 
indexInterval}, - Timeout: &metav1.Duration{Duration: repositoryTimeout}, + GenerateName: "test-", }, } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - - By("Expecting artifact") - got := &sourcev1.HelmRepository{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - By("Updating the chart index") - // Regenerating the index is sufficient to make the revision change - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - By("Expecting revision change and GC") - Eventually(func() bool { - now := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, now) - // Test revision change and garbage collection - return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - updated := &sourcev1.HelmRepository{} - Expect(k8sClient.Get(context.Background(), key, updated)).Should(Succeed()) - updated.Spec.URL = "invalid#url?" - Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed()) - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, updated) - for _, c := range updated.Status.Conditions { - if c.Reason == sourcev1.IndexationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - Expect(updated.Status.Artifact).ToNot(BeNil()) - - By("Expecting to delete successfully") - got = &sourcev1.HelmRepository{} - Eventually(func() error { - _ = k8sClient.Get(context.Background(), key, got) - return k8sClient.Delete(context.Background(), got) - }, timeout, interval).Should(Succeed()) - - By("Expecting delete to finish") - Eventually(func() error { - r := &sourcev1.HelmRepository{} - return k8sClient.Get(context.Background(), key, r) - }, timeout, interval).ShouldNot(Succeed()) - - exists := func(path string) bool { - // wait for tmp sync on macOS - time.Sleep(time.Second) - _, err := os.Stat(path) - return err == nil + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) } - By("Expecting GC after delete") - Eventually(exists(got.Status.Artifact.Path), timeout, interval).ShouldNot(BeTrue()) - }) - - It("Handles timeout", func() { - helmServer.Start() + got, err := r.reconcileStorage(context.TODO(), obj) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - key := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) } - created := &sourcev1.HelmRepository{ + }) + } +} + +func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { + type options struct { + username string + password string + publicKey []byte + privateKey []byte + ca []byte + 
} + + tests := []struct { + name string + protocol string + server options + secret *corev1.Secret + beforeFunc func(t *WithT, obj *sourcev1.HelmRepository) + afterFunc func(t *WithT, obj *sourcev1.HelmRepository) + want ctrl.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "HTTP without secretRef makes ArtifactOutdated=True", + protocol: "http", + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New index revision"), + }, + }, + { + name: "HTTP with Basic Auth secret makes ArtifactOutdated=True", + protocol: "http", + server: options{ + username: "git", + password: "1234", + }, + secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, + Name: "basic-auth", }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - Interval: metav1.Duration{Duration: indexInterval}, + Data: map[string][]byte{ + "username": []byte("git"), + "password": []byte("1234"), }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New index revision"), + }, + }, + { + name: "HTTPS with CAFile secret makes ArtifactOutdated=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "caFile": tlsCA, + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New index revision"), + }, + }, + { + name: "HTTPS with invalid CAFile secret makes FetchFailed=True and returns error", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-ca", + }, + Data: map[string][]byte{ + "caFile": []byte("invalid"), + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "can't create TLS config for client: failed to append certificates from file"), + }, + }, + { + name: "Invalid URL makes FetchFailed=True and returns zero Result", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "") + }, + want: ctrl.Result{}, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "first path segment in URL cannot contain colon"), + }, + }, + { + name: "Unsupported scheme makes FetchFailed=True and returns zero Result", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "ftp://") + }, + want: ctrl.Result{}, + wantErr: false, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "scheme \"ftp\" not supported"), + }, + }, + { + name: "Missing secret returns FetchFailed=True and returns error", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"} + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "secrets \"non-existing\" not found"), + }, + }, + { + name: "Malformed secret returns FetchFailed=True and returns error", + protocol: "http", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "malformed-basic-auth", + }, + Data: map[string][]byte{ + "username": []byte("git"), + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "malformed-basic-auth"} + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "required fields 'username' and 'password"), + }, + }, + } + + for _, tt := range tests { + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "auth-strategy-", + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: interval}, + }, + } + + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + server, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + + g.Expect(server.PackageChart("testdata/charts/helmchart")).To(Succeed()) + g.Expect(server.GenerateIndex()).To(Succeed()) + + if len(tt.server.username+tt.server.password) > 0 { + server.WithMiddleware(func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + if !ok || u != tt.server.username || p != tt.server.password { + w.WriteHeader(401) + return + } + handler.ServeHTTP(w, r) + }) + }) } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - - By("Expecting index download to succeed") - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, condition := range got.Status.Conditions { - if condition.Reason == sourcev1.IndexationSucceededReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Expecting index download to timeout") - updated := &sourcev1.HelmRepository{} - Expect(k8sClient.Get(context.Background(), key, updated)).Should(Succeed()) - updated.Spec.Timeout = &metav1.Duration{Duration: time.Microsecond} - Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed()) - Eventually(func() string { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, condition := range got.Status.Conditions { - if condition.Reason == sourcev1.IndexationFailedReason { - return condition.Message - } - } - return "" - }, timeout, interval).Should(MatchRegexp("(?i)timeout")) - }) - It("Authenticates when basic auth credentials are provided", func() { - var username, password = "john", "doe" - helmServer.WithMiddleware(func(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - u, p, ok := r.BasicAuth() - if !ok || 
username != u || password != p { - w.WriteHeader(401) - return - } - handler.ServeHTTP(w, r) - }) - }) - helmServer.Start() + secret := tt.secret.DeepCopy() + switch tt.protocol { + case "http": + server.Start() + defer server.Stop() + obj.Spec.URL = server.URL() + case "https": + g.Expect(server.StartTLS(tt.server.publicKey, tt.server.privateKey, tt.server.ca, "example.com")).To(Succeed()) + defer server.Stop() + obj.Spec.URL = server.URL() + default: + t.Fatalf("unsupported protocol %q", tt.protocol) + } - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj) + } - secretKey := types.NamespacedName{ - Name: "helmrepository-auth-" + randStringRunes(5), - Namespace: namespace.Name, + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) + if secret != nil { + builder.WithObjects(secret.DeepCopy()) } - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretKey.Name, - Namespace: secretKey.Namespace, - }, + + r := &HelmRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: builder.Build(), + Storage: testStorage, + Getters: testGetters, } - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) - key := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, + var chartRepo repository.ChartRepository + var artifact sourcev1.Artifact + dlog := log.NewDelegatingLogSink(log.NullLogSink{}) + nullLogger := logr.New(dlog) + got, err := r.reconcileSource(logr.NewContext(ctx, nullLogger), obj, &artifact, &chartRepo) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj) } - created := &sourcev1.HelmRepository{ + }) + } +} + +func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) + afterFunc func(t *WithT, obj *sourcev1.HelmRepository) + want ctrl.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "Archiving artifact to storage makes Ready=True", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Up-to-date artifact should not update status", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Status.Artifact = artifact.DeepCopy() + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Removes ArtifactUnavailableCondition after creating artifact", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact 
sourcev1.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating a new artifact", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "index.yaml") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + obj := &sourcev1.HelmRepository{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.HelmRepositoryKind, + }, ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, + GenerateName: "test-bucket-", + Generation: 1, + Namespace: "default", }, Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - SecretRef: &meta.LocalObjectReference{ - Name: secretKey.Name, - }, - Interval: metav1.Duration{Duration: indexInterval}, + Timeout: &metav1.Duration{Duration: timeout}, + URL: "https://example.com/index.yaml", }, } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - - By("Expecting 401") - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.IndexationFailedReason && - strings.Contains(c.Message, "401 Unauthorized") { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - By("Expecting missing field error") - secret.Data = map[string][]byte{ - "username": []byte(username), - } - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Expecting artifact") - secret.Data["password"] = []byte(password) - 
Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - By("Expecting missing secret error") - Expect(k8sClient.Delete(context.Background(), secret)).Should(Succeed()) - got := &sourcev1.HelmRepository{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - Expect(got.Status.Artifact).ShouldNot(BeNil()) - }) + tmpDir, err := os.MkdirTemp("", "test-reconcile-artifact-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) - It("Authenticates when TLS credentials are provided", func() { - err = helmServer.StartTLS(examplePublicKey, examplePrivateKey, exampleCA, "example.com") - Expect(err).NotTo(HaveOccurred()) + // Create an empty cache file. + cachePath := filepath.Join(tmpDir, "index.yaml") + cacheFile, err := os.Create(cachePath) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cacheFile.Close()).ToNot(HaveOccurred()) - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) + chartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", testGetters, nil) + g.Expect(err).ToNot(HaveOccurred()) + chartRepo.CachePath = cachePath - secretKey := types.NamespacedName{ - Name: "helmrepository-auth-" + randStringRunes(5), - Namespace: namespace.Name, - } - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretKey.Name, - Namespace: secretKey.Namespace, - }, - } - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) + artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz") + // Checksum of the index file calculated by the ChartRepository. + artifact.Checksum = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - key := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj, artifact, chartRepo) } - created := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - SecretRef: &meta.LocalObjectReference{ - Name: secretKey.Name, - }, - Interval: metav1.Duration{Duration: indexInterval}, - }, + dlog := log.NewDelegatingLogSink(log.NullLogSink{}) + nullLogger := logr.New(dlog) + got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, artifact, chartRepo) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + // On error, artifact is empty. Check artifacts only on successful + // reconcile. 
+ if !tt.wantErr { + g.Expect(obj.Status.Artifact).To(MatchArtifact(artifact.DeepCopy())) } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - - By("Expecting unknown authority error") - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.IndexationFailedReason && - strings.Contains(c.Message, "certificate signed by unknown authority") { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - By("Expecting missing field error") - secret.Data = map[string][]byte{ - "certFile": examplePublicKey, + if tt.afterFunc != nil { + tt.afterFunc(g, obj) } - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Expecting artifact") - secret.Data["keyFile"] = examplePrivateKey - secret.Data["caFile"] = exampleCA - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - By("Expecting missing secret error") - Expect(k8sClient.Delete(context.Background(), secret)).Should(Succeed()) - got := &sourcev1.HelmRepository{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - Expect(got.Status.Artifact).ShouldNot(BeNil()) }) - }) -}) + } +} diff --git a/controllers/legacy_suite_test.go b/controllers/legacy_suite_test.go index 748145feb..9edfdd799 100644 --- a/controllers/legacy_suite_test.go +++ b/controllers/legacy_suite_test.go @@ -126,9 +126,9 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred(), "failed to setup GitRepositoryReconciler") err = (&HelmRepositoryReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: ginkgoTestStorage, + Client: k8sManager.GetClient(), + EventRecorder: record.NewFakeRecorder(32), + Storage: ginkgoTestStorage, Getters: getter.Providers{getter.Provider{ Schemes: []string{"http", "https"}, New: getter.NewHTTPGetter, diff --git a/controllers/suite_test.go b/controllers/suite_test.go index b27bda8fa..0be4ce587 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -106,6 +106,16 @@ func TestMain(m *testing.M) { panic(fmt.Sprintf("Failed to start BucketReconciler: %v", err)) } + if err := (&HelmRepositoryReconciler{ + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Getters: testGetters, + Storage: testStorage, + }).SetupWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Failed to start HelmRepositoryReconciler: %v", err)) + } + go func() { fmt.Println("Starting the test environment") if err := testEnv.Start(ctx); err != nil { diff --git a/main.go b/main.go index 4ea5a102a..31bd1cb9f 
100644 --- a/main.go +++ b/main.go @@ -177,12 +177,11 @@ func main() { os.Exit(1) } if err = (&controllers.HelmRepositoryReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - Getters: getters, - EventRecorder: eventRecorder, - MetricsRecorder: metricsH.MetricsRecorder, + Client: mgr.GetClient(), + EventRecorder: eventRecorder, + Metrics: metricsH, + Storage: storage, + Getters: getters, }).SetupWithManagerAndOptions(mgr, controllers.HelmRepositoryReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { From f14a053f0ac98af7f0d331a38fe935f44d55ea19 Mon Sep 17 00:00:00 2001 From: Sunny Date: Thu, 16 Dec 2021 14:44:34 +0530 Subject: [PATCH 40/63] helmrepo: Add more reconciler design improvements - Remove ArtifactUnavailable condition and use Reconciling condition to convey the same. - Make Reconciling condition affect the ready condition. - Introduce summarizeAndPatch() to calculate the final status conditions and patch them. - Introduce reconcile() to iterate through the sub-reconcilers and execute them. - Simplify logging and event recording - Introduce controller-check/status checks to assert that the status conditions are valid at the end of the tests. - Create variables for various condition groups: owned conditions, ready dependencies and ready dependencies negative. Signed-off-by: Sunny --- controllers/helmrepository_controller.go | 401 ++++++++++-------- controllers/helmrepository_controller_test.go | 377 ++++++++++++++-- 2 files changed, 569 insertions(+), 209 deletions(-) diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index d3944ff70..b870797f6 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -18,6 +18,7 @@ package controllers import ( "context" + "errors" "fmt" "net/url" "os" @@ -26,7 +27,7 @@ import ( helmgetter "helm.sh/helm/v3/pkg/getter" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" @@ -43,10 +44,38 @@ import ( "github.com/fluxcd/pkg/runtime/predicates" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/repository" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" ) +// Status conditions owned by HelmRepository reconciler. +var helmRepoOwnedConditions = []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, +} + +// Conditions that Ready condition is influenced by in descending order of their +// priority. +var helmRepoReadyDeps = []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, +} + +// Negative conditions that Ready condition is influenced by. 
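// When any of these conditions is True, the summarized Ready condition is
// reported as False.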
+var helmRepoReadyDepsNegative = []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, +} + // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/finalizers,verbs=get;create;update;patch;delete @@ -66,6 +95,11 @@ type HelmRepositoryReconcilerOptions struct { MaxConcurrentReconciles int } +// helmRepoReconcilerFunc is the function type for all the helm repository +// reconciler functions. The reconciler functions are grouped together and +// executed serially to perform the main operation of the reconciler. +type helmRepoReconcilerFunc func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) + func (r *HelmRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{}) } @@ -103,71 +137,15 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, err } - // Always attempt to patch the object and status after each reconciliation - defer func() { - // Record the value of the reconciliation request, if any - if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { - obj.Status.SetLastHandledReconcileRequest(v) - } - - // Summarize Ready condition - conditions.SetSummary(obj, - meta.ReadyCondition, - conditions.WithConditions( - sourcev1.FetchFailedCondition, - sourcev1.ArtifactOutdatedCondition, - sourcev1.ArtifactUnavailableCondition, - ), - conditions.WithNegativePolarityConditions( - sourcev1.FetchFailedCondition, - sourcev1.ArtifactOutdatedCondition, - sourcev1.ArtifactUnavailableCondition, - ), - ) - - // Patch the object, ignoring conflicts on the conditions owned by this controller - patchOpts := []patch.Option{ - patch.WithOwnedConditions{ - Conditions: []string{ - sourcev1.FetchFailedCondition, - sourcev1.ArtifactOutdatedCondition, - sourcev1.ArtifactUnavailableCondition, - meta.ReadyCondition, - meta.ReconcilingCondition, - meta.StalledCondition, - }, - }, - } + // Result of the sub-reconciliation. + var recResult sreconcile.Result - // Determine if the resource is still being reconciled, or if it has stalled, and record this observation - if retErr == nil && (result.IsZero() || !result.Requeue) { - // We are no longer reconciling - conditions.Delete(obj, meta.ReconcilingCondition) - - // We have now observed this generation - patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) - - readyCondition := conditions.Get(obj, meta.ReadyCondition) - switch readyCondition.Status { - case metav1.ConditionFalse: - // As we are no longer reconciling and the end-state - // is not ready, the reconciliation has stalled - conditions.MarkStalled(obj, readyCondition.Reason, readyCondition.Message) - case metav1.ConditionTrue: - // As we are no longer reconciling and the end-state - // is ready, the reconciliation is no longer stalled - conditions.Delete(obj, meta.StalledCondition) - } - } - - // Finally, patch the resource - if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { - // Ignore patch error "not found" when the object is being deleted. 
- if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) - } - retErr = kerrors.NewAggregate([]error{retErr, err}) - } + // Always attempt to patch the object after each reconciliation. + // NOTE: This deferred block only modifies the named return error. The + // result from the reconciliation remains the same. Any requeue attributes + // set in the result will continue to be effective. + defer func() { + retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) // Always record readiness and duration metrics r.Metrics.RecordReadiness(ctx, obj) @@ -178,53 +156,104 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque // between init and delete if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue return ctrl.Result{Requeue: true}, nil } // Examine if the object is under deletion if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, obj) + res, err := r.reconcileDelete(ctx, obj) + return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, res, err) } // Reconcile actual object - return r.reconcile(ctx, obj) + reconcilers := []helmRepoReconcilerFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileArtifact, + } + recResult, err = r.reconcile(ctx, obj, reconcilers) + return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, recResult, err) } -// reconcile steps through the actual reconciliation tasks for the object, it returns early on the first step that -// produces an error. -func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.HelmRepository) (ctrl.Result, error) { - // Mark the resource as under reconciliation - conditions.MarkReconciling(obj, meta.ProgressingReason, "") +// summarizeAndPatch analyzes the object conditions to create a summary of the +// status conditions and patches the object with the calculated summary. The +// reconciler error type is also used to determine the conditions and the +// returned error. +func (r *HelmRepositoryReconciler) summarizeAndPatch(ctx context.Context, obj *sourcev1.HelmRepository, patchHelper *patch.Helper, res sreconcile.Result, recErr error) error { + // Record the value of the reconciliation request, if any. + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + obj.Status.SetLastHandledReconcileRequest(v) + } - // Reconcile the storage data - if result, err := r.reconcileStorage(ctx, obj); err != nil { - return result, err + // Compute the reconcile results, obtain patch options and reconcile error. + var patchOpts []patch.Option + patchOpts, recErr = sreconcile.ComputeReconcileResult(obj, res, recErr, helmRepoOwnedConditions) + + // Summarize Ready condition. + conditions.SetSummary(obj, + meta.ReadyCondition, + conditions.WithConditions( + helmRepoReadyDeps..., + ), + conditions.WithNegativePolarityConditions( + helmRepoReadyDepsNegative..., + ), + ) + + // Finally, patch the resource. + if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted. 
+ if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) + } + recErr = kerrors.NewAggregate([]error{recErr, err}) + } + + return recErr +} + +// reconcile iterates through the sub-reconcilers and processes the source +// object. The sub-reconcilers are run sequentially. The result and error of +// the sub-reconciliation are collected and returned. For multiple results +// from different sub-reconcilers, the results are combined to return the +// result with the shortest requeue period. +func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.HelmRepository, reconcilers []helmRepoReconcilerFunc) (sreconcile.Result, error) { + if obj.Generation != obj.Status.ObservedGeneration { + conditions.MarkReconciling(obj, "NewGeneration", "reconciling new generation %d", obj.Generation) } var chartRepo repository.ChartRepository var artifact sourcev1.Artifact - // Reconcile the source from upstream - if result, err := r.reconcileSource(ctx, obj, &artifact, &chartRepo); err != nil || result.IsZero() { - return result, err - } - // Reconcile the artifact. - if result, err := r.reconcileArtifact(ctx, obj, artifact, &chartRepo); err != nil || result.IsZero() { - return result, err + // Run the sub-reconcilers and build the result of reconciliation. + var res sreconcile.Result + var resErr error + for _, rec := range reconcilers { + recResult, err := rec(ctx, obj, &artifact, &chartRepo) + // Exit immediately on ResultRequeue. + if recResult == sreconcile.ResultRequeue { + return sreconcile.ResultRequeue, nil + } + // If an error is received, prioritize the returned results because an + // error also means immediate requeue. + if err != nil { + resErr = err + res = recResult + break + } + // Prioritize requeue request in the result for successful results. + res = sreconcile.LowestRequeuingResult(res, recResult) } - - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return res, resErr } // reconcileStorage ensures the current state of the storage matches the desired and previously observed state. // // All artifacts for the resource except for the current one are garbage collected from the storage. // If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. -// If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. // If the hostname of any of the URLs on the object do not match the current storage server hostname, they are updated. -// -// The caller should assume a failure if an error is returned, or the Result is zero. 
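// ----------------------------------------------------------------------------
// NOTE (illustrative sketch, not part of this patch): the reconcile loop above
// short-circuits on an explicit requeue, stops on the first error, and folds
// successful results together by preferring the one that requeues soonest.
// The names below (reconcileResult, lowestRequeuingResult, runSubReconcilers)
// are local stand-ins for the internal/reconcile helpers, and the precedence
// shown is inferred from the loop above rather than taken from that package.

type reconcileResult int

const (
	resultEmpty   reconcileResult = iota // no opinion on requeueing
	resultRequeue                        // requeue immediately
	resultSuccess                        // success; requeue after the object's interval
)

// lowestRequeuingResult prefers the result with the shortest requeue period,
// ignoring resultEmpty unless it is the only value seen.
func lowestRequeuingResult(current, next reconcileResult) reconcileResult {
	switch {
	case current == resultEmpty:
		return next
	case next == resultEmpty:
		return current
	case next == resultRequeue:
		return next
	default:
		return current
	}
}

func runSubReconcilers(fns []func() (reconcileResult, error)) (reconcileResult, error) {
	var res reconcileResult
	for _, fn := range fns {
		r, err := fn()
		if r == resultRequeue {
			return resultRequeue, nil // explicit requeue wins outright
		}
		if err != nil {
			return r, err // an error already implies an immediate retry
		}
		res = lowestRequeuingResult(res, r)
	}
	return res, nil
}
// ------------------------------------------------------------- end of sketch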
-func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.HelmRepository) (ctrl.Result, error) { +func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -236,17 +265,16 @@ func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, obj *so // Record that we do not have an artifact if obj.GetArtifact() == nil { - conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage") - return ctrl.Result{Requeue: true}, nil + conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage") + return sreconcile.ResultSuccess, nil } - conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) // Always update URLs to ensure hostname is up-to-date // TODO(hidde): we may want to send out an event only if we notice the URL has changed r.Storage.SetArtifactURL(obj.GetArtifact()) obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileSource ensures the upstream Helm repository can be reached and downloaded out using the declared @@ -254,11 +282,9 @@ func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, obj *so // // The Helm repository index is downloaded using the defined configuration, and in case of an error during this process // (including transient errors), it records v1beta1.FetchFailedCondition=True and returns early. -// On a successful write of a new artifact, the artifact in the status of the given object is set, and the symlink in -// the storage is updated to its path. -// -// The caller should assume a failure if an error is returned, or the Result is zero. -func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (ctrl.Result, error) { +// If the download is successful, the given artifact pointer is set to a new artifact with the available metadata, and +// the index pointer is set to the newly downloaded index. 
+func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { // Configure Helm client to access repository clientOpts := []helmgetter.Option{ helmgetter.WithTimeout(obj.Spec.Timeout.Duration), @@ -275,34 +301,34 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, obj *sou } var secret corev1.Secret if err := r.Client.Get(ctx, name, &secret); err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, - "Failed to get secret '%s': %s", name.String(), err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.AuthenticationFailedReason, - "Failed to get secret '%s': %s", name.String(), err.Error()) - // Return error as the world as observed may change - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to get secret '%s': %w", name.String(), err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // Get client options from secret tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-auth-", obj.Name, obj.Namespace)) if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.StorageOperationFailedReason, - "Failed to create temporary directory for credentials: %s", err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, - "Failed to create temporary directory for credentials: %s", err.Error()) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create temporary directory for credentials: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } defer os.RemoveAll(tmpDir) // Construct actual options opts, err := getter.ClientOptionsFromSecret(tmpDir, secret) if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, - "Failed to configure Helm client with secret data: %s", err) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.AuthenticationFailedReason, - "Failed to configure Helm client with secret data: %s", err) - // Return err as the content of the secret may change - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to configure Helm client with secret data: %w", err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) + // Return err as the content of the secret may change. + return sreconcile.ResultEmpty, e } clientOpts = append(clientOpts, opts...) 
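// ----------------------------------------------------------------------------
// NOTE (illustrative sketch, not part of this patch): the serror.Event and
// serror.Stalling values returned throughout this function are typed errors
// that the shared result builder can inspect to decide between a normal
// retry-with-event and marking the object as Stalled. The types and the
// branching below are local stand-ins and assumptions, not the actual
// internal/error package API.

// transientError: record an event for it and retry on the next reconcile.
type transientError struct {
	err    error
	reason string
}

func (e *transientError) Error() string { return e.err.Error() }

// terminalError: the failure cannot resolve without a spec change, so the
// object should be marked Stalled instead of being requeued.
type terminalError struct {
	err    error
	reason string
}

func (e *terminalError) Error() string { return e.err.Error() }

// classify shows how a caller could branch on the error type to build the
// runtime result.
func classify(err error) (requeue, stalled bool) {
	switch err.(type) {
	case nil:
		return false, false
	case *terminalError:
		return false, true
	case *transientError:
		return true, false
	default:
		return true, false // unknown errors default to a retry
	}
}
// ------------------------------------------------------------- end of sketch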
} @@ -312,42 +338,49 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, obj *sou if err != nil { switch err.(type) { case *url.Error: - ctrl.LoggerFrom(ctx).Error(err, "invalid Helm repository URL") - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, - "Invalid Helm repository URL: %s", err.Error()) - return ctrl.Result{}, nil + e := &serror.Stalling{ + Err: fmt.Errorf("invalid Helm repository URL: %w", err), + Reason: sourcev1.URLInvalidReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, e.Err.Error()) + return sreconcile.ResultEmpty, e default: - ctrl.LoggerFrom(ctx).Error(err, "failed to construct Helm client") - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FailedReason, - "Failed to construct Helm client: %s", err.Error()) - return ctrl.Result{}, nil + e := &serror.Stalling{ + Err: fmt.Errorf("failed to construct Helm client: %w", err), + Reason: meta.FailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } } checksum, err := newChartRepo.CacheIndex() if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FailedReason, - "Failed to download Helm repository index: %s", err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.FetchFailedCondition, - "Failed to download Helm repository index: %s", err.Error()) + e := &serror.Event{ + Err: fmt.Errorf("failed to download Helm repository index: %w", err), + Reason: meta.FailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FailedReason, e.Err.Error()) // Coin flip on transient or persistent error, return error and hope for the best - return ctrl.Result{}, err + return sreconcile.ResultEmpty, e } *chartRepo = *newChartRepo // Load the cached repository index to ensure it passes validation. if err := chartRepo.LoadFromCache(); err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.IndexationFailedReason, - "Failed to load Helm repository from cache: %s", err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.FetchFailedCondition, - "Failed to load Helm repository from cache: %s", err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to load Helm repository from cache: %w", err), + Reason: sourcev1.FetchFailedCondition, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.IndexationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } defer chartRepo.Unload() // Mark observations about the revision on the object. 
if !obj.GetArtifact().HasRevision(checksum) { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", - "New index revision '%s'", checksum) + message := fmt.Sprintf("new index revision '%s'", checksum) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkReconciling(obj, "NewRevision", message) } conditions.Delete(obj, sourcev1.FetchFailedCondition) @@ -358,81 +391,95 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, obj *sou chartRepo.Checksum, fmt.Sprintf("index-%s.yaml", checksum)) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } -func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) (ctrl.Result, error) { +// reconcileArtifact stores a new artifact in the storage, if the current observation on the object does not match the +// given data. +// +// The inspection of the given data to the object is differed, ensuring any stale observations as +// v1beta1.ArtifactUnavailableCondition and v1beta1.ArtifactOutdatedCondition are always deleted. +// If the given artifact does not differ from the object's current, it returns early. +// On a successful write of a new artifact, the artifact in the status of the given object is set, and the symlink in +// the storage is updated to its path. +func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { // Always restore the Ready condition in case it got removed due to a transient error. defer func() { - if obj.GetArtifact() != nil { - conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) - } if obj.GetArtifact().HasRevision(artifact.Revision) { conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, - "Stored artifact for revision '%s'", artifact.Revision) + "stored artifact for revision '%s'", artifact.Revision) } }() if obj.GetArtifact().HasRevision(artifact.Revision) { - ctrl.LoggerFrom(ctx).Info(fmt.Sprintf("Already up to date, current revision '%s'", artifact.Revision)) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + r.eventLogf(ctx, obj, corev1.EventTypeNormal, meta.SucceededReason, "already up to date, current revision '%s'", artifact.Revision) + return sreconcile.ResultSuccess, nil } + // Mark reconciling because the artifact and remote source are different. + // and they have to be reconciled. + conditions.MarkReconciling(obj, "NewRevision", "new index revision '%s'", artifact.Revision) + // Clear cache at the very end. defer chartRepo.RemoveCache() // Create artifact dir. - if err := r.Storage.MkdirAll(artifact); err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to create artifact directory") - return ctrl.Result{}, err + if err := r.Storage.MkdirAll(*artifact); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create artifact directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } // Acquire lock. 
- unlock, err := r.Storage.Lock(artifact) + unlock, err := r.Storage.Lock(*artifact) if err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to acquire lock for artifact") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to acquire lock for artifact: %w", err), + Reason: meta.FailedReason, + } } defer unlock() // Save artifact to storage. - if err = r.Storage.CopyFromPath(&artifact, chartRepo.CachePath); err != nil { - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, - "Unable to save artifact to storage: %s", err) - return ctrl.Result{}, err + if err = r.Storage.CopyFromPath(artifact, chartRepo.CachePath); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("unable to save artifact to storage: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } // Record it on the object. obj.Status.Artifact = artifact.DeepCopy() // Update index symlink. - indexURL, err := r.Storage.Symlink(artifact, "index.yaml") + indexURL, err := r.Storage.Symlink(*artifact, "index.yaml") if err != nil { - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, - "Failed to update status URL symlink: %s", err) + r.eventLogf(ctx, obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "failed to update status URL symlink: %s", err) } if indexURL != "" { obj.Status.URL = indexURL } - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileDelete handles the delete of an object. It first garbage collects all artifacts for the object from the // artifact storage, if successful, the finalizer is removed from the object. -func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmRepository) (ctrl.Result, error) { +func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmRepository) (sreconcile.Result, error) { // Garbage collect the resource's artifacts if err := r.garbageCollect(ctx, obj); err != nil { // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } // Remove our finalizer from the list controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil + return sreconcile.ResultEmpty, nil } // garbageCollect performs a garbage collection for the given v1beta1.HelmRepository. 
It removes all but the current @@ -441,25 +488,41 @@ func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sou func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmRepository) error { if !obj.DeletionTimestamp.IsZero() { if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { - r.Eventf(obj, corev1.EventTypeWarning, "GarbageCollectionFailed", - "Garbage collection for deleted resource failed: %s", err) - return err + return &serror.Event{ + Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err), + Reason: "GarbageCollectionFailed", + } } obj.Status.Artifact = nil // TODO(hidde): we should only push this event if we actually garbage collected something - r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", - "Garbage collected artifacts for deleted resource") + r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") return nil } if obj.GetArtifact() != nil { if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { - r.Eventf(obj, corev1.EventTypeWarning, "GarbageCollectionFailed", - "Garbage collection of old artifacts failed: %s", err) - return err + return &serror.Event{ + Err: fmt.Errorf("garbage collection of old artifacts failed: %w", err), + Reason: "GarbageCollectionFailed", + } } // TODO(hidde): we should only push this event if we actually garbage collected something - r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", - "Garbage collected old artifacts") + r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + "garbage collected old artifacts") } return nil } + +// eventLog records event and logs at the same time. This log is different from +// the debug log in the event recorder in the sense that this is a simple log, +// the event recorder debug log contains complete details about the event. +func (r *HelmRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. + if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) + } else { + ctrl.LoggerFrom(ctx).Info(msg) + } + r.Eventf(obj, eventType, reason, msg) +} diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index 55bbe5573..f397a3d3c 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -24,7 +24,9 @@ import ( "path/filepath" "strings" "testing" + "time" + "github.com/darkowlzz/controller-check/status" "github.com/go-logr/logr" . 
"github.com/onsi/gomega" "helm.sh/helm/v3/pkg/getter" @@ -32,7 +34,6 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/log" @@ -40,9 +41,12 @@ import ( "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/helmtestserver" "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/helm/repository" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" ) var ( @@ -99,9 +103,15 @@ func TestHelmRepositoryReconciler_Reconcile(t *testing.T) { } readyCondition := conditions.Get(obj, meta.ReadyCondition) return readyCondition.Status == metav1.ConditionTrue && - obj.Generation == readyCondition.ObservedGeneration + obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration }, timeout).Should(BeTrue()) + // Check if the object status is valid. + condns := &status.Conditions{NegativePolarity: helmRepoReadyDepsNegative} + checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) + checker.CheckErr(ctx, obj) + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) // Wait for HelmRepository to be deleted @@ -117,7 +127,7 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { tests := []struct { name string beforeFunc func(obj *sourcev1.HelmRepository, storage *Storage) error - want ctrl.Result + want sreconcile.Result wantErr bool assertArtifact *sourcev1.Artifact assertConditions []metav1.Condition @@ -154,6 +164,7 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { "!/reconcile-storage/b.txt", "!/reconcile-storage/a.txt", }, + want: sreconcile.ResultSuccess, }, { name: "notices missing artifact in storage", @@ -165,12 +176,12 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { testStorage.SetArtifactURL(obj.Status.Artifact) return nil }, - want: ctrl.Result{Requeue: true}, + want: sreconcile.ResultSuccess, assertPaths: []string{ "!/reconcile-storage/invalid.txt", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"), }, }, { @@ -190,6 +201,7 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { } return nil }, + want: sreconcile.ResultSuccess, assertPaths: []string{ "/reconcile-storage/hostname.txt", }, @@ -219,7 +231,10 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) } - got, err := r.reconcileStorage(context.TODO(), obj) + var chartRepo repository.ChartRepository + var artifact sourcev1.Artifact + + got, err := r.reconcileStorage(context.TODO(), obj, &artifact, &chartRepo) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) @@ -257,16 +272,17 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { secret *corev1.Secret beforeFunc func(t *WithT, obj *sourcev1.HelmRepository) afterFunc func(t *WithT, obj *sourcev1.HelmRepository) - want ctrl.Result + want sreconcile.Result wantErr bool 
assertConditions []metav1.Condition }{ { name: "HTTP without secretRef makes ArtifactOutdated=True", protocol: "http", - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New index revision"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), }, }, { @@ -288,9 +304,10 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New index revision"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), }, }, { @@ -312,9 +329,10 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New index revision"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), }, }, { @@ -342,25 +360,25 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, }, { - name: "Invalid URL makes FetchFailed=True and returns zero Result", + name: "Invalid URL makes FetchFailed=True and returns stalling error", protocol: "http", beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "") }, - want: ctrl.Result{}, - wantErr: false, + want: sreconcile.ResultEmpty, + wantErr: true, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "first path segment in URL cannot contain colon"), }, }, { - name: "Unsupported scheme makes FetchFailed=True and returns zero Result", + name: "Unsupported scheme makes FetchFailed=True and returns stalling error", protocol: "http", beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "ftp://") }, - want: ctrl.Result{}, - wantErr: false, + want: sreconcile.ResultEmpty, + wantErr: true, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "scheme \"ftp\" not supported"), }, @@ -482,7 +500,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { name string beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) afterFunc func(t *WithT, obj *sourcev1.HelmRepository) - want ctrl.Result + want sreconcile.Result wantErr bool assertConditions []metav1.Condition }{ @@ -491,9 +509,10 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact 
sourcev1.Artifact, index *repository.ChartRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision 'existing'"), }, }, { @@ -505,20 +524,9 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { t.Expect(obj.Status.URL).To(BeEmpty()) }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), - }, - }, - { - name: "Removes ArtifactUnavailableCondition after creating artifact", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "Foo", "") - }, - want: ctrl.Result{RequeueAfter: interval}, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), }, }, { @@ -527,9 +535,10 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { obj.Spec.Interval = metav1.Duration{Duration: interval} conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision 'existing'"), }, }, { @@ -544,9 +553,10 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { t.Expect(err).NotTo(HaveOccurred()) t.Expect(localPath).To(Equal(targetFile)) }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision 'existing'"), }, }, } @@ -598,7 +608,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { } dlog := log.NewDelegatingLogSink(log.NullLogSink{}) nullLogger := logr.New(dlog) - got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, artifact, chartRepo) + got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, &artifact, chartRepo) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) @@ -615,3 +625,290 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { }) } } + +func 
TestHelmRepositoryReconciler_summarizeAndPatch(t *testing.T) { + tests := []struct { + name string + generation int64 + beforeFunc func(obj *sourcev1.HelmRepository) + result sreconcile.Result + reconcileErr error + wantErr bool + afterFunc func(t *WithT, obj *sourcev1.HelmRepository) + assertConditions []metav1.Condition + }{ + // Success/Fail indicates if a reconciliation succeeded or failed. On + // a successful reconciliation, the object generation is expected to + // match the observed generation in the object status. + // All the cases have some Ready condition set, even if a test case is + // unrelated to the conditions, because it's neseccary for a valid + // status. + { + name: "Success, no extra conditions", + generation: 4, + beforeFunc: func(obj *sourcev1.HelmRepository) { + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(4))) + }, + }, + { + name: "Success, Ready=True", + generation: 5, + beforeFunc: func(obj *sourcev1.HelmRepository) { + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "created") + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "created"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(5))) + }, + }, + { + name: "Success, removes reconciling for successful result", + generation: 2, + beforeFunc: func(obj *sourcev1.HelmRepository) { + conditions.MarkReconciling(obj, "NewRevision", "new index version") + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "stored artifact") + }, + result: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(2))) + }, + }, + { + name: "Success, record reconciliation request", + beforeFunc: func(obj *sourcev1.HelmRepository) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") + }, + generation: 3, + result: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + t.Expect(obj.Status.LastHandledReconcileAt).To(Equal("now")) + t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(3))) + }, + }, + { + name: "Fail, with multiple conditions ArtifactOutdated=True,Reconciling=True", + generation: 7, + beforeFunc: func(obj *sourcev1.HelmRepository) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") + conditions.MarkReconciling(obj, "NewRevision", "new index revision") + }, + reconcileErr: fmt.Errorf("failed to create dir"), + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + 
*conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + t.Expect(obj.Status.ObservedGeneration).ToNot(Equal(int64(7))) + }, + }, + { + name: "Success, with subreconciler stalled error", + generation: 9, + beforeFunc: func(obj *sourcev1.HelmRepository) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.FetchFailedCondition, "failed to construct helm client") + }, + reconcileErr: &serror.Stalling{Err: fmt.Errorf("some error"), Reason: "some reason"}, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.FetchFailedCondition, "failed to construct helm client"), + *conditions.TrueCondition(meta.StalledCondition, "some reason", "some error"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.FetchFailedCondition, "failed to construct helm client"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(9))) + }, + }, + { + name: "Fail, no error but requeue requested", + generation: 3, + beforeFunc: func(obj *sourcev1.HelmRepository) { + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "test-msg") + }, + result: sreconcile.ResultRequeue, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, meta.FailedReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + t.Expect(obj.Status.ObservedGeneration).ToNot(Equal(int64(3))) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) + r := &HelmRepositoryReconciler{ + Client: builder.Build(), + } + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: tt.generation, + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: 5 * time.Second}, + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + g.Expect(r.Create(ctx, obj)).To(Succeed()) + patchHelper, err := patch.NewHelper(obj, r.Client) + g.Expect(err).ToNot(HaveOccurred()) + + gotErr := r.summarizeAndPatch(ctx, obj, patchHelper, tt.result, tt.reconcileErr) + g.Expect(gotErr != nil).To(Equal(tt.wantErr)) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj) + } + + // Check if the object status is valid. + condns := &status.Conditions{NegativePolarity: helmRepoReadyDepsNegative} + checker := status.NewChecker(r.Client, testEnv.GetScheme(), condns) + checker.CheckErr(ctx, obj) + }) + } +} + +func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) { + // Helper to build simple helmRepoReconcilerFunc with result and error. 
+ buildReconcileFuncs := func(r sreconcile.Result, e error) helmRepoReconcilerFunc { + return func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + return r, e + } + } + + tests := []struct { + name string + generation int64 + observedGeneration int64 + reconcileFuncs []helmRepoReconcilerFunc + wantResult sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "successful reconciliations", + reconcileFuncs: []helmRepoReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + }, + { + name: "successful reconciliation with generation difference", + generation: 3, + observedGeneration: 2, + reconcileFuncs: []helmRepoReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewGeneration", "reconciling new generation 3"), + }, + }, + { + name: "failed reconciliation", + reconcileFuncs: []helmRepoReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + }, + wantResult: sreconcile.ResultEmpty, + wantErr: true, + }, + { + name: "multiple object status conditions mutations", + reconcileFuncs: []helmRepoReconcilerFunc{ + func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") + return sreconcile.ResultSuccess, nil + }, + func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + conditions.MarkTrue(obj, meta.ReconcilingCondition, "Progressing", "creating artifact") + return sreconcile.ResultSuccess, nil + }, + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, "Progressing", "creating artifact"), + }, + }, + { + name: "subrecs with one result=Requeue, no error", + reconcileFuncs: []helmRepoReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultRequeue, + wantErr: false, + }, + { + name: "subrecs with error before result=Requeue", + reconcileFuncs: []helmRepoReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + }, + wantResult: sreconcile.ResultEmpty, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmRepositoryReconciler{} + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: tt.generation, + }, + Status: sourcev1.HelmRepositoryStatus{ + ObservedGeneration: tt.observedGeneration, + }, + } + + ctx := context.TODO() + + gotRes, gotErr := r.reconcile(ctx, obj, tt.reconcileFuncs) + g.Expect(gotErr != nil).To(Equal(tt.wantErr)) + g.Expect(gotRes).To(Equal(tt.wantResult)) + + 
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} From e0e048ad6dd312ef47a5522bcfff64462707541c Mon Sep 17 00:00:00 2001 From: Sunny Date: Wed, 24 Nov 2021 22:12:15 +0530 Subject: [PATCH 41/63] helmchart: Replace GetInterval() with GetRequeueAfter() Signed-off-by: Sunny --- controllers/helmchart_controller.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index 06255be25..d9153af1f 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -252,9 +252,9 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", time.Since(start).String(), - chart.GetInterval().Duration.String(), + chart.GetRequeueAfter().String(), )) - return ctrl.Result{RequeueAfter: chart.GetInterval().Duration}, nil + return ctrl.Result{RequeueAfter: chart.GetRequeueAfter()}, nil } type HelmChartReconcilerOptions struct { From 8e107ea60e95d525ee4a0ef598c9127a0321c3e7 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Wed, 8 Dec 2021 22:15:27 +0100 Subject: [PATCH 42/63] HelmChartReconciler refactor Signed-off-by: Hidde Beydals --- api/v1beta2/condition_types.go | 4 + api/v1beta2/helmchart_types.go | 52 +- .../source.toolkit.fluxcd.io_helmcharts.yaml | 8 + controllers/helmchart_controller.go | 1038 ++++++++++------- controllers/helmchart_controller_test.go | 28 +- controllers/legacy_suite_test.go | 6 +- internal/helm/chart/builder.go | 48 +- internal/helm/chart/builder_local.go | 26 +- internal/helm/chart/builder_remote.go | 16 +- internal/helm/chart/builder_test.go | 35 +- internal/helm/chart/errors.go | 39 +- internal/helm/chart/errors_test.go | 4 +- main.go | 11 +- 13 files changed, 753 insertions(+), 562 deletions(-) diff --git a/api/v1beta2/condition_types.go b/api/v1beta2/condition_types.go index 13c14498e..787703b3a 100644 --- a/api/v1beta2/condition_types.go +++ b/api/v1beta2/condition_types.go @@ -32,6 +32,10 @@ const ( // Source may be outdated. // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. FetchFailedCondition string = "FetchFailed" + + // BuildFailedCondition indicates a transient or persistent build failure of a Source's Artifact. + // If True, the Source can be in an ArtifactOutdatedCondition + BuildFailedCondition string = "BuildFailed" ) const ( diff --git a/api/v1beta2/helmchart_types.go b/api/v1beta2/helmchart_types.go index 8b237da0e..4852e0a79 100644 --- a/api/v1beta2/helmchart_types.go +++ b/api/v1beta2/helmchart_types.go @@ -19,12 +19,10 @@ package v1beta2 import ( "time" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/conditions" ) // HelmChartKind is the string representation of a HelmChart. @@ -115,6 +113,16 @@ type HelmChartStatus struct { // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // ObservedSourceArtifactRevision is the last observed Artifact.Revision + // of the Source reference. + // +optional + ObservedSourceArtifactRevision string `json:"observedSourceArtifactRevision,omitempty"` + + // ObservedChartName is the last observed chart name as defined by the + // resolved chart reference. 
+ // +optional + ObservedChartName string `json:"observedChartName,omitempty"` + // Conditions holds the conditions for the HelmChart. // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` @@ -148,46 +156,6 @@ const ( ChartPackageSucceededReason string = "ChartPackageSucceeded" ) -// HelmChartProgressing resets the conditions of the HelmChart to meta.Condition -// of type meta.ReadyCondition with status 'Unknown' and meta.ProgressingReason -// reason and message. It returns the modified HelmChart. -func HelmChartProgressing(chart HelmChart) HelmChart { - chart.Status.ObservedGeneration = chart.Generation - chart.Status.URL = "" - chart.Status.Conditions = []metav1.Condition{} - conditions.MarkUnknown(&chart, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") - return chart -} - -// HelmChartReady sets the given Artifact and URL on the HelmChart and sets the -// meta.ReadyCondition to 'True', with the given reason and message. It returns -// the modified HelmChart. -func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message string) HelmChart { - chart.Status.Artifact = &artifact - chart.Status.URL = url - conditions.MarkTrue(&chart, meta.ReadyCondition, reason, message) - return chart -} - -// HelmChartNotReady sets the meta.ReadyCondition on the given HelmChart to -// 'False', with the given reason and message. It returns the modified -// HelmChart. -func HelmChartNotReady(chart HelmChart, reason, message string) HelmChart { - conditions.MarkFalse(&chart, meta.ReadyCondition, reason, message) - return chart -} - -// HelmChartReadyMessage returns the message of the meta.ReadyCondition with -// status 'True', or an empty string. -func HelmChartReadyMessage(chart HelmChart) string { - if c := apimeta.FindStatusCondition(chart.Status.Conditions, meta.ReadyCondition); c != nil { - if c.Status == metav1.ConditionTrue { - return c.Message - } - } - return "" -} - // GetConditions returns the status conditions of the object. func (in HelmChart) GetConditions() []metav1.Condition { return in.Status.Conditions diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index 06d6773ab..dbf29410c 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -517,10 +517,18 @@ spec: reconcile request value, so a change of the annotation value can be detected. type: string + observedChartName: + description: ObservedChartName is the last observed chart name as + defined by the resolved chart reference. + type: string observedGeneration: description: ObservedGeneration is the last observed generation. format: int64 type: integer + observedSourceArtifactRevision: + description: ObservedSourceArtifactRevision is the last observed Artifact.Revision + of the Source reference. + type: string url: description: URL is the download link for the last chart pulled. 
type: string diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index d9153af1f..38a959836 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -31,12 +31,11 @@ import ( helmgetter "helm.sh/helm/v3/pkg/getter" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -48,17 +47,51 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" "github.com/fluxcd/pkg/untar" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/helm/chart" "github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/repository" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/util" ) +// Status conditions owned by the HelmChart reconciler. +var helmChartOwnedConditions = []string{ + sourcev1.BuildFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, +} + +// Conditions that Ready condition is influenced by in descending order of their +// priority. +var helmChartReadyDeps = []string{ + sourcev1.BuildFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, +} + +// Negative conditions that Ready condition is influenced by. 
+var helmChartReadyDepsNegative = []string{ + sourcev1.BuildFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, +} + // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts/finalizers,verbs=get;create;update;patch;delete @@ -67,18 +100,23 @@ import ( // HelmChartReconciler reconciles a HelmChart object type HelmChartReconciler struct { client.Client - Scheme *runtime.Scheme - Storage *Storage - Getters helmgetter.Providers - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder + kuberecorder.EventRecorder + helper.Metrics + + Storage *Storage + Getters helmgetter.Providers } func (r *HelmChartReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, HelmChartReconcilerOptions{}) } +type HelmChartReconcilerOptions struct { + MaxConcurrentReconciles int +} + +type helmChartReconcilerFunc func(ctx context.Context, obj *sourcev1.HelmChart, build *chart.Build) (sreconcile.Result, error) + func (r *HelmChartReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmChartReconcilerOptions) error { if err := mgr.GetCache().IndexField(context.TODO(), &sourcev1.HelmRepository{}, sourcev1.HelmRepositoryURLIndexKey, r.indexHelmRepositoryByURL); err != nil { @@ -112,211 +150,264 @@ func (r *HelmChartReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts Complete(r) } -func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := ctrl.LoggerFrom(ctx) - var chart sourcev1.HelmChart - if err := r.Get(ctx, req.NamespacedName, &chart); err != nil { - return ctrl.Result{Requeue: true}, client.IgnoreNotFound(err) + // Fetch the HelmChart + obj := &sourcev1.HelmChart{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) } // Record suspended status metric - defer r.recordSuspension(ctx, chart) - - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&chart, sourcev1.SourceFinalizer) { - patch := client.MergeFrom(chart.DeepCopy()) - controllerutil.AddFinalizer(&chart, sourcev1.SourceFinalizer) - if err := r.Patch(ctx, &chart, patch); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } - - // Examine if the object is under deletion - if !chart.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, chart) - } + r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - // Return early if the object is suspended. 
- if chart.Spec.Suspend { + // Return early if the object is suspended + if obj.Spec.Suspend { log.Info("Reconciliation is suspended for this object") return ctrl.Result{}, nil } - // Record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &chart) - if err != nil { - return ctrl.Result{}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, start) + // Initialize the patch helper + patchHelper, err := patch.NewHelper(obj, r.Client) + if err != nil { + return ctrl.Result{}, err } - // Conditionally set progressing condition in status - resetChart, changed := r.resetStatus(chart) - if changed { - chart = resetChart - if err := r.updateStatus(ctx, req, chart.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, chart) + // Result of the sub-reconciliation + var recResult sreconcile.Result + + // Always attempt to patch the object after each reconciliation. + // NOTE: This deferred block only modifies the named return error. The + // result from the reconciliation remains the same. Any requeue attributes + // set in the result will continue to be effective. + defer func() { + retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) + + // Always record readiness and duration metrics + r.Metrics.RecordReadiness(ctx, obj) + r.Metrics.RecordDuration(ctx, obj, start) + }() + + // Add finalizer first if not exist to avoid the race condition + // between init and delete + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue + return ctrl.Result{Requeue: true}, nil } - // Record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. - if v, ok := meta.ReconcileAnnotationValue(chart.GetAnnotations()); ok { - chart.Status.SetLastHandledReconcileRequest(v) + // Examine if the object is under deletion + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + res, err := r.reconcileDelete(ctx, obj) + return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, res, err) } - // Purge all but current artifact from storage - if err := r.gc(chart); err != nil { - log.Error(err, "unable to purge old artifacts") + // Reconcile actual object + reconcilers := []helmChartReconcilerFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileArtifact, } + recResult, err = r.reconcile(ctx, obj, reconcilers) + return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, recResult, err) +} - // Retrieve the source - source, err := r.getSource(ctx, chart) - if err != nil { - chart = sourcev1.HelmChartNotReady(*chart.DeepCopy(), sourcev1.ChartPullFailedReason, err.Error()) - if err := r.updateStatus(ctx, req, chart.Status); err != nil { - log.Error(err, "unable to update status") +// summarizeAndPatch analyzes the object conditions to create a summary of the +// status conditions and patches the object with the calculated summary. The +// reconciler error type is also used to determine the conditions and the +// returned error. 
+func (r *HelmChartReconciler) summarizeAndPatch(ctx context.Context, obj *sourcev1.HelmChart, patchHelper *patch.Helper, res sreconcile.Result, recErr error) error { + // Record the value of the reconciliation request, if any + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + obj.Status.SetLastHandledReconcileRequest(v) + } + + // Compute the reconcile results, obtain patch options and reconcile error. + var patchOpts []patch.Option + patchOpts, recErr = sreconcile.ComputeReconcileResult(obj, res, recErr, helmChartOwnedConditions) + + // Summarize Ready condition + conditions.SetSummary(obj, + meta.ReadyCondition, + conditions.WithConditions( + helmChartReadyDeps..., + ), + conditions.WithNegativePolarityConditions( + helmChartReadyDepsNegative..., + ), + ) + + // Finally, patch the resource + if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + err = kerrors.FilterOut(err, func(e error) bool { return apierrs.IsNotFound(err) }) } - return ctrl.Result{Requeue: true}, err + recErr = kerrors.NewAggregate([]error{recErr, err}) } + return recErr +} - // Assert source is ready - if source.GetArtifact() == nil { - err = fmt.Errorf("no artifact found for source `%s` kind '%s'", - chart.Spec.SourceRef.Name, chart.Spec.SourceRef.Kind) - chart = sourcev1.HelmChartNotReady(*chart.DeepCopy(), sourcev1.ChartPullFailedReason, err.Error()) - if err := r.updateStatus(ctx, req, chart.Status); err != nil { - log.Error(err, "unable to update status") - } - r.recordReadiness(ctx, chart) - return ctrl.Result{Requeue: true}, err +// reconcile steps through the actual reconciliation tasks for the object, it returns early on the first step that +// produces an error. +func (r *HelmChartReconciler) reconcile(ctx context.Context, obj *sourcev1.HelmChart, reconcilers []helmChartReconcilerFunc) (sreconcile.Result, error) { + if obj.Generation != obj.Status.ObservedGeneration { + conditions.MarkReconciling(obj, "NewGeneration", "reconciling new generation %d", obj.Generation) } - // Create working directory - workDir, err := os.MkdirTemp("", chart.Kind+"-"+chart.Namespace+"-"+chart.Name+"-") - if err != nil { - err = fmt.Errorf("failed to create temporary working directory: %w", err) - chart = sourcev1.HelmChartNotReady(*chart.DeepCopy(), sourcev1.ChartPullFailedReason, err.Error()) - if err := r.updateStatus(ctx, req, chart.Status); err != nil { - log.Error(err, "unable to update status") + // Run the sub-reconcilers and build the result of reconciliation. + var ( + build chart.Build + res sreconcile.Result + resErr error + ) + for _, rec := range reconcilers { + recResult, err := rec(ctx, obj, &build) + // Exit immediately on ResultRequeue. + if recResult == sreconcile.ResultRequeue { + return sreconcile.ResultRequeue, nil } - r.recordReadiness(ctx, chart) - return ctrl.Result{Requeue: true}, err - } - defer func() { - if err := os.RemoveAll(workDir); err != nil { - log.Error(err, "failed to remove working directory", "path", workDir) + // If an error is received, prioritize the returned results because an + // error also means immediate requeue. 
+ if err != nil { + resErr = err + res = recResult + break } - }() - - // Perform the reconciliation for the chart source type - var reconciledChart sourcev1.HelmChart - var reconcileErr error - switch typedSource := source.(type) { - case *sourcev1.HelmRepository: - reconciledChart, reconcileErr = r.fromHelmRepository(ctx, *typedSource, *chart.DeepCopy(), workDir, changed) - case *sourcev1.GitRepository, *sourcev1.Bucket: - reconciledChart, reconcileErr = r.fromTarballArtifact(ctx, *typedSource.GetArtifact(), *chart.DeepCopy(), - workDir, changed) - default: - err := fmt.Errorf("unable to reconcile unsupported source reference kind '%s'", chart.Spec.SourceRef.Kind) - return ctrl.Result{Requeue: false}, err + // Prioritize requeue request in the result. + res = sreconcile.LowestRequeuingResult(res, recResult) } + return res, resErr +} - // Update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledChart.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } +// reconcileStorage ensures the current state of the storage matches the desired and previously observed state. +// +// All artifacts for the resource except for the current one are garbage collected from the storage. +// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. +// If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. +// If the hostname of the URLs on the object do not match the current storage server hostname, they are updated. +// +// The caller should assume a failure if an error is returned, or the BuildResult is zero. +func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.HelmChart, build *chart.Build) (sreconcile.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) - // If reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledChart, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledChart) - return ctrl.Result{Requeue: true}, reconcileErr + // Determine if the advertised artifact is still in storage + if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { + obj.Status.Artifact = nil + obj.Status.URL = "" } - // Emit an event if we did not have an artifact before, or the revision has changed - if (chart.GetArtifact() == nil && reconciledChart.GetArtifact() != nil) || - (chart.GetArtifact() != nil && reconciledChart.GetArtifact() != nil && reconciledChart.GetArtifact().Revision != chart.GetArtifact().Revision) { - r.event(ctx, reconciledChart, events.EventSeverityInfo, sourcev1.HelmChartReadyMessage(reconciledChart)) + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage") + return sreconcile.ResultSuccess, nil } - r.recordReadiness(ctx, reconciledChart) - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Since(start).String(), - chart.GetRequeueAfter().String(), - )) - return ctrl.Result{RequeueAfter: chart.GetRequeueAfter()}, nil -} + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = 
r.Storage.SetHostname(obj.Status.URL) -type HelmChartReconcilerOptions struct { - MaxConcurrentReconciles int + return sreconcile.ResultSuccess, nil } -func (r *HelmChartReconciler) getSource(ctx context.Context, chart sourcev1.HelmChart) (sourcev1.Source, error) { - var source sourcev1.Source - namespacedName := types.NamespacedName{ - Namespace: chart.GetNamespace(), - Name: chart.Spec.SourceRef.Name, - } - switch chart.Spec.SourceRef.Kind { - case sourcev1.HelmRepositoryKind: - var repository sourcev1.HelmRepository - err := r.Client.Get(ctx, namespacedName, &repository) - if err != nil { - return source, fmt.Errorf("failed to retrieve source: %w", err) - } - source = &repository - case sourcev1.GitRepositoryKind: - var repository sourcev1.GitRepository - err := r.Client.Get(ctx, namespacedName, &repository) - if err != nil { - return source, fmt.Errorf("failed to retrieve source: %w", err) +// reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the +// result. +// If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret +// fails, it records v1beta1.FetchFailedCondition=True and returns early. +// +// The caller should assume a failure if an error is returned, or the BuildResult is zero. +func (r *HelmChartReconciler) reconcileSource(ctx context.Context, obj *sourcev1.HelmChart, build *chart.Build) (sreconcile.Result, error) { + // Retrieve the source + s, err := r.getSource(ctx, obj) + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to get source: %w", err), + Reason: "SourceUnavailable", } - source = &repository - case sourcev1.BucketKind: - var bucket sourcev1.Bucket - err := r.Client.Get(ctx, namespacedName, &bucket) - if err != nil { - return source, fmt.Errorf("failed to retrieve source: %w", err) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "SourceUnavailable", e.Err.Error()) + + // Return Kubernetes client errors, but ignore others which can only be + // solved by a change in generation + if apierrs.ReasonForError(err) != metav1.StatusReasonUnknown { + return sreconcile.ResultEmpty, &serror.Stalling{ + Err: fmt.Errorf("failed to get source: %w", err), + Reason: "UnsupportedSourceKind", + } } - source = &bucket + return sreconcile.ResultEmpty, e + } + + // Assert source has an artifact + if s.GetArtifact() == nil || !r.Storage.ArtifactExist(*s.GetArtifact()) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "NoSourceArtifact", + "no artifact available for %s source '%s'", obj.Spec.SourceRef.Kind, obj.Spec.SourceRef.Name) + r.eventLogf(ctx, obj, events.EventTypeTrace, "NoSourceArtifact", + "no artifact available for %s source '%s'", obj.Spec.SourceRef.Kind, obj.Spec.SourceRef.Name) + return sreconcile.ResultRequeue, nil + } + + // Record current artifact revision as last observed + obj.Status.ObservedSourceArtifactRevision = s.GetArtifact().Revision + + // Perform the reconciliation for the chart source type + switch typedSource := s.(type) { + case *sourcev1.HelmRepository: + return r.reconcileFromHelmRepository(ctx, obj, typedSource, build) + case *sourcev1.GitRepository, *sourcev1.Bucket: + return r.reconcileFromTarballArtifact(ctx, obj, *typedSource.GetArtifact(), build) default: - return source, fmt.Errorf("source `%s` kind '%s' not supported", - chart.Spec.SourceRef.Name, chart.Spec.SourceRef.Kind) + // Ending up here should generally not be possible + // as getSource already validates + return sreconcile.ResultEmpty, nil } 
- return source, nil } -func (r *HelmChartReconciler) fromHelmRepository(ctx context.Context, repo sourcev1.HelmRepository, c sourcev1.HelmChart, - workDir string, force bool) (sourcev1.HelmChart, error) { - // Configure Index getter options +func (r *HelmChartReconciler) reconcileFromHelmRepository(ctx context.Context, obj *sourcev1.HelmChart, + repo *sourcev1.HelmRepository, b *chart.Build) (sreconcile.Result, error) { + + // Construct the Getter options from the HelmRepository data clientOpts := []helmgetter.Option{ helmgetter.WithURL(repo.Spec.URL), helmgetter.WithTimeout(repo.Spec.Timeout.Duration), helmgetter.WithPassCredentialsAll(repo.Spec.PassCredentials), } - if secret, err := r.getHelmRepositorySecret(ctx, &repo); err != nil { - return sourcev1.HelmChartNotReady(c, sourcev1.AuthenticationFailedReason, err.Error()), err - } else if secret != nil { + if secret, err := r.getHelmRepositorySecret(ctx, repo); secret != nil || err != nil { + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to get secret '%s': %w", repo.Spec.SecretRef.Name, err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) + // Return error as the world as observed may change + return sreconcile.ResultEmpty, e + } + // Create temporary working directory for credentials - authDir := filepath.Join(workDir, "creds") - if err := os.Mkdir(authDir, 0700); err != nil { - err = fmt.Errorf("failed to create temporary directory for repository credentials: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + authDir, err := util.TempDirForObj("", obj) + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to create temporary working directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.StorageOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } + defer os.RemoveAll(authDir) + + // Build client options from secret opts, err := getter.ClientOptionsFromSecret(authDir, *secret) if err != nil { - err = fmt.Errorf("failed to create client options for HelmRepository '%s': %w", repo.Name, err) - return sourcev1.HelmChartNotReady(c, sourcev1.AuthenticationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to configure Helm client with secret data: %w", err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) + // Requeue as content of secret might change + return sreconcile.ResultEmpty, e } clientOpts = append(clientOpts, opts...) 
} @@ -324,139 +415,170 @@ func (r *HelmChartReconciler) fromHelmRepository(ctx context.Context, repo sourc // Initialize the chart repository chartRepo, err := repository.NewChartRepository(repo.Spec.URL, r.Storage.LocalPath(*repo.GetArtifact()), r.Getters, clientOpts) if err != nil { + // Any error requires a change in generation, + // which we should be informed about by the watcher switch err.(type) { case *url.Error: - return sourcev1.HelmChartNotReady(c, sourcev1.URLInvalidReason, err.Error()), err + e := &serror.Stalling{ + Err: fmt.Errorf("invalid Helm repository URL: %w", err), + Reason: sourcev1.URLInvalidReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, e.Err.Error()) + return sreconcile.ResultEmpty, e default: - return sourcev1.HelmChartNotReady(c, sourcev1.ChartPullFailedReason, err.Error()), err + e := &serror.Stalling{ + Err: fmt.Errorf("failed to construct Helm client: %w", err), + Reason: meta.FailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } } - // Build the chart + // Construct the chart builder with scoped configuration cb := chart.NewRemoteBuilder(chartRepo) - ref := chart.RemoteReference{Name: c.Spec.Chart, Version: c.Spec.Version} opts := chart.BuildOptions{ - ValuesFiles: c.GetValuesFiles(), - Force: force, + ValuesFiles: obj.GetValuesFiles(), + Force: obj.Generation != obj.Status.ObservedGeneration, } - if artifact := c.GetArtifact(); artifact != nil { + if artifact := obj.GetArtifact(); artifact != nil { opts.CachedChart = r.Storage.LocalPath(*artifact) } // Set the VersionMetadata to the object's Generation if ValuesFiles is defined // This ensures changes can be noticed by the Artifact consumer if len(opts.GetValuesFiles()) > 0 { - opts.VersionMetadata = strconv.FormatInt(c.Generation, 10) - } - b, err := cb.Build(ctx, ref, filepath.Join(workDir, "chart.tgz"), opts) - if err != nil { - return sourcev1.HelmChartNotReady(c, sourcev1.ChartPullFailedReason, err.Error()), err + opts.VersionMetadata = strconv.FormatInt(obj.Generation, 10) } - newArtifact := r.Storage.NewArtifactFor(c.Kind, c.GetObjectMeta(), b.Version, - fmt.Sprintf("%s-%s.tgz", b.Name, b.Version)) - - // If the path of the returned build equals the cache path, - // there are no changes to the chart - if b.Path == opts.CachedChart { - // Ensure hostname is updated - if c.GetArtifact().URL != newArtifact.URL { - r.Storage.SetArtifactURL(c.GetArtifact()) - c.Status.URL = r.Storage.SetHostname(c.Status.URL) - } - return c, nil - } + // Build the chart + ref := chart.RemoteReference{Name: obj.Spec.Chart, Version: obj.Spec.Version} + build, err := cb.Build(ctx, ref, util.TempPathForObj("", ".tgz", obj), opts) - // Ensure artifact directory exists - err = r.Storage.MkdirAll(newArtifact) - if err != nil { - err = fmt.Errorf("unable to create chart directory: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err - } + // Record both success _and_ error observations on the object + processChartBuild(obj, build, err) - // Acquire a lock for the artifact - unlock, err := r.Storage.Lock(newArtifact) + // Handle any build error if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + e := fmt.Errorf("failed to build chart from remote source: %w", err) + reason := meta.FailedReason + if buildErr := 
new(chart.BuildError); errors.As(err, &buildErr) { + reason = buildErr.Reason.Reason + if chart.IsPersistentBuildErrorReason(buildErr.Reason) { + return sreconcile.ResultEmpty, &serror.Stalling{ + Err: e, + Reason: reason, + } + } + } + return sreconcile.ResultEmpty, &serror.Event{ + Err: e, + Reason: reason, + } } - defer unlock() - // Copy the packaged chart to the artifact path - if err = r.Storage.CopyFromPath(&newArtifact, b.Path); err != nil { - err = fmt.Errorf("failed to write chart package to storage: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err - } + *b = *build + return sreconcile.ResultSuccess, nil +} - // Update symlink - cUrl, err := r.Storage.Symlink(newArtifact, fmt.Sprintf("%s-latest.tgz", b.Name)) +func (r *HelmChartReconciler) reconcileFromTarballArtifact(ctx context.Context, obj *sourcev1.HelmChart, source sourcev1.Artifact, b *chart.Build) (sreconcile.Result, error) { + // Create temporary working directory + tmpDir, err := util.TempDirForObj("", obj) if err != nil { - err = fmt.Errorf("storage error: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to create temporary working directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.StorageOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } - return sourcev1.HelmChartReady(c, newArtifact, cUrl, sourcev1.ChartPullSucceededReason, b.Summary()), nil -} + defer os.RemoveAll(tmpDir) -func (r *HelmChartReconciler) fromTarballArtifact(ctx context.Context, source sourcev1.Artifact, c sourcev1.HelmChart, - workDir string, force bool) (sourcev1.HelmChart, error) { - // Create temporary working directory to untar into - sourceDir := filepath.Join(workDir, "source") + // Create directory to untar source into + sourceDir := filepath.Join(tmpDir, "source") if err := os.Mkdir(sourceDir, 0700); err != nil { - err = fmt.Errorf("failed to create temporary directory to untar source into: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to create directory to untar source into: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.StorageOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // Open the tarball artifact file and untar files into working directory f, err := os.Open(r.Storage.LocalPath(source)) if err != nil { - err = fmt.Errorf("artifact open error: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to open source artifact: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.StorageOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } if _, err = untar.Untar(f, sourceDir); err != nil { _ = f.Close() - err = fmt.Errorf("artifact untar error: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("artifact untar error: %w", err), + Reason: meta.FailedReason, + } } if err = f.Close(); err != nil { - err = fmt.Errorf("artifact close error: %w", 
err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("artifact close error: %w", err), + Reason: meta.FailedReason, + } } - chartPath, err := securejoin.SecureJoin(sourceDir, c.Spec.Chart) + // Calculate (secure) absolute chart path + chartPath, err := securejoin.SecureJoin(sourceDir, obj.Spec.Chart) if err != nil { - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + e := &serror.Stalling{ + Err: fmt.Errorf("Path calculation for chart '%s' failed: %w", obj.Spec.Chart, err), + Reason: "IllegalPath", + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "IllegalPath", e.Err.Error()) + // We are unable to recover from this change without a change in generation + return sreconcile.ResultEmpty, e } // Setup dependency manager - authDir := filepath.Join(workDir, "creds") + authDir := filepath.Join(tmpDir, "creds") if err = os.Mkdir(authDir, 0700); err != nil { - err = fmt.Errorf("failed to create temporaRy directory for dependency credentials: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create temporary directory for dependency credentials: %w", err), + Reason: meta.FailedReason, + } } dm := chart.NewDependencyManager( - chart.WithRepositoryCallback(r.namespacedChartRepositoryCallback(ctx, authDir, c.GetNamespace())), + chart.WithRepositoryCallback(r.namespacedChartRepositoryCallback(ctx, authDir, obj.GetNamespace())), ) defer dm.Clear() // Configure builder options, including any previously cached chart opts := chart.BuildOptions{ - ValuesFiles: c.GetValuesFiles(), - Force: force, + ValuesFiles: obj.GetValuesFiles(), + Force: obj.Generation != obj.Status.ObservedGeneration, } - if artifact := c.Status.Artifact; artifact != nil { - opts.CachedChart = artifact.Path + if artifact := obj.Status.Artifact; artifact != nil { + opts.CachedChart = r.Storage.LocalPath(*artifact) } + // Add revision metadata to chart build + if obj.Spec.ReconcileStrategy == sourcev1.ReconcileStrategyRevision { + // Isolate the commit SHA from GitRepository type artifacts by removing the branch/ prefix. + splitRev := strings.Split(source.Revision, "/") + opts.VersionMetadata = splitRev[len(splitRev)-1] + } // Configure revision metadata for chart build if we should react to revision changes - if c.Spec.ReconcileStrategy == sourcev1.ReconcileStrategyRevision { + if obj.Spec.ReconcileStrategy == sourcev1.ReconcileStrategyRevision { rev := source.Revision - if c.Spec.SourceRef.Kind == sourcev1.GitRepositoryKind { + if obj.Spec.SourceRef.Kind == sourcev1.GitRepositoryKind { // Split the reference by the `/` delimiter which may be present, // and take the last entry which contains the SHA. split := strings.Split(source.Revision, "/") rev = split[len(split)-1] } - if kind := c.Spec.SourceRef.Kind; kind == sourcev1.GitRepositoryKind || kind == sourcev1.BucketKind { + if kind := obj.Spec.SourceRef.Kind; kind == sourcev1.GitRepositoryKind || kind == sourcev1.BucketKind { // The SemVer from the metadata is at times used in e.g. the label metadata for a resource // in a chart, which has a limited length of 63 characters. 
// To not fill most of this space with a full length SHA hex (40 characters for SHA-1, and @@ -471,74 +593,207 @@ func (r *HelmChartReconciler) fromTarballArtifact(ctx context.Context, source so } opts.VersionMetadata = rev } - // Set the VersionMetadata to the object's Generation if ValuesFiles is defined, - // this ensures changes can be noticed by the Artifact consumer - if len(opts.GetValuesFiles()) > 0 { - if opts.VersionMetadata != "" { - opts.VersionMetadata += "." - } - opts.VersionMetadata += strconv.FormatInt(c.Generation, 10) - } // Build chart cb := chart.NewLocalBuilder(dm) - b, err := cb.Build(ctx, chart.LocalReference{WorkDir: sourceDir, Path: chartPath}, filepath.Join(workDir, "chart.tgz"), opts) + build, err := cb.Build(ctx, chart.LocalReference{ + WorkDir: sourceDir, + Path: chartPath, + }, util.TempPathForObj("", ".tgz", obj), opts) + + // Record both success _and_ error observations on the object + processChartBuild(obj, build, err) + + // Handle any build error if err != nil { - return sourcev1.HelmChartNotReady(c, reasonForBuildError(err), err.Error()), err + e := fmt.Errorf("failed to build chart from source artifact: %w", err) + reason := meta.FailedReason + if buildErr := new(chart.BuildError); errors.As(err, &buildErr) { + reason = buildErr.Reason.Reason + if chart.IsPersistentBuildErrorReason(buildErr.Reason) { + return sreconcile.ResultEmpty, &serror.Stalling{ + Err: e, + Reason: reason, + } + } + } + return sreconcile.ResultEmpty, &serror.Event{ + Err: e, + Reason: reason, + } } - newArtifact := r.Storage.NewArtifactFor(c.Kind, c.GetObjectMeta(), b.Version, - fmt.Sprintf("%s-%s.tgz", b.Name, b.Version)) + // If we actually build a chart, take a historical note of any dependencies we resolved. + // The reason this is a done conditionally, is because if we have a cached one in storage, + // we can not recover this information (and put it in a condition). Which would result in + // a sudden (partial) disappearance of observed state. + // TODO(hidde): include specific name/version information? + if depNum := build.ResolvedDependencies; depNum > 0 { + r.eventLogf(ctx, obj, corev1.EventTypeNormal, "ResolvedDependencies", "resolved %d chart dependencies", depNum) + } - // If the path of the returned build equals the cache path, - // there are no changes to the chart - if apimeta.IsStatusConditionTrue(c.Status.Conditions, meta.ReadyCondition) && - b.Path == opts.CachedChart { - // Ensure hostname is updated - if c.GetArtifact().URL != newArtifact.URL { - r.Storage.SetArtifactURL(c.GetArtifact()) - c.Status.URL = r.Storage.SetHostname(c.Status.URL) - } - return c, nil + *b = *build + return sreconcile.ResultSuccess, nil +} + +// reconcileArtifact reconciles the given chart.Build to an v1beta1.Artifact in the Storage, and records it +// on the object. 
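To make the revision-as-version-metadata reasoning above concrete: for Git and Bucket sources the commit SHA is appended to the chart version as SemVer build metadata, shortened so that version-derived labels stay under the 63-character limit. The snippet below is purely illustrative; the 12-character abbreviation and the example revision are assumptions, not values taken from the elided code.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical GitRepository revision of the form "<branch>/<sha>".
	revision := "main/6c5d7e0abc1234567890deadbeefcafef00dbeef"

	// Strip the branch prefix and keep only the SHA.
	parts := strings.Split(revision, "/")
	sha := parts[len(parts)-1]

	// Abbreviate the SHA so the SemVer build metadata stays label-friendly
	// (the exact length used by the controller is not shown in this hunk).
	if len(sha) > 12 {
		sha = sha[:12]
	}

	// e.g. chart version "6.0.0" becomes "6.0.0+6c5d7e0abc12"
	fmt.Println("6.0.0+" + sha)
}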
+func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.HelmChart, b *chart.Build) (sreconcile.Result, error) { + // Without a complete chart build, there is little to reconcile + if !b.Complete() { + return sreconcile.ResultRequeue, nil } - // Ensure artifact directory exists - err = r.Storage.MkdirAll(newArtifact) - if err != nil { - err = fmt.Errorf("unable to create chart directory: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + // Always restore the conditions in case they got overwritten by transient errors + defer func() { + if obj.Status.ObservedChartName == b.Name && obj.GetArtifact().HasRevision(b.Version) { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, meta.ReadyCondition, reasonForBuild(b), b.Summary()) + } + }() + + // Create artifact from build data + artifact := r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), b.Version, fmt.Sprintf("%s-%s.tgz", b.Name, b.Version)) + + // Return early if the build path equals the current artifact path + if curArtifact := obj.GetArtifact(); curArtifact != nil && r.Storage.LocalPath(*curArtifact) == b.Path { + r.eventLogf(ctx, obj, corev1.EventTypeNormal, meta.SucceededReason, "already up to date, current revision '%s'", curArtifact.Revision) + return sreconcile.ResultSuccess, nil } - // Acquire a lock for the artifact - unlock, err := r.Storage.Lock(newArtifact) + // Garbage collect chart build once persisted to storage + defer os.Remove(b.Path) + + // Ensure artifact directory exists and acquire lock + if err := r.Storage.MkdirAll(artifact); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create artifact directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + } + unlock, err := r.Storage.Lock(artifact) if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to acquire lock for artifact: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } defer unlock() // Copy the packaged chart to the artifact path - if err = r.Storage.CopyFromPath(&newArtifact, b.Path); err != nil { - err = fmt.Errorf("failed to write chart package to storage: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + if err = r.Storage.CopyFromPath(&artifact, b.Path); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("unable to copy Helm chart to storage: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } - // Update symlink - cUrl, err := r.Storage.Symlink(newArtifact, fmt.Sprintf("%s-latest.tgz", b.Name)) + // Record it on the object + obj.Status.Artifact = artifact.DeepCopy() + obj.Status.ObservedChartName = b.Name + + // Publish an event + r.AnnotatedEventf(obj, map[string]string{ + "revision": artifact.Revision, + "checksum": artifact.Checksum, + }, corev1.EventTypeNormal, reasonForBuild(b), b.Summary()) + + // Update symlink on a "best effort" basis + symURL, err := r.Storage.Symlink(artifact, "latest.tar.gz") if err != nil { - err = fmt.Errorf("storage error: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + r.eventLogf(ctx, obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "failed to update status URL 
symlink: %s", err) + } + if symURL != "" { + obj.Status.URL = symURL + } + return sreconcile.ResultSuccess, nil +} + +// getSource returns the v1beta1.Source for the given object, or an error describing why the source could not be +// returned. +func (r *HelmChartReconciler) getSource(ctx context.Context, obj *sourcev1.HelmChart) (sourcev1.Source, error) { + namespacedName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.SourceRef.Name, + } + var s sourcev1.Source + switch obj.Spec.SourceRef.Kind { + case sourcev1.HelmRepositoryKind: + var repo sourcev1.HelmRepository + if err := r.Client.Get(ctx, namespacedName, &repo); err != nil { + return nil, err + } + s = &repo + case sourcev1.GitRepositoryKind: + var repo sourcev1.GitRepository + if err := r.Client.Get(ctx, namespacedName, &repo); err != nil { + return nil, err + } + s = &repo + case sourcev1.BucketKind: + var bucket sourcev1.Bucket + if err := r.Client.Get(ctx, namespacedName, &bucket); err != nil { + return nil, err + } + s = &bucket + default: + return nil, fmt.Errorf("unsupported source kind '%s', must be one of: %v", obj.Spec.SourceRef.Kind, []string{ + sourcev1.HelmRepositoryKind, sourcev1.GitRepositoryKind, sourcev1.BucketKind}) + } + return s, nil +} + +// reconcileDelete handles the delete of an object. It first garbage collects all artifacts for the object from the +// artifact storage, if successful, the finalizer is removed from the object. +func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmChart) (sreconcile.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return sreconcile.ResultEmpty, err } - return sourcev1.HelmChartReady(c, newArtifact, cUrl, reasonForBuildSuccess(b), b.Summary()), nil + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + + // Stop reconciliation as the object is being deleted + return sreconcile.ResultEmpty, nil +} + +// garbageCollect performs a garbage collection for the given v1beta1.HelmChart. It removes all but the current +// artifact except for when the deletion timestamp is set, which will result in the removal of all artifacts for the +// resource. 
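The garbage collection rule documented above boils down to two cases: on deletion every artifact for the resource is removed, otherwise everything but the currently advertised artifact is. A stand-alone sketch follows, with a toy storage type standing in for the controller's Storage.RemoveAll and Storage.RemoveAllButCurrent.

package main

import "fmt"

// storage stands in for the controller's artifact storage.
type storage struct{ artifacts []string }

func (s *storage) removeAll() { s.artifacts = nil }

func (s *storage) removeAllButCurrent(current string) {
	kept := s.artifacts[:0]
	for _, a := range s.artifacts {
		if a == current {
			kept = append(kept, a)
		}
	}
	s.artifacts = kept
}

// garbageCollect applies the rule from the doc comment above: drop everything
// for a deleted object, otherwise keep only the current artifact.
func garbageCollect(s *storage, deleting bool, current string) {
	if deleting {
		s.removeAll()
		return
	}
	s.removeAllButCurrent(current)
}

func main() {
	s := &storage{artifacts: []string{
		"helmchart/default/podinfo/podinfo-6.0.0.tgz",
		"helmchart/default/podinfo/podinfo-6.0.1.tgz",
	}}
	garbageCollect(s, false, "helmchart/default/podinfo/podinfo-6.0.1.tgz")
	fmt.Println(s.artifacts) // [helmchart/default/podinfo/podinfo-6.0.1.tgz]
}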
+func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmChart) error { + if !obj.DeletionTimestamp.IsZero() { + if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + return &serror.Event{ + Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err), + Reason: "GarbageCollectionFailed", + } + } + obj.Status.Artifact = nil + // TODO(hidde): we should only push this event if we actually garbage collected something + r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") + return nil + } + if obj.GetArtifact() != nil { + if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + return &serror.Event{ + Err: fmt.Errorf("garbage collection of old artifacts failed: %w", err), + Reason: "GarbageCollectionFailed", + } + } + // TODO(hidde): we should only push this event if we actually garbage collected something + r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", "garbage collected old artifacts") + } + return nil } -// namespacedChartRepositoryCallback returns a chart.GetChartRepositoryCallback -// scoped to the given namespace. Credentials for retrieved v1beta1.HelmRepository -// objects are stored in the given directory. -// The returned callback returns a repository.ChartRepository configured with the -// retrieved v1beta1.HelmRepository, or a shim with defaults if no object could -// be found. +// namespacedChartRepositoryCallback returns a chart.GetChartRepositoryCallback scoped to the given namespace. +// Credentials for retrieved v1beta1.HelmRepository objects are stored in the given directory. +// The returned callback returns a repository.ChartRepository configured with the retrieved v1beta1.HelmRepository, +// or a shim with defaults if no object could be found. 
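In other words, the dependency manager hands this callback a repository URL and gets back either configuration resolved from a matching HelmRepository in the namespace, or a plain default shim for unknown URLs. A simplified stand-in of that contract follows (the real return type is repository.ChartRepository; the types here are illustrative only).

package main

import "fmt"

// chartRepository is a stand-in for repository.ChartRepository.
type chartRepository struct {
	URL            string
	HasCredentials bool
}

type getChartRepositoryCallback func(url string) (*chartRepository, error)

// namespacedCallback resolves known repositories by URL, or falls back to a
// shim with defaults when no HelmRepository matches.
func namespacedCallback(known map[string]*chartRepository) getChartRepositoryCallback {
	return func(url string) (*chartRepository, error) {
		if repo, ok := known[url]; ok {
			return repo, nil
		}
		return &chartRepository{URL: url}, nil
	}
}

func main() {
	cb := namespacedCallback(map[string]*chartRepository{
		"https://charts.example.com": {URL: "https://charts.example.com", HasCredentials: true},
	})
	known, _ := cb("https://charts.example.com")
	unknown, _ := cb("https://other.example.com")
	fmt.Println(known.HasCredentials, unknown.HasCredentials) // true false
}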
func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Context, dir, namespace string) chart.GetChartRepositoryCallback { return func(url string) (*repository.ChartRepository, error) { repo, err := r.resolveDependencyRepository(ctx, url, namespace) @@ -559,9 +814,10 @@ func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Cont helmgetter.WithTimeout(repo.Spec.Timeout.Duration), helmgetter.WithPassCredentialsAll(repo.Spec.PassCredentials), } - if secret, err := r.getHelmRepositorySecret(ctx, repo); err != nil { - return nil, err - } else if secret != nil { + if secret, err := r.getHelmRepositorySecret(ctx, repo); secret != nil || err != nil { + if err != nil { + return nil, err + } opts, err := getter.ClientOptionsFromSecret(dir, *secret) if err != nil { return nil, err @@ -579,99 +835,36 @@ func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Cont } } -func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, chart sourcev1.HelmChart) (ctrl.Result, error) { - // Our finalizer is still present, so lets handle garbage collection - if err := r.gc(chart); err != nil { - r.event(ctx, chart, events.EventSeverityError, - fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) - // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err - } - - // Record deleted status - r.recordReadiness(ctx, chart) - - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&chart, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &chart); err != nil { - return ctrl.Result{}, err - } - - // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil -} - -// resetStatus returns a modified v1beta1.HelmChart and a boolean indicating -// if the status field has been reset. -func (r *HelmChartReconciler) resetStatus(chart sourcev1.HelmChart) (sourcev1.HelmChart, bool) { - // We do not have an artifact, or it does no longer exist - if chart.GetArtifact() == nil || !r.Storage.ArtifactExist(*chart.GetArtifact()) { - chart = sourcev1.HelmChartProgressing(chart) - chart.Status.Artifact = nil - return chart, true - } - // The chart specification has changed - if chart.Generation != chart.Status.ObservedGeneration { - return sourcev1.HelmChartProgressing(chart), true +func (r *HelmChartReconciler) resolveDependencyRepository(ctx context.Context, url string, namespace string) (*sourcev1.HelmRepository, error) { + listOpts := []client.ListOption{ + client.InNamespace(namespace), + client.MatchingFields{sourcev1.HelmRepositoryURLIndexKey: url}, } - return chart, false -} - -// gc performs a garbage collection for the given v1beta1.HelmChart. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. -func (r *HelmChartReconciler) gc(chart sourcev1.HelmChart) error { - if !chart.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(chart.Kind, chart.GetObjectMeta(), "", "*")) + var list sourcev1.HelmRepositoryList + err := r.Client.List(ctx, &list, listOpts...) 
+ if err != nil { + return nil, fmt.Errorf("unable to retrieve HelmRepositoryList: %w", err) } - if chart.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*chart.GetArtifact()) + if len(list.Items) > 0 { + return &list.Items[0], nil } - return nil + return nil, fmt.Errorf("no HelmRepository found for '%s' in '%s' namespace", url, namespace) } -// event emits a Kubernetes event and forwards the event to notification -// controller if configured. -func (r *HelmChartReconciler) event(ctx context.Context, chart sourcev1.HelmChart, severity, msg string) { - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&chart, corev1.EventTypeNormal, severity, msg) - } - if r.ExternalEventRecorder != nil { - r.ExternalEventRecorder.Eventf(&chart, corev1.EventTypeNormal, severity, msg) +func (r *HelmChartReconciler) getHelmRepositorySecret(ctx context.Context, repository *sourcev1.HelmRepository) (*corev1.Secret, error) { + if repository.Spec.SecretRef == nil { + return nil, nil } -} - -func (r *HelmChartReconciler) recordReadiness(ctx context.Context, chart sourcev1.HelmChart) { - log := ctrl.LoggerFrom(ctx) - if r.MetricsRecorder == nil { - return + name := types.NamespacedName{ + Namespace: repository.GetNamespace(), + Name: repository.Spec.SecretRef.Name, } - objRef, err := reference.GetReference(r.Scheme, &chart) + var secret corev1.Secret + err := r.Client.Get(ctx, name, &secret) if err != nil { - log.Error(err, "unable to record readiness metric") - return - } - if rc := apimeta.FindStatusCondition(chart.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !chart.DeletionTimestamp.IsZero()) - } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !chart.DeletionTimestamp.IsZero()) - } -} - -func (r *HelmChartReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.HelmChartStatus) error { - var chart sourcev1.HelmChart - if err := r.Get(ctx, req.NamespacedName, &chart); err != nil { - return err + return nil, err } - - patch := client.MergeFrom(chart.DeepCopy()) - chart.Status = newStatus - - return r.Status().Patch(ctx, &chart, patch) + return &secret, nil } func (r *HelmChartReconciler) indexHelmRepositoryByURL(o client.Object) []string { @@ -694,40 +887,6 @@ func (r *HelmChartReconciler) indexHelmChartBySource(o client.Object) []string { return []string{fmt.Sprintf("%s/%s", hc.Spec.SourceRef.Kind, hc.Spec.SourceRef.Name)} } -func (r *HelmChartReconciler) resolveDependencyRepository(ctx context.Context, url string, namespace string) (*sourcev1.HelmRepository, error) { - listOpts := []client.ListOption{ - client.InNamespace(namespace), - client.MatchingFields{sourcev1.HelmRepositoryURLIndexKey: url}, - } - var list sourcev1.HelmRepositoryList - err := r.Client.List(ctx, &list, listOpts...) 
- if err != nil { - return nil, fmt.Errorf("unable to retrieve HelmRepositoryList: %w", err) - } - if len(list.Items) > 0 { - return &list.Items[0], nil - } - return nil, fmt.Errorf("no HelmRepository found for '%s' in '%s' namespace", url, namespace) -} - -func (r *HelmChartReconciler) getHelmRepositorySecret(ctx context.Context, repository *sourcev1.HelmRepository) (*corev1.Secret, error) { - if repository.Spec.SecretRef != nil { - name := types.NamespacedName{ - Namespace: repository.GetNamespace(), - Name: repository.Spec.SecretRef.Name, - } - - var secret corev1.Secret - err := r.Client.Get(ctx, name, &secret) - if err != nil { - err = fmt.Errorf("auth secret error: %w", err) - return nil, err - } - return &secret, nil - } - return nil, nil -} - func (r *HelmChartReconciler) requestsForHelmRepositoryChange(o client.Object) []reconcile.Request { repo, ok := o.(*sourcev1.HelmRepository) if !ok { @@ -746,13 +905,11 @@ func (r *HelmChartReconciler) requestsForHelmRepositoryChange(o client.Object) [ return nil } - // TODO(hidde): unlike other places (e.g. the helm-controller), - // we have no reference here to determine if the request is coming - // from the _old_ or _new_ update event, and resources are thus - // enqueued twice. var reqs []reconcile.Request for _, i := range list.Items { - reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + if i.Status.ObservedSourceArtifactRevision != repo.GetArtifact().Revision { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + } } return reqs } @@ -775,13 +932,11 @@ func (r *HelmChartReconciler) requestsForGitRepositoryChange(o client.Object) [] return nil } - // TODO(hidde): unlike other places (e.g. the helm-controller), - // we have no reference here to determine if the request is coming - // from the _old_ or _new_ update event, and resources are thus - // enqueued twice. var reqs []reconcile.Request for _, i := range list.Items { - reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + if i.Status.ObservedSourceArtifactRevision != repo.GetArtifact().Revision { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + } } return reqs } @@ -804,52 +959,63 @@ func (r *HelmChartReconciler) requestsForBucketChange(o client.Object) []reconci return nil } - // TODO(hidde): unlike other places (e.g. the helm-controller), - // we have no reference here to determine if the request is coming - // from the _old_ or _new_ update event, and resources are thus - // enqueued twice. 
var reqs []reconcile.Request for _, i := range list.Items { - reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + if i.Status.ObservedSourceArtifactRevision != bucket.GetArtifact().Revision { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + } } return reqs } -func (r *HelmChartReconciler) recordSuspension(ctx context.Context, chart sourcev1.HelmChart) { - if r.MetricsRecorder == nil { - return +func processChartBuild(obj *sourcev1.HelmChart, build *chart.Build, err error) { + if build.HasMetadata() { + if build.Name != obj.Status.ObservedChartName || !obj.GetArtifact().HasRevision(build.Version) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewChart", build.Summary()) + } } - log := ctrl.LoggerFrom(ctx) - objRef, err := reference.GetReference(r.Scheme, &chart) - if err != nil { - log.Error(err, "unable to record suspended metric") + if err == nil { + conditions.Delete(obj, sourcev1.FetchFailedCondition) + conditions.Delete(obj, sourcev1.BuildFailedCondition) return } - if !chart.DeletionTimestamp.IsZero() { - r.MetricsRecorder.RecordSuspend(*objRef, false) - } else { - r.MetricsRecorder.RecordSuspend(*objRef, chart.Spec.Suspend) - } -} - -func reasonForBuildError(err error) string { var buildErr *chart.BuildError if ok := errors.As(err, &buildErr); !ok { - return sourcev1.ChartPullFailedReason + buildErr = &chart.BuildError{ + Reason: chart.ErrUnknown, + Err: err, + } } + switch buildErr.Reason { case chart.ErrChartMetadataPatch, chart.ErrValuesFilesMerge, chart.ErrDependencyBuild, chart.ErrChartPackage: - return sourcev1.ChartPackageFailedReason + conditions.Delete(obj, sourcev1.FetchFailedCondition) + conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, buildErr.Reason.Reason, buildErr.Error()) default: - return sourcev1.ChartPullFailedReason + conditions.Delete(obj, sourcev1.BuildFailedCondition) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, buildErr.Reason.Reason, buildErr.Error()) } } -func reasonForBuildSuccess(result *chart.Build) string { - if result.Packaged { +func reasonForBuild(build *chart.Build) string { + if build.Packaged { return sourcev1.ChartPackageSucceededReason } return sourcev1.ChartPullSucceededReason } + +// eventLog records event and logs at the same time. This log is different from +// the debug log in the event recorder in the sense that this is a simple log, +// the event recorder debug log contains complete details about the event. +func (r *HelmChartReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. 
+ if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) + } else { + ctrl.LoggerFrom(ctx).Info(msg) + } + r.Eventf(obj, eventType, reason, msg) +} diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index d53afff0c..c333478dd 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -30,6 +30,7 @@ import ( "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/gittestserver" "github.com/fluxcd/pkg/helmtestserver" + "github.com/fluxcd/pkg/runtime/conditions" "github.com/go-git/go-billy/v5/memfs" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/config" @@ -49,7 +50,7 @@ import ( sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" ) -var _ = Describe("HelmChartReconciler", func() { +var _ = FDescribe("HelmChartReconciler", func() { const ( timeout = time.Second * 30 @@ -270,7 +271,7 @@ var _ = Describe("HelmChartReconciler", func() { got := &sourcev1.HelmChart{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) - return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && + return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && got.GetArtifact() != nil && ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) @@ -292,6 +293,9 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, updated) for _, c := range updated.Status.Conditions { + fmt.Fprintf(GinkgoWriter, "condition type: %s\n", c.Type) + fmt.Fprintf(GinkgoWriter, "condition reason: %s\n", c.Reason) + fmt.Fprintf(GinkgoWriter, "condition message: %s\n", c.Message) if c.Reason == sourcev1.ChartPullFailedReason && strings.Contains(c.Message, "failed to retrieve source") { return true @@ -394,12 +398,8 @@ var _ = Describe("HelmChartReconciler", func() { Expect(k8sClient.Update(context.Background(), chart)).Should(Succeed()) Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, chart) - for _, c := range chart.Status.Conditions { - if c.Reason == sourcev1.ChartPullFailedReason { - return true - } - } - return false + return conditions.GetReason(chart, sourcev1.FetchFailedCondition) == "InvalidChartReference" && + conditions.IsStalled(chart) }, timeout, interval).Should(BeTrue()) Expect(chart.GetArtifact()).NotTo(BeNil()) Expect(chart.Status.Artifact.Revision).Should(Equal("0.1.1")) @@ -495,13 +495,7 @@ var _ = Describe("HelmChartReconciler", func() { got := &sourcev1.HelmChart{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason && - strings.Contains(c.Message, "auth secret error") { - return true - } - } - return false + return conditions.GetReason(got, sourcev1.FetchFailedCondition) == sourcev1.AuthenticationFailedReason }, timeout, interval).Should(BeTrue()) By("Applying secret with missing keys") @@ -515,7 +509,7 @@ var _ = Describe("HelmChartReconciler", func() { got := &sourcev1.HelmChart{} _ = k8sClient.Get(context.Background(), key, got) for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.ChartPullFailedReason && + if c.Reason == "ChartPullError" && strings.Contains(c.Message, "401 Unauthorized") { return true } @@ -833,7 +827,7 @@ var _ = Describe("HelmChartReconciler", func() { // if the artifact was 
changed due to the current update. // Use status condition to be sure. for _, condn := range got.Status.Conditions { - if strings.Contains(condn.Message, "with merged values files [./testdata/charts/helmchart/override.yaml]") && + if strings.Contains(condn.Message, "merged values files [./testdata/charts/helmchart/override.yaml]") && ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) { return true } diff --git a/controllers/legacy_suite_test.go b/controllers/legacy_suite_test.go index 9edfdd799..46237896b 100644 --- a/controllers/legacy_suite_test.go +++ b/controllers/legacy_suite_test.go @@ -137,9 +137,9 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler") err = (&HelmChartReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: ginkgoTestStorage, + Client: k8sManager.GetClient(), + Storage: ginkgoTestStorage, + EventRecorder: record.NewFakeRecorder(32), Getters: getter.Providers{getter.Provider{ Schemes: []string{"http", "https"}, New: getter.NewHTTPGetter, diff --git a/internal/helm/chart/builder.go b/internal/helm/chart/builder.go index 9aa2a17e4..c44720c12 100644 --- a/internal/helm/chart/builder.go +++ b/internal/helm/chart/builder.go @@ -115,15 +115,16 @@ func (o BuildOptions) GetValuesFiles() []string { return o.ValuesFiles } -// Build contains the Builder.Build result, including specific +// Build contains the (partial) Builder.Build result, including specific // information about the built chart like ResolvedDependencies. type Build struct { - // Path is the absolute path to the packaged chart. - Path string - // Name of the packaged chart. + // Name of the chart. Name string - // Version of the packaged chart. + // Version of the chart. Version string + // Path is the absolute path to the packaged chart. + // Can be empty, in which case a failure should be assumed. + Path string // ValuesFiles is the list of files used to compose the chart's // default "values.yaml". ValuesFiles []string @@ -138,28 +139,43 @@ type Build struct { // Summary returns a human-readable summary of the Build. func (b *Build) Summary() string { - if b == nil || b.Name == "" || b.Version == "" { - return "No chart build." + if !b.HasMetadata() { + return "No chart build" } var s strings.Builder - var action = "Pulled" - if b.Packaged { - action = "Packaged" + var action = "New" + if b.Path != "" { + action = "Pulled" + if b.Packaged { + action = "Packaged" + } } s.WriteString(fmt.Sprintf("%s '%s' chart with version '%s'", action, b.Name, b.Version)) - if b.Packaged && len(b.ValuesFiles) > 0 { - s.WriteString(fmt.Sprintf(", with merged values files %v", b.ValuesFiles)) + if len(b.ValuesFiles) > 0 { + s.WriteString(fmt.Sprintf(" and merged values files %v", b.ValuesFiles)) } - if b.Packaged && b.ResolvedDependencies > 0 { - s.WriteString(fmt.Sprintf(", resolving %d dependencies before packaging", b.ResolvedDependencies)) + return s.String() +} + +// HasMetadata returns if the Build contains chart metadata. +// +// NOTE: This may return True while the build did not Complete successfully. +// Which means it was able to successfully collect the metadata from the chart, +// but failed further into the process. +func (b *Build) HasMetadata() bool { + if b == nil { + return false } + return b.Name != "" && b.Version != "" +} - s.WriteString(".") - return s.String() +// Complete returns if the Build completed successfully. 
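The HasMetadata/Complete split introduced above means a Build can legitimately carry a chart name and version while no packaged chart exists on disk, for example when the build failed after metadata resolution. A tiny stand-alone illustration follows; the build type is a stand-in for the internal chart.Build.

package main

import "fmt"

// build stands in for chart.Build: metadata may be present even when the
// packaged chart path is not.
type build struct {
	Name    string
	Version string
	Path    string
}

func (b *build) hasMetadata() bool { return b != nil && b.Name != "" && b.Version != "" }
func (b *build) complete() bool    { return b.hasMetadata() && b.Path != "" }

func main() {
	partial := &build{Name: "podinfo", Version: "6.0.0"}
	packaged := &build{Name: "podinfo", Version: "6.0.0", Path: "/tmp/podinfo-6.0.0.tgz"}

	fmt.Println(partial.hasMetadata(), partial.complete())   // true false
	fmt.Println(packaged.hasMetadata(), packaged.complete()) // true true
}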
+func (b *Build) Complete() bool { + return b.HasMetadata() && b.Path != "" } // String returns the Path of the Build. diff --git a/internal/helm/chart/builder_local.go b/internal/helm/chart/builder_local.go index 721238fe9..923008dcb 100644 --- a/internal/helm/chart/builder_local.go +++ b/internal/helm/chart/builder_local.go @@ -101,6 +101,9 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, result.Version = ver.String() } + isChartDir := pathIsDir(localRef.Path) + requiresPackaging := isChartDir || opts.VersionMetadata != "" || len(opts.GetValuesFiles()) != 0 + // If all the following is true, we do not need to package the chart: // - Chart name from cached chart matches resolved name // - Chart version from cached chart matches calculated version @@ -112,7 +115,9 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, if err = curMeta.Validate(); err == nil { if result.Name == curMeta.Name && result.Version == curMeta.Version { result.Path = opts.CachedChart - result.ValuesFiles = opts.ValuesFiles + result.ValuesFiles = opts.GetValuesFiles() + result.Packaged = requiresPackaging + return result, nil } } @@ -121,10 +126,9 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, // If the chart at the path is already packaged and no custom values files // options are set, we can copy the chart without making modifications - isChartDir := pathIsDir(localRef.Path) - if !isChartDir && len(opts.GetValuesFiles()) == 0 { + if !requiresPackaging { if err = copyFileToPath(localRef.Path, p); err != nil { - return nil, &BuildError{Reason: ErrChartPull, Err: err} + return result, &BuildError{Reason: ErrChartPull, Err: err} } result.Path = p return result, nil @@ -134,7 +138,7 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, var mergedValues map[string]interface{} if len(opts.GetValuesFiles()) > 0 { if mergedValues, err = mergeFileValues(localRef.WorkDir, opts.ValuesFiles); err != nil { - return nil, &BuildError{Reason: ErrValuesFilesMerge, Err: err} + return result, &BuildError{Reason: ErrValuesFilesMerge, Err: err} } } @@ -143,7 +147,7 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, // or because we have merged values and need to repackage chart, err := loader.Load(localRef.Path) if err != nil { - return nil, &BuildError{Reason: ErrChartPackage, Err: err} + return result, &BuildError{Reason: ErrChartPackage, Err: err} } // Set earlier resolved version (with metadata) chart.Metadata.Version = result.Version @@ -151,7 +155,7 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, // Overwrite default values with merged values, if any if ok, err = OverwriteChartDefaultValues(chart, mergedValues); ok || err != nil { if err != nil { - return nil, &BuildError{Reason: ErrValuesFilesMerge, Err: err} + return result, &BuildError{Reason: ErrValuesFilesMerge, Err: err} } result.ValuesFiles = opts.GetValuesFiles() } @@ -160,19 +164,19 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, if isChartDir { if b.dm == nil { err = fmt.Errorf("local chart builder requires dependency manager for unpackaged charts") - return nil, &BuildError{Reason: ErrDependencyBuild, Err: err} + return result, &BuildError{Reason: ErrDependencyBuild, Err: err} } if result.ResolvedDependencies, err = b.dm.Build(ctx, ref, chart); err != nil { - return nil, &BuildError{Reason: ErrDependencyBuild, Err: err} + return result, 
&BuildError{Reason: ErrDependencyBuild, Err: err} } } // Package the chart if err = packageToPath(chart, p); err != nil { - return nil, &BuildError{Reason: ErrChartPackage, Err: err} + return result, &BuildError{Reason: ErrChartPackage, Err: err} } result.Path = p - result.Packaged = true + result.Packaged = requiresPackaging return result, nil } diff --git a/internal/helm/chart/builder_remote.go b/internal/helm/chart/builder_remote.go index 3252ff226..778efd253 100644 --- a/internal/helm/chart/builder_remote.go +++ b/internal/helm/chart/builder_remote.go @@ -82,12 +82,13 @@ func (b *remoteChartBuilder) Build(_ context.Context, ref Reference, p string, o cv, err := b.remote.Get(remoteRef.Name, remoteRef.Version) if err != nil { err = fmt.Errorf("failed to get chart version for remote reference: %w", err) - return nil, &BuildError{Reason: ErrChartPull, Err: err} + return nil, &BuildError{Reason: ErrChartReference, Err: err} } result := &Build{} result.Name = cv.Name result.Version = cv.Version + // Set build specific metadata if instructed if opts.VersionMetadata != "" { ver, err := semver.NewVersion(result.Version) @@ -102,6 +103,8 @@ func (b *remoteChartBuilder) Build(_ context.Context, ref Reference, p string, o result.Version = ver.String() } + requiresPackaging := len(opts.GetValuesFiles()) != 0 || opts.VersionMetadata != "" + // If all the following is true, we do not need to download and/or build the chart: // - Chart name from cached chart matches resolved name // - Chart version from cached chart matches calculated version @@ -114,6 +117,7 @@ func (b *remoteChartBuilder) Build(_ context.Context, ref Reference, p string, o if result.Name == curMeta.Name && result.Version == curMeta.Version { result.Path = opts.CachedChart result.ValuesFiles = opts.GetValuesFiles() + result.Packaged = requiresPackaging return result, nil } } @@ -124,12 +128,12 @@ func (b *remoteChartBuilder) Build(_ context.Context, ref Reference, p string, o res, err := b.remote.DownloadChart(cv) if err != nil { err = fmt.Errorf("failed to download chart for remote reference: %w", err) - return nil, &BuildError{Reason: ErrChartPull, Err: err} + return result, &BuildError{Reason: ErrChartPull, Err: err} } // Use literal chart copy from remote if no custom values files options are - // set or build option version metadata isn't set. - if len(opts.GetValuesFiles()) == 0 && opts.VersionMetadata == "" { + // set or version metadata isn't set. 
+ if !requiresPackaging { if err = validatePackageAndWriteToPath(res, p); err != nil { return nil, &BuildError{Reason: ErrChartPull, Err: err} } @@ -141,14 +145,14 @@ func (b *remoteChartBuilder) Build(_ context.Context, ref Reference, p string, o var chart *helmchart.Chart if chart, err = loader.LoadArchive(res); err != nil { err = fmt.Errorf("failed to load downloaded chart: %w", err) - return nil, &BuildError{Reason: ErrChartPackage, Err: err} + return result, &BuildError{Reason: ErrChartPackage, Err: err} } chart.Metadata.Version = result.Version mergedValues, err := mergeChartValues(chart, opts.ValuesFiles) if err != nil { err = fmt.Errorf("failed to merge chart values: %w", err) - return nil, &BuildError{Reason: ErrValuesFilesMerge, Err: err} + return result, &BuildError{Reason: ErrValuesFilesMerge, Err: err} } // Overwrite default values with merged values, if any if ok, err = OverwriteChartDefaultValues(chart, mergedValues); ok || err != nil { diff --git a/internal/helm/chart/builder_test.go b/internal/helm/chart/builder_test.go index d797a209f..23c3952b0 100644 --- a/internal/helm/chart/builder_test.go +++ b/internal/helm/chart/builder_test.go @@ -138,42 +138,53 @@ func TestChartBuildResult_Summary(t *testing.T) { want string }{ { - name: "Simple", + name: "Build with metadata", build: &Build{ Name: "chart", Version: "1.2.3-rc.1+bd6bf40", }, - want: "Pulled 'chart' chart with version '1.2.3-rc.1+bd6bf40'.", + want: "New 'chart' chart with version '1.2.3-rc.1+bd6bf40'", }, { - name: "With values files", + name: "Pulled chart", + build: &Build{ + Name: "chart", + Version: "1.2.3-rc.1+bd6bf40", + Path: "chart.tgz", + }, + want: "Pulled 'chart' chart with version '1.2.3-rc.1+bd6bf40'", + }, + { + name: "Packaged chart", build: &Build{ Name: "chart", Version: "arbitrary-version", Packaged: true, ValuesFiles: []string{"a.yaml", "b.yaml"}, + Path: "chart.tgz", }, - want: "Packaged 'chart' chart with version 'arbitrary-version', with merged values files [a.yaml b.yaml].", + want: "Packaged 'chart' chart with version 'arbitrary-version' and merged values files [a.yaml b.yaml]", }, { - name: "With dependencies", + name: "With values files", build: &Build{ - Name: "chart", - Version: "arbitrary-version", - Packaged: true, - ResolvedDependencies: 5, + Name: "chart", + Version: "arbitrary-version", + Packaged: true, + ValuesFiles: []string{"a.yaml", "b.yaml"}, + Path: "chart.tgz", }, - want: "Packaged 'chart' chart with version 'arbitrary-version', resolving 5 dependencies before packaging.", + want: "Packaged 'chart' chart with version 'arbitrary-version' and merged values files [a.yaml b.yaml]", }, { name: "Empty build", build: &Build{}, - want: "No chart build.", + want: "No chart build", }, { name: "Nil build", build: nil, - want: "No chart build.", + want: "No chart build", }, } for _, tt := range tests { diff --git a/internal/helm/chart/errors.go b/internal/helm/chart/errors.go index dddd2e298..5b3a5bec0 100644 --- a/internal/helm/chart/errors.go +++ b/internal/helm/chart/errors.go @@ -22,22 +22,29 @@ import ( ) // BuildErrorReason is the descriptive reason for a BuildError. -type BuildErrorReason string +type BuildErrorReason struct { + // Reason is the programmatic build error reason in CamelCase. + Reason string + + // Summary is the human build error reason, used to provide + // the Error string, and further context to the BuildError. + Summary string +} // Error returns the string representation of BuildErrorReason. 
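The BuildErrorReason change above replaces the plain string reason with a struct carrying a CamelCase Reason (reused as the condition reason) and a human-readable Summary, while BuildError.Is can keep matching reasons by comparing the comparable struct value. A self-contained sketch of that behaviour using stand-in types (the real ones live in the internal helm/chart package):

package main

import (
	"errors"
	"fmt"
)

// buildErrorReason mirrors the new struct shape: a programmatic Reason and a
// human-readable Summary used for the error string.
type buildErrorReason struct {
	Reason  string
	Summary string
}

func (e buildErrorReason) Error() string { return e.Summary }

// buildError wraps an underlying error with a reason.
type buildError struct {
	Reason buildErrorReason
	Err    error
}

func (e *buildError) Error() string { return e.Reason.Error() + ": " + e.Err.Error() }
func (e *buildError) Unwrap() error { return e.Err }

// Is lets errors.Is match on the reason value itself.
func (e *buildError) Is(target error) bool {
	if e.Reason == target {
		return true
	}
	return errors.Is(e.Err, target)
}

var errChartPull = buildErrorReason{Reason: "ChartPullError", Summary: "chart pull error"}

func main() {
	err := &buildError{Reason: errChartPull, Err: errors.New("401 Unauthorized")}
	fmt.Println(errors.Is(err, errChartPull)) // true
	fmt.Println(err)                          // chart pull error: 401 Unauthorized
}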
func (e BuildErrorReason) Error() string { - return string(e) + return e.Summary } // BuildError contains a wrapped Err and a Reason indicating why it occurred. type BuildError struct { - Reason error + Reason BuildErrorReason Err error } // Error returns Err as a string, prefixed with the Reason to provide context. func (e *BuildError) Error() string { - if e.Reason == nil { + if e.Reason.Error() == "" { return e.Err.Error() } return fmt.Sprintf("%s: %s", e.Reason.Error(), e.Err.Error()) @@ -49,7 +56,7 @@ func (e *BuildError) Error() string { // err := &BuildError{Reason: ErrChartPull, Err: errors.New("arbitrary transport error")} // errors.Is(err, ErrChartPull) func (e *BuildError) Is(target error) bool { - if e.Reason != nil && e.Reason == target { + if e.Reason == target { return true } return errors.Is(e.Err, target) @@ -60,11 +67,21 @@ func (e *BuildError) Unwrap() error { return e.Err } +func IsPersistentBuildErrorReason(err error) bool { + switch err { + case ErrChartReference, ErrChartMetadataPatch, ErrValuesFilesMerge: + return true + default: + return false + } +} + var ( - ErrChartReference = BuildErrorReason("chart reference error") - ErrChartPull = BuildErrorReason("chart pull error") - ErrChartMetadataPatch = BuildErrorReason("chart metadata patch error") - ErrValuesFilesMerge = BuildErrorReason("values files merge error") - ErrDependencyBuild = BuildErrorReason("dependency build error") - ErrChartPackage = BuildErrorReason("chart package error") + ErrChartReference = BuildErrorReason{Reason: "InvalidChartReference", Summary: "invalid chart reference"} + ErrChartPull = BuildErrorReason{Reason: "ChartPullError", Summary: "chart pull error"} + ErrChartMetadataPatch = BuildErrorReason{Reason: "MetadataPatchError", Summary: "chart metadata patch error"} + ErrValuesFilesMerge = BuildErrorReason{Reason: "ValuesFilesError", Summary: "values files merge error"} + ErrDependencyBuild = BuildErrorReason{Reason: "DependencyBuildError", Summary: "dependency build error"} + ErrChartPackage = BuildErrorReason{Reason: "ChartPackageError", Summary: "chart package error"} + ErrUnknown = BuildErrorReason{Reason: "Unknown", Summary: "unknown build error"} ) diff --git a/internal/helm/chart/errors_test.go b/internal/helm/chart/errors_test.go index f006f3364..13428e6cd 100644 --- a/internal/helm/chart/errors_test.go +++ b/internal/helm/chart/errors_test.go @@ -26,7 +26,7 @@ import ( func TestBuildErrorReason_Error(t *testing.T) { g := NewWithT(t) - err := BuildErrorReason("reason") + err := BuildErrorReason{"Reason", "reason"} g.Expect(err.Error()).To(Equal("reason")) } @@ -39,7 +39,7 @@ func TestBuildError_Error(t *testing.T) { { name: "with reason", err: &BuildError{ - Reason: BuildErrorReason("reason"), + Reason: BuildErrorReason{"Reason", "reason"}, Err: errors.New("error"), }, want: "reason: error", diff --git a/main.go b/main.go index 31bd1cb9f..270548035 100644 --- a/main.go +++ b/main.go @@ -189,12 +189,11 @@ func main() { os.Exit(1) } if err = (&controllers.HelmChartReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - Getters: getters, - EventRecorder: eventRecorder, - MetricsRecorder: metricsH.MetricsRecorder, + Client: mgr.GetClient(), + Storage: storage, + Getters: getters, + EventRecorder: eventRecorder, + Metrics: metricsH, }).SetupWithManagerAndOptions(mgr, controllers.HelmChartReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { From 527fce05df0a9960aec3ea6afd7b0e65995e5e47 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: 
Fri, 21 Jan 2022 14:45:26 +0100 Subject: [PATCH 43/63] Rewrite HelmChartReconciler tests Signed-off-by: Hidde Beydals --- controllers/helmchart_controller.go | 188 +- controllers/helmchart_controller_test.go | 2601 ++++++++++++---------- internal/helm/chart/builder_local.go | 2 +- 3 files changed, 1496 insertions(+), 1295 deletions(-) diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index 38a959836..93ffad3ca 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -314,13 +314,7 @@ func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, obj *sourcev return sreconcile.ResultSuccess, nil } -// reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the -// result. -// If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret -// fails, it records v1beta1.FetchFailedCondition=True and returns early. -// -// The caller should assume a failure if an error is returned, or the BuildResult is zero. -func (r *HelmChartReconciler) reconcileSource(ctx context.Context, obj *sourcev1.HelmChart, build *chart.Build) (sreconcile.Result, error) { +func (r *HelmChartReconciler) reconcileSource(ctx context.Context, obj *sourcev1.HelmChart, build *chart.Build) (_ sreconcile.Result, retErr error) { // Retrieve the source s, err := r.getSource(ctx, obj) if err != nil { @@ -332,7 +326,7 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, obj *sourcev1 // Return Kubernetes client errors, but ignore others which can only be // solved by a change in generation - if apierrs.ReasonForError(err) != metav1.StatusReasonUnknown { + if apierrs.ReasonForError(err) == metav1.StatusReasonUnknown { return sreconcile.ResultEmpty, &serror.Stalling{ Err: fmt.Errorf("failed to get source: %w", err), Reason: "UnsupportedSourceKind", @@ -353,12 +347,44 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, obj *sourcev1 // Record current artifact revision as last observed obj.Status.ObservedSourceArtifactRevision = s.GetArtifact().Revision - // Perform the reconciliation for the chart source type + // Defer observation of build result + defer func() { + // Record both success and error observations on the object + observeChartBuild(obj, build, retErr) + + // If we actually build a chart, take a historical note of any dependencies we resolved. + // The reason this is a done conditionally, is because if we have a cached one in storage, + // we can not recover this information (and put it in a condition). Which would result in + // a sudden (partial) disappearance of observed state. + // TODO(hidde): include specific name/version information? 
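// An illustrative sketch, not part of this patch: the deferred observation
// above and the assertions in the rewritten tests key off build.Complete().
// The chart.Build implementation is outside this excerpt; the assumption here
// is that a build is "complete" once it has chart metadata and a path on
// disk, which matches the TestChartBuildResult_Summary changes above.
package sketch

// Build is a trimmed stand-in for chart.Build, for illustration only.
type Build struct {
	Name    string
	Version string
	Path    string
}

// Complete mirrors the assumed semantics of (*chart.Build).Complete().
func (b *Build) Complete() bool {
	return b != nil && b.Name != "" && b.Version != "" && b.Path != ""
}

// (&Build{Name: "helmchart", Version: "0.1.0"}).Complete()                    == false (nothing on disk yet)
// (&Build{Name: "helmchart", Version: "0.1.0", Path: "chart.tgz"}).Complete() == true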
+ if depNum := build.ResolvedDependencies; build.Complete() && depNum > 0 { + r.Eventf(obj, corev1.EventTypeNormal, "ResolvedDependencies", "Resolved %d chart dependencies", depNum) + } + + // Handle any build error + if retErr != nil { + e := fmt.Errorf("failed to build chart from source artifact: %w", retErr) + retErr = &serror.Event{ + Err: e, + Reason: meta.FailedReason, + } + if buildErr := new(chart.BuildError); errors.As(e, &buildErr) { + if chart.IsPersistentBuildErrorReason(buildErr.Reason) { + retErr = &serror.Stalling{ + Err: e, + Reason: buildErr.Reason.Reason, + } + } + } + } + }() + + // Perform the build for the chart source type switch typedSource := s.(type) { case *sourcev1.HelmRepository: - return r.reconcileFromHelmRepository(ctx, obj, typedSource, build) + return r.buildFromHelmRepository(ctx, obj, typedSource, build) case *sourcev1.GitRepository, *sourcev1.Bucket: - return r.reconcileFromTarballArtifact(ctx, obj, *typedSource.GetArtifact(), build) + return r.buildFromTarballArtifact(ctx, obj, *typedSource.GetArtifact(), build) default: // Ending up here should generally not be possible // as getSource already validates @@ -366,7 +392,7 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, obj *sourcev1 } } -func (r *HelmChartReconciler) reconcileFromHelmRepository(ctx context.Context, obj *sourcev1.HelmChart, +func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj *sourcev1.HelmChart, repo *sourcev1.HelmRepository, b *chart.Build) (sreconcile.Result, error) { // Construct the Getter options from the HelmRepository data @@ -454,34 +480,15 @@ func (r *HelmChartReconciler) reconcileFromHelmRepository(ctx context.Context, o // Build the chart ref := chart.RemoteReference{Name: obj.Spec.Chart, Version: obj.Spec.Version} build, err := cb.Build(ctx, ref, util.TempPathForObj("", ".tgz", obj), opts) - - // Record both success _and_ error observations on the object - processChartBuild(obj, build, err) - - // Handle any build error if err != nil { - e := fmt.Errorf("failed to build chart from remote source: %w", err) - reason := meta.FailedReason - if buildErr := new(chart.BuildError); errors.As(err, &buildErr) { - reason = buildErr.Reason.Reason - if chart.IsPersistentBuildErrorReason(buildErr.Reason) { - return sreconcile.ResultEmpty, &serror.Stalling{ - Err: e, - Reason: reason, - } - } - } - return sreconcile.ResultEmpty, &serror.Event{ - Err: e, - Reason: reason, - } + return sreconcile.ResultEmpty, err } *b = *build return sreconcile.ResultSuccess, nil } -func (r *HelmChartReconciler) reconcileFromTarballArtifact(ctx context.Context, obj *sourcev1.HelmChart, source sourcev1.Artifact, b *chart.Build) (sreconcile.Result, error) { +func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj *sourcev1.HelmChart, source sourcev1.Artifact, b *chart.Build) (sreconcile.Result, error) { // Create temporary working directory tmpDir, err := util.TempDirForObj("", obj) if err != nil { @@ -533,7 +540,7 @@ func (r *HelmChartReconciler) reconcileFromTarballArtifact(ctx context.Context, chartPath, err := securejoin.SecureJoin(sourceDir, obj.Spec.Chart) if err != nil { e := &serror.Stalling{ - Err: fmt.Errorf("Path calculation for chart '%s' failed: %w", obj.Spec.Chart, err), + Err: fmt.Errorf("path calculation for chart '%s' failed: %w", obj.Spec.Chart, err), Reason: "IllegalPath", } conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "IllegalPath", e.Err.Error()) @@ -563,12 +570,6 @@ func (r *HelmChartReconciler) 
reconcileFromTarballArtifact(ctx context.Context, opts.CachedChart = r.Storage.LocalPath(*artifact) } - // Add revision metadata to chart build - if obj.Spec.ReconcileStrategy == sourcev1.ReconcileStrategyRevision { - // Isolate the commit SHA from GitRepository type artifacts by removing the branch/ prefix. - splitRev := strings.Split(source.Revision, "/") - opts.VersionMetadata = splitRev[len(splitRev)-1] - } // Configure revision metadata for chart build if we should react to revision changes if obj.Spec.ReconcileStrategy == sourcev1.ReconcileStrategyRevision { rev := source.Revision @@ -593,6 +594,14 @@ func (r *HelmChartReconciler) reconcileFromTarballArtifact(ctx context.Context, } opts.VersionMetadata = rev } + // Set the VersionMetadata to the object's Generation if ValuesFiles is defined, + // this ensures changes can be noticed by the Artifact consumer + if len(opts.GetValuesFiles()) > 0 { + if opts.VersionMetadata != "" { + opts.VersionMetadata += "." + } + opts.VersionMetadata += strconv.FormatInt(obj.Generation, 10) + } // Build chart cb := chart.NewLocalBuilder(dm) @@ -600,36 +609,8 @@ func (r *HelmChartReconciler) reconcileFromTarballArtifact(ctx context.Context, WorkDir: sourceDir, Path: chartPath, }, util.TempPathForObj("", ".tgz", obj), opts) - - // Record both success _and_ error observations on the object - processChartBuild(obj, build, err) - - // Handle any build error if err != nil { - e := fmt.Errorf("failed to build chart from source artifact: %w", err) - reason := meta.FailedReason - if buildErr := new(chart.BuildError); errors.As(err, &buildErr) { - reason = buildErr.Reason.Reason - if chart.IsPersistentBuildErrorReason(buildErr.Reason) { - return sreconcile.ResultEmpty, &serror.Stalling{ - Err: e, - Reason: reason, - } - } - } - return sreconcile.ResultEmpty, &serror.Event{ - Err: e, - Reason: reason, - } - } - - // If we actually build a chart, take a historical note of any dependencies we resolved. - // The reason this is a done conditionally, is because if we have a cached one in storage, - // we can not recover this information (and put it in a condition). Which would result in - // a sudden (partial) disappearance of observed state. - // TODO(hidde): include specific name/version information? - if depNum := build.ResolvedDependencies; depNum > 0 { - r.eventLogf(ctx, obj, corev1.EventTypeNormal, "ResolvedDependencies", "resolved %d chart dependencies", depNum) + return sreconcile.ResultEmpty, err } *b = *build @@ -761,7 +742,7 @@ func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, obj *sourcev1 } // garbageCollect performs a garbage collection for the given v1beta1.HelmChart. It removes all but the current -// artifact except for when the deletion timestamp is set, which will result in the removal of all artifacts for the +// artifact, unless the deletion timestamp is set. Which will result in the removal of all artifacts for the // resource. func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmChart) error { if !obj.DeletionTimestamp.IsZero() { @@ -839,6 +820,7 @@ func (r *HelmChartReconciler) resolveDependencyRepository(ctx context.Context, u listOpts := []client.ListOption{ client.InNamespace(namespace), client.MatchingFields{sourcev1.HelmRepositoryURLIndexKey: url}, + client.Limit(1), } var list sourcev1.HelmRepositoryList err := r.Client.List(ctx, &list, listOpts...) 
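// An illustrative sketch, not part of this patch: the buildFromTarballArtifact
// hunks above compose opts.VersionMetadata from the source revision (Revision
// reconcile strategy) and the object generation (when values files are
// merged), so Artifact consumers notice spec changes. The helper name and
// parameters below are illustrative; the result surfaces as SemVer build
// metadata, e.g. "0.1.0+abcdefg12345" or "0.1.0+3" in the rewritten tests.
package sketch

import "strconv"

func chartVersionMetadata(revision string, generation int64, hasValuesFiles bool) string {
	meta := revision // empty unless the Revision reconcile strategy applies
	if hasValuesFiles {
		if meta != "" {
			meta += "."
		}
		meta += strconv.FormatInt(generation, 10)
	}
	return meta
}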
@@ -968,54 +950,60 @@ func (r *HelmChartReconciler) requestsForBucketChange(o client.Object) []reconci return reqs } -func processChartBuild(obj *sourcev1.HelmChart, build *chart.Build, err error) { +// eventLogf records event and logs at the same time. This log is different from +// the debug log in the event recorder in the sense that this is a simple log, +// the event recorder debug log contains complete details about the event. +func (r *HelmChartReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. + if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) + } else { + ctrl.LoggerFrom(ctx).Info(msg) + } + r.Eventf(obj, eventType, reason, msg) +} + +// observeChartBuild records the observation on the given given build and error on the object. +func observeChartBuild(obj *sourcev1.HelmChart, build *chart.Build, err error) { if build.HasMetadata() { if build.Name != obj.Status.ObservedChartName || !obj.GetArtifact().HasRevision(build.Version) { conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewChart", build.Summary()) } } - if err == nil { + if build.Complete() { conditions.Delete(obj, sourcev1.FetchFailedCondition) conditions.Delete(obj, sourcev1.BuildFailedCondition) - return } - var buildErr *chart.BuildError - if ok := errors.As(err, &buildErr); !ok { - buildErr = &chart.BuildError{ - Reason: chart.ErrUnknown, - Err: err, + if err != nil { + var buildErr *chart.BuildError + if ok := errors.As(err, &buildErr); !ok { + buildErr = &chart.BuildError{ + Reason: chart.ErrUnknown, + Err: err, + } } - } - switch buildErr.Reason { - case chart.ErrChartMetadataPatch, chart.ErrValuesFilesMerge, chart.ErrDependencyBuild, chart.ErrChartPackage: - conditions.Delete(obj, sourcev1.FetchFailedCondition) - conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, buildErr.Reason.Reason, buildErr.Error()) - default: - conditions.Delete(obj, sourcev1.BuildFailedCondition) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, buildErr.Reason.Reason, buildErr.Error()) + switch buildErr.Reason { + case chart.ErrChartMetadataPatch, chart.ErrValuesFilesMerge, chart.ErrDependencyBuild, chart.ErrChartPackage: + conditions.Delete(obj, sourcev1.FetchFailedCondition) + conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, buildErr.Reason.Reason, buildErr.Error()) + default: + conditions.Delete(obj, sourcev1.BuildFailedCondition) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, buildErr.Reason.Reason, buildErr.Error()) + } + return } } func reasonForBuild(build *chart.Build) string { + if !build.Complete() { + return "" + } if build.Packaged { return sourcev1.ChartPackageSucceededReason } return sourcev1.ChartPullSucceededReason } - -// eventLog records event and logs at the same time. This log is different from -// the debug log in the event recorder in the sense that this is a simple log, -// the event recorder debug log contains complete details about the event. -func (r *HelmChartReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { - msg := fmt.Sprintf(messageFmt, args...) - // Log and emit event. 
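// An illustrative sketch, not part of this patch: with BuildErrorReason now a
// comparable struct, the sentinel reasons consumed by observeChartBuild above
// still work with errors.Is, because (*BuildError).Is compares the wrapped
// Reason by value. The function below is hypothetical and only compiles
// inside this repository, as the chart package is internal.
package sketch

import (
	"errors"

	"github.com/fluxcd/source-controller/internal/helm/chart"
)

func sentinelMatching() (matched, persistent bool) {
	err := &chart.BuildError{
		Reason: chart.ErrChartPull,
		Err:    errors.New("arbitrary transport error"),
	}
	// Matched via (*BuildError).Is, even though Reason is no longer a string type.
	matched = errors.Is(err, chart.ErrChartPull)
	// Pull errors are not persistent, so observeChartBuild marks FetchFailed
	// and the reconciler retries instead of stalling.
	persistent = chart.IsPersistentBuildErrorReason(chart.ErrChartPull)
	return matched, persistent // true, false
}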
- if eventType == corev1.EventTypeWarning { - ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) - } else { - ctrl.LoggerFrom(ctx).Info(msg) - } - r.Eventf(obj, eventType, reason, msg) -} diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index c333478dd..a97d43eee 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -18,1314 +18,1527 @@ package controllers import ( "context" + "errors" "fmt" + "io" "net/http" - "net/url" "os" - "path" "path/filepath" + "reflect" "strings" + "testing" "time" - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/gittestserver" - "github.com/fluxcd/pkg/helmtestserver" - "github.com/fluxcd/pkg/runtime/conditions" - "github.com/go-git/go-billy/v5/memfs" - "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/storage/memory" - . "github.com/onsi/ginkgo" + "github.com/darkowlzz/controller-check/status" . "github.com/onsi/gomega" - helmchart "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chart/loader" - "helm.sh/helm/v3/pkg/chartutil" corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/yaml" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/helmtestserver" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/testserver" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/helm/chart" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" ) -var _ = FDescribe("HelmChartReconciler", func() { - - const ( - timeout = time.Second * 30 - interval = time.Second * 1 - indexInterval = time.Second * 2 - pullInterval = time.Second * 3 - ) - - Context("HelmChart from HelmRepository", func() { - var ( - namespace *corev1.Namespace - helmServer *helmtestserver.HelmServer - err error - ) - - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "helm-chart-test-" + randStringRunes(5)}, +func TestHelmChartReconciler_reconcileStorage(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.HelmChart, storage *Storage) error + want sreconcile.Result + wantErr bool + assertArtifact *sourcev1.Artifact + assertConditions []metav1.Condition + assertPaths []string + }{ + { + name: "garbage collects", + beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error { + revisions := []string{"a", "b", "c"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0644); err != nil { + return err + } + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/c.txt", + Revision: "c", + Checksum: "2e7d2c03a9507ae265ecf5b5356885a53393a2029d241394997265a1a25aefc6", + URL: 
testStorage.Hostname + "/reconcile-storage/c.txt", + }, + assertPaths: []string{ + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + want: sreconcile.ResultSuccess, + }, + { + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: "/reconcile-storage/invalid.txt", + Revision: "d", + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0644); err != nil { + return err + } + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") - helmServer, err = helmtestserver.NewTempHelmServer() - Expect(err).To(Succeed()) - helmServer.Start() - }) - - AfterEach(func() { - helmServer.Stop() - os.RemoveAll(helmServer.Root()) - - err = k8sClient.Delete(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace") - }) + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + }, + } + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } - It("Creates artifacts for", func() { - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) + got, err := r.reconcileStorage(context.TODO(), obj, nil) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) - repositoryKey := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) } - Expect(k8sClient.Create(context.Background(), &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - Interval: metav1.Duration{Duration: indexInterval}, - }, - })).Should(Succeed()) + 
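// An illustrative sketch, not part of this patch: the literal Checksum values
// in the cases above are the SHA256 hex digests of the artifact contents
// written by beforeFunc ("c" and "file"), assuming the storage checksums the
// raw file bytes.
package sketch

import (
	"crypto/sha256"
	"fmt"
)

func artifactChecksum(contents []byte) string {
	return fmt.Sprintf("%x", sha256.Sum256(contents))
}

// artifactChecksum([]byte("c"))    == "2e7d2c03a9507ae265ecf5b5356885a53393a2029d241394997265a1a25aefc6"
// artifactChecksum([]byte("file")) == "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80"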
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) } - created := &sourcev1.HelmChart{ + }) + } +} + +func TestHelmChartReconciler_reconcileSource(t *testing.T) { + g := NewWithT(t) + + tmpDir, err := os.MkdirTemp("", "reconcile-tarball-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + storage, err := NewStorage(tmpDir, "example.com", timeout) + g.Expect(err).ToNot(HaveOccurred()) + + gitArtifact := &sourcev1.Artifact{ + Revision: "mock-ref/abcdefg12345678", + Path: "mock.tgz", + } + g.Expect(storage.Archive(gitArtifact, "testdata/charts", nil)).To(Succeed()) + + tests := []struct { + name string + source sourcev1.Source + beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr error + assertFunc func(g *WithT, build chart.Build, obj sourcev1.HelmChart) + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "Observes Artifact revision and build result", + source: &sourcev1.GitRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, + Name: "gitrepository", + Namespace: "default", }, - Spec: sourcev1.HelmChartSpec{ - Chart: "helmchart", - Version: "", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.HelmRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: pullInterval}, + Status: sourcev1.GitRepositoryStatus{ + Artifact: gitArtifact, }, - } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - - By("Expecting artifact") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeFalse()) - - By("Packaging a new chart version and regenerating the index") - Expect(helmServer.PackageChartWithVersion(path.Join("testdata/charts/helmchart"), "0.2.0")).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - By("Expecting new artifact revision and GC") - Eventually(func() bool { - now := &sourcev1.HelmChart{} - _ = k8sClient.Get(context.Background(), key, now) - // Test revision change and garbage collection - return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - When("Setting valid valuesFiles attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFiles = []string{ - "values.yaml", - "override.yaml", + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "gitrepository", + Kind: sourcev1.GitRepositoryKind, } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - 
Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting invalid valuesFiles attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFiles = []string{ - "values.yaml", - "invalid.yaml", + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeTrue()) + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).To(BeARegularFile()) + + g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal(gitArtifact.Revision)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewChart", "Pulled 'helmchart' chart with version '0.1.0'"), + })) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Error on unavailable source", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "unavailable", + Kind: sourcev1.GitRepositoryKind, } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting valid valuesFiles and valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "values.yaml" - updated.Spec.ValuesFiles = []string{ - "override.yaml", + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Event{Err: errors.New("gitrepositories.source.toolkit.fluxcd.io \"unavailable\" not found")}, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "SourceUnavailable", "failed to get source: gitrepositories.source.toolkit.fluxcd.io \"unavailable\" not found"), + })) + }, + }, + { + name: "Stalling on unsupported source kind", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "unavailable", + 
Kind: "Unsupported", } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting valid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "override.yaml" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - _, exists := helmChart.Values["testDefault"] - Expect(exists).To(BeFalse()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting identical valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "duplicate.yaml" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeFalse()) - }) - - When("Setting invalid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "invalid.yaml" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && got.GetArtifact() != nil && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - 
Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeFalse()) - }) - - By("Expecting missing HelmRepository error") - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).Should(Succeed()) - updated.Spec.SourceRef.Name = "invalid" - updated.Spec.ValuesFile = "" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed()) - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, updated) - for _, c := range updated.Status.Conditions { - fmt.Fprintf(GinkgoWriter, "condition type: %s\n", c.Type) - fmt.Fprintf(GinkgoWriter, "condition reason: %s\n", c.Reason) - fmt.Fprintf(GinkgoWriter, "condition message: %s\n", c.Message) - if c.Reason == sourcev1.ChartPullFailedReason && - strings.Contains(c.Message, "failed to retrieve source") { - return true - } + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("unsupported source kind 'Unsupported'")}, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "SourceUnavailable", "failed to get source: unsupported source kind"), + })) + }, + }, + //{ + // name: "Error on transient build error", + //}, + { + name: "Stalling on persistent build error", + source: &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "default", + }, + Status: sourcev1.GitRepositoryStatus{ + Artifact: gitArtifact, + }, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "gitrepository", + Kind: sourcev1.GitRepositoryKind, } - return false - }, timeout, interval).Should(BeTrue()) - Expect(updated.Status.Artifact).ToNot(BeNil()) - - By("Expecting to delete successfully") - got = &sourcev1.HelmChart{} - Eventually(func() error { - _ = k8sClient.Get(context.Background(), key, got) - return k8sClient.Delete(context.Background(), got) - }, timeout, interval).Should(Succeed()) - - By("Expecting delete to finish") - Eventually(func() error { - c := &sourcev1.HelmChart{} - return k8sClient.Get(context.Background(), key, c) - }, timeout, interval).ShouldNot(Succeed()) - - exists := func(path string) bool { - // wait for tmp sync on macOS - time.Sleep(time.Second) - _, err := os.Stat(path) - return err == nil + obj.Spec.ValuesFiles = []string{"invalid.yaml"} + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("values files merge error: no values file found at path")}, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ValuesFilesError", "values files merge error: no values file found at path"), + })) + }, + }, + { + name: "ResultRequeue when source artifact is unavailable", + source: &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "default", + }, + Status: sourcev1.GitRepositoryStatus{}, + 
}, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "gitrepository", + Kind: sourcev1.GitRepositoryKind, + } + obj.Status.ObservedSourceArtifactRevision = "foo" + }, + want: sreconcile.ResultRequeue, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal("foo")) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "NoSourceArtifact", "no artifact available"), + })) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fake.NewClientBuilder() + if tt.source != nil { + clientBuilder.WithRuntimeObjects(tt.source) } - By("Expecting GC on delete") - Eventually(exists(got.Status.Artifact.Path), timeout, interval).ShouldNot(BeTrue()) - }) - - It("Filters versions", func() { - versions := []string{"0.1.0", "0.1.1", "0.2.0", "0.3.0-rc.1", "1.0.0-alpha.1", "1.0.0"} - for k := range versions { - Expect(helmServer.PackageChartWithVersion(path.Join("testdata/charts/helmchart"), versions[k])).Should(Succeed()) + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: storage, } - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - repositoryKey := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - repository := &sourcev1.HelmRepository{ + obj := sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - Interval: metav1.Duration{Duration: 1 * time.Hour}, + Name: "chart", + Namespace: "default", }, + Spec: sourcev1.HelmChartSpec{}, + } + if tt.beforeFunc != nil { + tt.beforeFunc(&obj) } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) } - chart := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmChartSpec{ - Chart: "helmchart", - Version: "*", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.HelmRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: 1 * time.Hour}, - }, + + got, err := r.reconcileSource(context.TODO(), &obj, &b) + + g.Expect(err != nil).To(Equal(tt.wantErr != nil)) + if tt.wantErr != nil { + g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) } - Expect(k8sClient.Create(context.Background(), chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) + g.Expect(got).To(Equal(tt.want)) - Eventually(func() string { - _ = k8sClient.Get(context.Background(), key, chart) - if chart.Status.Artifact != nil { - return chart.Status.Artifact.Revision - } - return "" - }, timeout, interval).Should(Equal("1.0.0")) - - chart.Spec.Version = "<0.2.0" - Expect(k8sClient.Update(context.Background(), chart)).Should(Succeed()) - 
Eventually(func() string { - _ = k8sClient.Get(context.Background(), key, chart) - if chart.Status.Artifact != nil { - return chart.Status.Artifact.Revision - } - return "" - }, timeout, interval).Should(Equal("0.1.1")) - - chart.Spec.Version = "invalid" - Expect(k8sClient.Update(context.Background(), chart)).Should(Succeed()) - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, chart) - return conditions.GetReason(chart, sourcev1.FetchFailedCondition) == "InvalidChartReference" && - conditions.IsStalled(chart) - }, timeout, interval).Should(BeTrue()) - Expect(chart.GetArtifact()).NotTo(BeNil()) - Expect(chart.Status.Artifact.Revision).Should(Equal("0.1.1")) + if tt.assertFunc != nil { + tt.assertFunc(g, b, obj) + } }) + } +} - It("Authenticates when credentials are provided", func() { - helmServer.Stop() - var username, password = "john", "doe" - helmServer.WithMiddleware(func(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - u, p, ok := r.BasicAuth() - if !ok || username != u || password != p { - w.WriteHeader(401) - return - } - handler.ServeHTTP(w, r) - }) - }) - helmServer.Start() +func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { + g := NewWithT(t) - Expect(helmServer.PackageChartWithVersion(path.Join("testdata/charts/helmchart"), "0.1.0")).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) + const ( + chartName = "helmchart" + chartVersion = "0.2.0" + higherChartVersion = "0.3.0" + chartPath = "testdata/charts/helmchart" + ) - secretKey := types.NamespacedName{ - Name: "helmrepository-auth-" + randStringRunes(5), - Namespace: namespace.Name, - } - secret := &corev1.Secret{ + serverFactory, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(serverFactory.Root()) + + for _, ver := range []string{chartVersion, higherChartVersion} { + g.Expect(serverFactory.PackageChartWithVersion(chartPath, ver)).To(Succeed()) + } + g.Expect(serverFactory.GenerateIndex()).To(Succeed()) + + type options struct { + username string + password string + } + + tests := []struct { + name string + server options + secret *corev1.Secret + beforeFunc func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) + want sreconcile.Result + wantErr error + assertFunc func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "Reconciles chart build", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = "helmchart" + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(higherChartVersion)) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Reconciles chart build with repository credentials", + server: options{ + username: "foo", + password: "bar", + }, + secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: secretKey.Name, - Namespace: secretKey.Namespace, + Name: "auth", }, Data: map[string][]byte{ - "username": []byte(username), - "password": []byte(password), + "username": []byte("foo"), + "password": []byte("bar"), }, + }, + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = 
chartName + obj.Spec.Version = chartVersion + repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Uses artifact as build cache", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + obj.Status.Artifact = &sourcev1.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).To(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) + g.Expect(build.Path).To(BeARegularFile()) + }, + }, + { + name: "Sets Generation as VersionMetadata with values files", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Generation = 3 + obj.Spec.ValuesFiles = []string{"values.yaml", "override.yaml"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(higherChartVersion + "+3")) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Forces build on generation change", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Generation = 3 + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + + obj.Status.ObservedGeneration = 2 + obj.Status.Artifact = &sourcev1.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).ToNot(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Event on unsuccessful secret retrieval", + beforeFunc: func(_ *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "invalid", + } + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Event{Err: errors.New("failed to get secret 'invalid'")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret 'invalid'"), + })) + }, + }, + { + name: "Stalling on invalid client options", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.URL = "file://unsupported" // Unsupported protocol + }, + want: 
sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("scheme \"file\" not supported")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "failed to construct Helm client"), + })) + }, + }, + { + name: "Stalling on invalid repository URL", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.URL = "://unsupported" // Invalid URL + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("missing protocol scheme")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "invalid Helm repository URL"), + })) + }, + }, + { + name: "BuildError on temporary build error", + beforeFunc: func(obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { + obj.Spec.Chart = "invalid" + }, + want: sreconcile.ResultEmpty, + wantErr: &chart.BuildError{Err: errors.New("failed to get chart version for remote reference")}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + server := testserver.NewHTTPServer(serverFactory.Root()) + server.Start() + defer server.Stop() + + if len(tt.server.username+tt.server.password) > 0 { + server.WithMiddleware(func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + if !ok || u != tt.server.username || p != tt.server.password { + w.WriteHeader(401) + return + } + handler.ServeHTTP(w, r) + }) + }) + } + + clientBuilder := fake.NewClientBuilder() + if tt.secret != nil { + clientBuilder.WithObjects(tt.secret.DeepCopy()) } - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) - By("Creating repository and waiting for artifact") - repositoryKey := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, + storage, err := newTestStorage(server) + g.Expect(err).ToNot(HaveOccurred()) + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + Storage: storage, } + repository := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, + GenerateName: "helmrepository-", }, Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - SecretRef: &meta.LocalObjectReference{ - Name: secretKey.Name, + URL: server.URL(), + Timeout: &metav1.Duration{Duration: timeout}, + }, + Status: sourcev1.HelmRepositoryStatus{ + Artifact: &sourcev1.Artifact{ + Path: "index.yaml", }, - Interval: metav1.Duration{Duration: pullInterval}, }, } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) - - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), repositoryKey, repository) - return repository.Status.Artifact != nil - }, timeout, interval).Should(BeTrue()) - - By("Deleting secret before applying HelmChart") - Expect(k8sClient.Delete(context.Background(), secret)).Should(Succeed()) - - By("Applying HelmChart") - key := 
types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - chart := &sourcev1.HelmChart{ + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmChartSpec{ - Chart: "helmchart", - Version: "*", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.HelmRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: pullInterval}, + GenerateName: "helmrepository-", }, + Spec: sourcev1.HelmChartSpec{}, } - Expect(k8sClient.Create(context.Background(), chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) - - By("Expecting missing secret error") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return conditions.GetReason(got, sourcev1.FetchFailedCondition) == sourcev1.AuthenticationFailedReason - }, timeout, interval).Should(BeTrue()) - - By("Applying secret with missing keys") - secret.ResourceVersion = "" - secret.Data["username"] = []byte{} - secret.Data["password"] = []byte{} - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) - - By("Expecting 401") - Eventually(func() bool { - got := &sourcev1.HelmChart{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == "ChartPullError" && - strings.Contains(c.Message, "401 Unauthorized") { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Adding username key") - secret.Data["username"] = []byte(username) - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - - By("Expecting missing field error") - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Adding password key") - secret.Data["password"] = []byte(password) - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - - By("Expecting artifact") - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return apimeta.IsStatusConditionTrue(got.Status.Conditions, meta.ReadyCondition) - }, timeout, interval).Should(BeTrue()) - Expect(got.Status.Artifact).ToNot(BeNil()) - }) - }) - - Context("HelmChart from GitRepository", func() { - var ( - namespace *corev1.Namespace - gitServer *gittestserver.GitServer - err error - ) - - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "test-git-repository-" + randStringRunes(5)}, + + if tt.beforeFunc != nil { + tt.beforeFunc(obj, repository) } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") - gitServer, err = gittestserver.NewTempGitServer() - Expect(err).NotTo(HaveOccurred()) - gitServer.AutoCreate() - Expect(gitServer.StartHTTP()).To(Succeed()) - }) + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + got, err := r.buildFromHelmRepository(context.TODO(), obj, repository, &b) - AfterEach(func() { - gitServer.StopHTTP() - os.RemoveAll(gitServer.Root()) + g.Expect(err != nil).To(Equal(tt.wantErr != nil)) + if tt.wantErr != nil { + g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) + } 
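// An illustrative sketch, not part of this patch: the three assertions above
// are the error-matching convention used throughout these rewritten tests. The
// typed wantErr (serror.Event, serror.Stalling, chart.BuildError) is compared
// on its concrete type plus a message substring; a hypothetical helper
// factoring that out could look like this.
package sketch

import (
	"reflect"

	. "github.com/onsi/gomega"
)

func assertTypedError(g *WithT, got, want error) {
	g.Expect(got != nil).To(Equal(want != nil))
	if want != nil {
		// Same concrete error type (e.g. *serror.Stalling) ...
		g.Expect(reflect.TypeOf(got).String()).To(Equal(reflect.TypeOf(want).String()))
		// ... and the expected message fragment somewhere in the chain.
		g.Expect(got.Error()).To(ContainSubstring(want.Error()))
	}
}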
+ g.Expect(got).To(Equal(tt.want)) - err = k8sClient.Delete(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace") + if tt.assertFunc != nil { + tt.assertFunc(g, obj, b) + } }) - - It("Creates artifacts for", func() { - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := gitrepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - - _, err = gitrepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{u.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - chartDir := "testdata/charts" - Expect(filepath.Walk(chartDir, func(p string, fi os.FileInfo, err error) error { - if err != nil { - return err + } +} + +func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { + g := NewWithT(t) + + tmpDir, err := os.MkdirTemp("", "reconcile-tarball-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + storage, err := NewStorage(tmpDir, "example.com", timeout) + g.Expect(err).ToNot(HaveOccurred()) + + chartsArtifact := &sourcev1.Artifact{ + Revision: "mock-ref/abcdefg12345678", + Path: "mock.tgz", + } + g.Expect(storage.Archive(chartsArtifact, "testdata/charts", nil)).To(Succeed()) + yamlArtifact := &sourcev1.Artifact{ + Revision: "9876abcd", + Path: "values.yaml", + } + g.Expect(storage.CopyFromPath(yamlArtifact, "testdata/charts/helmchart/values.yaml")).To(Succeed()) + cachedArtifact := &sourcev1.Artifact{ + Revision: "0.1.0", + Path: "cached.tgz", + } + g.Expect(storage.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + + tests := []struct { + name string + source sourcev1.Artifact + beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr error + assertFunc func(g *WithT, build chart.Build) + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "Resolves chart dependencies and builds", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchartwithdeps" + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchartwithdeps")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.ResolvedDependencies).To(Equal(3)) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "ReconcileStrategyRevision sets VersionMetadata", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart" + obj.Spec.SourceRef.Kind = sourcev1.GitRepositoryKind + obj.Spec.ReconcileStrategy = sourcev1.ReconcileStrategyRevision + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0+abcdefg12345")) + g.Expect(build.ResolvedDependencies).To(Equal(0)) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "ValuesFiles sets Generation as VersionMetadata", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Generation = 3 + obj.Spec.Chart = 
"testdata/charts/helmchart" + obj.Spec.SourceRef.Kind = sourcev1.GitRepositoryKind + obj.Spec.ValuesFiles = []string{ + filepath.Join(obj.Spec.Chart, "values.yaml"), + filepath.Join(obj.Spec.Chart, "override.yaml"), } + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0+3")) + g.Expect(build.ResolvedDependencies).To(Equal(0)) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Chart from storage cache", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Status.Artifact = cachedArtifact.DeepCopy() + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).To(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + }, + }, + { + name: "Generation change forces rebuild", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Generation = 2 + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Status.Artifact = cachedArtifact.DeepCopy() + obj.Status.ObservedGeneration = 1 + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).ToNot(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Empty source artifact", + source: sourcev1.Artifact{}, + want: sreconcile.ResultEmpty, + wantErr: &serror.Event{Err: errors.New("no such file or directory")}, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + }, + }, + { + name: "Invalid artifact type", + source: *yamlArtifact, + want: sreconcile.ResultEmpty, + wantErr: &serror.Event{Err: errors.New("artifact untar error: requires gzip-compressed body")}, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{ + Client: fake.NewClientBuilder().Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: storage, + Getters: testGetters, + } - switch { - case fi.Mode().IsDir(): - return fs.MkdirAll(p, os.ModeDir) - case !fi.Mode().IsRegular(): - return nil - } + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Name: "artifact", + Namespace: "default", + }, + Spec: sourcev1.HelmChartSpec{}, + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } - b, err := os.ReadFile(p) - if err != nil { - return err - } + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } - ff, err := fs.Create(p) - if err != nil { - return err + got, err := r.buildFromTarballArtifact(context.TODO(), obj, tt.source, &b) + g.Expect(err != nil).To(Equal(tt.wantErr != nil)) + if tt.wantErr != nil { + g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) + } + 
g.Expect(got).To(Equal(tt.want)) + + if tt.assertFunc != nil { + tt.assertFunc(g, b) + } + }) + } +} + +func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + build *chart.Build + beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + afterFunc func(t *WithT, obj *sourcev1.HelmChart) + }{ + { + name: "Incomplete build requeues and does not update status", + build: &chart.Build{}, + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: sreconcile.ResultRequeue, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "Foo", ""), + }, + }, + { + name: "Copying artifact to storage from build makes Ready=True", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"), + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Checksum).To(Equal("bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) + t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPullSucceededReason, "Pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Up-to-date chart build does not persist artifact to storage", + build: &chart.Build{ + Name: "helmchart", + Version: "0.1.0", + Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"), + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: "testdata/charts/helmchart-0.1.0.tgz", } - if _, err := ff.Write(b); err != nil { - return err + }, + want: sreconcile.ResultSuccess, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.Status.Artifact.Path).To(Equal("testdata/charts/helmchart-0.1.0.tgz")) + t.Expect(obj.Status.ObservedChartName).To(BeEmpty()) + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + }, + { + name: "Restores conditions in case artifact matches current chart build", + build: &chart.Build{ + Name: "helmchart", + Version: "0.1.0", + Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"), + Packaged: true, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.ObservedChartName = "helmchart" + obj.Status.Artifact = &sourcev1.Artifact{ + Revision: "0.1.0", + Path: "testdata/charts/helmchart-0.1.0.tgz", } - _ = ff.Close() - _, err = wt.Add(p) - - return err - })).To(Succeed()) - - _, err = wt.Commit("Helm charts", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - err = gitrepo.Push(&git.PushOptions{}) - Expect(err).NotTo(HaveOccurred()) - - repositoryKey := types.NamespacedName{ - Name: fmt.Sprintf("git-repository-sample-%s", randStringRunes(5)), - Namespace: namespace.Name, + }, + want: sreconcile.ResultSuccess, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.Status.Artifact.Path).To(Equal("testdata/charts/helmchart-0.1.0.tgz")) + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + 
assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPackageSucceededReason, "Packaged 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating new artifact", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"), + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Checksum).To(Equal("bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) + t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPullSucceededReason, "Pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"), + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPullSucceededReason, "Pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, } - repository := &sourcev1.GitRepository{ + + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, - }, - Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - Interval: metav1.Duration{Duration: indexInterval}, + GenerateName: "reconcile-artifact-", + Generation: 1, }, + Status: sourcev1.HelmChartStatus{}, + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, + got, err := r.reconcileArtifact(ctx, obj, tt.build) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + if tt.afterFunc != nil { + tt.afterFunc(g, obj) } - chart := &sourcev1.HelmChart{ + }) + } +} + +func TestHelmChartReconciler_getHelmRepositorySecret(t *testing.T) { + mock := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "secret", + Namespace: "foo", + }, + Data: map[string][]byte{ + "key": []byte("bar"), + }, + } + clientBuilder := fake.NewClientBuilder() + clientBuilder.WithObjects(mock) + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + } + + tests := []struct { + name string + repository *sourcev1.HelmRepository + want *corev1.Secret 
+ wantErr bool + }{ + { + name: "Existing secret reference", + repository: &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, + Namespace: mock.Namespace, }, - Spec: sourcev1.HelmChartSpec{ - Chart: "testdata/charts/helmchartwithdeps", - Version: "*", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.GitRepositoryKind, - Name: repositoryKey.Name, + Spec: sourcev1.HelmRepositorySpec{ + SecretRef: &meta.LocalObjectReference{ + Name: mock.Name, }, - Interval: metav1.Duration{Duration: pullInterval}, }, - } - Expect(k8sClient.Create(context.Background(), chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) - - By("Expecting artifact") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - By("Committing a new version in the chart metadata") - f, err := fs.OpenFile(fs.Join(chartDir, "helmchartwithdeps", chartutil.ChartfileName), os.O_RDWR, os.FileMode(0600)) - Expect(err).NotTo(HaveOccurred()) - - b := make([]byte, 2048) - n, err := f.Read(b) - Expect(err).NotTo(HaveOccurred()) - b = b[0:n] - - y := new(helmchart.Metadata) - err = yaml.Unmarshal(b, y) - Expect(err).NotTo(HaveOccurred()) - - y.Version = "0.2.0" - b, err = yaml.Marshal(y) - Expect(err).NotTo(HaveOccurred()) - - _, err = f.Write(b) - Expect(err).NotTo(HaveOccurred()) - - err = f.Close() - Expect(err).NotTo(HaveOccurred()) - - commit, err := wt.Commit("Chart version bump", &git.CommitOptions{ - Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), + }, + want: mock, + }, + { + name: "Empty secret reference", + repository: &sourcev1.HelmRepository{ + Spec: sourcev1.HelmRepositorySpec{ + SecretRef: nil, }, - All: true, - }) - Expect(err).NotTo(HaveOccurred()) - - err = gitrepo.Push(&git.PushOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("Expecting new artifact revision and GC") - now := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, now) - // Test revision change and garbage collection - return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*now.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values).ToNot(BeNil()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeFalse()) - - When("Setting reconcileStrategy to Revision", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ReconcileStrategy = sourcev1.ReconcileStrategyRevision - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Revision != updated.Status.Artifact.Revision && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - Expect(got.Status.Artifact.Revision).To(ContainSubstring(updated.Status.Artifact.Revision)) - Expect(got.Status.Artifact.Revision).To(ContainSubstring(commit.String()[0:12])) - }) - - When("Setting valid valuesFiles attribute", func() { - updated 
:= &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFiles = []string{ - "./testdata/charts/helmchart/values.yaml", - "./testdata/charts/helmchart/override.yaml", - } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting invalid valuesFiles attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFiles = []string{ - "./testdata/charts/helmchart/values.yaml", - "./testdata/charts/helmchart/invalid.yaml", - } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting valid valuesFiles and valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "./testdata/charts/helmchart/values.yaml" - updated.Spec.ValuesFiles = []string{ - "./testdata/charts/helmchart/override.yaml", - } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting valid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "./testdata/charts/helmchart/override.yaml" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = 
k8sClient.Get(context.Background(), key, got) - // Since a lot of chart updates took place above, checking - // artifact checksum isn't the most reliable way to find out - // if the artifact was changed due to the current update. - // Use status condition to be sure. - for _, condn := range got.Status.Conditions { - if strings.Contains(condn.Message, "merged values files [./testdata/charts/helmchart/override.yaml]") && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - _, exists := helmChart.Values["testDefault"] - Expect(exists).To(BeFalse()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting invalid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "./testdata/charts/helmchart/invalid.yaml" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - _, exists := helmChart.Values["testDefault"] - Expect(exists).To(BeFalse()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) + }, + want: nil, + }, + { + name: "Error on client error", + repository: &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "different", + }, + Spec: sourcev1.HelmRepositorySpec{ + SecretRef: &meta.LocalObjectReference{ + Name: mock.Name, + }, + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got, err := r.getHelmRepositorySecret(context.TODO(), tt.repository) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) }) - - It("Creates artifacts with .tgz file", func() { - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := gitrepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - - _, err = gitrepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{u.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - chartDir := "testdata/charts/helmchart" - helmChart, err := loader.LoadDir(chartDir) - Expect(err).NotTo(HaveOccurred()) - - chartPackagePath, err := os.MkdirTemp("", fmt.Sprintf("chartpackage-%s-%s", helmChart.Name(), randStringRunes(5))) - Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(chartPackagePath) - - pkg, err := chartutil.Save(helmChart, chartPackagePath) - Expect(err).NotTo(HaveOccurred()) - - b, err := os.ReadFile(pkg) - 
Expect(err).NotTo(HaveOccurred()) - - tgz := filepath.Base(pkg) - ff, err := fs.Create(tgz) - Expect(err).NotTo(HaveOccurred()) - - _, err = ff.Write(b) - Expect(err).NotTo(HaveOccurred()) - - ff.Close() - _, err = wt.Add(tgz) - Expect(err).NotTo(HaveOccurred()) - - _, err = wt.Commit("Helm chart", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - err = gitrepo.Push(&git.PushOptions{}) - Expect(err).NotTo(HaveOccurred()) - - repositoryKey := types.NamespacedName{ - Name: fmt.Sprintf("git-repository-sample-%s", randStringRunes(5)), - Namespace: namespace.Name, - } - repository := &sourcev1.GitRepository{ + } +} + +func TestHelmChartReconciler_getSource(t *testing.T) { + mocks := []client.Object{ + &sourcev1.HelmRepository{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.HelmRepositoryKind, + APIVersion: "source.toolkit.fluxcd.io/v1beta2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "helmrepository", + Namespace: "foo", + }, + }, + &sourcev1.GitRepository{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.GitRepositoryKind, + APIVersion: "source.toolkit.fluxcd.io/v1beta2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "foo", + }, + }, + &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + APIVersion: "source.toolkit.fluxcd.io/v1beta2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + Namespace: "foo", + }, + }, + } + clientBuilder := fake.NewClientBuilder() + clientBuilder.WithObjects(mocks...) + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + } + + tests := []struct { + name string + obj *sourcev1.HelmChart + want sourcev1.Source + wantErr bool + }{ + { + name: "Get HelmRepository source for reference", + obj: &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, + Namespace: mocks[0].GetNamespace(), }, - Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - Interval: metav1.Duration{Duration: indexInterval}, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: mocks[0].GetName(), + Kind: mocks[0].GetObjectKind().GroupVersionKind().Kind, + }, }, - } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) - - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - chart := &sourcev1.HelmChart{ + }, + want: mocks[0].(sourcev1.Source), + }, + { + name: "Get GitRepository source for reference", + obj: &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, + Namespace: mocks[1].GetNamespace(), }, Spec: sourcev1.HelmChartSpec{ - Chart: tgz, - Version: "*", SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.GitRepositoryKind, - Name: repositoryKey.Name, + Name: mocks[1].GetName(), + Kind: mocks[1].GetObjectKind().GroupVersionKind().Kind, }, - Interval: metav1.Duration{Duration: pullInterval}, }, - } - Expect(k8sClient.Create(context.Background(), chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) - - By("Expecting artifact") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - }) - }) - - 
Context("HelmChart from GitRepository with HelmRepository dependency", func() { - var ( - namespace *corev1.Namespace - gitServer *gittestserver.GitServer - helmServer *helmtestserver.HelmServer - err error - ) - - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "test-git-repository-" + randStringRunes(5)}, - } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") - - gitServer, err = gittestserver.NewTempGitServer() - Expect(err).NotTo(HaveOccurred()) - gitServer.AutoCreate() - Expect(gitServer.StartHTTP()).To(Succeed()) - - helmServer, err = helmtestserver.NewTempHelmServer() - Expect(err).To(Succeed()) - helmServer.Start() - }) - - AfterEach(func() { - gitServer.StopHTTP() - os.RemoveAll(gitServer.Root()) - - helmServer.Stop() - os.RemoveAll(helmServer.Root()) - - err = k8sClient.Delete(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace") - }) - - It("Creates artifacts for", func() { - helmServer.Stop() - var username, password = "john", "doe" - helmServer.WithMiddleware(func(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - u, p, ok := r.BasicAuth() - if !ok || username != u || password != p { - w.WriteHeader(401) - return - } - handler.ServeHTTP(w, r) - }) - }) - helmServer.Start() - - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - secretKey := types.NamespacedName{ - Name: "helmrepository-auth-" + randStringRunes(5), - Namespace: namespace.Name, - } - secret := &corev1.Secret{ + }, + want: mocks[1].(sourcev1.Source), + }, + { + name: "Get Bucket source for reference", + obj: &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ - Name: secretKey.Name, - Namespace: secretKey.Namespace, + Namespace: mocks[2].GetNamespace(), }, - StringData: map[string]string{ - "username": username, - "password": password, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: mocks[2].GetName(), + Kind: mocks[2].GetObjectKind().GroupVersionKind().Kind, + }, }, - } - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) - - By("Creating repository and waiting for artifact") - helmRepositoryKey := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - helmRepository := &sourcev1.HelmRepository{ + }, + want: mocks[2].(sourcev1.Source), + }, + { + name: "Error on client error", + obj: &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ - Name: helmRepositoryKey.Name, - Namespace: helmRepositoryKey.Namespace, + Namespace: mocks[2].GetNamespace(), }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - SecretRef: &meta.LocalObjectReference{ - Name: secretKey.Name, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: mocks[1].GetName(), + Kind: mocks[2].GetObjectKind().GroupVersionKind().Kind, + }, + }, + }, + wantErr: true, + }, + { + name: "Error on unsupported source kind", + obj: &sourcev1.HelmChart{ + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: "unsupported", + Kind: "Unsupported", }, - Interval: metav1.Duration{Duration: pullInterval}, }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got, err := 
r.getSource(context.TODO(), tt.obj) + + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + g.Expect(got).To(BeNil()) + return } - Expect(k8sClient.Create(context.Background(), helmRepository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), helmRepository) - - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), helmRepositoryKey, helmRepository) - return helmRepository.Status.Artifact != nil - }, timeout, interval).Should(BeTrue()) - - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := gitrepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - - _, err = gitrepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{u.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - chartDir := "testdata/charts/helmchartwithdeps" - Expect(filepath.Walk(chartDir, func(p string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - switch { - case fi.Mode().IsDir(): - return fs.MkdirAll(p, os.ModeDir) - case !fi.Mode().IsRegular(): - return nil - } - - b, err := os.ReadFile(p) - if err != nil { - return err - } - ff, err := fs.Create(p) - if err != nil { - return err - } - if _, err := ff.Write(b); err != nil { - return err + g.Expect(got).To(Equal(tt.want)) + g.Expect(err).ToNot(HaveOccurred()) + }) + } +} + +func TestHelmChartReconciler_reconcileDelete(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-delete-", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{ + sourcev1.SourceFinalizer, + }, + }, + Status: sourcev1.HelmChartStatus{}, + } + + artifact := testStorage.NewArtifactFor(sourcev1.HelmChartKind, obj.GetObjectMeta(), "revision", "foo.txt") + obj.Status.Artifact = &artifact + + got, err := r.reconcileDelete(ctx, obj) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(got).To(Equal(sreconcile.ResultEmpty)) + g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse()) + g.Expect(obj.Status.Artifact).To(BeNil()) +} + +func TestHelmChartReconciler_summarizeAndPatch(t *testing.T) { + tests := []struct { + name string + generation int64 + beforeFunc func(obj *sourcev1.HelmChart) + result sreconcile.Result + reconcileErr error + wantErr bool + afterFunc func(t *WithT, obj *sourcev1.HelmChart) + assertConditions []metav1.Condition + }{ + // Success/Fail indicates if a reconciliation succeeded or failed. On + // a successful reconciliation, the object generation is expected to + // match the observed generation in the object status. + // All the cases have some Ready condition set, even if a test case is + // unrelated to the conditions, because it's necessary for a valid + // status. 
+ { + name: "Success, no extra conditions", + generation: 4, + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(4))) + }, + }, + { + name: "Success, Ready=True", + generation: 5, + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "created") + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "created"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(5))) + }, + }, + { + name: "Success, removes reconciling for successful result", + generation: 2, + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkReconciling(obj, "NewRevision", "new index version") + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "stored artifact") + }, + result: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(2))) + }, + }, + { + name: "Success, record reconciliation request", + beforeFunc: func(obj *sourcev1.HelmChart) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", } - _ = ff.Close() - _, err = wt.Add(p) - - return err - })).To(Succeed()) - - By("Configuring the chart dependency") - filePath := fs.Join(chartDir, chartutil.ChartfileName) - f, err := fs.OpenFile(filePath, os.O_RDWR, os.FileMode(0600)) - Expect(err).NotTo(HaveOccurred()) - - b := make([]byte, 2048) - n, err := f.Read(b) - Expect(err).NotTo(HaveOccurred()) - b = b[0:n] - - err = f.Close() - Expect(err).NotTo(HaveOccurred()) - - y := new(helmchart.Metadata) - err = yaml.Unmarshal(b, y) - Expect(err).NotTo(HaveOccurred()) - - y.Dependencies = []*helmchart.Dependency{ - { - Name: "helmchart", - Version: ">=0.1.0", - Repository: helmRepository.Spec.URL, + obj.SetAnnotations(annotations) + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") + }, + generation: 3, + result: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.Status.LastHandledReconcileAt).To(Equal("now")) + t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(3))) + }, + }, + { + name: "Fail, with multiple conditions ArtifactOutdated=True,Reconciling=True", + generation: 7, + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") + conditions.MarkReconciling(obj, "NewRevision", "new index revision") + }, + reconcileErr: fmt.Errorf("failed to create dir"), + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index 
revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.Status.ObservedGeneration).ToNot(Equal(int64(7))) + }, + }, + { + name: "Success, with subreconciler stalled error", + generation: 9, + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.FetchFailedCondition, "failed to construct helm client") + }, + reconcileErr: &serror.Stalling{Err: fmt.Errorf("some error"), Reason: "some reason"}, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.FetchFailedCondition, "failed to construct helm client"), + *conditions.TrueCondition(meta.StalledCondition, "some reason", "some error"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.FetchFailedCondition, "failed to construct helm client"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(9))) + }, + }, + { + name: "Fail, no error but requeue requested", + generation: 3, + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "test-msg") + }, + result: sreconcile.ResultRequeue, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, meta.FailedReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.Status.ObservedGeneration).ToNot(Equal(int64(3))) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fake.NewClientBuilder().WithScheme(testEnv.GetScheme()) + r := &HelmChartReconciler{ + Client: builder.Build(), + } + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: tt.generation, + }, + Spec: sourcev1.HelmChartSpec{ + Interval: metav1.Duration{Duration: 5 * time.Second}, }, } - b, err = yaml.Marshal(y) - Expect(err).NotTo(HaveOccurred()) - - ff, err := fs.Create(filePath) - Expect(err).NotTo(HaveOccurred()) + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } - _, err = ff.Write(b) - Expect(err).NotTo(HaveOccurred()) + ctx := context.TODO() + g.Expect(r.Create(ctx, obj)).To(Succeed()) + patchHelper, err := patch.NewHelper(obj, r.Client) + g.Expect(err).ToNot(HaveOccurred()) - err = ff.Close() - Expect(err).NotTo(HaveOccurred()) + gotErr := r.summarizeAndPatch(ctx, obj, patchHelper, tt.result, tt.reconcileErr) + g.Expect(gotErr != nil).To(Equal(tt.wantErr)) - _, err = wt.Commit("Helm charts", &git.CommitOptions{ - Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }, - All: true, - }) - Expect(err).NotTo(HaveOccurred()) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - err = gitrepo.Push(&git.PushOptions{}) - Expect(err).NotTo(HaveOccurred()) - - repositoryKey := types.NamespacedName{ - Name: fmt.Sprintf("git-repository-sample-%s", randStringRunes(5)), - Namespace: namespace.Name, + if tt.afterFunc != nil { + tt.afterFunc(g, obj) } - repository := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, + + // Check if the object status is valid. 
+ condns := &status.Conditions{NegativePolarity: helmChartReadyDepsNegative} + checker := status.NewChecker(r.Client, testEnv.GetScheme(), condns) + checker.CheckErr(ctx, obj) + }) + } +} + +func TestHelmChartReconciler_reconcileSubRecs(t *testing.T) { + // Helper to build simple helmChartReconcilerFunc with result and error. + buildReconcileFuncs := func(r sreconcile.Result, e error) helmChartReconcilerFunc { + return func(_ context.Context, _ *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + return r, e + } + } + + tests := []struct { + name string + generation int64 + observedGeneration int64 + reconcileFuncs []helmChartReconcilerFunc + wantResult sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "successful reconciliations", + reconcileFuncs: []helmChartReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + }, + { + name: "successful reconciliation with generation difference", + generation: 3, + observedGeneration: 2, + reconcileFuncs: []helmChartReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewGeneration", "reconciling new generation 3"), + }, + }, + { + name: "failed reconciliation", + reconcileFuncs: []helmChartReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + }, + wantResult: sreconcile.ResultEmpty, + wantErr: true, + }, + { + name: "multiple object status conditions mutations", + reconcileFuncs: []helmChartReconcilerFunc{ + func(_ context.Context, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") + return sreconcile.ResultSuccess, nil }, - Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - Interval: metav1.Duration{Duration: indexInterval}, + func(_ context.Context, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + conditions.MarkTrue(obj, meta.ReconcilingCondition, "Progressing", "creating artifact") + return sreconcile.ResultSuccess, nil }, - } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) - - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - chart := &sourcev1.HelmChart{ + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, "Progressing", "creating artifact"), + }, + }, + { + name: "subrecs with one result=Requeue, no error", + reconcileFuncs: []helmChartReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultRequeue, + wantErr: false, + }, + { + name: "subrecs with error before result=Requeue", + reconcileFuncs: []helmChartReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + }, + wantResult: sreconcile.ResultEmpty, + wantErr: 
true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{} + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, + GenerateName: "test-", + Generation: tt.generation, }, - Spec: sourcev1.HelmChartSpec{ - Chart: "testdata/charts/helmchartwithdeps", - Version: "*", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.GitRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: pullInterval}, + Status: sourcev1.HelmChartStatus{ + ObservedGeneration: tt.observedGeneration, }, } - Expect(k8sClient.Create(context.Background(), chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) - - By("Expecting artifact") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeFalse()) - - When("Setting valid valuesFiles attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFiles = []string{ - "./testdata/charts/helmchartwithdeps/values.yaml", - "./testdata/charts/helmchartwithdeps/override.yaml", - } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting invalid valuesFiles attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFiles = []string{ - "./testdata/charts/helmchartwithdeps/values.yaml", - "./testdata/charts/helmchartwithdeps/invalid.yaml", - } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting valid valuesFiles and valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - 
Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "./testdata/charts/helmchartwithdeps/values.yaml" - updated.Spec.ValuesFiles = []string{ - "./testdata/charts/helmchartwithdeps/override.yaml", - } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting valid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "./testdata/charts/helmchartwithdeps/override.yaml" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - _, exists := helmChart.Values["testDefault"] - Expect(exists).To(BeFalse()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting invalid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "./testdata/charts/helmchartwithdeps/invalid.yaml" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - _, exists := helmChart.Values["testDefault"] - Expect(exists).To(BeFalse()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) + + got, err := r.reconcile(context.TODO(), obj, tt.reconcileFuncs) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.wantResult)) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) }) - }) -}) + } +} + +func mockChartBuild(name, version, path string) *chart.Build { + var copyP string + if path != "" { + f, err := os.Open(path) + if err == nil { + defer f.Close() + ff, err := os.CreateTemp("", 
"chart-mock-*.tgz") + if err == nil { + defer ff.Close() + if _, err = io.Copy(ff, f); err == nil { + copyP = ff.Name() + } + } + } + } + return &chart.Build{ + Name: name, + Version: version, + Path: copyP, + } +} diff --git a/internal/helm/chart/builder_local.go b/internal/helm/chart/builder_local.go index 923008dcb..da9cc9cba 100644 --- a/internal/helm/chart/builder_local.go +++ b/internal/helm/chart/builder_local.go @@ -190,7 +190,7 @@ func mergeFileValues(baseDir string, paths []string) (map[string]interface{}, er if err != nil { return nil, err } - if f, err := os.Stat(secureP); os.IsNotExist(err) || !f.Mode().IsRegular() { + if f, err := os.Stat(secureP); err != nil || !f.Mode().IsRegular() { return nil, fmt.Errorf("no values file found at path '%s' (reference '%s')", strings.TrimPrefix(secureP, baseDir), p) } From 032ffb4d2739134c33274b1de9034e0904592c48 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Wed, 26 Jan 2022 20:24:01 +0100 Subject: [PATCH 44/63] controllers: tweak events and logging - Ensure all logged messages start with a lowercase. - Make some pushed (and logged) events of type `EventTypeTrace` to prevent them from being sinked to the external event recorder, to prevent spam. - Only log if artifact is up-to-date with upstream (instead of pushing an event). Signed-off-by: Hidde Beydals --- controllers/bucket_controller.go | 22 ++++++++++----------- controllers/bucket_controller_test.go | 2 +- controllers/gitrepository_controller.go | 25 ++++++++++++------------ controllers/helmchart_controller.go | 8 ++++---- controllers/helmchart_controller_test.go | 10 +++++----- controllers/helmrepository_controller.go | 16 ++++++++++----- internal/helm/chart/builder.go | 8 ++++---- internal/helm/chart/builder_test.go | 12 ++++++------ 8 files changed, 55 insertions(+), 48 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index ebd2a6daf..c71056648 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -28,6 +28,7 @@ import ( "time" gcpstorage "cloud.google.com/go/storage" + "github.com/fluxcd/pkg/runtime/events" "github.com/fluxcd/source-controller/pkg/gcp" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" @@ -134,7 +135,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res // Return early if the object is suspended if obj.Spec.Suspend { - log.Info("Reconciliation is suspended for this object") + log.Info("reconciliation is suspended for this object") return ctrl.Result{}, nil } @@ -221,7 +222,7 @@ func (r *BucketReconciler) summarizeAndPatch(ctx context.Context, obj *sourcev1. // error. 
func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, reconcilers []bucketReconcilerFunc) (sreconcile.Result, error) { if obj.Generation != obj.Status.ObservedGeneration { - conditions.MarkReconciling(obj, "NewGeneration", "Reconciling new generation %d", obj.Generation) + conditions.MarkReconciling(obj, "NewGeneration", "reconciling new generation %d", obj.Generation) } var artifact sourcev1.Artifact @@ -275,7 +276,7 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B // Record that we do not have an artifact if obj.GetArtifact() == nil { - conditions.MarkReconciling(obj, "NoArtifact", "No artifact for resource in storage") + conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage") return sreconcile.ResultSuccess, nil } @@ -463,8 +464,8 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) return sreconcile.ResultEmpty, e } - r.eventLogf(ctx, obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason, - "downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) + r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.BucketOperationSucceedReason, + "downloaded %d files with revision '%s' from '%s'", len(index), revision, obj.Spec.BucketName) } conditions.Delete(obj, sourcev1.FetchFailedCondition) @@ -618,8 +619,8 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) return sreconcile.ResultEmpty, e } - r.eventLogf(ctx, obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason, - "downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) + r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.BucketOperationSucceedReason, + "downloaded %d files from bucket '%s'", len(index), obj.Spec.BucketName) } conditions.Delete(obj, sourcev1.FetchFailedCondition) @@ -647,7 +648,7 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1. // The artifact is up-to-date if obj.GetArtifact().HasRevision(artifact.Revision) { - r.eventLogf(ctx, obj, corev1.EventTypeNormal, meta.SucceededReason, "already up to date, current revision '%s'", artifact.Revision) + ctrl.LoggerFrom(ctx).Info("artifact up-to-date", "revision", artifact.Revision) return sreconcile.ResultSuccess, nil } @@ -713,7 +714,6 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1. // reconcileDelete handles the deletion of an object. It first garbage collects all artifacts for the object from the // artifact storage, if successful, the finalizer is removed from the object. 
-// func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (sreconcile.Result, error) { // Garbage collect the resource's artifacts if err := r.garbageCollect(ctx, obj); err != nil { @@ -741,7 +741,7 @@ func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Buc } obj.Status.Artifact = nil // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", "garbage collected artifacts for deleted resource") return nil } @@ -753,7 +753,7 @@ func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Buc } } // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", "garbage collected old artifacts") + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", "garbage collected old artifacts") } return nil } diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index c1c9c59c7..afedb63b1 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -201,7 +201,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { "!/reconcile-storage/invalid.txt", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "No artifact for resource in storage"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"), }, }, { diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 204823b4d..3dc97c790 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -25,6 +25,7 @@ import ( "time" securejoin "github.com/cyphar/filepath-securejoin" + "github.com/fluxcd/pkg/runtime/logger" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -41,9 +42,9 @@ import ( "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/runtime/conditions" helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/events" "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" - "github.com/fluxcd/source-controller/pkg/sourceignore" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" serror "github.com/fluxcd/source-controller/internal/error" @@ -51,6 +52,7 @@ import ( "github.com/fluxcd/source-controller/internal/util" "github.com/fluxcd/source-controller/pkg/git" "github.com/fluxcd/source-controller/pkg/git/strategy" + "github.com/fluxcd/source-controller/pkg/sourceignore" ) // Status conditions owned by the GitRepository reconciler. 
@@ -139,7 +141,7 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Return early if the object is suspended if obj.Spec.Suspend { - log.Info("Reconciliation is suspended for this object") + log.Info("reconciliation is suspended for this object") return ctrl.Result{}, nil } @@ -308,7 +310,7 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sou // If both the checkout and signature verification are successful, the given artifact pointer is set to a new artifact // with the available metadata. func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, - obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, includes *artifactSet, dir string) (sreconcile.Result, error) { + obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, _ *artifactSet, dir string) (sreconcile.Result, error) { // Configure authentication strategy to access the source var authOpts *git.AuthOptions var err error @@ -378,8 +380,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, // Coin flip on transient or persistent error, return error and hope for the best return sreconcile.ResultEmpty, e } - r.eventLogf(ctx, obj, corev1.EventTypeNormal, sourcev1.GitOperationSucceedReason, - "cloned repository '%s' and checked out revision '%s'", obj.Spec.URL, commit.String()) + ctrl.LoggerFrom(ctx).V(logger.DebugLevel).Info("git repository checked out", "url", obj.Spec.URL, "revision", commit.String()) conditions.Delete(obj, sourcev1.FetchFailedCondition) // Verify commit signature @@ -420,7 +421,7 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *so // The artifact is up-to-date if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) { - r.eventLogf(ctx, obj, corev1.EventTypeNormal, meta.SucceededReason, "already up to date, current revision '%s'", artifact.Revision) + ctrl.LoggerFrom(ctx).Info("artifact up-to-date", "revision", artifact.Revision) return sreconcile.ResultSuccess, nil } @@ -492,7 +493,7 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *so url, err := r.Storage.Symlink(*artifact, "latest.tar.gz") if err != nil { r.eventLogf(ctx, obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, - "Failed to update status URL symlink: %s", err) + "failed to update status URL symlink: %s", err) } if url != "" { obj.Status.URL = url @@ -506,7 +507,7 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *so // If an include is unavailable, it marks the object with v1beta1.IncludeUnavailableCondition and returns early. // If the copy operations are successful, it deletes the v1beta1.IncludeUnavailableCondition from the object. // If the artifactSet differs from the current set, it marks the object with v1beta1.ArtifactOutdatedCondition. 
-func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, includes *artifactSet, dir string) (sreconcile.Result, error) { +func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sourcev1.GitRepository, _ *sourcev1.Artifact, includes *artifactSet, dir string) (sreconcile.Result, error) { artifacts := make(artifactSet, len(obj.Spec.Include)) for i, incl := range obj.Spec.Include { // Do this first as it is much cheaper than copy operations @@ -544,7 +545,7 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sou // Copy artifact (sub)contents to configured directory if err := r.Storage.CopyToPath(dep.GetArtifact(), incl.GetFromPath(), toPath); err != nil { e := &serror.Event{ - Err: fmt.Errorf("Failed to copy '%s' include from %s to %s: %w", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err), + Err: fmt.Errorf("failed to copy '%s' include from %s to %s: %w", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err), Reason: "CopyFailure", } conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "CopyFailure", e.Err.Error()) @@ -623,7 +624,7 @@ func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of commit '%s'", commit.Hash.String()) - r.eventLogf(ctx, obj, corev1.EventTypeNormal, "VerifiedCommit", + r.eventLogf(ctx, obj, events.EventTypeTrace, "VerifiedCommit", "verified signature of commit '%s'", commit.Hash.String()) return sreconcile.ResultSuccess, nil } @@ -641,7 +642,7 @@ func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourc } obj.Status.Artifact = nil // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", "garbage collected artifacts for deleted resource") return nil } @@ -652,7 +653,7 @@ func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourc } } // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", "garbage collected old artifacts") } return nil diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index 93ffad3ca..db6ec5c19 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -358,7 +358,7 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, obj *sourcev1 // a sudden (partial) disappearance of observed state. // TODO(hidde): include specific name/version information? 
if depNum := build.ResolvedDependencies; build.Complete() && depNum > 0 { - r.Eventf(obj, corev1.EventTypeNormal, "ResolvedDependencies", "Resolved %d chart dependencies", depNum) + r.Eventf(obj, events.EventTypeTrace, "ResolvedDependencies", "resolved %d chart dependencies", depNum) } // Handle any build error @@ -638,7 +638,7 @@ func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, obj *source // Return early if the build path equals the current artifact path if curArtifact := obj.GetArtifact(); curArtifact != nil && r.Storage.LocalPath(*curArtifact) == b.Path { - r.eventLogf(ctx, obj, corev1.EventTypeNormal, meta.SucceededReason, "already up to date, current revision '%s'", curArtifact.Revision) + ctrl.LoggerFrom(ctx).Info("artifact up-to-date", "revision", artifact.Revision) return sreconcile.ResultSuccess, nil } @@ -754,7 +754,7 @@ func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *sourcev1. } obj.Status.Artifact = nil // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", "garbage collected artifacts for deleted resource") return nil } @@ -766,7 +766,7 @@ func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *sourcev1. } } // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", "garbage collected old artifacts") + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", "garbage collected old artifacts") } return nil } diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index a97d43eee..5e74173a6 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -232,7 +232,7 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal(gitArtifact.Revision)) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewChart", "Pulled 'helmchart' chart with version '0.1.0'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewChart", "pulled 'helmchart' chart with version '0.1.0'"), })) }, cleanFunc: func(g *WithT, build *chart.Build) { @@ -880,7 +880,7 @@ func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPullSucceededReason, "Pulled 'helmchart' chart with version '0.1.0'"), + *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), }, }, { @@ -923,7 +923,7 @@ func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { t.Expect(obj.Status.URL).To(BeEmpty()) }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPackageSucceededReason, "Packaged 'helmchart' chart with version '0.1.0'"), + *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPackageSucceededReason, "packaged 'helmchart' chart with version '0.1.0'"), }, }, { @@ -941,7 +941,7 @@ func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - 
*conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPullSucceededReason, "Pulled 'helmchart' chart with version '0.1.0'"), + *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), }, }, { @@ -958,7 +958,7 @@ func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPullSucceededReason, "Pulled 'helmchart' chart with version '0.1.0'"), + *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), }, }, } diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index b870797f6..e5d655c9c 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -40,6 +40,7 @@ import ( "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/runtime/conditions" helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/events" "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" @@ -127,7 +128,7 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Return early if the object is suspended if obj.Spec.Suspend { - log.Info("Reconciliation is suspended for this object") + log.Info("reconciliation is suspended for this object") return ctrl.Result{}, nil } @@ -252,7 +253,7 @@ func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1. // // All artifacts for the resource except for the current one are garbage collected from the storage. // If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. -// If the hostname of any of the URLs on the object do not match the current storage server hostname, they are updated. +// If the hostname of the URLs on the object do not match the current storage server hostname, they are updated. func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -413,7 +414,7 @@ func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *s }() if obj.GetArtifact().HasRevision(artifact.Revision) { - r.eventLogf(ctx, obj, corev1.EventTypeNormal, meta.SucceededReason, "already up to date, current revision '%s'", artifact.Revision) + ctrl.LoggerFrom(ctx).Info("artifact up-to-date", "revision", artifact.Revision) return sreconcile.ResultSuccess, nil } @@ -450,6 +451,11 @@ func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *s } } + r.AnnotatedEventf(obj, map[string]string{ + "revision": artifact.Revision, + "checksum": artifact.Checksum, + }, corev1.EventTypeNormal, "NewArtifact", "stored artifact for revision '%s'", artifact.Revision) + // Record it on the object. 
obj.Status.Artifact = artifact.DeepCopy() @@ -495,7 +501,7 @@ func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sour } obj.Status.Artifact = nil // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", "garbage collected artifacts for deleted resource") return nil } @@ -507,7 +513,7 @@ func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sour } } // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", "garbage collected old artifacts") } return nil diff --git a/internal/helm/chart/builder.go b/internal/helm/chart/builder.go index c44720c12..e3ce2207d 100644 --- a/internal/helm/chart/builder.go +++ b/internal/helm/chart/builder.go @@ -140,16 +140,16 @@ type Build struct { // Summary returns a human-readable summary of the Build. func (b *Build) Summary() string { if !b.HasMetadata() { - return "No chart build" + return "no chart build" } var s strings.Builder - var action = "New" + var action = "new" if b.Path != "" { - action = "Pulled" + action = "pulled" if b.Packaged { - action = "Packaged" + action = "packaged" } } s.WriteString(fmt.Sprintf("%s '%s' chart with version '%s'", action, b.Name, b.Version)) diff --git a/internal/helm/chart/builder_test.go b/internal/helm/chart/builder_test.go index 23c3952b0..4d0812298 100644 --- a/internal/helm/chart/builder_test.go +++ b/internal/helm/chart/builder_test.go @@ -143,7 +143,7 @@ func TestChartBuildResult_Summary(t *testing.T) { Name: "chart", Version: "1.2.3-rc.1+bd6bf40", }, - want: "New 'chart' chart with version '1.2.3-rc.1+bd6bf40'", + want: "new 'chart' chart with version '1.2.3-rc.1+bd6bf40'", }, { name: "Pulled chart", @@ -152,7 +152,7 @@ func TestChartBuildResult_Summary(t *testing.T) { Version: "1.2.3-rc.1+bd6bf40", Path: "chart.tgz", }, - want: "Pulled 'chart' chart with version '1.2.3-rc.1+bd6bf40'", + want: "pulled 'chart' chart with version '1.2.3-rc.1+bd6bf40'", }, { name: "Packaged chart", @@ -163,7 +163,7 @@ func TestChartBuildResult_Summary(t *testing.T) { ValuesFiles: []string{"a.yaml", "b.yaml"}, Path: "chart.tgz", }, - want: "Packaged 'chart' chart with version 'arbitrary-version' and merged values files [a.yaml b.yaml]", + want: "packaged 'chart' chart with version 'arbitrary-version' and merged values files [a.yaml b.yaml]", }, { name: "With values files", @@ -174,17 +174,17 @@ func TestChartBuildResult_Summary(t *testing.T) { ValuesFiles: []string{"a.yaml", "b.yaml"}, Path: "chart.tgz", }, - want: "Packaged 'chart' chart with version 'arbitrary-version' and merged values files [a.yaml b.yaml]", + want: "packaged 'chart' chart with version 'arbitrary-version' and merged values files [a.yaml b.yaml]", }, { name: "Empty build", build: &Build{}, - want: "No chart build", + want: "no chart build", }, { name: "Nil build", build: nil, - want: "No chart build", + want: "no chart build", }, } for _, tt := range tests { From ceb61a318cddf2af8a664bb73d98adec75607f45 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Wed, 26 Jan 2022 20:27:49 +0100 Subject: [PATCH 45/63] controllers: remove legacy test suite As it no longer contains any test cases. 
Signed-off-by: Hidde Beydals --- api/go.mod | 21 -- api/go.sum | 30 --- controllers/helmrepository_controller_test.go | 10 - controllers/legacy_suite_test.go | 198 ------------------ controllers/suite_test.go | 20 ++ go.mod | 3 - 6 files changed, 20 insertions(+), 262 deletions(-) delete mode 100644 controllers/legacy_suite_test.go diff --git a/api/go.mod b/api/go.mod index 2af43091b..b974e967a 100644 --- a/api/go.mod +++ b/api/go.mod @@ -5,45 +5,24 @@ go 1.17 require ( github.com/fluxcd/pkg/apis/acl v0.0.3 github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 - // TODO(hidde): introduction of the runtime package is temporary, and the dependency should be removed as soon as - // all APIs have been updated to the runtime standards (more specifically; have dropped their condition modifying - // functions). - github.com/fluxcd/pkg/runtime v0.13.0-rc.6 k8s.io/apimachinery v0.23.1 sigs.k8s.io/controller-runtime v0.11.0 ) require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-logr/logr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.6 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/googleapis/gnostic v0.5.5 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/onsi/gomega v1.17.0 // indirect - github.com/pkg/errors v0.9.1 // indirect golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9 // indirect - golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect - golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect - golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.27.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - k8s.io/api v0.23.0 // indirect - k8s.io/client-go v0.23.0 // indirect k8s.io/klog/v2 v2.30.0 // indirect - k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect k8s.io/utils v0.0.0-20211208161948-7d6a63dca704 // indirect sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/api/go.sum b/api/go.sum index 927fd8a67..8142a1881 100644 --- a/api/go.sum +++ b/api/go.sum @@ -68,7 +68,6 @@ github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiU github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= @@ -77,9 +76,7 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -121,7 +118,6 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -129,8 +125,6 @@ github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6 github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU= github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 h1:YY6RlaHIMXawgEOJhJbSrm4NpD9fJTCWFGKgtNfQ0/g= github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3/go.mod h1:ki5wJE4nuFOZt78q0RSYkrKwINgIBPynuswZhnTOSoI= -github.com/fluxcd/pkg/runtime v0.13.0-rc.6 h1:MsxiKYGsuRzEvyreQG5ocNaIZDwKhqvQ711/w4rTkCo= -github.com/fluxcd/pkg/runtime v0.13.0-rc.6/go.mod h1:4oKUO19TeudXrnCRnxCfMSS7EQTYpYlgfXwlQuDJ/Eg= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -154,7 +148,6 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -201,7 +194,6 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -244,7 +236,6 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= @@ -259,11 +250,9 @@ github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBt github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -281,7 +270,6 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -321,7 +309,6 @@ github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/matttproud/golang_protobuf_extensions 
v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -372,7 +359,6 @@ github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -383,25 +369,21 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= 
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -484,18 +466,15 @@ go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4 go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -605,7 +584,6 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -685,7 +663,6 @@ golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 h1:M69LAlWZCshgp0QSzyDcSsSIejIEeuaCVpmwcKwyLMk= golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -701,7 +678,6 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -767,7 +743,6 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -797,7 +772,6 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -878,7 +852,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -922,13 +895,11 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= k8s.io/api v0.23.0/go.mod 
h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= -k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY= k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4= k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= k8s.io/apimachinery v0.23.1 h1:sfBjlDFwj2onG0Ijx5C+SrAoeUscPrmghm7wHP+uXlo= k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno= k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= -k8s.io/client-go v0.23.0 h1:vcsOqyPq7XV3QmQRCBH/t9BICJM9Q1M18qahjv+rebY= k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= @@ -937,7 +908,6 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index f397a3d3c..68790bd3a 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -29,7 +29,6 @@ import ( "github.com/darkowlzz/controller-check/status" "github.com/go-logr/logr" . "github.com/onsi/gomega" - "helm.sh/helm/v3/pkg/getter" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -49,15 +48,6 @@ import ( sreconcile "github.com/fluxcd/source-controller/internal/reconcile" ) -var ( - testGetters = getter.Providers{ - getter.Provider{ - Schemes: []string{"http", "https"}, - New: getter.NewHTTPGetter, - }, - } -) - func TestHelmRepositoryReconciler_Reconcile(t *testing.T) { g := NewWithT(t) diff --git a/controllers/legacy_suite_test.go b/controllers/legacy_suite_test.go deleted file mode 100644 index 46237896b..000000000 --- a/controllers/legacy_suite_test.go +++ /dev/null @@ -1,198 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "math/rand" - "net/http" - "os" - "path/filepath" - "testing" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "helm.sh/helm/v3/pkg/getter" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/record" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - // +kubebuilder:scaffold:imports -) - -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. - -var cfg *rest.Config -var k8sClient client.Client -var k8sManager ctrl.Manager -var ginkgoTestEnv *envtest.Environment -var ginkgoTestStorage *Storage - -var examplePublicKey []byte -var examplePrivateKey []byte -var exampleCA []byte -var lctx context.Context -var cancel context.CancelFunc - -const ginkgoTimeout = time.Second * 30 - -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) -} - -var _ = BeforeSuite(func() { - done := make(chan interface{}) - go func() { - close(done) - }() - - logf.SetLogger( - zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)), - ) - lctx, cancel = context.WithCancel(ctx) - - By("bootstrapping test environment") - t := true - if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { - ginkgoTestEnv = &envtest.Environment{ - UseExistingCluster: &t, - } - } else { - ginkgoTestEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, - } - } - - var err error - cfg, err = ginkgoTestEnv.Start() - Expect(err).ToNot(HaveOccurred()) - Expect(cfg).ToNot(BeNil()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:scheme - - Expect(loadExampleKeys()).To(Succeed()) - - tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-") - Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage dir") - - ginkgoTestStorage, err = NewStorage(tmpStoragePath, "localhost:5050", time.Second*30) - Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage") - // serve artifacts from the filesystem, as done in main.go - fs := http.FileServer(http.Dir(tmpStoragePath)) - http.Handle("/", fs) - go http.ListenAndServe(":5050", nil) - - k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ - MetricsBindAddress: "0", - Scheme: scheme.Scheme, - }) - Expect(err).ToNot(HaveOccurred()) - - err = (&GitRepositoryReconciler{ - Client: k8sManager.GetClient(), - EventRecorder: record.NewFakeRecorder(32), - Storage: ginkgoTestStorage, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup GitRepositoryReconciler") - - err = (&HelmRepositoryReconciler{ - Client: k8sManager.GetClient(), - EventRecorder: record.NewFakeRecorder(32), - Storage: ginkgoTestStorage, - Getters: getter.Providers{getter.Provider{ - Schemes: []string{"http", "https"}, - New: getter.NewHTTPGetter, - }}, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler") - - err = (&HelmChartReconciler{ - Client: k8sManager.GetClient(), - Storage: ginkgoTestStorage, - EventRecorder: record.NewFakeRecorder(32), - Getters: getter.Providers{getter.Provider{ - Schemes: []string{"http", "https"}, - New: getter.NewHTTPGetter, - }}, - 
}).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup HelmChartReconciler") - - go func() { - err = k8sManager.Start(lctx) - Expect(err).ToNot(HaveOccurred()) - }() - - k8sClient = k8sManager.GetClient() - Expect(k8sClient).ToNot(BeNil()) - - Eventually(done, ginkgoTimeout).Should(BeClosed()) -}, 60) - -var _ = AfterSuite(func() { - By("tearing down the test environment") - defer GinkgoRecover() - cancel() - if ginkgoTestStorage != nil { - err := os.RemoveAll(ginkgoTestStorage.BasePath) - Expect(err).NotTo(HaveOccurred()) - } - err := ginkgoTestEnv.Stop() - Expect(err).ToNot(HaveOccurred()) -}) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -func loadExampleKeys() (err error) { - examplePublicKey, err = os.ReadFile("testdata/certs/server.pem") - if err != nil { - return err - } - examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") - if err != nil { - return err - } - exampleCA, err = os.ReadFile("testdata/certs/ca.pem") - return err -} - -var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") - -func randStringRunes(n int) string { - b := make([]rune, n) - for i := range b { - b[i] = letterRunes[rand.Intn(len(letterRunes))] - } - return string(b) -} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 0be4ce587..13145a194 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -24,6 +24,7 @@ import ( "testing" "time" + "helm.sh/helm/v3/pkg/getter" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" @@ -56,6 +57,15 @@ var ( ctx = ctrl.SetupSignalHandler() ) +var ( + testGetters = getter.Providers{ + getter.Provider{ + Schemes: []string{"http", "https"}, + New: getter.NewHTTPGetter, + }, + } +) + var ( tlsPublicKey []byte tlsPrivateKey []byte @@ -163,3 +173,13 @@ func newTestStorage(s *testserver.HTTPServer) (*Storage, error) { } return storage, nil } + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") + +func randStringRunes(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} diff --git a/go.mod b/go.mod index 6664ab816..435adff27 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,6 @@ require ( github.com/go-logr/logr v1.2.2 github.com/libgit2/git2go/v33 v33.0.6 github.com/minio/minio-go/v7 v7.0.15 - github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.17.0 github.com/otiai10/copy v1.7.0 github.com/spf13/pflag v1.0.5 @@ -139,7 +138,6 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/morikuni/aec v1.0.0 // indirect - github.com/nxadm/tail v1.4.8 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/runc v1.0.2 // indirect @@ -192,7 +190,6 @@ require ( gopkg.in/gorp.v1 v1.7.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.62.0 // indirect - gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect From 474658a0763218047b29193b8e1170bf864dbf01 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Wed, 26 Jan 2022 20:43:11 +0100 Subject: [PATCH 46/63] api: remove obsolete constants Remove the constants which are no longer in use from the API. 
Signed-off-by: Hidde Beydals --- api/v1beta2/condition_types.go | 12 ++++-------- api/v1beta2/gitrepository_types.go | 4 ---- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/api/v1beta2/condition_types.go b/api/v1beta2/condition_types.go index 787703b3a..1e6ff992d 100644 --- a/api/v1beta2/condition_types.go +++ b/api/v1beta2/condition_types.go @@ -19,14 +19,14 @@ package v1beta2 const SourceFinalizer = "finalizers.fluxcd.io" const ( - // ArtifactUnavailableCondition indicates there is no Artifact available for the Source. - // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. - ArtifactUnavailableCondition string = "ArtifactUnavailable" - // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. ArtifactOutdatedCondition string = "ArtifactOutdated" + // SourceVerifiedCondition indicates the integrity of the Source has been verified. If True, the integrity check + // succeeded. If False, it failed. The Condition is only present on the resource if the integrity has been verified. + SourceVerifiedCondition string = "SourceVerified" + // FetchFailedCondition indicates a transient or persistent fetch failure of an upstream Source. // If True, observations on the upstream Source revision may be impossible, and the Artifact available for the // Source may be outdated. @@ -48,8 +48,4 @@ const ( // AuthenticationFailedReason represents the fact that a given secret does not // have the required fields or the provided credentials do not match. AuthenticationFailedReason string = "AuthenticationFailed" - - // VerificationFailedReason represents the fact that the cryptographic - // provenance verification for the source failed. - VerificationFailedReason string = "VerificationFailed" ) diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go index 76c048659..4b811bfee 100644 --- a/api/v1beta2/gitrepository_types.go +++ b/api/v1beta2/gitrepository_types.go @@ -36,10 +36,6 @@ const ( ) const ( - // SourceVerifiedCondition indicates the integrity of the Source has been verified. If True, the integrity check - // succeeded. If False, it failed. The Condition is only present on the resource if the integrity has been verified. - SourceVerifiedCondition string = "SourceVerified" - // IncludeUnavailableCondition indicates one of the includes is not available. For example, because it does not // exist, or does not have an Artifact. // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. From 78882b3b36dd4677516738218705dc1843f9b733 Mon Sep 17 00:00:00 2001 From: Sunny Date: Thu, 27 Jan 2022 14:20:30 +0530 Subject: [PATCH 47/63] Consolidate result conversion and computation Consolidate BuildRuntimeResult() into summarizeAndPatch() to simplify where the results are computed, summarized and patched. Move the event recording and logging of context specific errors into RecordContextualError() and call it in summarizeAndPatch(). Introduce Waiting error for wait and requeue scenarios. Update ComputeReconcileResult() and RecordContextualError() to consider Waiting error. 
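For illustration, the wait-and-requeue idea described above can be reduced to a small standalone Go sketch. This is not the actual internal/error or internal/reconcile code: the Waiting field names and the computeResult helper are assumptions made for the example, loosely modelled on the serror.Event shape and the ComputeReconcileResult call sites visible in the diffs below.

package main

import (
	"errors"
	"fmt"
	"time"
)

// Waiting signals a wait-and-requeue scenario rather than a failure.
// The field names are assumptions for illustration only; the real type
// lives in internal/error.
type Waiting struct {
	Err    error
	Reason string
}

func (w *Waiting) Error() string {
	if w.Err != nil {
		return w.Err.Error()
	}
	return w.Reason
}

// computeResult is a stand-in for the idea behind ComputeReconcileResult:
// translate the reconcile error into a runtime requeue decision.
func computeResult(requeueAfter time.Duration, recErr error) (time.Duration, error) {
	var waitErr *Waiting
	if errors.As(recErr, &waitErr) {
		// Wait-and-requeue: swallow the error and requeue after the
		// object's configured interval instead of triggering backoff.
		return requeueAfter, nil
	}
	if recErr != nil {
		// Genuine failures are returned so controller-runtime applies backoff.
		return 0, recErr
	}
	// Success: requeue at the regular interval.
	return requeueAfter, nil
}

func main() {
	after, err := computeResult(5*time.Minute, &Waiting{Reason: "ArtifactNotReady"})
	fmt.Println(after, err) // prints: 5m0s <nil>
}

The net effect is that a sub-reconciler can report "not ready yet" via a Waiting error and the object is requeued at its regular interval, while genuine errors still propagate and trigger controller-runtime's backoff.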
Signed-off-by: Sunny --- controllers/bucket_controller.go | 36 ++++++---- controllers/gitrepository_controller.go | 36 ++++++---- controllers/helmchart_controller.go | 39 ++++++----- controllers/helmchart_controller_test.go | 5 +- controllers/helmrepository_controller.go | 39 ++++++----- controllers/helmrepository_controller_test.go | 5 +- internal/error/error.go | 23 ++++++ internal/reconcile/reconcile.go | 70 ++++++++++++------- 8 files changed, 160 insertions(+), 93 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index c71056648..542709406 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -139,20 +139,19 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res return ctrl.Result{}, nil } - // Initialize the patch helper + // Initialize the patch helper with the current version of the object. patchHelper, err := patch.NewHelper(obj, r.Client) if err != nil { return ctrl.Result{}, err } + // recResult stores the abstracted reconcile result. var recResult sreconcile.Result // Always attempt to patch the object and status after each reconciliation - // NOTE: This deferred block only modifies the named return error. The - // result from the reconciliation remains the same. Any requeue attributes - // set in the result will continue to be effective. + // NOTE: The final runtime result and error are set in this block. defer func() { - retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) + result, retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) // Always record readiness and duration metrics r.Metrics.RecordReadiness(ctx, obj) @@ -163,13 +162,13 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) recResult = sreconcile.ResultRequeue - return ctrl.Result{Requeue: true}, nil + return } // Examine if the object is under deletion if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - res, err := r.reconcileDelete(ctx, obj) - return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, res, err) + recResult, retErr = r.reconcileDelete(ctx, obj) + return } // Reconcile actual object @@ -178,13 +177,21 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res r.reconcileSource, r.reconcileArtifact, } - recResult, err = r.reconcile(ctx, obj, reconcilers) - return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, recResult, err) + recResult, retErr = r.reconcile(ctx, obj, reconcilers) + return } // summarizeAndPatch analyzes the object conditions to create a summary of the -// status conditions and patches the object with the calculated summary. -func (r *BucketReconciler) summarizeAndPatch(ctx context.Context, obj *sourcev1.Bucket, patchHelper *patch.Helper, res sreconcile.Result, recErr error) error { +// status conditions, computes runtime results and patches the object in the K8s +// API server. +func (r *BucketReconciler) summarizeAndPatch( + ctx context.Context, + obj *sourcev1.Bucket, + patchHelper *patch.Helper, + res sreconcile.Result, + recErr error) (ctrl.Result, error) { + sreconcile.RecordContextualError(ctx, r.EventRecorder, obj, recErr) + // Record the value of the reconciliation request if any. 
if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { obj.Status.SetLastHandledReconcileRequest(v) @@ -192,7 +199,8 @@ func (r *BucketReconciler) summarizeAndPatch(ctx context.Context, obj *sourcev1. // Compute the reconcile results, obtain patch options and reconcile error. var patchOpts []patch.Option - patchOpts, recErr = sreconcile.ComputeReconcileResult(obj, res, recErr, bucketOwnedConditions) + var result ctrl.Result + patchOpts, result, recErr = sreconcile.ComputeReconcileResult(obj, obj.GetRequeueAfter(), res, recErr, bucketOwnedConditions) // Summarize the Ready condition based on abnormalities that may have been observed. conditions.SetSummary(obj, @@ -214,7 +222,7 @@ func (r *BucketReconciler) summarizeAndPatch(ctx context.Context, obj *sourcev1. recErr = kerrors.NewAggregate([]error{recErr, err}) } - return recErr + return result, recErr } // reconcile steps iterates through the actual reconciliation tasks for objec, diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 3dc97c790..34bb26118 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -145,20 +145,19 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - // Initialize the patch helper + // Initialize the patch helper with the current version of the object. patchHelper, err := patch.NewHelper(obj, r.Client) if err != nil { return ctrl.Result{}, err } + // recResult stores the abstracted reconcile result. var recResult sreconcile.Result // Always attempt to patch the object and status after each reconciliation - // NOTE: This deferred block only modifies the named return error. The - // result from the reconciliation remains the same. Any requeue attributes - // set in the result will continue to be effective. + // NOTE: The final runtime result and error are set in this block. defer func() { - retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) + result, retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) // Always record readiness and duration metrics r.Metrics.RecordReadiness(ctx, obj) @@ -170,13 +169,13 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) recResult = sreconcile.ResultRequeue - return ctrl.Result{Requeue: true}, nil + return } // Examine if the object is under deletion if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - res, err := r.reconcileDelete(ctx, obj) - return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, res, err) + recResult, retErr = r.reconcileDelete(ctx, obj) + return } // Reconcile actual object @@ -186,13 +185,21 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques r.reconcileInclude, r.reconcileArtifact, } - recResult, err = r.reconcile(ctx, obj, reconcilers) - return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, recResult, err) + recResult, retErr = r.reconcile(ctx, obj, reconcilers) + return } // summarizeAndPatch analyzes the object conditions to create a summary of the -// status conditions and patches the object with the calculated summary. 
-func (r *GitRepositoryReconciler) summarizeAndPatch(ctx context.Context, obj *sourcev1.GitRepository, patchHelper *patch.Helper, res sreconcile.Result, recErr error) error { +// status conditions, computes runtime results and patches the object in the K8s +// API server. +func (r *GitRepositoryReconciler) summarizeAndPatch( + ctx context.Context, + obj *sourcev1.GitRepository, + patchHelper *patch.Helper, + res sreconcile.Result, + recErr error) (ctrl.Result, error) { + sreconcile.RecordContextualError(ctx, r.EventRecorder, obj, recErr) + // Record the value of the reconciliation request if any. if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { obj.Status.SetLastHandledReconcileRequest(v) @@ -200,7 +207,8 @@ func (r *GitRepositoryReconciler) summarizeAndPatch(ctx context.Context, obj *so // Compute the reconcile results, obtain patch options and reconcile error. var patchOpts []patch.Option - patchOpts, recErr = sreconcile.ComputeReconcileResult(obj, res, recErr, gitRepoOwnedConditions) + var result ctrl.Result + patchOpts, result, recErr = sreconcile.ComputeReconcileResult(obj, obj.GetRequeueAfter(), res, recErr, gitRepoOwnedConditions) // Summarize the Ready condition based on abnormalities that may have been observed. conditions.SetSummary(obj, @@ -222,7 +230,7 @@ func (r *GitRepositoryReconciler) summarizeAndPatch(ctx context.Context, obj *so recErr = kerrors.NewAggregate([]error{recErr, err}) } - return recErr + return result, recErr } // reconcile steps iterates through the actual reconciliation tasks for objec, diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index db6ec5c19..865acd8c0 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -169,21 +169,19 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil } - // Initialize the patch helper + // Initialize the patch helper with the current version of the object. patchHelper, err := patch.NewHelper(obj, r.Client) if err != nil { return ctrl.Result{}, err } - // Result of the sub-reconciliation + // recResult stores the abstracted reconcile result. var recResult sreconcile.Result // Always attempt to patch the object after each reconciliation. - // NOTE: This deferred block only modifies the named return error. The - // result from the reconciliation remains the same. Any requeue attributes - // set in the result will continue to be effective. + // NOTE: The final runtime result and error are set in this block. 
defer func() { - retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) + result, retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) // Always record readiness and duration metrics r.Metrics.RecordReadiness(ctx, obj) @@ -195,13 +193,13 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) recResult = sreconcile.ResultRequeue - return ctrl.Result{Requeue: true}, nil + return } // Examine if the object is under deletion if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - res, err := r.reconcileDelete(ctx, obj) - return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, res, err) + recResult, retErr = r.reconcileDelete(ctx, obj) + return } // Reconcile actual object @@ -210,15 +208,21 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( r.reconcileSource, r.reconcileArtifact, } - recResult, err = r.reconcile(ctx, obj, reconcilers) - return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, recResult, err) + recResult, retErr = r.reconcile(ctx, obj, reconcilers) + return } // summarizeAndPatch analyzes the object conditions to create a summary of the -// status conditions and patches the object with the calculated summary. The -// reconciler error type is also used to determine the conditions and the -// returned error. -func (r *HelmChartReconciler) summarizeAndPatch(ctx context.Context, obj *sourcev1.HelmChart, patchHelper *patch.Helper, res sreconcile.Result, recErr error) error { +// status conditions, computes runtime results and patches the object in the K8s +// API server. +func (r *HelmChartReconciler) summarizeAndPatch( + ctx context.Context, + obj *sourcev1.HelmChart, + patchHelper *patch.Helper, + res sreconcile.Result, + recErr error) (ctrl.Result, error) { + sreconcile.RecordContextualError(ctx, r.EventRecorder, obj, recErr) + // Record the value of the reconciliation request, if any if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { obj.Status.SetLastHandledReconcileRequest(v) @@ -226,7 +230,8 @@ func (r *HelmChartReconciler) summarizeAndPatch(ctx context.Context, obj *source // Compute the reconcile results, obtain patch options and reconcile error. 
var patchOpts []patch.Option - patchOpts, recErr = sreconcile.ComputeReconcileResult(obj, res, recErr, helmChartOwnedConditions) + var result ctrl.Result + patchOpts, result, recErr = sreconcile.ComputeReconcileResult(obj, obj.GetRequeueAfter(), res, recErr, helmChartOwnedConditions) // Summarize Ready condition conditions.SetSummary(obj, @@ -247,7 +252,7 @@ func (r *HelmChartReconciler) summarizeAndPatch(ctx context.Context, obj *source } recErr = kerrors.NewAggregate([]error{recErr, err}) } - return recErr + return result, recErr } // reconcile steps through the actual reconciliation tasks for the object, it returns early on the first step that diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index 5e74173a6..57abbe59a 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -1372,7 +1372,8 @@ func TestHelmChartReconciler_summarizeAndPatch(t *testing.T) { builder := fake.NewClientBuilder().WithScheme(testEnv.GetScheme()) r := &HelmChartReconciler{ - Client: builder.Build(), + Client: builder.Build(), + EventRecorder: record.NewFakeRecorder(32), } obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ @@ -1393,7 +1394,7 @@ func TestHelmChartReconciler_summarizeAndPatch(t *testing.T) { patchHelper, err := patch.NewHelper(obj, r.Client) g.Expect(err).ToNot(HaveOccurred()) - gotErr := r.summarizeAndPatch(ctx, obj, patchHelper, tt.result, tt.reconcileErr) + _, gotErr := r.summarizeAndPatch(ctx, obj, patchHelper, tt.result, tt.reconcileErr) g.Expect(gotErr != nil).To(Equal(tt.wantErr)) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index e5d655c9c..29bf46dfb 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -132,21 +132,19 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, nil } - // Initialize the patch helper + // Initialize the patch helper with the current version of the object. patchHelper, err := patch.NewHelper(obj, r.Client) if err != nil { return ctrl.Result{}, err } - // Result of the sub-reconciliation. + // recResult stores the abstracted reconcile result. var recResult sreconcile.Result // Always attempt to patch the object after each reconciliation. - // NOTE: This deferred block only modifies the named return error. The - // result from the reconciliation remains the same. Any requeue attributes - // set in the result will continue to be effective. + // NOTE: The final runtime result and error are set in this block. 
defer func() { - retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) + result, retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) // Always record readiness and duration metrics r.Metrics.RecordReadiness(ctx, obj) @@ -158,13 +156,13 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) recResult = sreconcile.ResultRequeue - return ctrl.Result{Requeue: true}, nil + return } // Examine if the object is under deletion if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - res, err := r.reconcileDelete(ctx, obj) - return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, res, err) + recResult, retErr = r.reconcileDelete(ctx, obj) + return } // Reconcile actual object @@ -173,15 +171,21 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque r.reconcileSource, r.reconcileArtifact, } - recResult, err = r.reconcile(ctx, obj, reconcilers) - return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, recResult, err) + recResult, retErr = r.reconcile(ctx, obj, reconcilers) + return } // summarizeAndPatch analyzes the object conditions to create a summary of the -// status conditions and patches the object with the calculated summary. The -// reconciler error type is also used to determine the conditions and the -// returned error. -func (r *HelmRepositoryReconciler) summarizeAndPatch(ctx context.Context, obj *sourcev1.HelmRepository, patchHelper *patch.Helper, res sreconcile.Result, recErr error) error { +// status conditions, computes runtime results and patches the object in the K8s +// API server. +func (r *HelmRepositoryReconciler) summarizeAndPatch( + ctx context.Context, + obj *sourcev1.HelmRepository, + patchHelper *patch.Helper, + res sreconcile.Result, + recErr error) (ctrl.Result, error) { + sreconcile.RecordContextualError(ctx, r.EventRecorder, obj, recErr) + // Record the value of the reconciliation request, if any. if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { obj.Status.SetLastHandledReconcileRequest(v) @@ -189,7 +193,8 @@ func (r *HelmRepositoryReconciler) summarizeAndPatch(ctx context.Context, obj *s // Compute the reconcile results, obtain patch options and reconcile error. var patchOpts []patch.Option - patchOpts, recErr = sreconcile.ComputeReconcileResult(obj, res, recErr, helmRepoOwnedConditions) + var result ctrl.Result + patchOpts, result, recErr = sreconcile.ComputeReconcileResult(obj, obj.GetRequeueAfter(), res, recErr, helmRepoOwnedConditions) // Summarize Ready condition. 
conditions.SetSummary(obj, @@ -211,7 +216,7 @@ func (r *HelmRepositoryReconciler) summarizeAndPatch(ctx context.Context, obj *s recErr = kerrors.NewAggregate([]error{recErr, err}) } - return recErr + return result, recErr } // reconcile iterates through the sub-reconcilers and processes the source diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index 68790bd3a..410f8b76c 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -752,7 +752,8 @@ func TestHelmRepositoryReconciler_summarizeAndPatch(t *testing.T) { builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) r := &HelmRepositoryReconciler{ - Client: builder.Build(), + Client: builder.Build(), + EventRecorder: record.NewFakeRecorder(32), } obj := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ @@ -773,7 +774,7 @@ func TestHelmRepositoryReconciler_summarizeAndPatch(t *testing.T) { patchHelper, err := patch.NewHelper(obj, r.Client) g.Expect(err).ToNot(HaveOccurred()) - gotErr := r.summarizeAndPatch(ctx, obj, patchHelper, tt.result, tt.reconcileErr) + _, gotErr := r.summarizeAndPatch(ctx, obj, patchHelper, tt.result, tt.reconcileErr) g.Expect(gotErr != nil).To(Equal(tt.wantErr)) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) diff --git a/internal/error/error.go b/internal/error/error.go index df20ccc49..4333c4603 100644 --- a/internal/error/error.go +++ b/internal/error/error.go @@ -16,6 +16,8 @@ limitations under the License. package error +import "time" + // Stalling is the reconciliation stalled state error. It contains an error // and a reason for the stalled condition. type Stalling struct { @@ -54,3 +56,24 @@ func (ee *Event) Error() string { func (ee *Event) Unwrap() error { return ee.Err } + +// Waiting is the reconciliation wait state error. It contains an error, wait +// duration and a reason for the wait. +type Waiting struct { + // RequeueAfter is the wait duration after which to requeue. + RequeueAfter time.Duration + // Reason is the reason for the wait. + Reason string + // Err is the error that caused the wait. + Err error +} + +// Error implement error interface. +func (we *Waiting) Error() string { + return we.Err.Error() +} + +// Unwrap returns the underlying error. +func (we *Waiting) Unwrap() error { + return we.Err +} diff --git a/internal/reconcile/reconcile.go b/internal/reconcile/reconcile.go index 2da1f8096..038e7e245 100644 --- a/internal/reconcile/reconcile.go +++ b/internal/reconcile/reconcile.go @@ -18,8 +18,10 @@ package reconcile import ( "context" + "time" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" kuberecorder "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" @@ -27,7 +29,6 @@ import ( "github.com/fluxcd/pkg/runtime/conditions" "github.com/fluxcd/pkg/runtime/patch" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" serror "github.com/fluxcd/source-controller/internal/error" ) @@ -48,42 +49,51 @@ const ( // BuildRuntimeResult converts a given Result and error into the // return values of a controller's Reconcile function. -func BuildRuntimeResult(ctx context.Context, recorder kuberecorder.EventRecorder, obj sourcev1.Source, rr Result, err error) (ctrl.Result, error) { - // NOTE: The return values can be modified based on the error type. 
- // For example, if an error signifies a short requeue period that's - // not equal to the requeue period of the object, the error can be checked - // and an appropriate result with the period can be returned. - // - // Example: - // if e, ok := err.(*waitError); ok { - // return ctrl.Result{RequeueAfter: e.RequeueAfter}, err - // } - - // Log and record event based on the error. +// func BuildRuntimeResult(ctx context.Context, recorder kuberecorder.EventRecorder, obj sourcev1.Source, rr Result, err error) (ctrl.Result, error) { +func BuildRuntimeResult(successInterval time.Duration, rr Result, err error) ctrl.Result { + // Handle special errors that contribute to expressing the result. + if e, ok := err.(*serror.Waiting); ok { + return ctrl.Result{RequeueAfter: e.RequeueAfter} + } + + switch rr { + case ResultRequeue: + return ctrl.Result{Requeue: true} + case ResultSuccess: + return ctrl.Result{RequeueAfter: successInterval} + default: + return ctrl.Result{} + } +} + +// RecordContextualError records the contextual errors based on their types. +// An event is recorded for the errors that are returned to the runtime. The +// runtime handles the logging of the error. +// An event is recorded and an error is logged for errors that are known to be +// swallowed, not returned to the runtime. +func RecordContextualError(ctx context.Context, recorder kuberecorder.EventRecorder, obj runtime.Object, err error) { switch e := err.(type) { case *serror.Event: recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error()) + case *serror.Waiting: + // Waiting errors are not returned to the runtime. Log it explicitly. + ctrl.LoggerFrom(ctx).Info("reconciliation waiting", "reason", e.Err, "duration", e.RequeueAfter) + recorder.Event(obj, corev1.EventTypeNormal, e.Reason, e.Error()) case *serror.Stalling: // Stalling errors are not returned to the runtime. Log it explicitly. ctrl.LoggerFrom(ctx).Error(e, "reconciliation stalled") recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error()) } - - switch rr { - case ResultRequeue: - return ctrl.Result{Requeue: true}, err - case ResultSuccess: - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, err - default: - return ctrl.Result{}, err - } } // ComputeReconcileResult analyzes the reconcile results (result + error), // updates the status conditions of the object with any corrections and returns -// result patch configuration and any error to the caller. The caller is -// responsible for using the patch option to patch the object in the API server. -func ComputeReconcileResult(obj conditions.Setter, res Result, recErr error, ownedConditions []string) ([]patch.Option, error) { +// object patch configuration, runtime result and runtime error. The caller is +// responsible for using the patch configuration to patch the object in the API +// server. +func ComputeReconcileResult(obj conditions.Setter, successInterval time.Duration, res Result, recErr error, ownedConditions []string) ([]patch.Option, ctrl.Result, error) { + result := BuildRuntimeResult(successInterval, res, recErr) + // Remove reconciling condition on successful reconciliation. if recErr == nil && res == ResultSuccess { conditions.Delete(obj, meta.ReconcilingCondition) @@ -105,10 +115,16 @@ func ComputeReconcileResult(obj conditions.Setter, res Result, recErr error, own // requeuing. 
pOpts = append(pOpts, patch.WithStatusObservedGeneration{}) conditions.MarkStalled(obj, t.Reason, t.Error()) - return pOpts, nil + return pOpts, result, nil } // NOTE: Non-empty result with stalling error indicates that the // returned result is incorrect. + case *serror.Waiting: + // The reconcile resulted in waiting error, remove stalled condition if + // present. + conditions.Delete(obj, meta.StalledCondition) + // The reconciler needs to wait and retry. Return no error. + return pOpts, result, nil case nil: // The reconcile didn't result in any error, we are not in stalled // state. If a requeue is requested, the current generation has not been @@ -123,7 +139,7 @@ func ComputeReconcileResult(obj conditions.Setter, res Result, recErr error, own conditions.Delete(obj, meta.StalledCondition) } - return pOpts, recErr + return pOpts, result, recErr } // LowestRequeuingResult returns the ReconcileResult with the lowest requeue From eb0a554561ea2994b9ddbf91037b3ff088a52d2d Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Thu, 27 Jan 2022 21:39:10 +0100 Subject: [PATCH 48/63] internal/helm: ErrChartReference on local load err If a local reference does not contain a path to a valid file, returning `ErrChartReference` is more correct to signal the reference is invalid. This also indirectly causes the reconciler to signal a Suspend, as the source or resource requires a change before a reattempt might be successful. Signed-off-by: Hidde Beydals --- internal/helm/chart/builder_local.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/helm/chart/builder_local.go b/internal/helm/chart/builder_local.go index da9cc9cba..2710e41a9 100644 --- a/internal/helm/chart/builder_local.go +++ b/internal/helm/chart/builder_local.go @@ -77,10 +77,10 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, // to a chart curMeta, err := LoadChartMetadata(localRef.Path) if err != nil { - return nil, &BuildError{Reason: ErrChartPull, Err: err} + return nil, &BuildError{Reason: ErrChartReference, Err: err} } if err = curMeta.Validate(); err != nil { - return nil, &BuildError{Reason: ErrChartPull, Err: err} + return nil, &BuildError{Reason: ErrChartReference, Err: err} } result := &Build{} From fb45032eb60cf9f79d6980157ab95959d2697868 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Thu, 27 Jan 2022 21:42:00 +0100 Subject: [PATCH 49/63] controllers: only handle BuildError All other errors returned by `build*` are already properly wrapped. 
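
For illustration, a minimal standalone sketch of the wait semantics introduced in internal/reconcile above. The Waiting type, the abstracted Result values and the result mapping are re-declared locally here so the snippet compiles on its own; the reason string and durations are arbitrary examples, not values used by the controllers.

package main

import (
	"errors"
	"fmt"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

// Waiting mirrors the internal error type introduced above: a non-fatal
// condition that asks the reconciler to retry after a fixed duration.
type Waiting struct {
	RequeueAfter time.Duration
	Reason       string
	Err          error
}

func (w *Waiting) Error() string { return w.Err.Error() }
func (w *Waiting) Unwrap() error { return w.Err }

// Result mirrors the abstracted reconcile result of the internal package.
type Result int

const (
	ResultEmpty Result = iota
	ResultRequeue
	ResultSuccess
)

// buildRuntimeResult mirrors BuildRuntimeResult: a Waiting error takes
// precedence over the abstracted result; otherwise the result decides
// between an immediate requeue, the success interval, or no requeue.
func buildRuntimeResult(successInterval time.Duration, rr Result, err error) ctrl.Result {
	if w, ok := err.(*Waiting); ok {
		return ctrl.Result{RequeueAfter: w.RequeueAfter}
	}
	switch rr {
	case ResultRequeue:
		return ctrl.Result{Requeue: true}
	case ResultSuccess:
		return ctrl.Result{RequeueAfter: successInterval}
	default:
		return ctrl.Result{}
	}
}

func main() {
	interval := 10 * time.Minute

	// A successful reconciliation requeues at the object's interval.
	fmt.Printf("%+v\n", buildRuntimeResult(interval, ResultSuccess, nil))

	// A Waiting error overrides the interval with its own, shorter delay.
	// The reason here is an arbitrary example value.
	wait := &Waiting{
		RequeueAfter: 30 * time.Second,
		Reason:       "Waiting",
		Err:          errors.New("artifact not ready yet"),
	}
	fmt.Printf("%+v\n", buildRuntimeResult(interval, ResultEmpty, wait))
}

Running it prints a 10m requeue for the success case and a 30s requeue for the waiting case, which is the mapping the new ComputeReconcileResult builds its runtime result on.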
Signed-off-by: Hidde Beydals --- controllers/helmchart_controller.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index 865acd8c0..e8a5c0152 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -368,15 +368,14 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, obj *sourcev1 // Handle any build error if retErr != nil { - e := fmt.Errorf("failed to build chart from source artifact: %w", retErr) - retErr = &serror.Event{ - Err: e, - Reason: meta.FailedReason, - } - if buildErr := new(chart.BuildError); errors.As(e, &buildErr) { + if buildErr := new(chart.BuildError); errors.As(retErr, &buildErr) { + retErr = &serror.Event{ + Err: buildErr, + Reason: buildErr.Reason.Reason, + } if chart.IsPersistentBuildErrorReason(buildErr.Reason) { retErr = &serror.Stalling{ - Err: e, + Err: buildErr, Reason: buildErr.Reason.Reason, } } From 21a7dfeb1e32971e58827fe081dafb4373b5fea3 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Thu, 27 Jan 2022 22:48:06 +0100 Subject: [PATCH 50/63] controllers: HelmChart Reconcile test Signed-off-by: Hidde Beydals --- controllers/helmchart_controller_test.go | 90 ++++++++++++++++++++++++ controllers/suite_test.go | 10 +++ 2 files changed, 100 insertions(+) diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index 57abbe59a..20038fb58 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -32,6 +32,7 @@ import ( "github.com/darkowlzz/controller-check/status" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" @@ -50,6 +51,95 @@ import ( sreconcile "github.com/fluxcd/source-controller/internal/reconcile" ) +func TestHelmChartReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + const ( + chartName = "helmchart" + chartVersion = "0.2.0" + chartPath = "testdata/charts/helmchart" + ) + + server, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + + g.Expect(server.PackageChartWithVersion(chartPath, chartVersion)).To(Succeed()) + g.Expect(server.GenerateIndex()).To(Succeed()) + + server.Start() + defer server.Stop() + + ns, err := testEnv.CreateNamespace(ctx, "helmchart") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + repository := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + Namespace: ns.Name, + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: server.URL(), + }, + } + g.Expect(testEnv.CreateAndWait(ctx, repository)).To(Succeed()) + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-reconcile-", + Namespace: ns.Name, + }, + Spec: sourcev1.HelmChartSpec{ + Chart: chartName, + Version: chartVersion, + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, + Name: repository.Name, + }, + }, + } + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, 
timeout).Should(BeTrue()) + + // Wait for HelmChart to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) || obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + // Check if the object status is valid. + condns := &status.Conditions{NegativePolarity: helmChartReadyDepsNegative} + checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) + checker.CheckErr(ctx, obj) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmChart to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + func TestHelmChartReconciler_reconcileStorage(t *testing.T) { tests := []struct { name string diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 13145a194..b4a6ca69d 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -126,6 +126,16 @@ func TestMain(m *testing.M) { panic(fmt.Sprintf("Failed to start HelmRepositoryReconciler: %v", err)) } + if err := (&HelmChartReconciler{ + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Getters: testGetters, + Storage: testStorage, + }).SetupWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Failed to start HelmRepositoryReconciler: %v", err)) + } + go func() { fmt.Println("Starting the test environment") if err := testEnv.Start(ctx); err != nil { From 028a85d61ae578ee207402eeeb8d8b681cd4348f Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Fri, 28 Jan 2022 11:32:22 +0100 Subject: [PATCH 51/63] controllers: cleanup TestStorageCopyFromPath files Signed-off-by: Hidde Beydals --- controllers/storage_test.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/controllers/storage_test.go b/controllers/storage_test.go index b93ed69af..c3e7393e4 100644 --- a/controllers/storage_test.go +++ b/controllers/storage_test.go @@ -317,15 +317,11 @@ func TestStorageCopyFromPath(t *testing.T) { } createFile := func(file *File) (absPath string, err error) { - defer func() { - if err != nil && dir != "" { - os.RemoveAll(dir) - } - }() dir, err = os.MkdirTemp("", "test-files-") if err != nil { return } + t.Cleanup(cleanupStoragePath(dir)) absPath = filepath.Join(dir, file.Name) if err = os.MkdirAll(filepath.Dir(absPath), 0755); err != nil { return From ad0993e93e48b946033dac8ed2732a6837fa14aa Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Fri, 28 Jan 2022 11:34:32 +0100 Subject: [PATCH 52/63] controllers: truncate temporary cached Helm index Signed-off-by: Hidde Beydals --- controllers/helmrepository_controller.go | 9 +++++---- controllers/helmrepository_controller_test.go | 6 +++--- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index 29bf46dfb..ff55c78d6 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -416,6 +416,10 @@ func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *s conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision '%s'", artifact.Revision) } + + if err := chartRepo.RemoveCache(); err != nil { + 
ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary cached index file") + } }() if obj.GetArtifact().HasRevision(artifact.Revision) { @@ -427,10 +431,7 @@ func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *s // and they have to be reconciled. conditions.MarkReconciling(obj, "NewRevision", "new index revision '%s'", artifact.Revision) - // Clear cache at the very end. - defer chartRepo.RemoveCache() - - // Create artifact dir. + // Create artifact dir if err := r.Storage.MkdirAll(*artifact); err != nil { return sreconcile.ResultEmpty, &serror.Event{ Err: fmt.Errorf("failed to create artifact directory: %w", err), diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index 410f8b76c..369f8ef35 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -471,9 +471,9 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { var chartRepo repository.ChartRepository var artifact sourcev1.Artifact - dlog := log.NewDelegatingLogSink(log.NullLogSink{}) - nullLogger := logr.New(dlog) - got, err := r.reconcileSource(logr.NewContext(ctx, nullLogger), obj, &artifact, &chartRepo) + got, err := r.reconcileSource(context.TODO(), obj, &artifact, &chartRepo) + defer os.Remove(chartRepo.CachePath) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) From 849f7eeda312537be965878fede06f6c0c01e1e2 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Fri, 28 Jan 2022 11:43:35 +0100 Subject: [PATCH 53/63] controllers: replace `logr.NewContext` shims Signed-off-by: Hidde Beydals --- controllers/bucket_controller_test.go | 11 +++----- controllers/gitrepository_controller_test.go | 27 +++++++------------ controllers/helmrepository_controller_test.go | 16 +++++------ 3 files changed, 19 insertions(+), 35 deletions(-) diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index afedb63b1..3730f021a 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -32,7 +32,8 @@ import ( "time" "github.com/darkowlzz/controller-check/status" - "github.com/go-logr/logr" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" . 
"github.com/onsi/gomega" raw "google.golang.org/api/storage/v1" corev1 "k8s.io/api/core/v1" @@ -41,10 +42,6 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/log" - - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/conditions" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" @@ -959,9 +956,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { tt.beforeFunc(g, obj, artifact, tmpDir) } - dlog := log.NewDelegatingLogSink(log.NullLogSink{}) - nullLogger := logr.New(dlog) - got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, &artifact, tmpDir) + got, err := r.reconcileArtifact(context.TODO(), obj, &artifact, tmpDir) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index 792495a86..4b993e8bf 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -17,6 +17,7 @@ limitations under the License. package controllers import ( + "context" "fmt" "net/url" "os" @@ -26,13 +27,17 @@ import ( "time" "github.com/darkowlzz/controller-check/status" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/gittestserver" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/ssh" + "github.com/fluxcd/pkg/testserver" "github.com/go-git/go-billy/v5/memfs" gogit "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" "github.com/go-git/go-git/v5/storage/memory" - "github.com/go-logr/logr" . 
"github.com/onsi/gomega" sshtestdata "golang.org/x/crypto/ssh/testdata" corev1 "k8s.io/api/core/v1" @@ -45,13 +50,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/log" - - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/gittestserver" - "github.com/fluxcd/pkg/runtime/conditions" - "github.com/fluxcd/pkg/ssh" - "github.com/fluxcd/pkg/testserver" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" @@ -499,9 +497,8 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { var artifact sourcev1.Artifact var includes artifactSet - dlog := log.NewDelegatingLogSink(log.NullLogSink{}) - nullLogger := logr.New(dlog) - got, err := r.reconcileSource(logr.NewContext(ctx, nullLogger), obj, &artifact, &includes, tmpDir) + + got, err := r.reconcileSource(context.TODO(), obj, &artifact, &includes, tmpDir) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) @@ -1209,9 +1206,7 @@ func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) { tt.beforeFunc(obj) } - dlog := log.NewDelegatingLogSink(log.NullLogSink{}) - nullLogger := logr.New(dlog) - got, err := r.verifyCommitSignature(logr.NewContext(ctx, nullLogger), obj, tt.commit) + got, err := r.verifyCommitSignature(context.TODO(), obj, tt.commit) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) @@ -1335,9 +1330,7 @@ func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { } key := client.ObjectKeyFromObject(obj) - dlog := log.NewDelegatingLogSink(log.NullLogSink{}) - nullLogger := logr.New(dlog) - res, err := r.Reconcile(logr.NewContext(ctx, nullLogger), ctrl.Request{NamespacedName: key}) + res, err := r.Reconcile(context.TODO(), ctrl.Request{NamespacedName: key}) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(res).To(Equal(tt.want)) diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index 369f8ef35..e47521f32 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -27,7 +27,10 @@ import ( "time" "github.com/darkowlzz/controller-check/status" - "github.com/go-logr/logr" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/helmtestserver" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -35,12 +38,6 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/log" - - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/helmtestserver" - "github.com/fluxcd/pkg/runtime/conditions" - "github.com/fluxcd/pkg/runtime/patch" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" serror "github.com/fluxcd/source-controller/internal/error" @@ -596,9 +593,8 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { if tt.beforeFunc != nil { tt.beforeFunc(g, obj, artifact, chartRepo) } - dlog := log.NewDelegatingLogSink(log.NullLogSink{}) - nullLogger := logr.New(dlog) - got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, &artifact, chartRepo) + + got, err := r.reconcileArtifact(context.TODO(), obj, &artifact, chartRepo) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) From 9b5613732f9a79b3a10a09118e973bff782b51d1 Mon Sep 17 00:00:00 2001 From: Sunny Date: Mon, 31 Jan 2022 16:00:24 +0530 Subject: [PATCH 54/63] storage: Return details about the deleted items Update Storage.RemoveAll() and Storage.RemoveAllButCurrent() to return the details about the deleted items. This helps emit useful information about garbage collection in the controllers and ignore no-op garbage collections. RemoveAll() returns the path of the deleted directory if any. RemoveAllButCurrent() returns a slice of path of all the deleted items from a resource's artifact dir. Signed-off-by: Sunny --- controllers/bucket_controller.go | 15 ++-- controllers/gitrepository_controller.go | 16 ++--- controllers/helmchart_controller.go | 15 ++-- controllers/helmrepository_controller.go | 16 ++--- controllers/storage.go | 20 ++++-- controllers/storage_test.go | 89 +++++++++++++++++++++++- 6 files changed, 135 insertions(+), 36 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 542709406..7d520c418 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -741,27 +741,28 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bu // resource. 
func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error { if !obj.DeletionTimestamp.IsZero() { - if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { return &serror.Event{ Err: fmt.Errorf("garbage collection for deleted resource failed: %s", err), Reason: "GarbageCollectionFailed", } + } else if deleted != "" { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") } obj.Status.Artifact = nil - // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", - "garbage collected artifacts for deleted resource") return nil } if obj.GetArtifact() != nil { - if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + if deleted, err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { return &serror.Event{ Err: fmt.Errorf("garbage collection of old artifacts failed: %s", err), Reason: "GarbageCollectionFailed", } + } else if len(deleted) > 0 { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected old artifacts") } - // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", "garbage collected old artifacts") } return nil } diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 34bb26118..bbf63c6ce 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -642,27 +642,27 @@ func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj // resource. 
func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.GitRepository) error { if !obj.DeletionTimestamp.IsZero() { - if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { return &serror.Event{ Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err), Reason: "GarbageCollectionFailed", } + } else if deleted != "" { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") } obj.Status.Artifact = nil - // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", - "garbage collected artifacts for deleted resource") return nil } if obj.GetArtifact() != nil { - if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + if deleted, err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { return &serror.Event{ Err: fmt.Errorf("garbage collection of old artifacts failed: %w", err), } + } else if len(deleted) > 0 { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected old artifacts") } - // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", - "garbage collected old artifacts") } return nil } diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index e8a5c0152..d767be019 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -750,27 +750,28 @@ func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, obj *sourcev1 // resource. 
func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmChart) error { if !obj.DeletionTimestamp.IsZero() { - if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { return &serror.Event{ Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err), Reason: "GarbageCollectionFailed", } + } else if deleted != "" { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") } obj.Status.Artifact = nil - // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", - "garbage collected artifacts for deleted resource") return nil } if obj.GetArtifact() != nil { - if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + if deleted, err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { return &serror.Event{ Err: fmt.Errorf("garbage collection of old artifacts failed: %w", err), Reason: "GarbageCollectionFailed", } + } else if len(deleted) > 0 { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected old artifacts") } - // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", "garbage collected old artifacts") } return nil } diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index ff55c78d6..7386c6bb3 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -499,28 +499,28 @@ func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sou // resource. 
func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmRepository) error { if !obj.DeletionTimestamp.IsZero() { - if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { return &serror.Event{ Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err), Reason: "GarbageCollectionFailed", } + } else if deleted != "" { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") } obj.Status.Artifact = nil - // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", - "garbage collected artifacts for deleted resource") return nil } if obj.GetArtifact() != nil { - if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + if deleted, err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { return &serror.Event{ Err: fmt.Errorf("garbage collection of old artifacts failed: %w", err), Reason: "GarbageCollectionFailed", } + } else if len(deleted) > 0 { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected old artifacts") } - // TODO(hidde): we should only push this event if we actually garbage collected something - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", - "garbage collected old artifacts") } return nil } diff --git a/controllers/storage.go b/controllers/storage.go index 8f892da6d..0e9e5fe8b 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -105,13 +105,20 @@ func (s *Storage) MkdirAll(artifact sourcev1.Artifact) error { } // RemoveAll calls os.RemoveAll for the given v1beta1.Artifact base dir. -func (s *Storage) RemoveAll(artifact sourcev1.Artifact) error { +func (s *Storage) RemoveAll(artifact sourcev1.Artifact) (string, error) { + var deletedDir string dir := filepath.Dir(s.LocalPath(artifact)) - return os.RemoveAll(dir) + // Check if the dir exists. + _, err := os.Stat(dir) + if err == nil { + deletedDir = dir + } + return deletedDir, os.RemoveAll(dir) } // RemoveAllButCurrent removes all files for the given v1beta1.Artifact base dir, excluding the current one. -func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) error { +func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) ([]string, error) { + deletedFiles := []string{} localPath := s.LocalPath(artifact) dir := filepath.Dir(localPath) var errors []string @@ -124,15 +131,18 @@ func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) error { if path != localPath && !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink { if err := os.Remove(path); err != nil { errors = append(errors, info.Name()) + } else { + // Collect the successfully deleted file paths. + deletedFiles = append(deletedFiles, path) } } return nil }) if len(errors) > 0 { - return fmt.Errorf("failed to remove files: %s", strings.Join(errors, " ")) + return deletedFiles, fmt.Errorf("failed to remove files: %s", strings.Join(errors, " ")) } - return nil + return deletedFiles, nil } // ArtifactExist returns a boolean indicating whether the v1beta1.Artifact exists in storage and is a regular file. 
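
As a usage sketch of the Storage changes above, a hypothetical helper (the name, the report callback and its placement in the controllers package are assumptions for illustration, not part of this series) that only reports garbage collection when files were actually removed:

package controllers

import (
	"fmt"

	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
)

// garbageCollectOldArtifacts removes every stored artifact except the
// current one and reports only when the collection was not a no-op.
func garbageCollectOldArtifacts(s *Storage, artifact sourcev1.Artifact, report func(format string, args ...interface{})) error {
	deleted, err := s.RemoveAllButCurrent(artifact)
	if err != nil {
		return fmt.Errorf("garbage collection of old artifacts failed: %w", err)
	}
	if len(deleted) > 0 {
		// Skipped entirely when nothing was deleted, which is the point of
		// returning the deleted paths.
		report("garbage collected %d old artifact(s)", len(deleted))
	}
	return nil
}

This is the same pattern the reconcilers above follow with eventLogf: a no-op collection no longer produces a GarbageCollectionSucceeded event.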
diff --git a/controllers/storage_test.go b/controllers/storage_test.go index c3e7393e4..7da575c64 100644 --- a/controllers/storage_test.go +++ b/controllers/storage_test.go @@ -28,6 +28,7 @@ import ( "time" "github.com/go-git/go-git/v5/plumbing/format/gitignore" + . "github.com/onsi/gomega" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" ) @@ -293,10 +294,96 @@ func TestStorageRemoveAllButCurrent(t *testing.T) { t.Fatalf("Valid path did not successfully return: %v", err) } - if err := s.RemoveAllButCurrent(sourcev1.Artifact{Path: path.Join(dir, "really", "nonexistent")}); err == nil { + if _, err := s.RemoveAllButCurrent(sourcev1.Artifact{Path: path.Join(dir, "really", "nonexistent")}); err == nil { t.Fatal("Did not error while pruning non-existent path") } }) + + t.Run("collect names of deleted items", func(t *testing.T) { + g := NewWithT(t) + dir, err := os.MkdirTemp("", "") + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { os.RemoveAll(dir) }) + + s, err := NewStorage(dir, "hostname", time.Minute) + g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") + + artifact := sourcev1.Artifact{ + Path: path.Join("foo", "bar", "artifact1.tar.gz"), + } + + // Create artifact dir and artifacts. + artifactDir := path.Join(dir, "foo", "bar") + g.Expect(os.MkdirAll(artifactDir, 0755)).NotTo(HaveOccurred()) + current := []string{ + path.Join(artifactDir, "artifact1.tar.gz"), + } + wantDeleted := []string{ + path.Join(artifactDir, "file1.txt"), + path.Join(artifactDir, "file2.txt"), + } + createFile := func(files []string) { + for _, c := range files { + f, err := os.Create(c) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(f.Close()).ToNot(HaveOccurred()) + } + } + createFile(current) + createFile(wantDeleted) + _, err = s.Symlink(artifact, "latest.tar.gz") + g.Expect(err).ToNot(HaveOccurred(), "failed to create symlink") + + deleted, err := s.RemoveAllButCurrent(artifact) + g.Expect(err).ToNot(HaveOccurred(), "failed to remove all but current") + g.Expect(deleted).To(Equal(wantDeleted)) + }) +} + +func TestStorageRemoveAll(t *testing.T) { + tests := []struct { + name string + artifactPath string + createArtifactPath bool + wantDeleted string + }{ + { + name: "delete non-existent path", + artifactPath: path.Join("foo", "bar", "artifact1.tar.gz"), + createArtifactPath: false, + wantDeleted: "", + }, + { + name: "delete existing path", + artifactPath: path.Join("foo", "bar", "artifact1.tar.gz"), + createArtifactPath: true, + wantDeleted: path.Join("foo", "bar"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + dir, err := os.MkdirTemp("", "") + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { os.RemoveAll(dir) }) + + s, err := NewStorage(dir, "hostname", time.Minute) + g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") + + artifact := sourcev1.Artifact{ + Path: tt.artifactPath, + } + + if tt.createArtifactPath { + g.Expect(os.MkdirAll(path.Join(dir, tt.artifactPath), 0755)).ToNot(HaveOccurred()) + } + + deleted, err := s.RemoveAll(artifact) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(deleted).To(ContainSubstring(tt.wantDeleted), "unexpected deleted path") + }) + } } func TestStorageCopyFromPath(t *testing.T) { From d997876b07a418836b68e7b685d91f9c5a4e2e55 Mon Sep 17 00:00:00 2001 From: Sunny Date: Fri, 4 Feb 2022 16:31:42 +0530 Subject: [PATCH 55/63] Make generic SummarizeAndPatch() summarizeAndPatch() was used by all the reconcilers with their own object type. 
This creates a generic SummarizeAndPatch helper that takes a conditions.Setter object and performs the same operations. All the reconcilers are updated to use SummarizeAndPatch(). The process of summarize and patch can be configured using the HelperOptions. Introduce ResultProcessor to allow injecting middlewares in the SummarizeAndPatch process. Introduce RuntimeResultBuilder to allow defining how the reconciliation result is computed for specific reconciler. This enabled different reconcilers to have different meanings of the reconciliation results. Introduce Conditions in summary package to store all the status conditions related information of a reconciler. This is passed to SummarizeAndPatch() to be used for summary and patch calculation. Remove all the redundant summarizeAndPatch() tests per reconciler. Add package internal/object containing helpers for interacting with runtime.Object needed by the generic SummarizeAndPatch(). Add tests for ComputeReconcileResult(). Signed-off-by: Sunny --- controllers/bucket_controller.go | 108 ++--- controllers/bucket_controller_test.go | 18 +- controllers/gitrepository_controller.go | 118 ++---- controllers/gitrepository_controller_test.go | 18 +- controllers/helmchart_controller.go | 112 ++--- controllers/helmchart_controller_test.go | 192 +-------- controllers/helmrepository_controller.go | 108 ++--- controllers/helmrepository_controller_test.go | 194 +-------- internal/object/object.go | 114 +++++ internal/object/object_test.go | 88 ++++ internal/reconcile/reconcile.go | 78 ++-- internal/reconcile/reconcile_test.go | 157 +++++++ internal/reconcile/summarize/matchers_test.go | 99 +++++ internal/reconcile/summarize/processor.go | 66 +++ .../reconcile/summarize/processor_test.go | 91 ++++ internal/reconcile/summarize/summary.go | 204 +++++++++ internal/reconcile/summarize/summary_test.go | 396 ++++++++++++++++++ 17 files changed, 1474 insertions(+), 687 deletions(-) create mode 100644 internal/object/object.go create mode 100644 internal/object/object_test.go create mode 100644 internal/reconcile/summarize/matchers_test.go create mode 100644 internal/reconcile/summarize/processor.go create mode 100644 internal/reconcile/summarize/processor_test.go create mode 100644 internal/reconcile/summarize/summary.go create mode 100644 internal/reconcile/summarize/summary_test.go diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 7d520c418..f20679519 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -37,10 +37,8 @@ import ( "golang.org/x/sync/semaphore" "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -57,33 +55,33 @@ import ( sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" serror "github.com/fluxcd/source-controller/internal/error" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" "github.com/fluxcd/source-controller/pkg/sourceignore" ) -// Status conditions owned by Bucket reconciler. 
-var bucketOwnedConditions = []string{ - sourcev1.ArtifactOutdatedCondition, - sourcev1.FetchFailedCondition, - meta.ReadyCondition, - meta.ReconcilingCondition, - meta.StalledCondition, -} - -// Conditions that Ready condition is influenced by in descending order of their -// priority. -var bucketReadyDeps = []string{ - sourcev1.ArtifactOutdatedCondition, - sourcev1.FetchFailedCondition, - meta.StalledCondition, - meta.ReconcilingCondition, -} - -// Negative conditions that Ready condition is influenced by. -var bucketReadyDepsNegative = []string{ - sourcev1.ArtifactOutdatedCondition, - sourcev1.FetchFailedCondition, - meta.StalledCondition, - meta.ReconcilingCondition, +// bucketReadyConditions contains all the conditions information needed +// for Bucket Ready status conditions summary calculation. +var bucketReadyConditions = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, } // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets,verbs=get;list;watch;create;update;patch;delete @@ -151,7 +149,19 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res // Always attempt to patch the object and status after each reconciliation // NOTE: The final runtime result and error are set in this block. defer func() { - result, retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) + summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(bucketReadyConditions), + summarize.WithReconcileResult(recResult), + summarize.WithReconcileError(retErr), + summarize.WithIgnoreNotFound(), + summarize.WithProcessors( + summarize.RecordContextualError, + summarize.RecordReconcileReq, + ), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetInterval().Duration}), + } + result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) // Always record readiness and duration metrics r.Metrics.RecordReadiness(ctx, obj) @@ -181,50 +191,6 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res return } -// summarizeAndPatch analyzes the object conditions to create a summary of the -// status conditions, computes runtime results and patches the object in the K8s -// API server. -func (r *BucketReconciler) summarizeAndPatch( - ctx context.Context, - obj *sourcev1.Bucket, - patchHelper *patch.Helper, - res sreconcile.Result, - recErr error) (ctrl.Result, error) { - sreconcile.RecordContextualError(ctx, r.EventRecorder, obj, recErr) - - // Record the value of the reconciliation request if any. - if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { - obj.Status.SetLastHandledReconcileRequest(v) - } - - // Compute the reconcile results, obtain patch options and reconcile error. 
- var patchOpts []patch.Option - var result ctrl.Result - patchOpts, result, recErr = sreconcile.ComputeReconcileResult(obj, obj.GetRequeueAfter(), res, recErr, bucketOwnedConditions) - - // Summarize the Ready condition based on abnormalities that may have been observed. - conditions.SetSummary(obj, - meta.ReadyCondition, - conditions.WithConditions( - bucketReadyDeps..., - ), - conditions.WithNegativePolarityConditions( - bucketReadyDepsNegative..., - ), - ) - - // Finally, patch the resource. - if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { - // Ignore patch error "not found" when the object is being deleted. - if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) - } - recErr = kerrors.NewAggregate([]error{recErr, err}) - } - - return result, recErr -} - // reconcile steps iterates through the actual reconciliation tasks for objec, // it returns early on the first step that returns ResultRequeue or produces an // error. diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 3730f021a..ee798f376 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -34,6 +34,7 @@ import ( "github.com/darkowlzz/controller-check/status" "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" . "github.com/onsi/gomega" raw "google.golang.org/api/storage/v1" corev1 "k8s.io/api/core/v1" @@ -125,10 +126,25 @@ func TestBucketReconciler_Reconcile(t *testing.T) { }, timeout).Should(BeTrue()) // Check if the object status is valid. - condns := &status.Conditions{NegativePolarity: bucketReadyDepsNegative} + condns := &status.Conditions{NegativePolarity: bucketReadyConditions.NegativePolarity} checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) checker.CheckErr(ctx, obj) + // Patch the object with reconcile request annotation. 
+ patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) // Wait for Bucket to be deleted diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index bbf63c6ce..a494c4f92 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -27,10 +27,8 @@ import ( securejoin "github.com/cyphar/filepath-securejoin" "github.com/fluxcd/pkg/runtime/logger" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -49,41 +47,41 @@ import ( sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" serror "github.com/fluxcd/source-controller/internal/error" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" "github.com/fluxcd/source-controller/internal/util" "github.com/fluxcd/source-controller/pkg/git" "github.com/fluxcd/source-controller/pkg/git/strategy" "github.com/fluxcd/source-controller/pkg/sourceignore" ) -// Status conditions owned by the GitRepository reconciler. -var gitRepoOwnedConditions = []string{ - sourcev1.SourceVerifiedCondition, - sourcev1.FetchFailedCondition, - sourcev1.IncludeUnavailableCondition, - sourcev1.ArtifactOutdatedCondition, - meta.ReadyCondition, - meta.ReconcilingCondition, - meta.StalledCondition, -} - -// Conditions that Ready condition is influenced by in descending order of their -// priority. -var gitRepoReadyDeps = []string{ - sourcev1.IncludeUnavailableCondition, - sourcev1.SourceVerifiedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactOutdatedCondition, - meta.StalledCondition, - meta.ReconcilingCondition, -} - -// Negative conditions that Ready condition is influenced by. -var gitRepoReadyDepsNegative = []string{ - sourcev1.FetchFailedCondition, - sourcev1.IncludeUnavailableCondition, - sourcev1.ArtifactOutdatedCondition, - meta.StalledCondition, - meta.ReconcilingCondition, +// gitRepoReadyConditions contains all the conditions information needed +// for GitRepository Ready status conditions summary calculation. 
+var gitRepoReadyConditions = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.SourceVerifiedCondition, + sourcev1.FetchFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.ArtifactOutdatedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.IncludeUnavailableCondition, + sourcev1.SourceVerifiedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.FetchFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, } // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories,verbs=get;list;watch;create;update;patch;delete @@ -157,7 +155,19 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Always attempt to patch the object and status after each reconciliation // NOTE: The final runtime result and error are set in this block. defer func() { - result, retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) + summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(gitRepoReadyConditions), + summarize.WithReconcileResult(recResult), + summarize.WithReconcileError(retErr), + summarize.WithIgnoreNotFound(), + summarize.WithProcessors( + summarize.RecordContextualError, + summarize.RecordReconcileReq, + ), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetInterval().Duration}), + } + result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) // Always record readiness and duration metrics r.Metrics.RecordReadiness(ctx, obj) @@ -189,50 +199,6 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques return } -// summarizeAndPatch analyzes the object conditions to create a summary of the -// status conditions, computes runtime results and patches the object in the K8s -// API server. -func (r *GitRepositoryReconciler) summarizeAndPatch( - ctx context.Context, - obj *sourcev1.GitRepository, - patchHelper *patch.Helper, - res sreconcile.Result, - recErr error) (ctrl.Result, error) { - sreconcile.RecordContextualError(ctx, r.EventRecorder, obj, recErr) - - // Record the value of the reconciliation request if any. - if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { - obj.Status.SetLastHandledReconcileRequest(v) - } - - // Compute the reconcile results, obtain patch options and reconcile error. - var patchOpts []patch.Option - var result ctrl.Result - patchOpts, result, recErr = sreconcile.ComputeReconcileResult(obj, obj.GetRequeueAfter(), res, recErr, gitRepoOwnedConditions) - - // Summarize the Ready condition based on abnormalities that may have been observed. - conditions.SetSummary(obj, - meta.ReadyCondition, - conditions.WithConditions( - gitRepoReadyDeps..., - ), - conditions.WithNegativePolarityConditions( - gitRepoReadyDepsNegative..., - ), - ) - - // Finally, patch the resource. - if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { - // Ignore patch error "not found" when the object is being deleted. 
- if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) - } - recErr = kerrors.NewAggregate([]error{recErr, err}) - } - - return result, recErr -} - // reconcile steps iterates through the actual reconciliation tasks for objec, // it returns early on the first step that returns ResultRequeue or produces an // error. diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index 4b993e8bf..9473e8dd7 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -30,6 +30,7 @@ import ( "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/gittestserver" "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/ssh" "github.com/fluxcd/pkg/testserver" "github.com/go-git/go-billy/v5/memfs" @@ -190,10 +191,25 @@ func TestGitRepositoryReconciler_Reconcile(t *testing.T) { }, timeout).Should(BeTrue()) // Check if the object status is valid. - condns := &status.Conditions{NegativePolarity: gitRepoReadyDepsNegative} + condns := &status.Conditions{NegativePolarity: gitRepoReadyConditions.NegativePolarity} checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) checker.CheckErr(ctx, obj) + // Patch the object with reconcile request annotation. + patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) // Wait for GitRepository to be deleted diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index d767be019..f93d4590b 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -34,7 +34,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -60,36 +59,36 @@ import ( "github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/repository" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" "github.com/fluxcd/source-controller/internal/util" ) -// Status conditions owned by the HelmChart reconciler. -var helmChartOwnedConditions = []string{ - sourcev1.BuildFailedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactOutdatedCondition, - meta.ReadyCondition, - meta.ReconcilingCondition, - meta.StalledCondition, -} - -// Conditions that Ready condition is influenced by in descending order of their -// priority. -var helmChartReadyDeps = []string{ - sourcev1.BuildFailedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactOutdatedCondition, - meta.StalledCondition, - meta.ReconcilingCondition, -} - -// Negative conditions that Ready condition is influenced by. 
-var helmChartReadyDepsNegative = []string{ - sourcev1.BuildFailedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactOutdatedCondition, - meta.StalledCondition, - meta.ReconcilingCondition, +// helmChartReadyConditions contains all the conditions information +// needed for HelmChart Ready status conditions summary calculation. +var helmChartReadyConditions = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.BuildFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.BuildFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.BuildFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, } // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts,verbs=get;list;watch;create;update;patch;delete @@ -181,7 +180,19 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // Always attempt to patch the object after each reconciliation. // NOTE: The final runtime result and error are set in this block. defer func() { - result, retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) + summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(helmChartReadyConditions), + summarize.WithReconcileResult(recResult), + summarize.WithReconcileError(retErr), + summarize.WithIgnoreNotFound(), + summarize.WithProcessors( + summarize.RecordContextualError, + summarize.RecordReconcileReq, + ), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetInterval().Duration}), + } + result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) // Always record readiness and duration metrics r.Metrics.RecordReadiness(ctx, obj) @@ -212,49 +223,6 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return } -// summarizeAndPatch analyzes the object conditions to create a summary of the -// status conditions, computes runtime results and patches the object in the K8s -// API server. -func (r *HelmChartReconciler) summarizeAndPatch( - ctx context.Context, - obj *sourcev1.HelmChart, - patchHelper *patch.Helper, - res sreconcile.Result, - recErr error) (ctrl.Result, error) { - sreconcile.RecordContextualError(ctx, r.EventRecorder, obj, recErr) - - // Record the value of the reconciliation request, if any - if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { - obj.Status.SetLastHandledReconcileRequest(v) - } - - // Compute the reconcile results, obtain patch options and reconcile error. - var patchOpts []patch.Option - var result ctrl.Result - patchOpts, result, recErr = sreconcile.ComputeReconcileResult(obj, obj.GetRequeueAfter(), res, recErr, helmChartOwnedConditions) - - // Summarize Ready condition - conditions.SetSummary(obj, - meta.ReadyCondition, - conditions.WithConditions( - helmChartReadyDeps..., - ), - conditions.WithNegativePolarityConditions( - helmChartReadyDepsNegative..., - ), - ) - - // Finally, patch the resource - if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { - // Ignore patch error "not found" when the object is being deleted. 
- if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - err = kerrors.FilterOut(err, func(e error) bool { return apierrs.IsNotFound(err) }) - } - recErr = kerrors.NewAggregate([]error{recErr, err}) - } - return result, recErr -} - // reconcile steps through the actual reconciliation tasks for the object, it returns early on the first step that // produces an error. func (r *HelmChartReconciler) reconcile(ctx context.Context, obj *sourcev1.HelmChart, reconcilers []helmChartReconcilerFunc) (sreconcile.Result, error) { diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index 20038fb58..70568f577 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -125,10 +125,25 @@ func TestHelmChartReconciler_Reconcile(t *testing.T) { }, timeout).Should(BeTrue()) // Check if the object status is valid. - condns := &status.Conditions{NegativePolarity: helmChartReadyDepsNegative} + condns := &status.Conditions{NegativePolarity: helmChartReadyConditions.NegativePolarity} checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) checker.CheckErr(ctx, obj) + // Patch the object with reconcile request annotation. + patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) // Wait for HelmChart to be deleted @@ -1326,181 +1341,6 @@ func TestHelmChartReconciler_reconcileDelete(t *testing.T) { g.Expect(obj.Status.Artifact).To(BeNil()) } -func TestHelmChartReconciler_summarizeAndPatch(t *testing.T) { - tests := []struct { - name string - generation int64 - beforeFunc func(obj *sourcev1.HelmChart) - result sreconcile.Result - reconcileErr error - wantErr bool - afterFunc func(t *WithT, obj *sourcev1.HelmChart) - assertConditions []metav1.Condition - }{ - // Success/Fail indicates if a reconciliation succeeded or failed. On - // a successful reconciliation, the object generation is expected to - // match the observed generation in the object status. - // All the cases have some Ready condition set, even if a test case is - // unrelated to the conditions, because it's necessary for a valid - // status. 
- { - name: "Success, no extra conditions", - generation: 4, - beforeFunc: func(obj *sourcev1.HelmChart) { - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { - t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(4))) - }, - }, - { - name: "Success, Ready=True", - generation: 5, - beforeFunc: func(obj *sourcev1.HelmChart) { - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "created") - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "created"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { - t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(5))) - }, - }, - { - name: "Success, removes reconciling for successful result", - generation: 2, - beforeFunc: func(obj *sourcev1.HelmChart) { - conditions.MarkReconciling(obj, "NewRevision", "new index version") - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "stored artifact") - }, - result: sreconcile.ResultSuccess, - wantErr: false, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { - t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(2))) - }, - }, - { - name: "Success, record reconciliation request", - beforeFunc: func(obj *sourcev1.HelmChart) { - annotations := map[string]string{ - meta.ReconcileRequestAnnotation: "now", - } - obj.SetAnnotations(annotations) - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") - }, - generation: 3, - result: sreconcile.ResultSuccess, - wantErr: false, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { - t.Expect(obj.Status.LastHandledReconcileAt).To(Equal("now")) - t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(3))) - }, - }, - { - name: "Fail, with multiple conditions ArtifactOutdated=True,Reconciling=True", - generation: 7, - beforeFunc: func(obj *sourcev1.HelmChart) { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") - conditions.MarkReconciling(obj, "NewRevision", "new index revision") - }, - reconcileErr: fmt.Errorf("failed to create dir"), - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(meta.ReadyCondition, "NewRevision", "new index revision"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { - t.Expect(obj.Status.ObservedGeneration).ToNot(Equal(int64(7))) - }, - }, - { - name: "Success, with subreconciler stalled error", - generation: 9, - beforeFunc: func(obj *sourcev1.HelmChart) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.FetchFailedCondition, "failed to construct helm client") - }, - reconcileErr: &serror.Stalling{Err: fmt.Errorf("some error"), Reason: "some reason"}, - wantErr: false, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(meta.ReadyCondition, sourcev1.FetchFailedCondition, "failed to construct helm client"), 
- *conditions.TrueCondition(meta.StalledCondition, "some reason", "some error"), - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.FetchFailedCondition, "failed to construct helm client"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { - t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(9))) - }, - }, - { - name: "Fail, no error but requeue requested", - generation: 3, - beforeFunc: func(obj *sourcev1.HelmChart) { - conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "test-msg") - }, - result: sreconcile.ResultRequeue, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(meta.ReadyCondition, meta.FailedReason, "test-msg"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { - t.Expect(obj.Status.ObservedGeneration).ToNot(Equal(int64(3))) - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - builder := fake.NewClientBuilder().WithScheme(testEnv.GetScheme()) - r := &HelmChartReconciler{ - Client: builder.Build(), - EventRecorder: record.NewFakeRecorder(32), - } - obj := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-", - Generation: tt.generation, - }, - Spec: sourcev1.HelmChartSpec{ - Interval: metav1.Duration{Duration: 5 * time.Second}, - }, - } - - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - ctx := context.TODO() - g.Expect(r.Create(ctx, obj)).To(Succeed()) - patchHelper, err := patch.NewHelper(obj, r.Client) - g.Expect(err).ToNot(HaveOccurred()) - - _, gotErr := r.summarizeAndPatch(ctx, obj, patchHelper, tt.result, tt.reconcileErr) - g.Expect(gotErr != nil).To(Equal(tt.wantErr)) - - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - - if tt.afterFunc != nil { - tt.afterFunc(g, obj) - } - - // Check if the object status is valid. - condns := &status.Conditions{NegativePolarity: helmChartReadyDepsNegative} - checker := status.NewChecker(r.Client, testEnv.GetScheme(), condns) - checker.CheckErr(ctx, obj) - }) - } -} - func TestHelmChartReconciler_reconcileSubRecs(t *testing.T) { // Helper to build simple helmChartReconcilerFunc with result and error. buildReconcileFuncs := func(r sreconcile.Result, e error) helmChartReconcilerFunc { diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index 7386c6bb3..100f824e1 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -26,10 +26,8 @@ import ( helmgetter "helm.sh/helm/v3/pkg/getter" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -49,32 +47,32 @@ import ( "github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/repository" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" ) -// Status conditions owned by HelmRepository reconciler. -var helmRepoOwnedConditions = []string{ - sourcev1.FetchFailedCondition, - sourcev1.ArtifactOutdatedCondition, - meta.ReadyCondition, - meta.ReconcilingCondition, - meta.StalledCondition, -} - -// Conditions that Ready condition is influenced by in descending order of their -// priority. 
-var helmRepoReadyDeps = []string{ - sourcev1.FetchFailedCondition, - sourcev1.ArtifactOutdatedCondition, - meta.StalledCondition, - meta.ReconcilingCondition, -} - -// Negative conditions that Ready condition is influenced by. -var helmRepoReadyDepsNegative = []string{ - sourcev1.FetchFailedCondition, - sourcev1.ArtifactOutdatedCondition, - meta.StalledCondition, - meta.ReconcilingCondition, +// helmRepoReadyConditions contains all the conditions information needed +// for HelmRepository Ready status conditions summary calculation. +var helmRepoReadyConditions = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, } // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories,verbs=get;list;watch;create;update;patch;delete @@ -144,7 +142,19 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Always attempt to patch the object after each reconciliation. // NOTE: The final runtime result and error are set in this block. defer func() { - result, retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) + summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(helmRepoReadyConditions), + summarize.WithReconcileResult(recResult), + summarize.WithReconcileError(retErr), + summarize.WithIgnoreNotFound(), + summarize.WithProcessors( + summarize.RecordContextualError, + summarize.RecordReconcileReq, + ), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetInterval().Duration}), + } + result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) // Always record readiness and duration metrics r.Metrics.RecordReadiness(ctx, obj) @@ -175,50 +185,6 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque return } -// summarizeAndPatch analyzes the object conditions to create a summary of the -// status conditions, computes runtime results and patches the object in the K8s -// API server. -func (r *HelmRepositoryReconciler) summarizeAndPatch( - ctx context.Context, - obj *sourcev1.HelmRepository, - patchHelper *patch.Helper, - res sreconcile.Result, - recErr error) (ctrl.Result, error) { - sreconcile.RecordContextualError(ctx, r.EventRecorder, obj, recErr) - - // Record the value of the reconciliation request, if any. - if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { - obj.Status.SetLastHandledReconcileRequest(v) - } - - // Compute the reconcile results, obtain patch options and reconcile error. - var patchOpts []patch.Option - var result ctrl.Result - patchOpts, result, recErr = sreconcile.ComputeReconcileResult(obj, obj.GetRequeueAfter(), res, recErr, helmRepoOwnedConditions) - - // Summarize Ready condition. - conditions.SetSummary(obj, - meta.ReadyCondition, - conditions.WithConditions( - helmRepoReadyDeps..., - ), - conditions.WithNegativePolarityConditions( - helmRepoReadyDepsNegative..., - ), - ) - - // Finally, patch the resource. 
- if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { - // Ignore patch error "not found" when the object is being deleted. - if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) - } - recErr = kerrors.NewAggregate([]error{recErr, err}) - } - - return result, recErr -} - // reconcile iterates through the sub-reconcilers and processes the source // object. The sub-reconcilers are run sequentially. The result and error of // the sub-reconciliation are collected and returned. For multiple results diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index e47521f32..993657b46 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -24,7 +24,6 @@ import ( "path/filepath" "strings" "testing" - "time" "github.com/darkowlzz/controller-check/status" "github.com/fluxcd/pkg/apis/meta" @@ -40,7 +39,6 @@ import ( fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/helm/repository" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" ) @@ -95,10 +93,25 @@ func TestHelmRepositoryReconciler_Reconcile(t *testing.T) { }, timeout).Should(BeTrue()) // Check if the object status is valid. - condns := &status.Conditions{NegativePolarity: helmRepoReadyDepsNegative} + condns := &status.Conditions{NegativePolarity: helmRepoReadyConditions.NegativePolarity} checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) checker.CheckErr(ctx, obj) + // Patch the object with reconcile request annotation. + patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) // Wait for HelmRepository to be deleted @@ -612,181 +625,6 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { } } -func TestHelmRepositoryReconciler_summarizeAndPatch(t *testing.T) { - tests := []struct { - name string - generation int64 - beforeFunc func(obj *sourcev1.HelmRepository) - result sreconcile.Result - reconcileErr error - wantErr bool - afterFunc func(t *WithT, obj *sourcev1.HelmRepository) - assertConditions []metav1.Condition - }{ - // Success/Fail indicates if a reconciliation succeeded or failed. On - // a successful reconciliation, the object generation is expected to - // match the observed generation in the object status. - // All the cases have some Ready condition set, even if a test case is - // unrelated to the conditions, because it's neseccary for a valid - // status. 
- { - name: "Success, no extra conditions", - generation: 4, - beforeFunc: func(obj *sourcev1.HelmRepository) { - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { - t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(4))) - }, - }, - { - name: "Success, Ready=True", - generation: 5, - beforeFunc: func(obj *sourcev1.HelmRepository) { - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "created") - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "created"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { - t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(5))) - }, - }, - { - name: "Success, removes reconciling for successful result", - generation: 2, - beforeFunc: func(obj *sourcev1.HelmRepository) { - conditions.MarkReconciling(obj, "NewRevision", "new index version") - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "stored artifact") - }, - result: sreconcile.ResultSuccess, - wantErr: false, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { - t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(2))) - }, - }, - { - name: "Success, record reconciliation request", - beforeFunc: func(obj *sourcev1.HelmRepository) { - annotations := map[string]string{ - meta.ReconcileRequestAnnotation: "now", - } - obj.SetAnnotations(annotations) - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") - }, - generation: 3, - result: sreconcile.ResultSuccess, - wantErr: false, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { - t.Expect(obj.Status.LastHandledReconcileAt).To(Equal("now")) - t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(3))) - }, - }, - { - name: "Fail, with multiple conditions ArtifactOutdated=True,Reconciling=True", - generation: 7, - beforeFunc: func(obj *sourcev1.HelmRepository) { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") - conditions.MarkReconciling(obj, "NewRevision", "new index revision") - }, - reconcileErr: fmt.Errorf("failed to create dir"), - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(meta.ReadyCondition, "NewRevision", "new index revision"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { - t.Expect(obj.Status.ObservedGeneration).ToNot(Equal(int64(7))) - }, - }, - { - name: "Success, with subreconciler stalled error", - generation: 9, - beforeFunc: func(obj *sourcev1.HelmRepository) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.FetchFailedCondition, "failed to construct helm client") - }, - reconcileErr: &serror.Stalling{Err: fmt.Errorf("some error"), Reason: "some reason"}, - wantErr: false, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(meta.ReadyCondition, 
sourcev1.FetchFailedCondition, "failed to construct helm client"), - *conditions.TrueCondition(meta.StalledCondition, "some reason", "some error"), - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.FetchFailedCondition, "failed to construct helm client"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { - t.Expect(obj.Status.ObservedGeneration).To(Equal(int64(9))) - }, - }, - { - name: "Fail, no error but requeue requested", - generation: 3, - beforeFunc: func(obj *sourcev1.HelmRepository) { - conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "test-msg") - }, - result: sreconcile.ResultRequeue, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(meta.ReadyCondition, meta.FailedReason, "test-msg"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { - t.Expect(obj.Status.ObservedGeneration).ToNot(Equal(int64(3))) - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) - r := &HelmRepositoryReconciler{ - Client: builder.Build(), - EventRecorder: record.NewFakeRecorder(32), - } - obj := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-", - Generation: tt.generation, - }, - Spec: sourcev1.HelmRepositorySpec{ - Interval: metav1.Duration{Duration: 5 * time.Second}, - }, - } - - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - ctx := context.TODO() - g.Expect(r.Create(ctx, obj)).To(Succeed()) - patchHelper, err := patch.NewHelper(obj, r.Client) - g.Expect(err).ToNot(HaveOccurred()) - - _, gotErr := r.summarizeAndPatch(ctx, obj, patchHelper, tt.result, tt.reconcileErr) - g.Expect(gotErr != nil).To(Equal(tt.wantErr)) - - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - - if tt.afterFunc != nil { - tt.afterFunc(g, obj) - } - - // Check if the object status is valid. - condns := &status.Conditions{NegativePolarity: helmRepoReadyDepsNegative} - checker := status.NewChecker(r.Client, testEnv.GetScheme(), condns) - checker.CheckErr(ctx, obj) - }) - } -} - func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) { // Helper to build simple helmRepoReconcilerFunc with result and error. buildReconcileFuncs := func(r sreconcile.Result, e error) helmRepoReconcilerFunc { diff --git a/internal/object/object.go b/internal/object/object.go new file mode 100644 index 000000000..c4bd32c22 --- /dev/null +++ b/internal/object/object.go @@ -0,0 +1,114 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "errors" + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + ErrObservedGenerationNotFound = errors.New("observed generation not found") + ErrLastHandledReconcileAtNotFound = errors.New("last handled reconcile at not found") + ErrRequeueIntervalNotFound = errors.New("requeue interval not found") +) + +// toUnstructured converts a runtime object into Unstructured. +// Based on https://github.com/fluxcd/pkg/blob/b4a14854c75753ea9431693b39c4be672f246552/runtime/patch/utils.go#L55. +func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) { + // If the incoming object is already unstructured, perform a deep copy first + // otherwise DefaultUnstructuredConverter ends up returning the inner map without + // making a copy. + if _, ok := obj.(runtime.Unstructured); ok { + obj = obj.DeepCopyObject() + } + rawMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: rawMap}, nil +} + +// GetStatusLastHandledReconcileAt returns the status.lastHandledReconcileAt +// value of a given runtime object, if present. +func GetStatusLastHandledReconcileAt(obj runtime.Object) (string, error) { + u, err := toUnstructured(obj) + if err != nil { + return "", err + } + ra, found, err := unstructured.NestedString(u.Object, "status", "lastHandledReconcileAt") + if err != nil { + return "", err + } + if !found { + return "", ErrLastHandledReconcileAtNotFound + } + return ra, nil +} + +// SetStatusLastHandledReconcileAt sets the status.lastHandledReconcileAt value +// of a given runtime object. +func SetStatusLastHandledReconcileAt(obj runtime.Object, val string) error { + content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return err + } + u := unstructured.Unstructured{} + u.SetUnstructuredContent(content) + if err := unstructured.SetNestedField(u.Object, val, "status", "lastHandledReconcileAt"); err != nil { + return err + } + return runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, obj) +} + +// GetStatusObservedGeneration returns the status.observedGeneration of a given +// runtime object. +func GetStatusObservedGeneration(obj runtime.Object) (int64, error) { + u, err := toUnstructured(obj) + if err != nil { + return 0, err + } + og, found, err := unstructured.NestedInt64(u.Object, "status", "observedGeneration") + if err != nil { + return 0, err + } + if !found { + return 0, ErrObservedGenerationNotFound + } + return og, nil +} + +// GetRequeueInterval returns the spec.interval of a given runtime object, if +// present. +func GetRequeueInterval(obj runtime.Object) (time.Duration, error) { + period := time.Second + u, err := toUnstructured(obj) + if err != nil { + return period, err + } + interval, found, err := unstructured.NestedString(u.Object, "spec", "interval") + if err != nil { + return period, err + } + if !found { + return period, ErrRequeueIntervalNotFound + } + return time.ParseDuration(interval) +} diff --git a/internal/object/object_test.go b/internal/object/object_test.go new file mode 100644 index 000000000..9f0d80bbb --- /dev/null +++ b/internal/object/object_test.go @@ -0,0 +1,88 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "testing" + "time" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" +) + +func TestGetStatusLastHandledReconcileAt(t *testing.T) { + g := NewWithT(t) + + // Get unset status lastHandledReconcileAt. + obj := &sourcev1.GitRepository{} + _, err := GetStatusLastHandledReconcileAt(obj) + g.Expect(err).To(Equal(ErrLastHandledReconcileAtNotFound)) + + // Get set status lastHandledReconcileAt. + obj.Status.LastHandledReconcileAt = "foo" + ra, err := GetStatusLastHandledReconcileAt(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ra).To(Equal("foo")) +} + +func TestSetStatusLastHandledReconcileAt(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{} + err := SetStatusLastHandledReconcileAt(obj, "now") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(obj.Status.LastHandledReconcileAt).To(Equal("now")) +} + +func TestGetStatusObservedGeneration(t *testing.T) { + g := NewWithT(t) + + // Get unset status observedGeneration. + obj := &sourcev1.GitRepository{} + _, err := GetStatusObservedGeneration(obj) + g.Expect(err).To(Equal(ErrObservedGenerationNotFound)) + + // Get set status observedGeneration. + obj.Status.ObservedGeneration = 7 + og, err := GetStatusObservedGeneration(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(og).To(Equal(int64(7))) +} + +func TestGetRequeueInterval(t *testing.T) { + g := NewWithT(t) + + // Get empty requeue interval value. + obj := &sourcev1.GitRepository{} + pd, err := GetRequeueInterval(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(pd).To(Equal(time.Duration(0))) + + // Get set requeue interval value. + obj.Spec.Interval = metav1.Duration{Duration: 3 * time.Second} + pd, err = GetRequeueInterval(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(pd).To(Equal(3 * time.Second)) + + // Get non-existent requeue interval value. + obj2 := &corev1.Secret{} + _, err = GetRequeueInterval(obj2) + g.Expect(err).To(Equal(ErrRequeueIntervalNotFound)) +} diff --git a/internal/reconcile/reconcile.go b/internal/reconcile/reconcile.go index 038e7e245..af0c71b97 100644 --- a/internal/reconcile/reconcile.go +++ b/internal/reconcile/reconcile.go @@ -17,12 +17,8 @@ limitations under the License. package reconcile import ( - "context" "time" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - kuberecorder "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "github.com/fluxcd/pkg/apis/meta" @@ -37,20 +33,40 @@ import ( type Result int const ( - // ResultEmpty indicates a reconcile result which does not requeue. + // ResultEmpty indicates a reconcile result which does not requeue. It is + // also used when returning an error, since the error overshadows result. ResultEmpty Result = iota // ResultRequeue indicates a reconcile result which should immediately // requeue. ResultRequeue - // ResultSuccess indicates a reconcile result which should be - // requeued on the interval as defined on the reconciled object. + // ResultSuccess indicates a reconcile success result. 
+ // For a reconciler that requeues regularly at a fixed interval, runtime + // result with a fixed RequeueAfter is success result. + // For a reconciler that doesn't requeue on successful reconciliation, + // an empty runtime result is success result. + // It is usually returned at the end of a reconciler/sub-reconciler. ResultSuccess ) +// RuntimeResultBuilder defines an interface for runtime result builders. This +// can be implemented to build custom results based on the context of the +// reconciler. +type RuntimeResultBuilder interface { + BuildRuntimeResult(rr Result, err error) ctrl.Result +} + +// AlwaysRequeueResultBuilder implements a RuntimeResultBuilder for always +// requeuing reconcilers. A successful reconciliation result for such +// reconcilers contains a fixed RequeueAfter value. +type AlwaysRequeueResultBuilder struct { + // RequeueAfter is the fixed period at which the reconciler requeues on + // successful execution. + RequeueAfter time.Duration +} + // BuildRuntimeResult converts a given Result and error into the // return values of a controller's Reconcile function. -// func BuildRuntimeResult(ctx context.Context, recorder kuberecorder.EventRecorder, obj sourcev1.Source, rr Result, err error) (ctrl.Result, error) { -func BuildRuntimeResult(successInterval time.Duration, rr Result, err error) ctrl.Result { +func (r AlwaysRequeueResultBuilder) BuildRuntimeResult(rr Result, err error) ctrl.Result { // Handle special errors that contribute to expressing the result. if e, ok := err.(*serror.Waiting); ok { return ctrl.Result{RequeueAfter: e.RequeueAfter} @@ -60,52 +76,32 @@ func BuildRuntimeResult(successInterval time.Duration, rr Result, err error) ctr case ResultRequeue: return ctrl.Result{Requeue: true} case ResultSuccess: - return ctrl.Result{RequeueAfter: successInterval} + return ctrl.Result{RequeueAfter: r.RequeueAfter} default: return ctrl.Result{} } } -// RecordContextualError records the contextual errors based on their types. -// An event is recorded for the errors that are returned to the runtime. The -// runtime handles the logging of the error. -// An event is recorded and an error is logged for errors that are known to be -// swallowed, not returned to the runtime. -func RecordContextualError(ctx context.Context, recorder kuberecorder.EventRecorder, obj runtime.Object, err error) { - switch e := err.(type) { - case *serror.Event: - recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error()) - case *serror.Waiting: - // Waiting errors are not returned to the runtime. Log it explicitly. - ctrl.LoggerFrom(ctx).Info("reconciliation waiting", "reason", e.Err, "duration", e.RequeueAfter) - recorder.Event(obj, corev1.EventTypeNormal, e.Reason, e.Error()) - case *serror.Stalling: - // Stalling errors are not returned to the runtime. Log it explicitly. - ctrl.LoggerFrom(ctx).Error(e, "reconciliation stalled") - recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error()) - } -} - // ComputeReconcileResult analyzes the reconcile results (result + error), // updates the status conditions of the object with any corrections and returns // object patch configuration, runtime result and runtime error. The caller is -// responsible for using the patch configuration to patch the object in the API -// server. 
-func ComputeReconcileResult(obj conditions.Setter, successInterval time.Duration, res Result, recErr error, ownedConditions []string) ([]patch.Option, ctrl.Result, error) { - result := BuildRuntimeResult(successInterval, res, recErr) +// responsible for using the patch configuration while patching the object in +// the API server. +// The RuntimeResultBuilder is used to define how the ctrl.Result is computed. +func ComputeReconcileResult(obj conditions.Setter, res Result, recErr error, rb RuntimeResultBuilder) ([]patch.Option, ctrl.Result, error) { + var pOpts []patch.Option + + // Compute the runtime result. + var result ctrl.Result + if rb != nil { + result = rb.BuildRuntimeResult(res, recErr) + } // Remove reconciling condition on successful reconciliation. if recErr == nil && res == ResultSuccess { conditions.Delete(obj, meta.ReconcilingCondition) } - // Patch the object, ignoring conflicts on the conditions owned by this controller. - pOpts := []patch.Option{ - patch.WithOwnedConditions{ - Conditions: ownedConditions, - }, - } - // Analyze the reconcile error. switch t := recErr.(type) { case *serror.Stalling: diff --git a/internal/reconcile/reconcile_test.go b/internal/reconcile/reconcile_test.go index bb0cf4c44..127e3c186 100644 --- a/internal/reconcile/reconcile_test.go +++ b/internal/reconcile/reconcile_test.go @@ -17,9 +17,20 @@ limitations under the License. package reconcile import ( + "fmt" "testing" + "time" . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" ) func TestLowestRequeuingResult(t *testing.T) { @@ -45,3 +56,149 @@ func TestLowestRequeuingResult(t *testing.T) { }) } } + +// This test uses AlwaysRequeueResultBuilder as the RuntimeResultBuilder. 
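
ComputeReconcileResult no longer takes a success interval and an owned-conditions slice; computing the ctrl.Result is delegated to a RuntimeResultBuilder, with AlwaysRequeueResultBuilder covering the source reconcilers that requeue at the object's interval. As a hedged sketch of the other case mentioned in the ResultSuccess comment, a reconciler that does not requeue on success, a builder along these lines could satisfy the interface; the type name and package are invented for illustration:

package example

import (
    ctrl "sigs.k8s.io/controller-runtime"

    serror "github.com/fluxcd/source-controller/internal/error"
    sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
)

// NoRequeueResultBuilder is a hypothetical RuntimeResultBuilder for a
// reconciler that only requeues when explicitly requested, or when a Waiting
// error carries its own retry period.
type NoRequeueResultBuilder struct{}

func (NoRequeueResultBuilder) BuildRuntimeResult(rr sreconcile.Result, err error) ctrl.Result {
    // Waiting errors are not returned to the runtime, but still express a delay.
    if e, ok := err.(*serror.Waiting); ok {
        return ctrl.Result{RequeueAfter: e.RequeueAfter}
    }
    if rr == sreconcile.ResultRequeue {
        return ctrl.Result{Requeue: true}
    }
    // ResultSuccess and ResultEmpty: an empty result, no periodic requeue.
    return ctrl.Result{}
}
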
+func TestComputeReconcileResult(t *testing.T) { + testSuccessInterval := time.Minute + tests := []struct { + name string + result Result + beforeFunc func(obj conditions.Setter) + recErr error + wantResult ctrl.Result + wantErr bool + assertConditions []metav1.Condition + afterFunc func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) + }{ + { + name: "successful result", + result: ResultSuccess, + recErr: nil, + wantResult: ctrl.Result{RequeueAfter: testSuccessInterval}, + wantErr: false, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeTrue()) + }, + }, + { + name: "successful result, Reconciling=True, remove Reconciling", + result: ResultSuccess, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkReconciling(obj, "NewRevision", "new revision") + }, + recErr: nil, + wantResult: ctrl.Result{RequeueAfter: testSuccessInterval}, + wantErr: false, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeTrue()) + t.Expect(conditions.IsUnknown(obj, meta.ReconcilingCondition)).To(BeTrue()) + }, + }, + { + name: "successful result, Stalled=True, remove Stalled", + result: ResultSuccess, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkStalled(obj, "SomeReason", "some message") + }, + recErr: nil, + wantResult: ctrl.Result{RequeueAfter: testSuccessInterval}, + wantErr: false, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeTrue()) + t.Expect(conditions.IsUnknown(obj, meta.StalledCondition)).To(BeTrue()) + }, + }, + { + name: "requeue result", + result: ResultRequeue, + recErr: nil, + wantResult: ctrl.Result{Requeue: true}, + wantErr: false, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse()) + }, + }, + { + name: "requeue result", + result: ResultRequeue, + recErr: nil, + wantResult: ctrl.Result{Requeue: true}, + wantErr: false, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse()) + }, + }, + { + name: "stalling error", + result: ResultEmpty, + recErr: &serror.Stalling{Err: fmt.Errorf("some error"), Reason: "some reason"}, + wantResult: ctrl.Result{}, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.StalledCondition, "some reason", "some error"), + }, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeTrue()) + }, + }, + { + name: "waiting error", + result: ResultEmpty, + recErr: &serror.Waiting{Err: fmt.Errorf("some error"), Reason: "some reason"}, + wantResult: ctrl.Result{}, + wantErr: false, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse()) + }, + }, + { + name: "random error", + result: ResultEmpty, + recErr: fmt.Errorf("some error"), + wantResult: ctrl.Result{}, + wantErr: true, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse()) + }, + }, + { + name: "random error, Stalled=True, remove Stalled", + result: ResultEmpty, + recErr: fmt.Errorf("some 
error"), + wantResult: ctrl.Result{}, + wantErr: true, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse()) + t.Expect(conditions.IsUnknown(obj, meta.StalledCondition)).To(BeTrue()) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{} + obj.Name = "test-git-repo" + obj.Namespace = "default" + obj.Spec.Interval = metav1.Duration{Duration: testSuccessInterval} + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + rb := AlwaysRequeueResultBuilder{RequeueAfter: obj.Spec.Interval.Duration} + pOpts, result, err := ComputeReconcileResult(obj, tt.result, tt.recErr, rb) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(result).To(Equal(tt.wantResult)) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + opts := &patch.HelperOptions{} + for _, o := range pOpts { + o.ApplyToHelper(opts) + } + tt.afterFunc(g, obj, opts) + }) + } +} diff --git a/internal/reconcile/summarize/matchers_test.go b/internal/reconcile/summarize/matchers_test.go new file mode 100644 index 000000000..b71aa99c8 --- /dev/null +++ b/internal/reconcile/summarize/matchers_test.go @@ -0,0 +1,99 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package summarize + +import ( + "fmt" + + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/fluxcd/source-controller/internal/object" +) + +// HaveStatusObservedGeneration returns a custom matcher to check if a +// runtime.Object has a given status observedGeneration value. +func HaveStatusObservedGeneration(expected int64) types.GomegaMatcher { + return &haveStatusObservedGeneration{ + expected: expected, + } +} + +type haveStatusObservedGeneration struct { + expected int64 + actual int64 +} + +func (m *haveStatusObservedGeneration) Match(actual interface{}) (success bool, err error) { + obj, ok := actual.(runtime.Object) + if !ok { + return false, fmt.Errorf("actual should be a runtime object") + } + + og, err := object.GetStatusObservedGeneration(obj) + if err != nil && err != object.ErrObservedGenerationNotFound { + return false, err + } + m.actual = og + + return Equal(m.expected).Match(og) +} + +func (m *haveStatusObservedGeneration) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%d\nto match\n\t%d\n", m.actual, m.expected) +} + +func (m *haveStatusObservedGeneration) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%d\nto not match\n\t%d\n", m.actual, m.expected) +} + +// HaveStatusLastHandledReconcileAt returns a custom matcher to check if a +// runtime.Object has a given status lastHandledReconcileAt value. 
+func HaveStatusLastHandledReconcileAt(expected string) types.GomegaMatcher { + return &haveStatusLastHandledReconcileAt{ + expected: expected, + } +} + +type haveStatusLastHandledReconcileAt struct { + expected string + actual string +} + +func (m *haveStatusLastHandledReconcileAt) Match(actual interface{}) (success bool, err error) { + obj, ok := actual.(runtime.Object) + if !ok { + return false, fmt.Errorf("actual should be a runtime object") + } + + ra, err := object.GetStatusLastHandledReconcileAt(obj) + if err != nil && err != object.ErrLastHandledReconcileAtNotFound { + return false, err + } + m.actual = ra + + return Equal(m.expected).Match(ra) +} + +func (m *haveStatusLastHandledReconcileAt) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%s\nto match\n\t%s\n", m.actual, m.expected) +} + +func (m *haveStatusLastHandledReconcileAt) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%s\nto not match\n\t%s\n", m.actual, m.expected) +} diff --git a/internal/reconcile/summarize/processor.go b/internal/reconcile/summarize/processor.go new file mode 100644 index 000000000..54e135e47 --- /dev/null +++ b/internal/reconcile/summarize/processor.go @@ -0,0 +1,66 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package summarize + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + kuberecorder "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/fluxcd/pkg/apis/meta" + serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/object" + "github.com/fluxcd/source-controller/internal/reconcile" +) + +// ResultProcessor processes the results of reconciliation (the object, result +// and error). Any errors during processing need not result in the +// reconciliation failure. The errors can be recorded as logs and events. +type ResultProcessor func(context.Context, kuberecorder.EventRecorder, client.Object, reconcile.Result, error) + +// RecordContextualError is a ResultProcessor that records the contextual errors +// based on their types. +// An event is recorded for the errors that are returned to the runtime. The +// runtime handles the logging of the error. +// An event is recorded and an error is logged for errors that are known to be +// swallowed, not returned to the runtime. +func RecordContextualError(ctx context.Context, recorder kuberecorder.EventRecorder, obj client.Object, _ reconcile.Result, err error) { + switch e := err.(type) { + case *serror.Event: + recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error()) + case *serror.Waiting: + // Waiting errors are not returned to the runtime. Log it explicitly. 
+ ctrl.LoggerFrom(ctx).Info("reconciliation waiting", "reason", e.Err, "duration", e.RequeueAfter) + recorder.Event(obj, corev1.EventTypeNormal, e.Reason, e.Error()) + case *serror.Stalling: + // Stalling errors are not returned to the runtime. Log it explicitly. + ctrl.LoggerFrom(ctx).Error(e, "reconciliation stalled") + recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error()) + } +} + +// RecordReconcileReq is a ResultProcessor that checks the reconcile +// annotation value and sets it in the object status as +// status.lastHandledReconcileAt. +func RecordReconcileReq(ctx context.Context, recorder kuberecorder.EventRecorder, obj client.Object, _ reconcile.Result, _ error) { + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + object.SetStatusLastHandledReconcileAt(obj, v) + } +} diff --git a/internal/reconcile/summarize/processor_test.go b/internal/reconcile/summarize/processor_test.go new file mode 100644 index 000000000..dc6765d83 --- /dev/null +++ b/internal/reconcile/summarize/processor_test.go @@ -0,0 +1,91 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package summarize + +import ( + "context" + "testing" + + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/fluxcd/pkg/apis/meta" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + "github.com/fluxcd/source-controller/internal/object" + "github.com/fluxcd/source-controller/internal/reconcile" +) + +func TestRecordReconcileReq(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj client.Object) + afterFunc func(t *WithT, obj client.Object) + }{ + { + name: "no reconcile req", + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("")) + }, + }, + { + name: "no reconcile req, noop on existing value", + beforeFunc: func(obj client.Object) { + object.SetStatusLastHandledReconcileAt(obj, "zzz") + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("zzz")) + }, + }, + { + name: "with reconcile req", + beforeFunc: func(obj client.Object) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("now")) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-obj", + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + RecordReconcileReq(ctx, record.NewFakeRecorder(32), obj, reconcile.ResultEmpty, nil) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj) + } + }) + } +} diff --git a/internal/reconcile/summarize/summary.go b/internal/reconcile/summarize/summary.go new file mode 100644 index 000000000..0ba76715f 
--- /dev/null +++ b/internal/reconcile/summarize/summary.go @@ -0,0 +1,204 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package summarize + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + kerrors "k8s.io/apimachinery/pkg/util/errors" + kuberecorder "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + + "github.com/fluxcd/source-controller/internal/reconcile" +) + +// Conditions contains all the conditions information needed to summarize the +// target condition. +type Conditions struct { + // Target is the target condition, e.g.: Ready. + Target string + // Owned conditions are the conditions owned by the reconciler for this + // target condition. + Owned []string + // Summarize conditions are the conditions that the target condition depends + // on. + Summarize []string + // NegativePolarity conditions are the conditions in Summarize with negative + // polarity. + NegativePolarity []string +} + +// Helper is SummarizeAndPatch helper. +type Helper struct { + recorder kuberecorder.EventRecorder + patchHelper *patch.Helper +} + +// NewHelper returns an initialized Helper. +func NewHelper(recorder kuberecorder.EventRecorder, patchHelper *patch.Helper) *Helper { + return &Helper{ + recorder: recorder, + patchHelper: patchHelper, + } +} + +// HelperOptions contains options for SummarizeAndPatch. +// Summarizing and patching at the very end of a reconciliation involves +// computing the result of the reconciler. This requires providing the +// ReconcileResult, ReconcileError and a ResultBuilder in the context of the +// reconciliation. +// For using this to perform intermediate patching in the middle of a +// reconciliation, no ReconcileResult, ReconcileError or ResultBuilder should +// be provided. Only Conditions summary would be calculated and patched. +type HelperOptions struct { + // Conditions are conditions that needs to be summarized and persisted on + // the object. + Conditions []Conditions + // Processors are chain of ResultProcessors for processing the results. This + // can be used to analyze and modify the results. This enables injecting + // custom middlewares in the SummarizeAndPatch operation. + Processors []ResultProcessor + // IgnoreNotFound can be used to ignores any resource not found error during + // patching. + IgnoreNotFound bool + // ReconcileResult is the abstracted result of reconciliation. + ReconcileResult reconcile.Result + // ReconcileError is the reconciliation error. + ReconcileError error + // ResultBuilder defines how the reconciliation result is computed. + ResultBuilder reconcile.RuntimeResultBuilder +} + +// Option is configuration that modifies SummarizeAndPatch. +type Option func(*HelperOptions) + +// WithConditions sets the Conditions for which summary is calculated in +// SummarizeAndPatch. 
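// For illustration only (not part of this patch): a minimal sketch of how a
// reconciler could wire these pieces together. The variable names
// (readyConditions, summarizeHelper, recResult, recErr) are made up for the
// example; the option and type names are the ones defined in this package.
//
//	// A condition group summarized into the Ready condition.
//	var readyConditions = Conditions{
//		Target:           meta.ReadyCondition,
//		Owned:            []string{meta.ReadyCondition, meta.ReconcilingCondition, meta.StalledCondition},
//		Summarize:        []string{meta.ReconcilingCondition, meta.StalledCondition},
//		NegativePolarity: []string{meta.ReconcilingCondition, meta.StalledCondition},
//	}
//
//	// At the very end of a reconciliation: pass the result, error and a
//	// result builder so the final result is computed and patched.
//	result, retErr := summarizeHelper.SummarizeAndPatch(ctx, obj,
//		WithConditions(readyConditions),
//		WithReconcileResult(recResult),
//		WithReconcileError(recErr),
//		WithResultBuilder(reconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.Spec.Interval.Duration}),
//		WithProcessors(RecordContextualError, RecordReconcileReq),
//	)
//
//	// In the middle of a reconciliation: only the condition summary is
//	// calculated and patched; the returned result can be ignored.
//	_, _ = summarizeHelper.SummarizeAndPatch(ctx, obj, WithConditions(readyConditions))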
+func WithConditions(condns ...Conditions) Option { + return func(s *HelperOptions) { + s.Conditions = append(s.Conditions, condns...) + } +} + +// WithProcessors can be used to inject middlewares in the SummarizeAndPatch +// process, to be executed before the result calculation and patching. +func WithProcessors(rps ...ResultProcessor) Option { + return func(s *HelperOptions) { + s.Processors = append(s.Processors, rps...) + } +} + +// WithIgnoreNotFound skips any resource not found error during patching. +func WithIgnoreNotFound() Option { + return func(s *HelperOptions) { + s.IgnoreNotFound = true + } +} + +// WithResultBuilder sets the strategy for result computation in +// SummarizeAndPatch. +func WithResultBuilder(rb reconcile.RuntimeResultBuilder) Option { + return func(s *HelperOptions) { + s.ResultBuilder = rb + } +} + +// WithReconcileResult sets the value of input result used to calculate the +// results of reconciliation in SummarizeAndPatch. +func WithReconcileResult(rr reconcile.Result) Option { + return func(s *HelperOptions) { + s.ReconcileResult = rr + } +} + +// WithReconcileError sets the value of input error used to calculate the +// results reconciliation in SummarizeAndPatch. +func WithReconcileError(re error) Option { + return func(s *HelperOptions) { + s.ReconcileError = re + } +} + +// SummarizeAndPatch summarizes and patches the result to the target object. +// When used at the very end of a reconciliation, the result builder must be +// specified using the Option WithResultBuilder(). The returned result and error +// can be returned as the return values of the reconciliation. +// When used in the middle of a reconciliation, no result builder should be set +// and the result can be ignored. +func (h *Helper) SummarizeAndPatch(ctx context.Context, obj conditions.Setter, options ...Option) (ctrl.Result, error) { + // Calculate the options. + opts := &HelperOptions{} + for _, o := range options { + o(opts) + } + // Combined the owned conditions of all the conditions for the patcher. + ownedConditions := []string{} + for _, c := range opts.Conditions { + ownedConditions = append(ownedConditions, c.Owned...) + } + // Patch the object, prioritizing the conditions owned by the controller in + // case of any conflicts. + patchOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: ownedConditions, + }, + } + + // Process the results of reconciliation. + for _, processor := range opts.Processors { + processor(ctx, h.recorder, obj, opts.ReconcileResult, opts.ReconcileError) + } + + var result ctrl.Result + var recErr error + if opts.ResultBuilder != nil { + // Compute the reconcile results, obtain patch options and reconcile error. + var pOpts []patch.Option + pOpts, result, recErr = reconcile.ComputeReconcileResult(obj, opts.ReconcileResult, opts.ReconcileError, opts.ResultBuilder) + patchOpts = append(patchOpts, pOpts...) + } + + // Summarize conditions. This must be performed only after computing the + // reconcile result, since the object status is adjusted based on the + // reconcile result and error. + for _, c := range opts.Conditions { + conditions.SetSummary(obj, + c.Target, + conditions.WithConditions( + c.Summarize..., + ), + conditions.WithNegativePolarityConditions( + c.NegativePolarity..., + ), + ) + } + + // Finally, patch the resource. + if err := h.patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted. 
+ if opts.IgnoreNotFound && !obj.GetDeletionTimestamp().IsZero() { + err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) + } + recErr = kerrors.NewAggregate([]error{recErr, err}) + } + + return result, recErr +} diff --git a/internal/reconcile/summarize/summary_test.go b/internal/reconcile/summarize/summary_test.go new file mode 100644 index 000000000..7d48ff493 --- /dev/null +++ b/internal/reconcile/summarize/summary_test.go @@ -0,0 +1,396 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package summarize + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/darkowlzz/controller-check/status" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/reconcile" +) + +// This tests the scenario where SummarizeAndPatch is used at the very end of a +// reconciliation. +func TestSummarizeAndPatch(t *testing.T) { + var testReadyConditions = Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + } + var testFooConditions = Conditions{ + Target: "Foo", + Owned: []string{ + "Foo", + "AAA", + "BBB", + }, + Summarize: []string{ + "AAA", + "BBB", + }, + NegativePolarity: []string{ + "BBB", + }, + } + + tests := []struct { + name string + generation int64 + beforeFunc func(obj conditions.Setter) + result reconcile.Result + reconcileErr error + conditions []Conditions + wantErr bool + afterFunc func(t *WithT, obj client.Object) + assertConditions []metav1.Condition + }{ + // Success/Fail indicates if a reconciliation succeeded or failed. On + // a successful reconciliation, the object generation is expected to + // match the observed generation in the object status. + // All the cases have some Ready condition set, even if a test case is + // unrelated to the conditions, because it's neseccary for a valid + // status. 
+ { + name: "Success, no extra conditions", + generation: 4, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") + }, + conditions: []Conditions{testReadyConditions}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusObservedGeneration(4)) + }, + }, + { + name: "Success, Ready=True", + generation: 5, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "created") + }, + conditions: []Conditions{testReadyConditions}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "created"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusObservedGeneration(5)) + }, + }, + { + name: "Success, removes reconciling for successful result", + generation: 2, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkReconciling(obj, "NewRevision", "new index version") + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "stored artifact") + }, + conditions: []Conditions{testReadyConditions}, + result: reconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusObservedGeneration(2)) + }, + }, + { + name: "Success, record reconciliation request", + beforeFunc: func(obj conditions.Setter) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") + }, + generation: 3, + conditions: []Conditions{testReadyConditions}, + result: reconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("now")) + t.Expect(obj).To(HaveStatusObservedGeneration(3)) + }, + }, + { + name: "Fail, with multiple conditions ArtifactOutdated=True,Reconciling=True", + generation: 7, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") + conditions.MarkReconciling(obj, "NewRevision", "new index revision") + }, + conditions: []Conditions{testReadyConditions}, + reconcileErr: fmt.Errorf("failed to create dir"), + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).ToNot(HaveStatusObservedGeneration(7)) + }, + }, + { + name: "Success, with subreconciler stalled error", + generation: 9, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.FetchFailedCondition, "failed to construct client") + }, + conditions: []Conditions{testReadyConditions}, + reconcileErr: &serror.Stalling{Err: fmt.Errorf("some error"), 
Reason: "some reason"}, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.FetchFailedCondition, "failed to construct client"), + *conditions.TrueCondition(meta.StalledCondition, "some reason", "some error"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.FetchFailedCondition, "failed to construct client"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusObservedGeneration(9)) + }, + }, + { + name: "Fail, no error but requeue requested", + generation: 3, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "test-msg") + }, + conditions: []Conditions{testReadyConditions}, + result: reconcile.ResultRequeue, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, meta.FailedReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).ToNot(HaveStatusObservedGeneration(3)) + }, + }, + { + name: "Success, multiple conditions summary", + generation: 3, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") + conditions.MarkTrue(obj, "AAA", "ZZZ", "zzz") // Positive polarity True. + conditions.MarkTrue(obj, "BBB", "YYY", "yyy") // Negative polarity True. + }, + conditions: []Conditions{testReadyConditions, testFooConditions}, + result: reconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), + *conditions.FalseCondition("Foo", "YYY", "yyy"), // False summary. + *conditions.TrueCondition("BBB", "YYY", "yyy"), + *conditions.TrueCondition("AAA", "ZZZ", "zzz"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + scheme := runtime.NewScheme() + g.Expect(sourcev1.AddToScheme(scheme)) + + builder := fakeclient.NewClientBuilder().WithScheme(scheme) + client := builder.Build() + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: tt.generation, + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: 5 * time.Second}, + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + g.Expect(client.Create(ctx, obj)).To(Succeed()) + patchHelper, err := patch.NewHelper(obj, client) + g.Expect(err).ToNot(HaveOccurred()) + + summaryHelper := NewHelper(record.NewFakeRecorder(32), patchHelper) + summaryOpts := []Option{ + WithReconcileResult(tt.result), + WithReconcileError(tt.reconcileErr), + WithConditions(tt.conditions...), + WithIgnoreNotFound(), + WithProcessors(RecordContextualError, RecordReconcileReq), + WithResultBuilder(reconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.Spec.Interval.Duration}), + } + _, gotErr := summaryHelper.SummarizeAndPatch(ctx, obj, summaryOpts...) + g.Expect(gotErr != nil).To(Equal(tt.wantErr)) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj) + } + + // Check if the object status is valid as per kstatus. + condns := &status.Conditions{NegativePolarity: testReadyConditions.NegativePolarity} + checker := status.NewChecker(client, scheme, condns) + checker.CheckErr(ctx, obj) + }) + } +} + +// This tests the scenario where SummarizeAndPatch is used in the middle of +// reconciliation. 
+func TestSummarizeAndPatch_Intermediate(t *testing.T) { + var testStageAConditions = Conditions{ + Target: "StageA", + Owned: []string{"StageA", "A1", "A2", "A3"}, + Summarize: []string{"A1", "A2", "A3"}, + NegativePolarity: []string{"A3"}, + } + var testStageBConditions = Conditions{ + Target: "StageB", + Owned: []string{"StageB", "B1", "B2"}, + Summarize: []string{"B1", "B2"}, + NegativePolarity: []string{"B1"}, + } + + tests := []struct { + name string + conditions []Conditions + beforeFunc func(obj conditions.Setter) + assertConditions []metav1.Condition + }{ + { + name: "single Conditions, True summary", + conditions: []Conditions{testStageAConditions}, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, "A1", "ZZZ", "zzz") // Positive polarity True. + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition("StageA", "ZZZ", "zzz"), // True summary. + *conditions.TrueCondition("A1", "ZZZ", "zzz"), + }, + }, + { + name: "single Conditions, False summary", + conditions: []Conditions{testStageAConditions}, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, "A1", "ZZZ", "zzz") // Positive polarity True. + conditions.MarkTrue(obj, "A3", "OOO", "ooo") // Negative polarity True. + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition("StageA", "OOO", "ooo"), // False summary. + *conditions.TrueCondition("A3", "OOO", "ooo"), + *conditions.TrueCondition("A1", "ZZZ", "zzz"), + }, + }, + { + name: "multiple Conditions", + conditions: []Conditions{testStageAConditions, testStageBConditions}, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, "A3", "ZZZ", "zzz") // Negative polarity True. + conditions.MarkTrue(obj, "B2", "RRR", "rrr") // Positive polarity True. + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition("StageA", "ZZZ", "zzz"), // False summary. + *conditions.TrueCondition("A3", "ZZZ", "zzz"), + *conditions.TrueCondition("StageB", "RRR", "rrr"), // True summary. + *conditions.TrueCondition("B2", "RRR", "rrr"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + scheme := runtime.NewScheme() + g.Expect(sourcev1.AddToScheme(scheme)) + + builder := fakeclient.NewClientBuilder().WithScheme(scheme) + kclient := builder.Build() + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: 5 * time.Second}, + }, + Status: sourcev1.GitRepositoryStatus{ + Conditions: []metav1.Condition{ + *conditions.FalseCondition("StageA", "QQQ", "qqq"), + }, + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + g.Expect(kclient.Create(ctx, obj)).To(Succeed()) + patchHelper, err := patch.NewHelper(obj, kclient) + g.Expect(err).ToNot(HaveOccurred()) + + summaryHelper := NewHelper(record.NewFakeRecorder(32), patchHelper) + summaryOpts := []Option{ + WithConditions(tt.conditions...), + } + _, err = summaryHelper.SummarizeAndPatch(ctx, obj, summaryOpts...) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} From 45df2d76c82988d3fd7d6385d6d935736f09bffa Mon Sep 17 00:00:00 2001 From: Sunny Date: Wed, 9 Feb 2022 14:19:54 +0530 Subject: [PATCH 56/63] Update API descriptions and messages to be consistent - Update v1beta2 API descriptions and reconciling messages to be consistent. - Replace 'download' with 'fetch'. 
Since the status condition for download failure is called FetchFailed, using the term 'fetch' makes the messaging more consistent. - Replace `BucketOperationSucceed` with `BucketOperationSucceeded` and generate api docs. Signed-off-by: Sunny --- api/v1beta2/artifact_types.go | 8 ++- api/v1beta2/bucket_types.go | 12 ++--- api/v1beta2/gitrepository_types.go | 2 +- api/v1beta2/helmchart_types.go | 2 +- api/v1beta2/helmrepository_types.go | 4 +- .../source.toolkit.fluxcd.io_buckets.yaml | 13 +++-- ...rce.toolkit.fluxcd.io_gitrepositories.yaml | 23 +++++--- .../source.toolkit.fluxcd.io_helmcharts.yaml | 11 ++-- ...ce.toolkit.fluxcd.io_helmrepositories.yaml | 13 +++-- controllers/bucket_controller.go | 14 ++--- controllers/gitrepository_controller.go | 2 +- controllers/helmchart_controller.go | 2 +- controllers/helmchart_controller_test.go | 2 +- controllers/helmrepository_controller.go | 4 +- controllers/helmrepository_controller_test.go | 2 +- docs/api/source.md | 52 +++++++++++++++---- 16 files changed, 113 insertions(+), 53 deletions(-) diff --git a/api/v1beta2/artifact_types.go b/api/v1beta2/artifact_types.go index c1f6ab877..363f79b1a 100644 --- a/api/v1beta2/artifact_types.go +++ b/api/v1beta2/artifact_types.go @@ -23,13 +23,17 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// Artifact represents the output of a source synchronisation. +// Artifact represents the output of a Source synchronisation. type Artifact struct { - // Path is the relative file path of this artifact. + // Path is the relative file path of this Artifact. + // It can be used to locate the Artifact file in the root of the Artifact + // storage on the local file system of the controller managing the Source. // +required Path string `json:"path"` // URL is the HTTP address of this artifact. + // It is used by the consumers of the artifacts to fetch and use the + // artifacts. It is expected to be resolvable from within the cluster. // +required URL string `json:"url"` diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go index 7f403b4b8..42c23b480 100644 --- a/api/v1beta2/bucket_types.go +++ b/api/v1beta2/bucket_types.go @@ -69,7 +69,7 @@ type BucketSpec struct { // +required Interval metav1.Duration `json:"interval"` - // The timeout for download operations, defaults to 60s. + // The timeout for fetch operations, defaults to 60s. // +kubebuilder:default="60s" // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` @@ -99,7 +99,7 @@ type BucketStatus struct { // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` - // URL is the download link for the artifact output of the last Bucket sync. + // URL is the fetch link for the artifact output of the last Bucket sync. // +optional URL string `json:"url,omitempty"` @@ -111,12 +111,12 @@ type BucketStatus struct { } const ( - // BucketOperationSucceedReason represents the fact that the bucket listing and - // download operations succeeded. - BucketOperationSucceedReason string = "BucketOperationSucceed" + // BucketOperationSucceededReason represents the fact that the bucket listing and + // fetch operations succeeded. + BucketOperationSucceededReason string = "BucketOperationSucceeded" // BucketOperationFailedReason represents the fact that the bucket listing or - // download operations failed. + // fetch operations failed. 
BucketOperationFailedReason string = "BucketOperationFailed" ) diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go index 4b811bfee..8910cd17f 100644 --- a/api/v1beta2/gitrepository_types.go +++ b/api/v1beta2/gitrepository_types.go @@ -168,7 +168,7 @@ type GitRepositoryStatus struct { // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` - // URL is the download link for the artifact output of the last repository sync. + // URL is the fetch link for the artifact output of the last repository sync. // +optional URL string `json:"url,omitempty"` diff --git a/api/v1beta2/helmchart_types.go b/api/v1beta2/helmchart_types.go index 4852e0a79..6abe8ed21 100644 --- a/api/v1beta2/helmchart_types.go +++ b/api/v1beta2/helmchart_types.go @@ -127,7 +127,7 @@ type HelmChartStatus struct { // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` - // URL is the download link for the last chart pulled. + // URL is the fetch link for the last chart pulled. // +optional URL string `json:"url,omitempty"` diff --git a/api/v1beta2/helmrepository_types.go b/api/v1beta2/helmrepository_types.go index 4cc6bd260..6a9bea967 100644 --- a/api/v1beta2/helmrepository_types.go +++ b/api/v1beta2/helmrepository_types.go @@ -61,7 +61,7 @@ type HelmRepositorySpec struct { // +required Interval metav1.Duration `json:"interval"` - // The timeout of index downloading, defaults to 60s. + // The timeout of index fetching, defaults to 60s. // +kubebuilder:default:="60s" // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` @@ -85,7 +85,7 @@ type HelmRepositoryStatus struct { // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` - // URL is the download link for the last index fetched. + // URL is the fetch link for the last index fetched. // +optional URL string `json:"url,omitempty"` diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index 55fd59894..2abd9a2b9 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -356,7 +356,7 @@ spec: type: boolean timeout: default: 60s - description: The timeout for download operations, defaults to 60s. + description: The timeout for fetch operations, defaults to 60s. type: string required: - bucketName @@ -381,7 +381,10 @@ spec: format: date-time type: string path: - description: Path is the relative file path of this artifact. + description: Path is the relative file path of this Artifact. + It can be used to locate the Artifact file in the root of the + Artifact storage on the local file system of the controller + managing the Source. type: string revision: description: Revision is a human readable identifier traceable @@ -389,7 +392,9 @@ spec: tag, a Helm index timestamp, a Helm chart version, etc. type: string url: - description: URL is the HTTP address of this artifact. + description: URL is the HTTP address of this artifact. It is used + by the consumers of the artifacts to fetch and use the artifacts. + It is expected to be resolvable from within the cluster. type: string required: - path @@ -475,7 +480,7 @@ spec: format: int64 type: integer url: - description: URL is the download link for the artifact output of the + description: URL is the fetch link for the artifact output of the last Bucket sync. 
type: string type: object diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index 47c99328e..905f1ae19 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -549,7 +549,10 @@ spec: format: date-time type: string path: - description: Path is the relative file path of this artifact. + description: Path is the relative file path of this Artifact. + It can be used to locate the Artifact file in the root of the + Artifact storage on the local file system of the controller + managing the Source. type: string revision: description: Revision is a human readable identifier traceable @@ -557,7 +560,9 @@ spec: tag, a Helm index timestamp, a Helm chart version, etc. type: string url: - description: URL is the HTTP address of this artifact. + description: URL is the HTTP address of this artifact. It is used + by the consumers of the artifacts to fetch and use the artifacts. + It is expected to be resolvable from within the cluster. type: string required: - path @@ -637,7 +642,7 @@ spec: description: IncludedArtifacts represents the included artifacts from the last successful repository sync. items: - description: Artifact represents the output of a source synchronisation. + description: Artifact represents the output of a Source synchronisation. properties: checksum: description: Checksum is the SHA256 checksum of the artifact. @@ -648,7 +653,10 @@ spec: format: date-time type: string path: - description: Path is the relative file path of this artifact. + description: Path is the relative file path of this Artifact. + It can be used to locate the Artifact file in the root of + the Artifact storage on the local file system of the controller + managing the Source. type: string revision: description: Revision is a human readable identifier traceable @@ -656,7 +664,10 @@ spec: tag, a Helm index timestamp, a Helm chart version, etc. type: string url: - description: URL is the HTTP address of this artifact. + description: URL is the HTTP address of this artifact. It is + used by the consumers of the artifacts to fetch and use the + artifacts. It is expected to be resolvable from within the + cluster. type: string required: - path @@ -673,7 +684,7 @@ spec: format: int64 type: integer url: - description: URL is the download link for the artifact output of the + description: URL is the fetch link for the artifact output of the last repository sync. type: string type: object diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index dbf29410c..7dc2ece79 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -428,7 +428,10 @@ spec: format: date-time type: string path: - description: Path is the relative file path of this artifact. + description: Path is the relative file path of this Artifact. + It can be used to locate the Artifact file in the root of the + Artifact storage on the local file system of the controller + managing the Source. type: string revision: description: Revision is a human readable identifier traceable @@ -436,7 +439,9 @@ spec: tag, a Helm index timestamp, a Helm chart version, etc. type: string url: - description: URL is the HTTP address of this artifact. + description: URL is the HTTP address of this artifact. 
It is used + by the consumers of the artifacts to fetch and use the artifacts. + It is expected to be resolvable from within the cluster. type: string required: - path @@ -530,7 +535,7 @@ spec: of the Source reference. type: string url: - description: URL is the download link for the last chart pulled. + description: URL is the fetch link for the last chart pulled. type: string type: object type: object diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml index cb8f6c411..e951fbd30 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml @@ -326,7 +326,7 @@ spec: type: boolean timeout: default: 60s - description: The timeout of index downloading, defaults to 60s. + description: The timeout of index fetching, defaults to 60s. type: string url: description: The Helm repository URL, a valid URL contains at least @@ -354,7 +354,10 @@ spec: format: date-time type: string path: - description: Path is the relative file path of this artifact. + description: Path is the relative file path of this Artifact. + It can be used to locate the Artifact file in the root of the + Artifact storage on the local file system of the controller + managing the Source. type: string revision: description: Revision is a human readable identifier traceable @@ -362,7 +365,9 @@ spec: tag, a Helm index timestamp, a Helm chart version, etc. type: string url: - description: URL is the HTTP address of this artifact. + description: URL is the HTTP address of this artifact. It is used + by the consumers of the artifacts to fetch and use the artifacts. + It is expected to be resolvable from within the cluster. type: string required: - path @@ -448,7 +453,7 @@ spec: format: int64 type: integer url: - description: URL is the download link for the last index fetched. + description: URL is the fetch link for the last index fetched. type: string type: object type: object diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index f20679519..a152523dc 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -196,7 +196,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res // error. 
func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, reconcilers []bucketReconcilerFunc) (sreconcile.Result, error) { if obj.Generation != obj.Status.ObservedGeneration { - conditions.MarkReconciling(obj, "NewGeneration", "reconciling new generation %d", obj.Generation) + conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) } var artifact sourcev1.Artifact @@ -432,14 +432,14 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source }) if err = group.Wait(); err != nil { e := &serror.Event{ - Err: fmt.Errorf("download from bucket '%s' failed: %w", obj.Spec.BucketName, err), + Err: fmt.Errorf("fetch from bucket '%s' failed: %w", obj.Spec.BucketName, err), Reason: sourcev1.BucketOperationFailedReason, } conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) return sreconcile.ResultEmpty, e } - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.BucketOperationSucceedReason, - "downloaded %d files with revision '%s' from '%s'", len(index), revision, obj.Spec.BucketName) + r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.BucketOperationSucceededReason, + "fetched %d files with revision '%s' from '%s'", len(index), revision, obj.Spec.BucketName) } conditions.Delete(obj, sourcev1.FetchFailedCondition) @@ -587,14 +587,14 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 }) if err = group.Wait(); err != nil { e := &serror.Event{ - Err: fmt.Errorf("download from bucket '%s' failed: %w", obj.Spec.BucketName, err), + Err: fmt.Errorf("fetch from bucket '%s' failed: %w", obj.Spec.BucketName, err), Reason: sourcev1.BucketOperationFailedReason, } conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) return sreconcile.ResultEmpty, e } - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.BucketOperationSucceedReason, - "downloaded %d files from bucket '%s'", len(index), obj.Spec.BucketName) + r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.BucketOperationSucceededReason, + "fetched %d files from bucket '%s'", len(index), obj.Spec.BucketName) } conditions.Delete(obj, sourcev1.FetchFailedCondition) diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index a494c4f92..8c02f117f 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -204,7 +204,7 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques // error. func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.GitRepository, reconcilers []gitRepoReconcilerFunc) (sreconcile.Result, error) { if obj.Generation != obj.Status.ObservedGeneration { - conditions.MarkReconciling(obj, "NewGeneration", "reconciling new generation %d", obj.Generation) + conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) } var artifact sourcev1.Artifact diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index f93d4590b..f921fe9a8 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -227,7 +227,7 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // produces an error. 
func (r *HelmChartReconciler) reconcile(ctx context.Context, obj *sourcev1.HelmChart, reconcilers []helmChartReconcilerFunc) (sreconcile.Result, error) { if obj.Generation != obj.Status.ObservedGeneration { - conditions.MarkReconciling(obj, "NewGeneration", "reconciling new generation %d", obj.Generation) + conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) } // Run the sub-reconcilers and build the result of reconciliation. diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index 70568f577..8944966aa 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -1376,7 +1376,7 @@ func TestHelmChartReconciler_reconcileSubRecs(t *testing.T) { wantResult: sreconcile.ResultSuccess, wantErr: false, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NewGeneration", "reconciling new generation 3"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewGeneration", "reconciling new object generation (3)"), }, }, { diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index 100f824e1..39b28e984 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -192,7 +192,7 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque // result with the shortest requeue period. func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.HelmRepository, reconcilers []helmRepoReconcilerFunc) (sreconcile.Result, error) { if obj.Generation != obj.Status.ObservedGeneration { - conditions.MarkReconciling(obj, "NewGeneration", "reconciling new generation %d", obj.Generation) + conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) } var chartRepo repository.ChartRepository @@ -328,7 +328,7 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, obj *sou checksum, err := newChartRepo.CacheIndex() if err != nil { e := &serror.Event{ - Err: fmt.Errorf("failed to download Helm repository index: %w", err), + Err: fmt.Errorf("failed to fetch Helm repository index: %w", err), Reason: meta.FailedReason, } conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FailedReason, e.Err.Error()) diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index 993657b46..9bcf5fd52 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -660,7 +660,7 @@ func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) { wantResult: sreconcile.ResultSuccess, wantErr: false, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NewGeneration", "reconciling new generation 3"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewGeneration", "reconciling new object generation (3)"), }, }, { diff --git a/docs/api/source.md b/docs/api/source.md index 597eae693..91ac4e946 100644 --- a/docs/api/source.md +++ b/docs/api/source.md @@ -171,7 +171,7 @@ Kubernetes meta/v1.Duration (Optional) -

The timeout for download operations, defaults to 60s.
+The timeout for fetch operations, defaults to 60s.
@@ -797,7 +797,7 @@ Kubernetes meta/v1.Duration
(Optional)
-The timeout of index downloading, defaults to 60s.
+The timeout of index fetching, defaults to 60s.
@@ -854,7 +854,7 @@ HelmRepositoryStatus HelmChartStatus, HelmRepositoryStatus)
-Artifact represents the output of a source synchronisation.
+Artifact represents the output of a Source synchronisation.
@@ -873,7 +873,9 @@ string @@ -884,7 +886,9 @@ string @@ -1045,7 +1049,7 @@ Kubernetes meta/v1.Duration @@ -1144,7 +1148,7 @@ string @@ -1543,7 +1547,7 @@ string @@ -1811,6 +1815,32 @@ int64 + + + + + + + + @@ -1956,7 +1986,7 @@ Kubernetes meta/v1.Duration @@ -2041,7 +2071,7 @@ string From acda9981507a3c2c7447d71e161f85e91711d1f3 Mon Sep 17 00:00:00 2001 From: Sunny Date: Tue, 15 Feb 2022 17:20:13 +0530 Subject: [PATCH 57/63] gitrepo: Use commit msg in NewArtifact message Use commit message in the NewArtifact event message to make it more user friendly. Signed-off-by: Sunny --- controllers/gitrepository_controller.go | 37 +++++++++++-------- controllers/gitrepository_controller_test.go | 39 +++++++++++--------- 2 files changed, 42 insertions(+), 34 deletions(-) diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 8c02f117f..8c4c31b93 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -107,7 +107,7 @@ type GitRepositoryReconcilerOptions struct { // gitRepoReconcilerFunc is the function type for all the Git repository // reconciler functions. -type gitRepoReconcilerFunc func(ctx context.Context, obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, includes *artifactSet, dir string) (sreconcile.Result, error) +type gitRepoReconcilerFunc func(ctx context.Context, obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) func (r *GitRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, GitRepositoryReconcilerOptions{}) @@ -207,7 +207,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.G conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) } - var artifact sourcev1.Artifact + var commit git.Commit var includes artifactSet // Create temp dir for Git clone @@ -224,7 +224,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.G var res sreconcile.Result var resErr error for _, rec := range reconcilers { - recResult, err := rec(ctx, obj, &artifact, &includes, tmpDir) + recResult, err := rec(ctx, obj, &commit, &includes, tmpDir) // Exit immediately on ResultRequeue. if recResult == sreconcile.ResultRequeue { return sreconcile.ResultRequeue, nil @@ -248,7 +248,8 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.G // If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. // If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. // If the hostname of any of the URLs on the object do not match the current storage server hostname, they are updated. 
-func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, includes *artifactSet, dir string) (sreconcile.Result, error) { +func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, + obj *sourcev1.GitRepository, _ *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -284,7 +285,7 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sou // If both the checkout and signature verification are successful, the given artifact pointer is set to a new artifact // with the available metadata. func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, - obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, _ *artifactSet, dir string) (sreconcile.Result, error) { + obj *sourcev1.GitRepository, commit *git.Commit, _ *artifactSet, dir string) (sreconcile.Result, error) { // Configure authentication strategy to access the source var authOpts *git.AuthOptions var err error @@ -344,7 +345,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, // Checkout HEAD of reference in object gitCtx, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() - commit, err := checkoutStrategy.Checkout(gitCtx, dir, obj.Spec.URL, authOpts) + c, err := checkoutStrategy.Checkout(gitCtx, dir, obj.Spec.URL, authOpts) if err != nil { e := &serror.Event{ Err: fmt.Errorf("failed to checkout and determine revision: %w", err), @@ -354,6 +355,8 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, // Coin flip on transient or persistent error, return error and hope for the best return sreconcile.ResultEmpty, e } + // Assign the commit to the shared commit reference. + *commit = *c ctrl.LoggerFrom(ctx).V(logger.DebugLevel).Info("git repository checked out", "url", obj.Spec.URL, "revision", commit.String()) conditions.Delete(obj, sourcev1.FetchFailedCondition) @@ -362,9 +365,6 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, return result, err } - // Create potential new artifact with current available metadata - *artifact = r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), commit.String(), fmt.Sprintf("%s.tar.gz", commit.Hash.String())) - // Mark observations about the revision on the object if !obj.GetArtifact().HasRevision(commit.String()) { message := fmt.Sprintf("new upstream revision '%s'", commit.String()) @@ -383,7 +383,11 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, // Source ignore patterns are loaded, and the given directory is archived. // On a successful archive, the artifact and includes in the status of the given object are set, and the symlink in the // storage is updated to its path. 
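// For illustration only (not part of this patch): with the reworked signature
// the artifact metadata is derived from the checked out commit instead of a
// pre-built artifact, roughly as follows (the "<reference>/<hash>" revision
// format is an assumption based on the tests in this change):
//
//	commit := git.Commit{Hash: []byte("b8e362c"), Reference: "refs/heads/main"}
//	revision := commit.String() // e.g. "main/b8e362c"
//	artifact := r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), revision,
//		fmt.Sprintf("%s.tar.gz", commit.Hash.String()))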
-func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.GitRepository, artifact *sourcev1.Artifact, includes *artifactSet, dir string) (sreconcile.Result, error) { +func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, + obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) { + // Create potential new artifact with current available metadata + artifact := r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), commit.String(), fmt.Sprintf("%s.tar.gz", commit.Hash.String())) + // Always restore the Ready condition in case it got removed due to a transient error defer func() { if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) { @@ -419,14 +423,14 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *so } // Ensure artifact directory exists and acquire lock - if err := r.Storage.MkdirAll(*artifact); err != nil { + if err := r.Storage.MkdirAll(artifact); err != nil { e := &serror.Event{ Err: fmt.Errorf("failed to create artifact directory: %w", err), Reason: sourcev1.StorageOperationFailedReason, } return sreconcile.ResultEmpty, e } - unlock, err := r.Storage.Lock(*artifact) + unlock, err := r.Storage.Lock(artifact) if err != nil { return sreconcile.ResultEmpty, &serror.Event{ Err: fmt.Errorf("failed to acquire lock for artifact: %w", err), @@ -448,7 +452,7 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *so } // Archive directory to storage - if err := r.Storage.Archive(artifact, dir, SourceIgnoreFilter(ps, nil)); err != nil { + if err := r.Storage.Archive(&artifact, dir, SourceIgnoreFilter(ps, nil)); err != nil { return sreconcile.ResultEmpty, &serror.Event{ Err: fmt.Errorf("unable to archive artifact to storage: %w", err), Reason: sourcev1.StorageOperationFailedReason, @@ -457,14 +461,14 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *so r.AnnotatedEventf(obj, map[string]string{ "revision": artifact.Revision, "checksum": artifact.Checksum, - }, corev1.EventTypeNormal, "NewArtifact", "stored artifact for revision '%s'", artifact.Revision) + }, corev1.EventTypeNormal, "NewArtifact", "stored artifact for commit '%s'", commit.ShortMessage()) // Record it on the object obj.Status.Artifact = artifact.DeepCopy() obj.Status.IncludedArtifacts = *includes // Update symlink on a "best effort" basis - url, err := r.Storage.Symlink(*artifact, "latest.tar.gz") + url, err := r.Storage.Symlink(artifact, "latest.tar.gz") if err != nil { r.eventLogf(ctx, obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, "failed to update status URL symlink: %s", err) @@ -481,7 +485,8 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *so // If an include is unavailable, it marks the object with v1beta1.IncludeUnavailableCondition and returns early. // If the copy operations are successful, it deletes the v1beta1.IncludeUnavailableCondition from the object. // If the artifactSet differs from the current set, it marks the object with v1beta1.ArtifactOutdatedCondition. 
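// For illustration only (not part of this patch): an include entry copies the
// artifact of another GitRepository into this object's artifact. The field
// names below are assumed from the v1beta2 API and are shown only to sketch
// the shape of spec.include:
//
//	obj.Spec.Include = []sourcev1.GitRepositoryInclude{{
//		GitRepositoryRef: meta.LocalObjectReference{Name: "child-repo"},
//		FromPath:         "./deploy",
//		ToPath:           "child/deploy",
//	}}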
-func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, obj *sourcev1.GitRepository, _ *sourcev1.Artifact, includes *artifactSet, dir string) (sreconcile.Result, error) { +func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, + obj *sourcev1.GitRepository, _ *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) { artifacts := make(artifactSet, len(obj.Spec.Include)) for i, incl := range obj.Spec.Include { // Do this first as it is much cheaper than copy operations diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index 9473e8dd7..5f8529773 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -511,14 +511,14 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", obj.Spec.URL) } - var artifact sourcev1.Artifact + var commit git.Commit var includes artifactSet - got, err := r.reconcileSource(context.TODO(), obj, &artifact, &includes, tmpDir) + got, err := r.reconcileSource(context.TODO(), obj, &commit, &includes, tmpDir) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) - g.Expect(artifact).ToNot(BeNil()) + g.Expect(commit).ToNot(BeNil()) }) } }) @@ -666,9 +666,9 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) obj := obj.DeepCopy() obj.Spec.GitImplementation = i - var artifact sourcev1.Artifact + var commit git.Commit var includes artifactSet - got, err := r.reconcileSource(ctx, obj, &artifact, &includes, tmpDir) + got, err := r.reconcileSource(ctx, obj, &commit, &includes, tmpDir) if err != nil { println(err.Error()) } @@ -676,7 +676,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) g.Expect(got).To(Equal(tt.want)) if tt.wantRevision != "" { revision := strings.ReplaceAll(tt.wantRevision, "", headRef.Hash().String()) - g.Expect(artifact.Revision).To(Equal(revision)) + g.Expect(commit.String()).To(Equal(revision)) g.Expect(conditions.IsTrue(obj, sourcev1.ArtifactOutdatedCondition)).To(BeTrue()) } }) @@ -691,7 +691,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { dir string includes artifactSet beforeFunc func(obj *sourcev1.GitRepository) - afterFunc func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) + afterFunc func(t *WithT, obj *sourcev1.GitRepository) want sreconcile.Result wantErr bool assertConditions []metav1.Condition @@ -702,7 +702,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} }, - afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) t.Expect(obj.Status.URL).ToNot(BeEmpty()) }, @@ -719,7 +719,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} }, - afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) 
t.Expect(obj.GetArtifact().Checksum).To(Equal("60a3bf69f337cb5ec9ebd00abefbb6e7f2a2cf27158ecf438d52b2035b184172")) t.Expect(obj.Status.IncludedArtifacts).ToNot(BeEmpty()) @@ -740,7 +740,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { obj.Status.Artifact = &sourcev1.Artifact{Revision: "main/revision"} obj.Status.IncludedArtifacts = []*sourcev1.Artifact{{Revision: "main/revision"}} }, - afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { t.Expect(obj.Status.URL).To(BeEmpty()) }, want: sreconcile.ResultSuccess, @@ -755,7 +755,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { obj.Spec.Interval = metav1.Duration{Duration: interval} obj.Spec.Ignore = pointer.StringPtr("!**.txt\n") }, - afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) t.Expect(obj.GetArtifact().Checksum).To(Equal("11f7f007dce5619bd79e6c57688261058d09f5271e802463ac39f2b9ead7cabd")) }, @@ -772,7 +772,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { obj.Spec.Interval = metav1.Duration{Duration: interval} conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") }, - afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) t.Expect(obj.GetArtifact().Checksum).To(Equal("60a3bf69f337cb5ec9ebd00abefbb6e7f2a2cf27158ecf438d52b2035b184172")) t.Expect(obj.Status.URL).ToNot(BeEmpty()) @@ -789,7 +789,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} }, - afterFunc: func(t *WithT, obj *sourcev1.GitRepository, artifact sourcev1.Artifact) { + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) localPath := testStorage.LocalPath(*obj.GetArtifact()) @@ -843,15 +843,18 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { tt.beforeFunc(obj) } - artifact := testStorage.NewArtifactFor(obj.Kind, obj, "main/revision", "checksum.tar.gz") + commit := git.Commit{ + Hash: []byte("revision"), + Reference: "refs/heads/main", + } - got, err := r.reconcileArtifact(ctx, obj, &artifact, &tt.includes, tt.dir) + got, err := r.reconcileArtifact(ctx, obj, &commit, &tt.includes, tt.dir) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) if tt.afterFunc != nil { - tt.afterFunc(g, obj, artifact) + tt.afterFunc(g, obj) } }) } @@ -1036,10 +1039,10 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { g.Expect(err).NotTo(HaveOccurred()) defer os.RemoveAll(tmpDir) - var artifact sourcev1.Artifact + var commit git.Commit var includes artifactSet - got, err := r.reconcileInclude(ctx, obj, &artifact, &includes, tmpDir) + got, err := r.reconcileInclude(ctx, obj, &commit, &includes, tmpDir) g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) g.Expect(err != nil).To(Equal(tt.wantErr)) if err == nil { From 47d09581df4af9c61a962ef808717baeb5017315 Mon Sep 17 00:00:00 2001 From: Sunny Date: Wed, 16 Feb 2022 03:30:18 +0530 Subject: [PATCH 58/63] bucket: Make NewArtifact event more informative Use the etagIndex 
to provide more information about the artifact in NewArtifact events and remove the revision from the event message. The revision is still kept in the event annotations. Signed-off-by: Sunny --- controllers/bucket_controller.go | 51 +++++++++++++++------------ controllers/bucket_controller_test.go | 12 ++++--- 2 files changed, 36 insertions(+), 27 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index a152523dc..54bf8e51a 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -104,7 +104,7 @@ type BucketReconcilerOptions struct { // bucketReconcilerFunc is the function type for all the bucket reconciler // functions. -type bucketReconcilerFunc func(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) +type bucketReconcilerFunc func(ctx context.Context, obj *sourcev1.Bucket, index etagIndex, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{}) @@ -199,6 +199,7 @@ func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) } + index := make(etagIndex) var artifact sourcev1.Artifact // Create temp working dir @@ -215,7 +216,7 @@ func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, var res sreconcile.Result var resErr error for _, rec := range reconcilers { - recResult, err := rec(ctx, obj, &artifact, tmpDir) + recResult, err := rec(ctx, obj, index, &artifact, tmpDir) // Exit immediately on ResultRequeue. if recResult == sreconcile.ResultRequeue { return sreconcile.ResultRequeue, nil @@ -238,7 +239,8 @@ func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, // All artifacts for the resource except for the current one are garbage collected from the storage. // If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. // If the hostname of the URLs on the object do not match the current storage server hostname, they are updated. -func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcileStorage(ctx context.Context, + obj *sourcev1.Bucket, _ etagIndex, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -266,7 +268,8 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B // result. // If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret // fails, it records v1beta1.FetchFailedCondition=True and returns early. 
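// Illustrative sketch (not part of the patch): how an etag index of the kind
// described in the commit message above can yield both a deterministic
// revision and a more informative NewArtifact event. The etagIndex name and
// the Revision() signature mirror the controller code referenced in this
// diff; the exact hashing and the event wording shown here are assumptions
// for illustration only.
package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// etagIndex maps bucket object keys to their etags, as built during reconciliation.
type etagIndex map[string]string

// Revision hashes the sorted key/etag pairs, so any structural or content
// change in the bucket produces a new revision string.
func (i etagIndex) Revision() (string, error) {
	keys := make([]string, 0, len(i))
	for k := range i {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	sum := sha256.New()
	for _, k := range keys {
		if _, err := fmt.Fprintf(sum, "%s %s\n", k, i[k]); err != nil {
			return "", err
		}
	}
	return fmt.Sprintf("%x", sum.Sum(nil)), nil
}

func main() {
	index := etagIndex{"manifests/app.yaml": `"etag-1"`, "manifests/ns.yaml": `"etag-2"`}
	rev, _ := index.Revision()
	// The NewArtifact event now reports the file count and bucket name, while
	// the revision travels in the event annotations instead of the message.
	fmt.Printf("event: fetched %d files from 'my-bucket'\n", len(index))
	fmt.Printf("annotation revision: %s\n", rev)
}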
-func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcileSource(ctx context.Context, + obj *sourcev1.Bucket, index etagIndex, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { var secret *corev1.Secret if obj.Spec.SecretRef != nil { secretName := types.NamespacedName{ @@ -287,9 +290,9 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu switch obj.Spec.Provider { case sourcev1.GoogleBucketProvider: - return r.reconcileGCPSource(ctx, obj, artifact, secret, dir) + return r.reconcileGCPSource(ctx, obj, index, artifact, secret, dir) default: - return r.reconcileMinioSource(ctx, obj, artifact, secret, dir) + return r.reconcileMinioSource(ctx, obj, index, artifact, secret, dir) } } @@ -302,8 +305,8 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu // On a successful download, it removes v1beta1.FetchFailedCondition, and compares the current revision of HEAD to // the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. // If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. -func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, - secret *corev1.Secret, dir string) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, + obj *sourcev1.Bucket, index etagIndex, artifact *sourcev1.Artifact, secret *corev1.Secret, dir string) (sreconcile.Result, error) { // Build the client with the configuration from the object and secret s3Client, err := r.buildMinioClient(obj, secret) if err != nil { @@ -367,7 +370,6 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source // Build up an index of object keys and their etags // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to // detect both structural and file changes - var index = make(etagIndex) for object := range s3Client.ListObjects(ctxTimeout, obj.Spec.BucketName, minio.ListObjectsOptions{ Recursive: true, UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), @@ -438,8 +440,6 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) return sreconcile.ResultEmpty, e } - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.BucketOperationSucceededReason, - "fetched %d files with revision '%s' from '%s'", len(index), revision, obj.Spec.BucketName) } conditions.Delete(obj, sourcev1.FetchFailedCondition) @@ -457,8 +457,8 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source // On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to // the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. // If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. 
-func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, - secret *corev1.Secret, dir string) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, + obj *sourcev1.Bucket, index etagIndex, artifact *sourcev1.Artifact, secret *corev1.Secret, dir string) (sreconcile.Result, error) { gcpClient, err := r.buildGCPClient(ctx, secret) if err != nil { e := &serror.Event{ @@ -522,7 +522,6 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 // Build up an index of object keys and their etags // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to // detect both structural and file changes - var index = make(etagIndex) objects := gcpClient.ListObjects(ctxTimeout, obj.Spec.BucketName, nil) for { object, err := objects.Next() @@ -593,8 +592,6 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) return sreconcile.ResultEmpty, e } - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.BucketOperationSucceededReason, - "fetched %d files from bucket '%s'", len(index), obj.Spec.BucketName) } conditions.Delete(obj, sourcev1.FetchFailedCondition) @@ -610,7 +607,8 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 // If the given artifact does not differ from the object's current, it returns early. // On a successful archive, the artifact in the status of the given object is set, and the symlink in the storage is // updated to its path. -func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcileArtifact(ctx context.Context, + obj *sourcev1.Bucket, index etagIndex, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { // Always restore the Ready condition in case it got removed due to a transient error defer func() { if obj.GetArtifact().HasRevision(artifact.Revision) { @@ -666,10 +664,10 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1. Reason: sourcev1.StorageOperationFailedReason, } } - r.AnnotatedEventf(obj, map[string]string{ + r.annotatedEventLogf(ctx, obj, map[string]string{ "revision": artifact.Revision, "checksum": artifact.Checksum, - }, corev1.EventTypeNormal, "NewArtifact", "stored artifact for revision '%s'", artifact.Revision) + }, corev1.EventTypeNormal, "NewArtifact", "fetched %d files from '%s'", len(index), obj.Spec.BucketName) // Record it on the object obj.Status.Artifact = artifact.DeepCopy() @@ -803,10 +801,17 @@ func (i etagIndex) Revision() (string, error) { return fmt.Sprintf("%x", sum.Sum(nil)), nil } -// eventLog records event and logs at the same time. This log is different from -// the debug log in the event recorder in the sense that this is a simple log, -// the event recorder debug log contains complete details about the event. +// eventLogf records event and logs at the same time. func (r *BucketReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + r.annotatedEventLogf(ctx, obj, nil, eventType, reason, messageFmt, args...) +} + +// annotatedEventLogf records annotated event and logs at the same time. 
This +// log is different from the debug log in the event recorder in the sense that +// this is a simple log, the event recorder debug log contains complete details +// about the event. +func (r *BucketReconciler) annotatedEventLogf(ctx context.Context, + obj runtime.Object, annotations map[string]string, eventType string, reason string, messageFmt string, args ...interface{}) { msg := fmt.Sprintf(messageFmt, args...) // Log and emit event. if eventType == corev1.EventTypeWarning { @@ -814,5 +819,5 @@ func (r *BucketReconciler) eventLogf(ctx context.Context, obj runtime.Object, ev } else { ctrl.LoggerFrom(ctx).Info(msg) } - r.Eventf(obj, eventType, reason, msg) + r.AnnotatedEventf(obj, annotations, eventType, reason, msg) } diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index ee798f376..9264795f4 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -264,9 +264,10 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) } + index := make(etagIndex) var artifact sourcev1.Artifact - got, err := r.reconcileStorage(context.TODO(), obj, &artifact, "") + got, err := r.reconcileStorage(context.TODO(), obj, index, &artifact, "") g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) @@ -549,7 +550,8 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { } artifact := &sourcev1.Artifact{} - got, err := r.reconcileSource(context.TODO(), obj, artifact, tmpDir) + index := make(etagIndex) + got, err := r.reconcileSource(context.TODO(), obj, index, artifact, tmpDir) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) @@ -828,7 +830,8 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { }() artifact := &sourcev1.Artifact{} - got, err := r.reconcileSource(context.TODO(), obj, artifact, tmpDir) + index := make(etagIndex) + got, err := r.reconcileSource(context.TODO(), obj, index, artifact, tmpDir) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) @@ -965,6 +968,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { }, } + index := make(etagIndex) artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz") artifact.Checksum = testChecksum @@ -972,7 +976,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { tt.beforeFunc(g, obj, artifact, tmpDir) } - got, err := r.reconcileArtifact(context.TODO(), obj, &artifact, tmpDir) + got, err := r.reconcileArtifact(context.TODO(), obj, index, &artifact, tmpDir) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) From 07a539e3d634adf4ac7ac0eff71119f7f26c5463 Mon Sep 17 00:00:00 2001 From: Hidde Beydals Date: Fri, 18 Feb 2022 15:24:44 +0100 Subject: [PATCH 59/63] build: update meta API and pkg/runtime to non RC Signed-off-by: Hidde Beydals --- api/go.mod | 6 +++--- api/go.sum | 11 ++++++----- go.mod | 4 ++-- go.sum | 12 +++++++----- 4 files changed, 18 insertions(+), 15 deletions(-) diff --git a/api/go.mod b/api/go.mod index b974e967a..46c5284ff 100644 --- a/api/go.mod +++ b/api/go.mod @@ -4,8 +4,8 @@ go 1.17 require ( github.com/fluxcd/pkg/apis/acl v0.0.3 - github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 - k8s.io/apimachinery v0.23.1 + github.com/fluxcd/pkg/apis/meta v0.12.0 + k8s.io/apimachinery v0.23.2 sigs.k8s.io/controller-runtime v0.11.0 ) @@ -24,5 +24,5 @@ require ( k8s.io/klog/v2 v2.30.0 // indirect k8s.io/utils v0.0.0-20211208161948-7d6a63dca704 // indirect 
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect ) diff --git a/api/go.sum b/api/go.sum index 8142a1881..7267da4d0 100644 --- a/api/go.sum +++ b/api/go.sum @@ -123,8 +123,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc= github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU= -github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 h1:YY6RlaHIMXawgEOJhJbSrm4NpD9fJTCWFGKgtNfQ0/g= -github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3/go.mod h1:ki5wJE4nuFOZt78q0RSYkrKwINgIBPynuswZhnTOSoI= +github.com/fluxcd/pkg/apis/meta v0.12.0 h1:Ssyltj6E9A7y32sZrzjog0m+bIsFM/3lHHfmpxesUAU= +github.com/fluxcd/pkg/apis/meta v0.12.0/go.mod h1:SPrSWMwDK7Ls2/4GadzhjDjPFbKrzzgzuZ0oDO3jzso= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -897,8 +897,8 @@ k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4= k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= -k8s.io/apimachinery v0.23.1 h1:sfBjlDFwj2onG0Ijx5C+SrAoeUscPrmghm7wHP+uXlo= -k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno= +k8s.io/apimachinery v0.23.2 h1:dBmjCOeYBdg2ibcQxMuUq+OopZ9fjfLIR5taP/XKeTs= +k8s.io/apimachinery v0.23.2/go.mod h1:zDqeV0AK62LbCI0CI7KbWCAYdLg+E+8UXJ0rIz5gmS8= k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= @@ -924,8 +924,9 @@ sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87J sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.0 h1:kDvPBbnPk+qYmkHmSo8vKGp438IASWofnbbUKDE/bv0= sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/go.mod b/go.mod index 435adff27..53e5b1e82 100644 --- a/go.mod +++ b/go.mod @@ -11,12 +11,12 @@ require ( github.com/cyphar/filepath-securejoin v0.2.2 github.com/darkowlzz/controller-check v0.0.0-20220119215126-648356cef22c github.com/elazarl/goproxy 
v0.0.0-20211114080932-d06c3be7c11b - github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 + github.com/fluxcd/pkg/apis/meta v0.12.0 github.com/fluxcd/pkg/gittestserver v0.5.0 github.com/fluxcd/pkg/gitutil v0.1.0 github.com/fluxcd/pkg/helmtestserver v0.4.0 github.com/fluxcd/pkg/lockedfile v0.1.0 - github.com/fluxcd/pkg/runtime v0.13.0-rc.7 + github.com/fluxcd/pkg/runtime v0.13.0 github.com/fluxcd/pkg/ssh v0.2.0 github.com/fluxcd/pkg/testserver v0.2.0 github.com/fluxcd/pkg/untar v0.1.0 diff --git a/go.sum b/go.sum index 48bb6d3e8..ba456a0f8 100644 --- a/go.sum +++ b/go.sum @@ -303,8 +303,9 @@ github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8S github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc= github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU= -github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 h1:YY6RlaHIMXawgEOJhJbSrm4NpD9fJTCWFGKgtNfQ0/g= github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3/go.mod h1:ki5wJE4nuFOZt78q0RSYkrKwINgIBPynuswZhnTOSoI= +github.com/fluxcd/pkg/apis/meta v0.12.0 h1:Ssyltj6E9A7y32sZrzjog0m+bIsFM/3lHHfmpxesUAU= +github.com/fluxcd/pkg/apis/meta v0.12.0/go.mod h1:SPrSWMwDK7Ls2/4GadzhjDjPFbKrzzgzuZ0oDO3jzso= github.com/fluxcd/pkg/gittestserver v0.5.0 h1:pPdaz7pUsukt4eQ+xQeNwoypOXGGOHFHnPjIHQAv0tE= github.com/fluxcd/pkg/gittestserver v0.5.0/go.mod h1:mFEF/Xrg+CjQH4VFCRCou2qZmhWKo7EYcjr7MIoX6+s= github.com/fluxcd/pkg/gitutil v0.1.0 h1:VO3kJY/CKOCO4ysDNqfdpTg04icAKBOSb3lbR5uE/IE= @@ -314,8 +315,8 @@ github.com/fluxcd/pkg/helmtestserver v0.4.0/go.mod h1:JOI9f3oXUFIWmMKWMBan7FjglA github.com/fluxcd/pkg/lockedfile v0.1.0 h1:YsYFAkd6wawMCcD74ikadAKXA4s2sukdxrn7w8RB5eo= github.com/fluxcd/pkg/lockedfile v0.1.0/go.mod h1:EJLan8t9MiOcgTs8+puDjbE6I/KAfHbdvIy9VUgIjm8= github.com/fluxcd/pkg/runtime v0.13.0-rc.6/go.mod h1:4oKUO19TeudXrnCRnxCfMSS7EQTYpYlgfXwlQuDJ/Eg= -github.com/fluxcd/pkg/runtime v0.13.0-rc.7 h1:snESiRwjrmNchIBautlxnXn8HzmeDEnS3PsMbP2fyeg= -github.com/fluxcd/pkg/runtime v0.13.0-rc.7/go.mod h1:uGPudgMUNC3wu7Zoh6AgJM8WSH3VpmnzjrwkVb86d3Y= +github.com/fluxcd/pkg/runtime v0.13.0 h1:7iLAenXm+9EadXe0JLZjhOR6AwRaASseqTW7vk8wAWU= +github.com/fluxcd/pkg/runtime v0.13.0/go.mod h1:G0EuJZJi/ZOjrWiclF4bBmkbzKhWssKuzSsmz3kVCMg= github.com/fluxcd/pkg/ssh v0.2.0 h1:e9V+HReOL7czm7edVzYS1e+CnFKz1/kHiUNfLRpBdH8= github.com/fluxcd/pkg/ssh v0.2.0/go.mod h1:EpQC7Ztdlbi8S/dlYXqVDZtHtLpN3FNl3N6zWujVzbA= github.com/fluxcd/pkg/testserver v0.1.0/go.mod h1:fvt8BHhXw6c1+CLw1QFZxcQprlcXzsrL4rzXaiGM+Iw= @@ -1230,7 +1231,6 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c= @@ -1533,6 +1533,7 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.4/go.mod h1:Rgs+9gIGYC5laXQSZZ9JqT5NevNgoGiOdVWi1BAB3qk= k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= +k8s.io/api v0.23.2/go.mod h1:sYuDb3flCtRPI8ghn6qFrcK5ZBu2mhbElxRE95qpwlI= k8s.io/api v0.23.3 h1:KNrME8KHGr12Ozjf8ytOewKzZh6hl/hHUZeHddT3a38= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= k8s.io/apiextensions-apiserver v0.22.4/go.mod h1:kH9lxD8dbJ+k0ZizGET55lFgdGjO8t45fgZnCVdZEpw= @@ -1542,7 +1543,7 @@ k8s.io/apiextensions-apiserver v0.23.3/go.mod h1:/ZpRXdgKZA6DvIVPEmXDCZJN53YIQEU k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0= k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= -k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno= +k8s.io/apimachinery v0.23.2/go.mod h1:zDqeV0AK62LbCI0CI7KbWCAYdLg+E+8UXJ0rIz5gmS8= k8s.io/apimachinery v0.23.3 h1:7IW6jxNzrXTsP0c8yXz2E5Yx/WTzVPTsHIx/2Vm0cIk= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -1556,6 +1557,7 @@ k8s.io/cli-runtime v0.23.0/go.mod h1:B5N3YH0KP1iKr6gEuJ/RRmGjO0mJQ/f/JrsmEiPQAlU k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.4/go.mod h1:Yzw4e5e7h1LNHA4uqnMVrpEpUs1hJOiuBsJKIlRCHDA= k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= +k8s.io/client-go v0.23.2/go.mod h1:k3YbsWg6GWdHF1THHTQP88X9RhB1DWPo3Dq7KfU/D1c= k8s.io/client-go v0.23.3 h1:23QYUmCQ/W6hW78xIwm3XqZrrKZM+LWDqW2zfo+szJs= k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE= k8s.io/code-generator v0.22.4/go.mod h1:qjYl54pQ/emhkT0UxbufbREYJMWsHNNV/jSVwhYZQGw= From f72a28a193441642a36b7dea8044dc69b2284a7a Mon Sep 17 00:00:00 2001 From: Sunny Date: Mon, 21 Feb 2022 15:38:28 +0530 Subject: [PATCH 60/63] Use field owner in the patch helper - Update summarize helper to have patch field owner. - Updated the controllers to set the patch field owner. Signed-off-by: Sunny --- controllers/bucket_controller.go | 4 ++- controllers/gitrepository_controller.go | 4 ++- controllers/helmchart_controller.go | 6 ++-- controllers/helmrepository_controller.go | 6 ++-- internal/reconcile/summarize/summary.go | 13 ++++++++ main.go | 40 +++++++++++++----------- 6 files changed, 49 insertions(+), 24 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 54bf8e51a..522b47c81 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -95,7 +95,8 @@ type BucketReconciler struct { kuberecorder.EventRecorder helper.Metrics - Storage *Storage + Storage *Storage + ControllerName string } type BucketReconcilerOptions struct { @@ -160,6 +161,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res summarize.RecordReconcileReq, ), summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetInterval().Duration}), + summarize.WithPatchFieldOwner(r.ControllerName), } result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) 
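// Standalone sketch (not part of the patch): the functional-option pattern the
// summarize helper uses to thread a field owner into its patch call, condensed
// into a runnable form. HelperOptions, Option and WithPatchFieldOwner mirror
// the names in this patch; patchOption and withFieldOwner below stand in for
// fluxcd's patch.Option/patch.WithFieldOwner and are assumptions here.
package main

import "fmt"

type patchOption string

// withFieldOwner is a placeholder for patch.WithFieldOwner.
func withFieldOwner(owner string) patchOption { return patchOption("fieldOwner=" + owner) }

// HelperOptions collects per-call configuration for SummarizeAndPatch.
type HelperOptions struct {
	PatchFieldOwner string
}

// Option mutates HelperOptions; options are applied in the order given.
type Option func(*HelperOptions)

// WithPatchFieldOwner sets the manager name the API server records for fields
// written by this controller.
func WithPatchFieldOwner(owner string) Option {
	return func(o *HelperOptions) { o.PatchFieldOwner = owner }
}

func main() {
	opts := &HelperOptions{}
	for _, apply := range []Option{WithPatchFieldOwner("source-controller")} {
		apply(opts)
	}
	// Only add the patch option when an owner was configured, as the helper does.
	var patchOpts []patchOption
	if opts.PatchFieldOwner != "" {
		patchOpts = append(patchOpts, withFieldOwner(opts.PatchFieldOwner))
	}
	fmt.Println(patchOpts)
}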
diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 8c4c31b93..53a9da69e 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -95,7 +95,8 @@ type GitRepositoryReconciler struct { kuberecorder.EventRecorder helper.Metrics - Storage *Storage + Storage *Storage + ControllerName string requeueDependency time.Duration } @@ -166,6 +167,7 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques summarize.RecordReconcileReq, ), summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetInterval().Duration}), + summarize.WithPatchFieldOwner(r.ControllerName), } result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index f921fe9a8..5bbe56cd4 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -102,8 +102,9 @@ type HelmChartReconciler struct { kuberecorder.EventRecorder helper.Metrics - Storage *Storage - Getters helmgetter.Providers + Storage *Storage + Getters helmgetter.Providers + ControllerName string } func (r *HelmChartReconciler) SetupWithManager(mgr ctrl.Manager) error { @@ -191,6 +192,7 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( summarize.RecordReconcileReq, ), summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetInterval().Duration}), + summarize.WithPatchFieldOwner(r.ControllerName), } result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index 39b28e984..b0b2f0e57 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -86,8 +86,9 @@ type HelmRepositoryReconciler struct { kuberecorder.EventRecorder helper.Metrics - Getters helmgetter.Providers - Storage *Storage + Getters helmgetter.Providers + Storage *Storage + ControllerName string } type HelmRepositoryReconcilerOptions struct { @@ -153,6 +154,7 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque summarize.RecordReconcileReq, ), summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetInterval().Duration}), + summarize.WithPatchFieldOwner(r.ControllerName), } result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) diff --git a/internal/reconcile/summarize/summary.go b/internal/reconcile/summarize/summary.go index 0ba76715f..1c2f97aae 100644 --- a/internal/reconcile/summarize/summary.go +++ b/internal/reconcile/summarize/summary.go @@ -85,6 +85,9 @@ type HelperOptions struct { ReconcileError error // ResultBuilder defines how the reconciliation result is computed. ResultBuilder reconcile.RuntimeResultBuilder + // PatchFieldOwner defines the field owner configuration for the Kubernetes + // patch operation. + PatchFieldOwner string } // Option is configuration that modifies SummarizeAndPatch. @@ -137,6 +140,13 @@ func WithReconcileError(re error) Option { } } +// WithPatchFieldOwner sets the FieldOwner in the patch helper. +func WithPatchFieldOwner(fieldOwner string) Option { + return func(s *HelperOptions) { + s.PatchFieldOwner = fieldOwner + } +} + // SummarizeAndPatch summarizes and patches the result to the target object. 
// When used at the very end of a reconciliation, the result builder must be // specified using the Option WithResultBuilder(). The returned result and error @@ -161,6 +171,9 @@ func (h *Helper) SummarizeAndPatch(ctx context.Context, obj conditions.Setter, o Conditions: ownedConditions, }, } + if opts.PatchFieldOwner != "" { + patchOpts = append(patchOpts, patch.WithFieldOwner(opts.PatchFieldOwner)) + } // Process the results of reconciliation. for _, processor := range opts.Processors { diff --git a/main.go b/main.go index 270548035..19e6c35e1 100644 --- a/main.go +++ b/main.go @@ -165,10 +165,11 @@ func main() { storage := mustInitStorage(storagePath, storageAdvAddr, setupLog) if err = (&controllers.GitRepositoryReconciler{ - Client: mgr.GetClient(), - EventRecorder: eventRecorder, - Metrics: metricsH, - Storage: storage, + Client: mgr.GetClient(), + EventRecorder: eventRecorder, + Metrics: metricsH, + Storage: storage, + ControllerName: controllerName, }).SetupWithManagerAndOptions(mgr, controllers.GitRepositoryReconcilerOptions{ MaxConcurrentReconciles: concurrent, DependencyRequeueInterval: requeueDependency, @@ -177,11 +178,12 @@ func main() { os.Exit(1) } if err = (&controllers.HelmRepositoryReconciler{ - Client: mgr.GetClient(), - EventRecorder: eventRecorder, - Metrics: metricsH, - Storage: storage, - Getters: getters, + Client: mgr.GetClient(), + EventRecorder: eventRecorder, + Metrics: metricsH, + Storage: storage, + Getters: getters, + ControllerName: controllerName, }).SetupWithManagerAndOptions(mgr, controllers.HelmRepositoryReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { @@ -189,11 +191,12 @@ func main() { os.Exit(1) } if err = (&controllers.HelmChartReconciler{ - Client: mgr.GetClient(), - Storage: storage, - Getters: getters, - EventRecorder: eventRecorder, - Metrics: metricsH, + Client: mgr.GetClient(), + Storage: storage, + Getters: getters, + EventRecorder: eventRecorder, + Metrics: metricsH, + ControllerName: controllerName, }).SetupWithManagerAndOptions(mgr, controllers.HelmChartReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { @@ -201,10 +204,11 @@ func main() { os.Exit(1) } if err = (&controllers.BucketReconciler{ - Client: mgr.GetClient(), - EventRecorder: eventRecorder, - Metrics: metricsH, - Storage: storage, + Client: mgr.GetClient(), + EventRecorder: eventRecorder, + Metrics: metricsH, + Storage: storage, + ControllerName: controllerName, }).SetupWithManagerAndOptions(mgr, controllers.BucketReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { From e9ae0c232e9d35192e2065d7da54a033690d745b Mon Sep 17 00:00:00 2001 From: Sunny Date: Mon, 21 Feb 2022 16:41:09 +0530 Subject: [PATCH 61/63] Add kstatus client conformance tests Use kstatus to compute the status of the objects. 
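As an aside, a minimal standalone sketch of what this conformance check asserts, relying only on the generic kstatus rules (a status.observedGeneration that matches metadata.generation and no Reconciling or Stalled conditions); the Bucket fields below are placeholder values:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status"
)

func main() {
	// A minimal custom resource: generation and observedGeneration agree and no
	// Reconciling/Stalled conditions are present, so kstatus reports Current.
	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "source.toolkit.fluxcd.io/v1beta2",
		"kind":       "Bucket",
		"metadata": map[string]interface{}{
			"name":       "example",
			"namespace":  "default",
			"generation": int64(1),
		},
		"status": map[string]interface{}{
			"observedGeneration": int64(1),
		},
	}}

	res, err := kstatus.Compute(u)
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Status == kstatus.CurrentStatus) // true
}

In the tests below this is driven through patch.ToUnstructured on the reconciled object rather than a hand-built map.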
Signed-off-by: Sunny --- controllers/bucket_controller_test.go | 8 + controllers/gitrepository_controller_test.go | 8 + controllers/helmchart_controller_test.go | 8 + controllers/helmrepository_controller_test.go | 8 + go.mod | 36 ++-- go.sum | 170 +++++++++++++++--- 6 files changed, 193 insertions(+), 45 deletions(-) diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 9264795f4..39ef70860 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -41,6 +41,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" + kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -130,6 +131,13 @@ func TestBucketReconciler_Reconcile(t *testing.T) { checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) checker.CheckErr(ctx, obj) + // kstatus client conformance check. + uo, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(uo) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + // Patch the object with reconcile request annotation. patchHelper, err := patch.NewHelper(obj, testEnv.Client) g.Expect(err).ToNot(HaveOccurred()) diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index 5f8529773..1e7028c75 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -47,6 +47,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" + kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -195,6 +196,13 @@ func TestGitRepositoryReconciler_Reconcile(t *testing.T) { checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) checker.CheckErr(ctx, obj) + // kstatus client conformance check. + u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + // Patch the object with reconcile request annotation. patchHelper, err := patch.NewHelper(obj, testEnv.Client) g.Expect(err).ToNot(HaveOccurred()) diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index 8944966aa..b031e9d50 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -35,6 +35,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" + kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -129,6 +130,13 @@ func TestHelmChartReconciler_Reconcile(t *testing.T) { checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) checker.CheckErr(ctx, obj) + // kstatus client conformance check. 
+ u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + // Patch the object with reconcile request annotation. patchHelper, err := patch.NewHelper(obj, testEnv.Client) g.Expect(err).ToNot(HaveOccurred()) diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index 9bcf5fd52..137df58f8 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -35,6 +35,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" + kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -97,6 +98,13 @@ func TestHelmRepositoryReconciler_Reconcile(t *testing.T) { checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) checker.CheckErr(ctx, obj) + // kstatus client conformance check. + u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + // Patch the object with reconcile request annotation. patchHelper, err := patch.NewHelper(obj, testEnv.Client) g.Expect(err).ToNot(HaveOccurred()) diff --git a/go.mod b/go.mod index 53e5b1e82..a12a9704d 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/fluxcd/pkg/gitutil v0.1.0 github.com/fluxcd/pkg/helmtestserver v0.4.0 github.com/fluxcd/pkg/lockedfile v0.1.0 - github.com/fluxcd/pkg/runtime v0.13.0 + github.com/fluxcd/pkg/runtime v0.13.1 github.com/fluxcd/pkg/ssh v0.2.0 github.com/fluxcd/pkg/testserver v0.2.0 github.com/fluxcd/pkg/untar v0.1.0 @@ -32,19 +32,20 @@ require ( github.com/spf13/pflag v1.0.5 golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - google.golang.org/api v0.54.0 + google.golang.org/api v0.62.0 gotest.tools v2.2.0+incompatible helm.sh/helm/v3 v3.7.2 k8s.io/api v0.23.3 k8s.io/apimachinery v0.23.3 k8s.io/client-go v0.23.3 k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 + sigs.k8s.io/cli-utils v0.28.0 sigs.k8s.io/controller-runtime v0.11.1 sigs.k8s.io/yaml v1.3.0 ) require ( - cloud.google.com/go v0.90.0 // indirect + cloud.google.com/go v0.99.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/BurntSushi/toml v0.3.1 // indirect github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd // indirect @@ -62,6 +63,7 @@ require ( github.com/bugsnag/bugsnag-go v2.1.2+incompatible // indirect github.com/bugsnag/panicwrap v1.3.4 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect github.com/containerd/containerd v1.5.7 // indirect github.com/containerd/continuity v0.1.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -77,7 +79,7 @@ require ( github.com/emirpasic/gods v1.12.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect - github.com/fatih/color v1.7.0 // indirect + github.com/fatih/color v1.13.0 // indirect github.com/fluxcd/pkg/apis/acl v0.0.3 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-errors/errors 
v1.0.1 // indirect @@ -96,12 +98,12 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/gax-go/v2 v2.1.0 // indirect + github.com/googleapis/gax-go/v2 v2.1.1 // indirect github.com/googleapis/gnostic v0.5.5 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect - github.com/hashicorp/go-cleanhttp v0.5.1 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.6.8 // indirect github.com/huandu/xstrings v1.3.2 // indirect github.com/imdario/mergo v0.3.12 // indirect @@ -110,7 +112,6 @@ require ( github.com/jmoiron/sqlx v1.3.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/jstemmer/go-junit-report v0.9.1 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect github.com/klauspost/compress v1.13.5 // indirect @@ -121,8 +122,8 @@ require ( github.com/lib/pq v1.10.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.6 // indirect - github.com/mattn/go-colorable v0.0.9 // indirect - github.com/mattn/go-isatty v0.0.4 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/minio/md5-simd v1.1.0 // indirect @@ -156,8 +157,8 @@ require ( github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/sosedoff/gitkit v0.2.1-0.20200818155723-72ebbcf5056d // indirect - github.com/spf13/cast v1.3.1 // indirect - github.com/spf13/cobra v1.2.1 // indirect + github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/cobra v1.3.0 // indirect github.com/stretchr/testify v1.7.0 // indirect github.com/xanzy/ssh-agent v0.3.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect @@ -172,34 +173,31 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.19.1 // indirect - golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect - golang.org/x/mod v0.4.2 // indirect golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect - golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect - google.golang.org/grpc v1.40.0 // indirect + google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect + google.golang.org/grpc v1.42.0 // indirect google.golang.org/protobuf v1.27.1 // indirect gopkg.in/gorp.v1 v1.7.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - 
gopkg.in/ini.v1 v1.62.0 // indirect + gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect k8s.io/apiextensions-apiserver v0.23.3 // indirect k8s.io/apiserver v0.23.3 // indirect - k8s.io/cli-runtime v0.23.0 // indirect + k8s.io/cli-runtime v0.23.2 // indirect k8s.io/component-base v0.23.3 // indirect k8s.io/klog/v2 v2.40.1 // indirect k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect - k8s.io/kubectl v0.22.4 // indirect + k8s.io/kubectl v0.23.2 // indirect oras.land/oras-go v0.4.0 // indirect sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect sigs.k8s.io/kustomize/api v0.10.1 // indirect diff --git a/go.sum b/go.sum index ba456a0f8..b6023992d 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,13 @@ cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0 h1:MjvSkUq8RuAb+2JLDi5VQmmExRJPUQ3JLCWpRB6fmdw= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= +cloud.google.com/go v0.99.0 h1:y/cM2iqGgGi5D5DQZl6D9STN/3dR/Vx5Mp8s752oJTY= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -33,6 +38,7 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -68,6 +74,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod 
h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= @@ -122,7 +129,9 @@ github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.m github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -156,6 +165,7 @@ github.com/bugsnag/panicwrap v1.3.4 h1:A6sXFtDGsgU/4BLf5JT0o5uYg3EeKgGx3Sfs+/uk3 github.com/bugsnag/panicwrap v1.3.4/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -163,6 +173,7 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -171,11 +182,19 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= @@ -222,6 +241,7 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= @@ -287,7 +307,10 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -297,8 +320,10 @@ github.com/evanphx/json-patch 
v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc= @@ -315,8 +340,8 @@ github.com/fluxcd/pkg/helmtestserver v0.4.0/go.mod h1:JOI9f3oXUFIWmMKWMBan7FjglA github.com/fluxcd/pkg/lockedfile v0.1.0 h1:YsYFAkd6wawMCcD74ikadAKXA4s2sukdxrn7w8RB5eo= github.com/fluxcd/pkg/lockedfile v0.1.0/go.mod h1:EJLan8t9MiOcgTs8+puDjbE6I/KAfHbdvIy9VUgIjm8= github.com/fluxcd/pkg/runtime v0.13.0-rc.6/go.mod h1:4oKUO19TeudXrnCRnxCfMSS7EQTYpYlgfXwlQuDJ/Eg= -github.com/fluxcd/pkg/runtime v0.13.0 h1:7iLAenXm+9EadXe0JLZjhOR6AwRaASseqTW7vk8wAWU= -github.com/fluxcd/pkg/runtime v0.13.0/go.mod h1:G0EuJZJi/ZOjrWiclF4bBmkbzKhWssKuzSsmz3kVCMg= +github.com/fluxcd/pkg/runtime v0.13.1 h1:/MVSjP/pySd7tNP5FaYMkWerfxf8NZmO7SlDkTUjkjU= +github.com/fluxcd/pkg/runtime v0.13.1/go.mod h1:G0EuJZJi/ZOjrWiclF4bBmkbzKhWssKuzSsmz3kVCMg= github.com/fluxcd/pkg/ssh v0.2.0 h1:e9V+HReOL7czm7edVzYS1e+CnFKz1/kHiUNfLRpBdH8= github.com/fluxcd/pkg/ssh v0.2.0/go.mod h1:EpQC7Ztdlbi8S/dlYXqVDZtHtLpN3FNl3N6zWujVzbA= github.com/fluxcd/pkg/testserver v0.1.0/go.mod h1:fvt8BHhXw6c1+CLw1QFZxcQprlcXzsrL4rzXaiGM+Iw= @@ -505,13 +530,13 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod 
h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= @@ -532,18 +557,28 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0 h1:bkKf0BeBXcSYa7f5Fyi9gMuQ8gNsxeiNpZjR6VxNZeo= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.8 h1:92lWxgpa+fF3FozM4B3UZtHZMJX8T5XT+TFdCxsPyWs= github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -551,15 +586,23 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod 
h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -583,14 +626,13 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -639,6 +681,7 @@ github.com/libgit2/git2go/v33 v33.0.6/go.mod h1:KdpqkU+6+++4oHna/MIOgx4GCQ92IPCd github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod 
h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -656,11 +699,20 @@ github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kN github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= -github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -674,6 +726,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= @@ -683,6 +737,7 @@ github.com/minio/sha256-simd v0.1.1 
h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKU github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.1.1 h1:Bp6x9R1Wn16SIz3OfeDr0b7RnCG2OB66Y7PQyC/cvq4= @@ -698,6 +753,7 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= @@ -773,9 +829,11 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.3 h1:7JgpsBaN0uMkyju4tbYHu0mnM55hNKVYLsXmwr15NQI= github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= @@ -788,11 +846,13 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod 
h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= @@ -806,6 +866,7 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= @@ -816,6 +877,7 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= @@ -834,8 +896,10 @@ github.com/rubenv/sql-migrate v0.0.0-20210614095031-55d5740dbbcc/go.mod h1:HFLT6 github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -853,9 +917,7 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions 
v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= @@ -864,17 +926,20 @@ github.com/sosedoff/gitkit v0.2.1-0.20200818155723-72ebbcf5056d/go.mod h1:A+o6Za github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0= +github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -886,6 +951,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= +github.com/spyzhov/ajson v0.4.2/go.mod h1:63V+CGM6f1Bu/p4nLIN8885ojBdt88TbLoSFzyqMuVA= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -905,6 +972,7 @@ github.com/tchap/go-patricia v2.2.6+incompatible/go.mod 
h1:bmLyhP68RS6kStMGxByiQ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -948,8 +1016,11 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= @@ -1002,6 +1073,7 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1040,7 +1112,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod 
h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -1052,8 +1123,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1075,6 +1146,7 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1102,11 +1174,13 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1129,6 +1203,7 @@ golang.org/x/oauth2 v0.0.0-20210615190721-d04028783cf1/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 
v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1151,6 +1226,7 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1165,9 +1241,12 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1175,6 +1254,7 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1211,6 +1291,7 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1229,8 +1310,15 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c= @@ -1278,6 +1366,7 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1323,7 +1412,6 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff h1:VX/uD7MK0AHXGiScH3fsieUQUcpmRERPDYtqZdJnA+Q= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1360,8 +1448,14 @@ google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtuk google.golang.org/api v0.49.0/go.mod h1:BECiH72wsfwUvOVn3+btPD5WHi0LzavZReBndi42L18= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0 h1:ECJUVngj71QI6XEm7b1sAf8BljU5inEhMbKPR8Lxhhk= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0 h1:PhGymJMXfGBzc4lBRmrx9+1w4w2wEzURHNGF/sD/xGc= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1427,9 +1521,21 @@ google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 h1:NHN4wOCScVzKhPenJ2dt+BTs3X/XkBVI/Rh4iDt55T8= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= 
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1456,8 +1562,10 @@ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1492,8 +1600,9 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -1552,8 +1661,8 @@ k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= k8s.io/apiserver v0.23.3 h1:gWY1DmA0AdAGR/H+Q/1FtyGkFq8xqSaZOw7oLopmO8k= k8s.io/apiserver 
v0.23.3/go.mod h1:3HhsTmC+Pn+Jctw+Ow0LHA4dQ4oXrQ4XJDzrVDG64T4= k8s.io/cli-runtime v0.22.4/go.mod h1:x35r0ERHXr/MrbR1C6MPJxQ3xKG6+hXi9m2xLzlMPZA= -k8s.io/cli-runtime v0.23.0 h1:UONt0BV2+edjUVAXuR1nnOAL2CB9r+Gl9yk4UBQpKfs= -k8s.io/cli-runtime v0.23.0/go.mod h1:B5N3YH0KP1iKr6gEuJ/RRmGjO0mJQ/f/JrsmEiPQAlU= +k8s.io/cli-runtime v0.23.2 h1:4zOZX78mFSakwe4gef81XDBu94Yu0th6bfveTOx8ZQk= +k8s.io/cli-runtime v0.23.2/go.mod h1:Ag70akCDvwux4HxY+nH2J3UqE2e6iwSSdG1HE6p1VTU= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.4/go.mod h1:Yzw4e5e7h1LNHA4uqnMVrpEpUs1hJOiuBsJKIlRCHDA= k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= @@ -1562,13 +1671,16 @@ k8s.io/client-go v0.23.3 h1:23QYUmCQ/W6hW78xIwm3XqZrrKZM+LWDqW2zfo+szJs= k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE= k8s.io/code-generator v0.22.4/go.mod h1:qjYl54pQ/emhkT0UxbufbREYJMWsHNNV/jSVwhYZQGw= k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= +k8s.io/code-generator v0.23.2/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= k8s.io/component-base v0.22.4/go.mod h1:MrSaQy4a3tFVViff8TZL6JHYSewNCLshZCwHYM58v5A= k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= +k8s.io/component-base v0.23.2/go.mod h1:wS9Z03MO3oJ0RU8bB/dbXTiluGju+SC/F5i660gxB8c= k8s.io/component-base v0.23.3 h1:q+epprVdylgecijVGVdf4MbizEL2feW4ssd7cdo6LVY= k8s.io/component-base v0.23.3/go.mod h1:1Smc4C60rWG7d3HjSYpIwEbySQ3YWg0uzH5a2AtaTLg= k8s.io/component-helpers v0.22.4/go.mod h1:A50qTyczDFbhZDifIfS2zFrHuPk9UNOWPpvNZ+3RSIs= +k8s.io/component-helpers v0.23.2/go.mod h1:J6CMwiaf0izLoNwiLl2OymB4+rGTsTpWp6PL/AqOM4U= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -1586,9 +1698,11 @@ k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2R k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf h1:M9XBsiMslw2lb2ZzglC0TOkBPK5NQi0/noUrdnoFwUg= k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/kubectl v0.22.4 h1:ECUO1QWyZ70DiIKEfgBx+8i9D98uspVOwgc1APs/07w= k8s.io/kubectl v0.22.4/go.mod h1:ok2qRT6y2Gy4+y+mniJVyUMKeBHP4OWS9Rdtf/QTM5I= +k8s.io/kubectl v0.23.2 h1:YakGzFN1csIOW/Us5VsLxjcu5Q6Vh5rqcvukcNuBwFk= +k8s.io/kubectl v0.23.2/go.mod h1:zWm5wt8PdRmHiVhE9a7q7XYW4WFX9StkZGnC18+1v3M= k8s.io/metrics v0.22.4/go.mod h1:6F/iwuYb1w2QDCoHkeMFLf4pwHBcYKLm4mPtVHKYrIw= +k8s.io/metrics v0.23.2/go.mod h1:idJHc+lLK5teHUC6Z2+d6qTKA12d5FLDxmC/DHiUYKc= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -1608,6 +1722,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyz sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod 
h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27/go.mod h1:tq2nT0Kx7W+/f2JVE+zxYtUhdjuELJkVpNz+x/QN5R4=
+sigs.k8s.io/cli-utils v0.28.0 h1:gsvwqygoXlW2y8CmKdflQJNZp1Yhi4geATW3/Ei7oYc=
+sigs.k8s.io/cli-utils v0.28.0/go.mod h1:WDVRa5/eQBKntG++uyKdyT+xU7MLdCR4XsgseqL5uX4=
 sigs.k8s.io/controller-runtime v0.11.0/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
 sigs.k8s.io/controller-runtime v0.11.1 h1:7YIHT2QnHJArj/dk9aUkYhfqfK5cIxPOX5gPECfdZLU=
 sigs.k8s.io/controller-runtime v0.11.1/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
@@ -1618,7 +1734,9 @@ sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH8
 sigs.k8s.io/kustomize/api v0.10.1 h1:KgU7hfYoscuqag84kxtzKdEC3mKMb99DPI3a0eaV1d0=
 sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8=
 sigs.k8s.io/kustomize/cmd/config v0.9.13/go.mod h1:7547FLF8W/lTaDf0BDqFTbZxM9zqwEJqCKN9sSR0xSs=
+sigs.k8s.io/kustomize/cmd/config v0.10.2/go.mod h1:K2aW7nXJ0AaT+VA/eO0/dzFLxmpFcTzudmAgDwPY1HQ=
 sigs.k8s.io/kustomize/kustomize/v4 v4.2.0/go.mod h1:MOkR6fmhwG7hEDRXBYELTi5GSFcLwfqwzTRHW3kv5go=
+sigs.k8s.io/kustomize/kustomize/v4 v4.4.1/go.mod h1:qOKJMMz2mBP+vcS7vK+mNz4HBLjaQSWRY22EF6Tb7Io=
 sigs.k8s.io/kustomize/kyaml v0.11.0/go.mod h1:GNMwjim4Ypgp/MueD3zXHLRJEjz7RvtPae0AwlvEMFM=
 sigs.k8s.io/kustomize/kyaml v0.13.0 h1:9c+ETyNfSrVhxvphs+K2dzT3dh5oVPPEqPOE/cUpScY=
 sigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E=

From 84bf8c8572417d3bd8d310be7e9c8d7d3761e758 Mon Sep 17 00:00:00 2001
From: Sunny
Date: Mon, 21 Feb 2022 17:41:17 +0530
Subject: [PATCH 62/63] fuzz: Update to use v1beta2 APIs

Signed-off-by: Sunny
---
 internal/util/temp_test.go         | 3 ++-
 tests/fuzz/gitrepository_fuzzer.go | 3 +--
 tests/fuzz/go.mod                  | 4 ++++
 3 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/internal/util/temp_test.go b/internal/util/temp_test.go
index 7db873e2d..2f98079c6 100644
--- a/internal/util/temp_test.go
+++ b/internal/util/temp_test.go
@@ -18,6 +18,7 @@ package util

 import (
 	"os"
+	"path/filepath"
 	"testing"

 	. "github.com/onsi/gomega"
@@ -50,7 +51,7 @@ func TestTempPathForObj(t *testing.T) {
 	}{
 		{
 			name: "default",
-			want: os.TempDir() + "/secret-default-foo-",
+			want: filepath.Join(os.TempDir(), "secret-default-foo-"),
 		},
 		{
 			name: "with directory",
diff --git a/tests/fuzz/gitrepository_fuzzer.go b/tests/fuzz/gitrepository_fuzzer.go
index 01c4cc949..a81ecdc4c 100644
--- a/tests/fuzz/gitrepository_fuzzer.go
+++ b/tests/fuzz/gitrepository_fuzzer.go
@@ -60,7 +60,7 @@ import (
 	"github.com/fluxcd/pkg/gittestserver"
 	"github.com/fluxcd/pkg/runtime/testenv"

-	sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
+	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
 	"github.com/fluxcd/source-controller/controllers"
 )

@@ -149,7 +149,6 @@ func ensureDependencies() error {
 	startEnvServer(func(m manager.Manager) {
 		utilruntime.Must((&controllers.GitRepositoryReconciler{
 			Client:  m.GetClient(),
-			Scheme:  scheme.Scheme,
 			Storage: storage,
 		}).SetupWithManager(m))
 	})
diff --git a/tests/fuzz/go.mod b/tests/fuzz/go.mod
index 4a1e24999..d88f3ac54 100644
--- a/tests/fuzz/go.mod
+++ b/tests/fuzz/go.mod
@@ -1,3 +1,7 @@
 module github.com/fluxcd/source-controller/tests/fuzz

 go 1.17
+
+replace github.com/fluxcd/kustomize-controller/api => ../../api
+
+replace github.com/fluxcd/kustomize-controller => ../../

From 9c7661dcbd6770fc3653fadf2847ca39c462ddf0 Mon Sep 17 00:00:00 2001
From: Sunny
Date: Mon, 21 Feb 2022 18:45:56 +0530
Subject: [PATCH 63/63] helmrepo: Make NewArtifact event human friendly

Inform index size and repo instead of a revision.

Signed-off-by: Sunny
---
 controllers/helmrepository_controller.go | 13 ++++++++++++-
 go.mod                                   |  2 +-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go
index b0b2f0e57..bfdce2958 100644
--- a/controllers/helmrepository_controller.go
+++ b/controllers/helmrepository_controller.go
@@ -24,6 +24,7 @@ import (
 	"os"
 	"time"

+	"github.com/docker/go-units"
 	helmgetter "helm.sh/helm/v3/pkg/getter"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -425,10 +426,20 @@ func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *s
 		}
 	}

+	// Calculate the artifact size to be included in the NewArtifact event.
+	fi, err := os.Stat(chartRepo.CachePath)
+	if err != nil {
+		return sreconcile.ResultEmpty, &serror.Event{
+			Err:    fmt.Errorf("unable to read the artifact: %w", err),
+			Reason: sourcev1.StorageOperationFailedReason,
+		}
+	}
+	size := units.HumanSize(float64(fi.Size()))
+
 	r.AnnotatedEventf(obj, map[string]string{
 		"revision": artifact.Revision,
 		"checksum": artifact.Checksum,
-	}, corev1.EventTypeNormal, "NewArtifact", "stored artifact for revision '%s'", artifact.Revision)
+	}, corev1.EventTypeNormal, "NewArtifact", "fetched index of size %s from '%s'", size, chartRepo.URL)

 	// Record it on the object.
 	obj.Status.Artifact = artifact.DeepCopy()
diff --git a/go.mod b/go.mod
index a12a9704d..5ea8d80f5 100644
--- a/go.mod
+++ b/go.mod
@@ -10,6 +10,7 @@ require (
 	github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7
 	github.com/cyphar/filepath-securejoin v0.2.2
 	github.com/darkowlzz/controller-check v0.0.0-20220119215126-648356cef22c
+	github.com/docker/go-units v0.4.0
 	github.com/elazarl/goproxy v0.0.0-20211114080932-d06c3be7c11b
 	github.com/fluxcd/pkg/apis/meta v0.12.0
 	github.com/fluxcd/pkg/gittestserver v0.5.0
@@ -73,7 +74,6 @@ require (
 	github.com/docker/docker-credential-helpers v0.6.3 // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-metrics v0.0.1 // indirect
-	github.com/docker/go-units v0.4.0 // indirect
 	github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
 	github.com/emirpasic/gods v1.12.0 // indirect
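The human-friendly event message above is produced with units.HumanSize from github.com/docker/go-units, which renders a byte count using decimal SI units. Below is a minimal, self-contained sketch of the same formatting; the index.yaml path and the repository URL are placeholders for illustration, not the controller's actual chartRepo.CachePath and chartRepo.URL values.

package main

import (
	"fmt"
	"os"

	"github.com/docker/go-units"
)

func main() {
	// Stat a local file standing in for the cached Helm repository index
	// (the controller stats its cache path instead; "index.yaml" is a placeholder).
	fi, err := os.Stat("index.yaml")
	if err != nil {
		fmt.Fprintln(os.Stderr, "unable to read the artifact:", err)
		os.Exit(1)
	}

	// HumanSize formats a byte count with decimal SI units, e.g. "121.4kB".
	size := units.HumanSize(float64(fi.Size()))

	// The event message now reports index size and source URL instead of a revision.
	fmt.Printf("fetched index of size %s from '%s'\n", size, "https://example.com/charts")
}

Formatting the size before emitting the event keeps the message readable in notification systems that simply display the event string.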
Generated API reference documentation (description changes):

-Path is the relative file path of this artifact.
+Path is the relative file path of this Artifact.
+It can be used to locate the Artifact file in the root of the Artifact
+storage on the local file system of the controller managing the Source.

-URL is the HTTP address of this artifact.
+URL is the HTTP address of this artifact.
+It is used by the consumers of the artifacts to fetch and use the
+artifacts. It is expected to be resolvable from within the cluster.

 (Optional)
-The timeout for download operations, defaults to 60s.
+The timeout for fetch operations, defaults to 60s.

 (Optional)
-URL is the download link for the artifact output of the last Bucket sync.
+URL is the fetch link for the artifact output of the last Bucket sync.

 (Optional)
-URL is the download link for the artifact output of the last repository sync.
+URL is the fetch link for the artifact output of the last repository sync.

+observedSourceArtifactRevision (string)
+(Optional)
+ObservedSourceArtifactRevision is the last observed Artifact.Revision
+of the Source reference.

+observedChartName (string)
+(Optional)
+ObservedChartName is the last observed chart name as defined by the
+resolved chart reference.

 conditions
@@ -1832,7 +1862,7 @@
 (Optional)
-URL is the download link for the last chart pulled.
+URL is the fetch link for the last chart pulled.

 (Optional)
-The timeout of index downloading, defaults to 60s.
+The timeout of index fetching, defaults to 60s.

 (Optional)
-URL is the download link for the last index fetched.
+URL is the fetch link for the last index fetched.
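The two added fields documented above are plain optional strings on the chart status. The following is a sketch of how such fields are typically declared in a Kubernetes API package; the field and JSON names come from the documentation text, while the struct name and the kubebuilder marker are assumptions rather than a copy of the actual v1beta2 source.

package v1beta2

// HelmChartStatus sketch: only the two new optional fields are shown;
// the remaining status fields (conditions, url, artifact) are omitted.
type HelmChartStatus struct {
	// ObservedSourceArtifactRevision is the last observed Artifact.Revision
	// of the Source reference.
	// +optional
	ObservedSourceArtifactRevision string `json:"observedSourceArtifactRevision,omitempty"`

	// ObservedChartName is the last observed chart name as defined by the
	// resolved chart reference.
	// +optional
	ObservedChartName string `json:"observedChartName,omitempty"`
}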