From 663afba56b4a2c692ccae9a1b0a44da4d758606d Mon Sep 17 00:00:00 2001 From: Your Name Date: Sat, 8 Feb 2025 14:27:19 +0000 Subject: [PATCH] feat: add resource StorageBucket --- apis/storage/v1alpha1/bucket_identity.go | 118 +++ apis/storage/v1alpha1/bucket_reference.go | 83 ++ apis/storage/v1alpha1/bucket_types.go | 84 ++ apis/storage/v1alpha1/doc.go | 16 + apis/storage/v1alpha1/groupversion_info.go | 33 + apis/storage/v1alpha1/types.generated.go | 671 ++++++++++++++ pkg/controller/direct/register/register.go | 1 + .../direct/storage/bucket_controller.go | 276 ++++++ .../direct/storage/mapper.generated.go | 832 ++++++++++++++++++ 9 files changed, 2114 insertions(+) create mode 100644 apis/storage/v1alpha1/bucket_identity.go create mode 100644 apis/storage/v1alpha1/bucket_reference.go create mode 100644 apis/storage/v1alpha1/bucket_types.go create mode 100644 apis/storage/v1alpha1/doc.go create mode 100644 apis/storage/v1alpha1/groupversion_info.go create mode 100644 apis/storage/v1alpha1/types.generated.go create mode 100644 pkg/controller/direct/storage/bucket_controller.go create mode 100644 pkg/controller/direct/storage/mapper.generated.go diff --git a/apis/storage/v1alpha1/bucket_identity.go b/apis/storage/v1alpha1/bucket_identity.go new file mode 100644 index 0000000000..070470dfd2 --- /dev/null +++ b/apis/storage/v1alpha1/bucket_identity.go @@ -0,0 +1,118 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1alpha1 + +import ( + "context" + "fmt" + "strings" + + "github.com/GoogleCloudPlatform/k8s-config-connector/apis/common" + refsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// BucketIdentity defines the full GCP identity of a StorageBucket, holding the +// parent (project and location) and the bucket's resource ID. +type BucketIdentity struct { + parent *BucketParent + id string +} + +func (i *BucketIdentity) String() string { + return i.parent.String() + "/buckets/" + i.id +} + +func (i *BucketIdentity) ID() string { + return i.id +} + +func (i *BucketIdentity) Parent() *BucketParent { + return i.parent +} + +type BucketParent struct { + ProjectID string + Location string +} + +func (p *BucketParent) String() string { + return "projects/" + p.ProjectID + "/locations/" + p.Location +} + + +// NewBucketIdentity builds a BucketIdentity from the Config Connector StorageBucket object. +func NewBucketIdentity(ctx context.Context, reader client.Reader, obj *StorageBucket) (*BucketIdentity, error) { + + // Get Parent + projectRef, err := refsv1beta1.ResolveProject(ctx, reader, obj.GetNamespace(), obj.Spec.ProjectRef) + if err != nil { + return nil, err + } + projectID := projectRef.ProjectID + if projectID == "" { + return nil, fmt.Errorf("cannot resolve project") + } + location := obj.Spec.Location + + // Get desired ID + resourceID := common.ValueOf(obj.Spec.ResourceID) + if resourceID == "" { + resourceID = obj.GetName() + } + if resourceID == "" { + return nil, fmt.Errorf("cannot resolve resource ID") + } + + // Use approved External + externalRef := common.ValueOf(obj.Status.ExternalRef) + if externalRef != "" { + // Validate desired with actual + actualParent, actualResourceID, err := ParseBucketExternal(externalRef) + if err != nil { + return nil, err + } + if actualParent.ProjectID != projectID { + return nil, fmt.Errorf("spec.projectRef changed, expect %s, got %s", actualParent.ProjectID, 
projectID) + } + if actualParent.Location != location { + return nil, fmt.Errorf("spec.location changed, expect %s, got %s", actualParent.Location, location) + } + if actualResourceID != resourceID { + return nil, fmt.Errorf("cannot reset `metadata.name` or `spec.resourceID` to %s, since it has already assigned to %s", + resourceID, actualResourceID) + } + } + return &BucketIdentity{ + parent: &BucketParent{ + ProjectID: projectID, + Location: location, + }, + id: resourceID, + }, nil +} + +func ParseBucketExternal(external string) (parent *BucketParent, resourceID string, err error) { + tokens := strings.Split(external, "/") + if len(tokens) != 6 || tokens[0] != "projects" || tokens[2] != "locations" || tokens[4] != "buckets" { + return nil, "", fmt.Errorf("format of StorageBucket external=%q was not known (use projects/{{projectID}}/locations/{{location}}/buckets/{{bucketID}})", external) + } + parent = &BucketParent{ + ProjectID: tokens[1], + Location: tokens[3], + } + resourceID = tokens[5] + return parent, resourceID, nil +} diff --git a/apis/storage/v1alpha1/bucket_reference.go b/apis/storage/v1alpha1/bucket_reference.go new file mode 100644 index 0000000000..3aef2f9be5 --- /dev/null +++ b/apis/storage/v1alpha1/bucket_reference.go @@ -0,0 +1,83 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1alpha1 + +import ( + "context" + "fmt" + + refsv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/k8s" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ refsv1beta1.ExternalNormalizer = &BucketRef{} + +// BucketRef defines the resource reference to StorageBucket, whose "External" field +// holds the GCP identifier for the KRM object. +type BucketRef struct { + // A reference to an externally managed StorageBucket resource. + // Should be in the format "projects/{{projectID}}/locations/{{location}}/buckets/{{bucketID}}". + External string `json:"external,omitempty"` + + // The name of a StorageBucket resource. + Name string `json:"name,omitempty"` + + // The namespace of a StorageBucket resource. + Namespace string `json:"namespace,omitempty"` +} + +// NormalizedExternal provisions the "External" value for other resources that depend on StorageBucket. +// If the "External" is given in the other resource's spec.StorageBucketRef, the given value will be used. +// Otherwise, the "Name" and "Namespace" will be used to query the actual StorageBucket object from the cluster. 
+func (r *BucketRef) NormalizedExternal(ctx context.Context, reader client.Reader, otherNamespace string) (string, error) { + if r.External != "" && r.Name != "" { + return "", fmt.Errorf("cannot specify both name and external on %s reference", StorageBucketGVK.Kind) + } + // From given External + if r.External != "" { + if _, _, err := ParseBucketExternal(r.External); err != nil { + return "", err + } + return r.External, nil + } + + // From the Config Connector object + if r.Namespace == "" { + r.Namespace = otherNamespace + } + key := types.NamespacedName{Name: r.Name, Namespace: r.Namespace} + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(StorageBucketGVK) + if err := reader.Get(ctx, key, u); err != nil { + if apierrors.IsNotFound(err) { + return "", k8s.NewReferenceNotFoundError(u.GroupVersionKind(), key) + } + return "", fmt.Errorf("reading referenced %s %s: %w", StorageBucketGVK, key, err) + } + // Get external from status.externalRef. This is the most trustworthy place. + actualExternalRef, _, err := unstructured.NestedString(u.Object, "status", "externalRef") + if err != nil { + return "", fmt.Errorf("reading status.externalRef: %w", err) + } + if actualExternalRef == "" { + return "", k8s.NewReferenceNotReadyError(u.GroupVersionKind(), key) + } + r.External = actualExternalRef + return r.External, nil +} diff --git a/apis/storage/v1alpha1/bucket_types.go b/apis/storage/v1alpha1/bucket_types.go new file mode 100644 index 0000000000..78ee617eac --- /dev/null +++ b/apis/storage/v1alpha1/bucket_types.go @@ -0,0 +1,84 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/apis/k8s/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var StorageBucketGVK = GroupVersion.WithKind("StorageBucket") + +// StorageBucketSpec defines the desired state of StorageBucket +// +kcc:proto=google.storage.v2.Bucket +type StorageBucketSpec struct { + // The StorageBucket name. If not given, the metadata.name will be used. + ResourceID *string `json:"resourceID,omitempty"` +} + +// StorageBucketStatus defines the config connector machine state of StorageBucket +type StorageBucketStatus struct { + /* Conditions represent the latest available observations of the + object's current state. */ + Conditions []v1alpha1.Condition `json:"conditions,omitempty"` + + // ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource. + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + + // A unique specifier for the StorageBucket resource in GCP. + ExternalRef *string `json:"externalRef,omitempty"` + + // ObservedState is the state of the resource as most recently observed in GCP. + ObservedState *StorageBucketObservedState `json:"observedState,omitempty"` +} + +// StorageBucketObservedState is the state of the StorageBucket resource as most recently observed in GCP. 
+// +kcc:proto=google.storage.v2.Bucket +type StorageBucketObservedState struct { +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// TODO(user): make sure the pluralization below is correct +// +kubebuilder:resource:categories=gcp,shortName=gcpstoragebucket;gcpstoragebuckets +// +kubebuilder:subresource:status +// +kubebuilder:metadata:labels="cnrm.cloud.google.com/managed-by-kcc=true";"cnrm.cloud.google.com/system=true" +// +kubebuilder:printcolumn:name="Age",JSONPath=".metadata.creationTimestamp",type="date" +// +kubebuilder:printcolumn:name="Ready",JSONPath=".status.conditions[?(@.type=='Ready')].status",type="string",description="When 'True', the most recent reconcile of the resource succeeded" +// +kubebuilder:printcolumn:name="Status",JSONPath=".status.conditions[?(@.type=='Ready')].reason",type="string",description="The reason for the value in 'Ready'" +// +kubebuilder:printcolumn:name="Status Age",JSONPath=".status.conditions[?(@.type=='Ready')].lastTransitionTime",type="date",description="The last transition time for the value in 'Status'" + +// StorageBucket is the Schema for the StorageBucket API +// +k8s:openapi-gen=true +type StorageBucket struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +required + Spec StorageBucketSpec `json:"spec,omitempty"` + Status StorageBucketStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// StorageBucketList contains a list of StorageBucket +type StorageBucketList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StorageBucket `json:"items"` +} + +func init() { + SchemeBuilder.Register(&StorageBucket{}, &StorageBucketList{}) +} diff --git a/apis/storage/v1alpha1/doc.go b/apis/storage/v1alpha1/doc.go new file mode 100644 index 0000000000..f672956c68 --- /dev/null +++ b/apis/storage/v1alpha1/doc.go @@ -0,0 +1,16 @@ +// 
Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +kcc:proto=google.storage.v2 +package v1alpha1 diff --git a/apis/storage/v1alpha1/groupversion_info.go b/apis/storage/v1alpha1/groupversion_info.go new file mode 100644 index 0000000000..aa0290645c --- /dev/null +++ b/apis/storage/v1alpha1/groupversion_info.go @@ -0,0 +1,33 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +kubebuilder:object:generate=true +// +groupName=storage.cnrm.cloud.google.com +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "storage.cnrm.cloud.google.com", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/storage/v1alpha1/types.generated.go b/apis/storage/v1alpha1/types.generated.go new file mode 100644 index 0000000000..38c07597bd --- /dev/null +++ b/apis/storage/v1alpha1/types.generated.go @@ -0,0 +1,671 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + + +// +kcc:proto=google.storage.v2.Bucket +type Bucket struct { + // Immutable. The name of the bucket. + // Format: `projects/{project}/buckets/{bucket}` + // +kcc:proto:field=google.storage.v2.Bucket.name + Name *string `json:"name,omitempty"` + + // The etag of the bucket. + // If included in the metadata of an UpdateBucketRequest, the operation will + // only be performed if the etag matches that of the bucket. 
+ // +kcc:proto:field=google.storage.v2.Bucket.etag + Etag *string `json:"etag,omitempty"` + + // Immutable. The project which owns this bucket, in the format of + // "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. + // +kcc:proto:field=google.storage.v2.Bucket.project + Project *string `json:"project,omitempty"` + + // Immutable. The location of the bucket. Object data for objects in the + // bucket resides in physical storage within this region. Defaults to `US`. + // See the + // [https://developers.google.com/storage/docs/concepts-techniques#specifyinglocations"][developer's + // guide] for the authoritative list. Attempting to update this field after + // the bucket is created will result in an error. + // +kcc:proto:field=google.storage.v2.Bucket.location + Location *string `json:"location,omitempty"` + + // The bucket's default storage class, used whenever no storageClass is + // specified for a newly-created object. This defines how objects in the + // bucket are stored and determines the SLA and the cost of storage. + // If this value is not specified when the bucket is created, it will default + // to `STANDARD`. For more information, see + // https://developers.google.com/storage/docs/storage-classes. + // +kcc:proto:field=google.storage.v2.Bucket.storage_class + StorageClass *string `json:"storageClass,omitempty"` + + // The recovery point objective for cross-region replication of the bucket. + // Applicable only for dual- and multi-region buckets. "DEFAULT" uses default + // replication. "ASYNC_TURBO" enables turbo replication, valid for dual-region + // buckets only. If rpo is not specified when the bucket is created, it + // defaults to "DEFAULT". For more information, see + // https://cloud.google.com/storage/docs/availability-durability#turbo-replication. + // +kcc:proto:field=google.storage.v2.Bucket.rpo + Rpo *string `json:"rpo,omitempty"` + + // Access controls on the bucket. 
+ // If iam_config.uniform_bucket_level_access is enabled on this bucket, + // requests to set, read, or modify acl is an error. + // +kcc:proto:field=google.storage.v2.Bucket.acl + Acl []BucketAccessControl `json:"acl,omitempty"` + + // Default access controls to apply to new objects when no ACL is provided. + // If iam_config.uniform_bucket_level_access is enabled on this bucket, + // requests to set, read, or modify acl is an error. + // +kcc:proto:field=google.storage.v2.Bucket.default_object_acl + DefaultObjectAcl []ObjectAccessControl `json:"defaultObjectAcl,omitempty"` + + // The bucket's lifecycle config. See + // [https://developers.google.com/storage/docs/lifecycle]Lifecycle Management] + // for more information. + // +kcc:proto:field=google.storage.v2.Bucket.lifecycle + Lifecycle *Bucket_Lifecycle `json:"lifecycle,omitempty"` + + // The bucket's [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] + // (CORS) config. + // +kcc:proto:field=google.storage.v2.Bucket.cors + Cors []Bucket_Cors `json:"cors,omitempty"` + + // The default value for event-based hold on newly created objects in this + // bucket. Event-based hold is a way to retain objects indefinitely until an + // event occurs, signified by the + // hold's release. After being released, such objects will be subject to + // bucket-level retention (if any). One sample use case of this flag is for + // banks to hold loan documents for at least 3 years after loan is paid in + // full. Here, bucket-level retention is 3 years and the event is loan being + // paid in full. In this example, these objects will be held intact for any + // number of years until the event has occurred (event-based hold on the + // object is released) and then 3 more years after that. That means retention + // duration of the objects begins from the moment event-based hold + // transitioned from true to false. Objects under event-based hold cannot be + // deleted, overwritten or archived until the hold is removed. 
+ // +kcc:proto:field=google.storage.v2.Bucket.default_event_based_hold + DefaultEventBasedHold *bool `json:"defaultEventBasedHold,omitempty"` + + // User-provided labels, in key/value pairs. + // +kcc:proto:field=google.storage.v2.Bucket.labels + Labels map[string]string `json:"labels,omitempty"` + + // The bucket's website config, controlling how the service behaves + // when accessing bucket contents as a web site. See the + // [https://cloud.google.com/storage/docs/static-website][Static Website + // Examples] for more information. + // +kcc:proto:field=google.storage.v2.Bucket.website + Website *Bucket_Website `json:"website,omitempty"` + + // The bucket's versioning config. + // +kcc:proto:field=google.storage.v2.Bucket.versioning + Versioning *Bucket_Versioning `json:"versioning,omitempty"` + + // The bucket's logging config, which defines the destination bucket + // and name prefix (if any) for the current bucket's logs. + // +kcc:proto:field=google.storage.v2.Bucket.logging + Logging *Bucket_Logging `json:"logging,omitempty"` + + // Encryption config for a bucket. + // +kcc:proto:field=google.storage.v2.Bucket.encryption + Encryption *Bucket_Encryption `json:"encryption,omitempty"` + + // The bucket's billing config. + // +kcc:proto:field=google.storage.v2.Bucket.billing + Billing *Bucket_Billing `json:"billing,omitempty"` + + // The bucket's retention policy. The retention policy enforces a minimum + // retention time for all objects contained in the bucket, based on their + // creation time. Any attempt to overwrite or delete objects younger than the + // retention period will result in a PERMISSION_DENIED error. An unlocked + // retention policy can be modified or removed from the bucket via a + // storage.buckets.update operation. A locked retention policy cannot be + // removed or shortened in duration for the lifetime of the bucket. 
+ // Attempting to remove or decrease period of a locked retention policy will + // result in a PERMISSION_DENIED error. + // +kcc:proto:field=google.storage.v2.Bucket.retention_policy + RetentionPolicy *Bucket_RetentionPolicy `json:"retentionPolicy,omitempty"` + + // The bucket's IAM config. + // +kcc:proto:field=google.storage.v2.Bucket.iam_config + IamConfig *Bucket_IamConfig `json:"iamConfig,omitempty"` + + // Reserved for future use. + // +kcc:proto:field=google.storage.v2.Bucket.satisfies_pzs + SatisfiesPzs *bool `json:"satisfiesPzs,omitempty"` + + // Configuration that, if present, specifies the data placement for a + // [https://cloud.google.com/storage/docs/locations#location-dr][configurable + // dual-region]. + // +kcc:proto:field=google.storage.v2.Bucket.custom_placement_config + CustomPlacementConfig *Bucket_CustomPlacementConfig `json:"customPlacementConfig,omitempty"` + + // The bucket's Autoclass configuration. If there is no configuration, the + // Autoclass feature will be disabled and have no effect on the bucket. + // +kcc:proto:field=google.storage.v2.Bucket.autoclass + Autoclass *Bucket_Autoclass `json:"autoclass,omitempty"` + + // Optional. The bucket's hierarchical namespace configuration. If there is no + // configuration, the hierarchical namespace feature will be disabled and have + // no effect on the bucket. + // +kcc:proto:field=google.storage.v2.Bucket.hierarchical_namespace + HierarchicalNamespace *Bucket_HierarchicalNamespace `json:"hierarchicalNamespace,omitempty"` + + // Optional. The bucket's soft delete policy. The soft delete policy prevents + // soft-deleted objects from being permanently deleted. + // +kcc:proto:field=google.storage.v2.Bucket.soft_delete_policy + SoftDeletePolicy *Bucket_SoftDeletePolicy `json:"softDeletePolicy,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.Autoclass +type Bucket_Autoclass struct { + // Enables Autoclass. 
+ // +kcc:proto:field=google.storage.v2.Bucket.Autoclass.enabled + Enabled *bool `json:"enabled,omitempty"` + + // An object in an Autoclass bucket will eventually cool down to the + // terminal storage class if there is no access to the object. + // The only valid values are NEARLINE and ARCHIVE. + // +kcc:proto:field=google.storage.v2.Bucket.Autoclass.terminal_storage_class + TerminalStorageClass *string `json:"terminalStorageClass,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.Billing +type Bucket_Billing struct { + // When set to true, Requester Pays is enabled for this bucket. + // +kcc:proto:field=google.storage.v2.Bucket.Billing.requester_pays + RequesterPays *bool `json:"requesterPays,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.Cors +type Bucket_Cors struct { + // The list of Origins eligible to receive CORS response headers. See + // [https://tools.ietf.org/html/rfc6454][RFC 6454] for more on origins. + // Note: "*" is permitted in the list of origins, and means "any Origin". + // +kcc:proto:field=google.storage.v2.Bucket.Cors.origin + Origin []string `json:"origin,omitempty"` + + // The list of HTTP methods on which to include CORS response headers, + // (`GET`, `OPTIONS`, `POST`, etc) Note: "*" is permitted in the list of + // methods, and means "any method". + // +kcc:proto:field=google.storage.v2.Bucket.Cors.method + Method []string `json:"method,omitempty"` + + // The list of HTTP headers other than the + // [https://www.w3.org/TR/cors/#simple-response-header][simple response + // headers] to give permission for the user-agent to share across domains. + // +kcc:proto:field=google.storage.v2.Bucket.Cors.response_header + ResponseHeader []string `json:"responseHeader,omitempty"` + + // The value, in seconds, to return in the + // [https://www.w3.org/TR/cors/#access-control-max-age-response-header][Access-Control-Max-Age + // header] used in preflight responses. 
+ // +kcc:proto:field=google.storage.v2.Bucket.Cors.max_age_seconds + MaxAgeSeconds *int32 `json:"maxAgeSeconds,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.CustomPlacementConfig +type Bucket_CustomPlacementConfig struct { + // List of locations to use for data placement. + // +kcc:proto:field=google.storage.v2.Bucket.CustomPlacementConfig.data_locations + DataLocations []string `json:"dataLocations,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.Encryption +type Bucket_Encryption struct { + // The name of the Cloud KMS key that will be used to encrypt objects + // inserted into this bucket, if no encryption method is specified. + // +kcc:proto:field=google.storage.v2.Bucket.Encryption.default_kms_key + DefaultKMSKey *string `json:"defaultKMSKey,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.HierarchicalNamespace +type Bucket_HierarchicalNamespace struct { + // Optional. Enables the hierarchical namespace feature. + // +kcc:proto:field=google.storage.v2.Bucket.HierarchicalNamespace.enabled + Enabled *bool `json:"enabled,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.IamConfig +type Bucket_IamConfig struct { + // Bucket restriction options currently enforced on the bucket. + // +kcc:proto:field=google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access + UniformBucketLevelAccess *Bucket_IamConfig_UniformBucketLevelAccess `json:"uniformBucketLevelAccess,omitempty"` + + // Whether IAM will enforce public access prevention. Valid values are + // "enforced" or "inherited". + // +kcc:proto:field=google.storage.v2.Bucket.IamConfig.public_access_prevention + PublicAccessPrevention *string `json:"publicAccessPrevention,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess +type Bucket_IamConfig_UniformBucketLevelAccess struct { + // If set, access checks only use bucket-level IAM policies or above. 
+ // +kcc:proto:field=google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.enabled + Enabled *bool `json:"enabled,omitempty"` + + // The deadline time for changing + // `iam_config.uniform_bucket_level_access.enabled` from `true` to + // `false`. Mutable until the specified deadline is reached, but not + // afterward. + // +kcc:proto:field=google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time + LockTime *string `json:"lockTime,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.Lifecycle +type Bucket_Lifecycle struct { + // A lifecycle management rule, which is made of an action to take and the + // condition(s) under which the action will be taken. + // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.rule + Rule []Bucket_Lifecycle_Rule `json:"rule,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.Lifecycle.Rule +type Bucket_Lifecycle_Rule struct { + // The action to take. + // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.action + Action *Bucket_Lifecycle_Rule_Action `json:"action,omitempty"` + + // The condition(s) under which the action will be taken. + // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.condition + Condition *Bucket_Lifecycle_Rule_Condition `json:"condition,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.Lifecycle.Rule.Action +type Bucket_Lifecycle_Rule_Action struct { + // Type of the action. Currently, only `Delete`, `SetStorageClass`, and + // `AbortIncompleteMultipartUpload` are supported. + // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.Action.type + Type *string `json:"type,omitempty"` + + // Target storage class. Required iff the type of the action is + // SetStorageClass. + // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.Action.storage_class + StorageClass *string `json:"storageClass,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.Lifecycle.Rule.Condition +type Bucket_Lifecycle_Rule_Condition struct { + // Age of an object (in days). 
This condition is satisfied when an + // object reaches the specified age. + // A value of 0 indicates that all objects immediately match this + // condition. + // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.Condition.age_days + AgeDays *int32 `json:"ageDays,omitempty"` + + // This condition is satisfied when an object is created before midnight + // of the specified date in UTC. + // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before + CreatedBefore *Date `json:"createdBefore,omitempty"` + + // Relevant only for versioned objects. If the value is + // `true`, this condition matches live objects; if the value + // is `false`, it matches archived objects. + // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.Condition.is_live + IsLive *bool `json:"isLive,omitempty"` + + // Relevant only for versioned objects. If the value is N, this + // condition is satisfied when there are at least N versions (including + // the live version) newer than this version of the object. + // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.Condition.num_newer_versions + NumNewerVersions *int32 `json:"numNewerVersions,omitempty"` + + // Objects having any of the storage classes specified by this condition + // will be matched. Values include `MULTI_REGIONAL`, `REGIONAL`, + // `NEARLINE`, `COLDLINE`, `STANDARD`, and + // `DURABLE_REDUCED_AVAILABILITY`. + // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.Condition.matches_storage_class + MatchesStorageClass []string `json:"matchesStorageClass,omitempty"` + + // Number of days that have elapsed since the custom timestamp set on an + // object. + // The value of the field must be a nonnegative integer. 
+ // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.Condition.days_since_custom_time + DaysSinceCustomTime *int32 `json:"daysSinceCustomTime,omitempty"` + + // An object matches this condition if the custom timestamp set on the + // object is before the specified date in UTC. + // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before + CustomTimeBefore *Date `json:"customTimeBefore,omitempty"` + + // This condition is relevant only for versioned objects. An object + // version satisfies this condition only if these many days have been + // passed since it became noncurrent. The value of the field must be a + // nonnegative integer. If it's zero, the object version will become + // eligible for Lifecycle action as soon as it becomes noncurrent. + // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.Condition.days_since_noncurrent_time + DaysSinceNoncurrentTime *int32 `json:"daysSinceNoncurrentTime,omitempty"` + + // This condition is relevant only for versioned objects. An object + // version satisfies this condition only if it became noncurrent before + // the specified date in UTC. + // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before + NoncurrentTimeBefore *Date `json:"noncurrentTimeBefore,omitempty"` + + // List of object name prefixes. If any prefix exactly matches the + // beginning of the object name, the condition evaluates to true. + // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.Condition.matches_prefix + MatchesPrefix []string `json:"matchesPrefix,omitempty"` + + // List of object name suffixes. If any suffix exactly matches the + // end of the object name, the condition evaluates to true. 
+ // +kcc:proto:field=google.storage.v2.Bucket.Lifecycle.Rule.Condition.matches_suffix + MatchesSuffix []string `json:"matchesSuffix,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.Logging +type Bucket_Logging struct { + // The destination bucket where the current bucket's logs should be placed, + // using path format (like `projects/123456/buckets/foo`). + // +kcc:proto:field=google.storage.v2.Bucket.Logging.log_bucket + LogBucket *string `json:"logBucket,omitempty"` + + // A prefix for log object names. + // +kcc:proto:field=google.storage.v2.Bucket.Logging.log_object_prefix + LogObjectPrefix *string `json:"logObjectPrefix,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.RetentionPolicy +type Bucket_RetentionPolicy struct { + // Server-determined value that indicates the time from which policy was + // enforced and effective. + // +kcc:proto:field=google.storage.v2.Bucket.RetentionPolicy.effective_time + EffectiveTime *string `json:"effectiveTime,omitempty"` + + // Once locked, an object retention policy cannot be modified. + // +kcc:proto:field=google.storage.v2.Bucket.RetentionPolicy.is_locked + IsLocked *bool `json:"isLocked,omitempty"` + + // The duration that objects need to be retained. Retention duration must be + // greater than zero and less than 100 years. Note that enforcement of + // retention periods less than a day is not guaranteed. Such periods should + // only be used for testing purposes. Any `nanos` value specified will be + // rounded down to the nearest second. + // +kcc:proto:field=google.storage.v2.Bucket.RetentionPolicy.retention_duration + RetentionDuration *string `json:"retentionDuration,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.SoftDeletePolicy +type Bucket_SoftDeletePolicy struct { + // The period of time that soft-deleted objects in the bucket must be + // retained and cannot be permanently deleted. The duration must be greater + // than or equal to 7 days and less than 1 year. 
+ // +kcc:proto:field=google.storage.v2.Bucket.SoftDeletePolicy.retention_duration + RetentionDuration *string `json:"retentionDuration,omitempty"` + + // Time from which the policy was effective. This is service-provided. + // +kcc:proto:field=google.storage.v2.Bucket.SoftDeletePolicy.effective_time + EffectiveTime *string `json:"effectiveTime,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.Versioning +type Bucket_Versioning struct { + // While set to true, versioning is fully enabled for this bucket. + // +kcc:proto:field=google.storage.v2.Bucket.Versioning.enabled + Enabled *bool `json:"enabled,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.Website +type Bucket_Website struct { + // If the requested object path is missing, the service will ensure the path + // has a trailing '/', append this suffix, and attempt to retrieve the + // resulting object. This allows the creation of `index.html` + // objects to represent directory pages. + // +kcc:proto:field=google.storage.v2.Bucket.Website.main_page_suffix + MainPageSuffix *string `json:"mainPageSuffix,omitempty"` + + // If the requested object path is missing, and any + // `mainPageSuffix` object is missing, if applicable, the service + // will return the named object from this bucket as the content for a + // [https://tools.ietf.org/html/rfc7231#section-6.5.4][404 Not Found] + // result. + // +kcc:proto:field=google.storage.v2.Bucket.Website.not_found_page + NotFoundPage *string `json:"notFoundPage,omitempty"` +} + +// +kcc:proto=google.storage.v2.BucketAccessControl +type BucketAccessControl struct { + // The access permission for the entity. + // +kcc:proto:field=google.storage.v2.BucketAccessControl.role + Role *string `json:"role,omitempty"` + + // The ID of the access-control entry. 
+ // +kcc:proto:field=google.storage.v2.BucketAccessControl.id + ID *string `json:"id,omitempty"` + + // The entity holding the permission, in one of the following forms: + // * `user-{userid}` + // * `user-{email}` + // * `group-{groupid}` + // * `group-{email}` + // * `domain-{domain}` + // * `project-{team}-{projectnumber}` + // * `project-{team}-{projectid}` + // * `allUsers` + // * `allAuthenticatedUsers` + // Examples: + // * The user `liz@example.com` would be `user-liz@example.com`. + // * The group `example@googlegroups.com` would be + // `group-example@googlegroups.com` + // * All members of the Google Apps for Business domain `example.com` would be + // `domain-example.com` + // For project entities, `project-{team}-{projectnumber}` format will be + // returned on response. + // +kcc:proto:field=google.storage.v2.BucketAccessControl.entity + Entity *string `json:"entity,omitempty"` + + // The ID for the entity, if any. + // +kcc:proto:field=google.storage.v2.BucketAccessControl.entity_id + EntityID *string `json:"entityID,omitempty"` + + // The etag of the BucketAccessControl. + // If included in the metadata of an update or delete request message, the + // operation operation will only be performed if the etag matches that of the + // bucket's BucketAccessControl. + // +kcc:proto:field=google.storage.v2.BucketAccessControl.etag + Etag *string `json:"etag,omitempty"` + + // The email address associated with the entity, if any. + // +kcc:proto:field=google.storage.v2.BucketAccessControl.email + Email *string `json:"email,omitempty"` + + // The domain associated with the entity, if any. + // +kcc:proto:field=google.storage.v2.BucketAccessControl.domain + Domain *string `json:"domain,omitempty"` + + // The project team associated with the entity, if any. 
+ // +kcc:proto:field=google.storage.v2.BucketAccessControl.project_team + ProjectTeam *ProjectTeam `json:"projectTeam,omitempty"` +} + +// +kcc:proto=google.storage.v2.ObjectAccessControl +type ObjectAccessControl struct { + // The access permission for the entity. One of the following values: + // * `READER` + // * `WRITER` + // * `OWNER` + // +kcc:proto:field=google.storage.v2.ObjectAccessControl.role + Role *string `json:"role,omitempty"` + + // The ID of the access-control entry. + // +kcc:proto:field=google.storage.v2.ObjectAccessControl.id + ID *string `json:"id,omitempty"` + + // The entity holding the permission, in one of the following forms: + // * `user-{userid}` + // * `user-{email}` + // * `group-{groupid}` + // * `group-{email}` + // * `domain-{domain}` + // * `project-{team}-{projectnumber}` + // * `project-{team}-{projectid}` + // * `allUsers` + // * `allAuthenticatedUsers` + // Examples: + // * The user `liz@example.com` would be `user-liz@example.com`. + // * The group `example@googlegroups.com` would be + // `group-example@googlegroups.com`. + // * All members of the Google Apps for Business domain `example.com` would be + // `domain-example.com`. + // For project entities, `project-{team}-{projectnumber}` format will be + // returned on response. + // +kcc:proto:field=google.storage.v2.ObjectAccessControl.entity + Entity *string `json:"entity,omitempty"` + + // The ID for the entity, if any. + // +kcc:proto:field=google.storage.v2.ObjectAccessControl.entity_id + EntityID *string `json:"entityID,omitempty"` + + // The etag of the ObjectAccessControl. + // If included in the metadata of an update or delete request message, the + // operation will only be performed if the etag matches that of the live + // object's ObjectAccessControl. + // +kcc:proto:field=google.storage.v2.ObjectAccessControl.etag + Etag *string `json:"etag,omitempty"` + + // The email address associated with the entity, if any. 
+ // +kcc:proto:field=google.storage.v2.ObjectAccessControl.email + Email *string `json:"email,omitempty"` + + // The domain associated with the entity, if any. + // +kcc:proto:field=google.storage.v2.ObjectAccessControl.domain + Domain *string `json:"domain,omitempty"` + + // The project team associated with the entity, if any. + // +kcc:proto:field=google.storage.v2.ObjectAccessControl.project_team + ProjectTeam *ProjectTeam `json:"projectTeam,omitempty"` +} + +// +kcc:proto=google.storage.v2.Owner +type Owner struct { + // The entity, in the form `user-`*userId*. + // +kcc:proto:field=google.storage.v2.Owner.entity + Entity *string `json:"entity,omitempty"` + + // The ID for the entity. + // +kcc:proto:field=google.storage.v2.Owner.entity_id + EntityID *string `json:"entityID,omitempty"` +} + +// +kcc:proto=google.storage.v2.ProjectTeam +type ProjectTeam struct { + // The project number. + // +kcc:proto:field=google.storage.v2.ProjectTeam.project_number + ProjectNumber *string `json:"projectNumber,omitempty"` + + // The team. + // +kcc:proto:field=google.storage.v2.ProjectTeam.team + Team *string `json:"team,omitempty"` +} + +// +kcc:proto=google.type.Date +type Date struct { + // Year of the date. Must be from 1 to 9999, or 0 to specify a date without + // a year. + // +kcc:proto:field=google.type.Date.year + Year *int32 `json:"year,omitempty"` + + // Month of a year. Must be from 1 to 12, or 0 to specify a year without a + // month and day. + // +kcc:proto:field=google.type.Date.month + Month *int32 `json:"month,omitempty"` + + // Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 + // to specify a year by itself or a year and month where the day isn't + // significant. + // +kcc:proto:field=google.type.Date.day + Day *int32 `json:"day,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket +type BucketObservedState struct { + // Output only. The user-chosen part of the bucket name. The `{bucket}` + // portion of the `name` field. 
For globally unique buckets, this is equal to + // the "bucket name" of other Cloud Storage APIs. Example: "pub". + // +kcc:proto:field=google.storage.v2.Bucket.bucket_id + BucketID *string `json:"bucketID,omitempty"` + + // Output only. The metadata generation of this bucket. + // +kcc:proto:field=google.storage.v2.Bucket.metageneration + Metageneration *int64 `json:"metageneration,omitempty"` + + // Output only. The location type of the bucket (region, dual-region, + // multi-region, etc). + // +kcc:proto:field=google.storage.v2.Bucket.location_type + LocationType *string `json:"locationType,omitempty"` + + // Access controls on the bucket. + // If iam_config.uniform_bucket_level_access is enabled on this bucket, + // requests to set, read, or modify acl is an error. + // +kcc:proto:field=google.storage.v2.Bucket.acl + Acl []BucketAccessControlObservedState `json:"acl,omitempty"` + + // Default access controls to apply to new objects when no ACL is provided. + // If iam_config.uniform_bucket_level_access is enabled on this bucket, + // requests to set, read, or modify acl is an error. + // +kcc:proto:field=google.storage.v2.Bucket.default_object_acl + DefaultObjectAcl []ObjectAccessControlObservedState `json:"defaultObjectAcl,omitempty"` + + // Output only. The creation time of the bucket. + // +kcc:proto:field=google.storage.v2.Bucket.create_time + CreateTime *string `json:"createTime,omitempty"` + + // Output only. The modification time of the bucket. + // +kcc:proto:field=google.storage.v2.Bucket.update_time + UpdateTime *string `json:"updateTime,omitempty"` + + // Output only. The owner of the bucket. This is always the project team's + // owner group. + // +kcc:proto:field=google.storage.v2.Bucket.owner + Owner *Owner `json:"owner,omitempty"` + + // The bucket's Autoclass configuration. If there is no configuration, the + // Autoclass feature will be disabled and have no effect on the bucket. 
+ // +kcc:proto:field=google.storage.v2.Bucket.autoclass + Autoclass *Bucket_AutoclassObservedState `json:"autoclass,omitempty"` +} + +// +kcc:proto=google.storage.v2.Bucket.Autoclass +type Bucket_AutoclassObservedState struct { + // Output only. Latest instant at which the `enabled` field was set to true + // after being disabled/unconfigured or set to false after being enabled. If + // Autoclass is enabled when the bucket is created, the toggle_time is set + // to the bucket creation time. + // +kcc:proto:field=google.storage.v2.Bucket.Autoclass.toggle_time + ToggleTime *string `json:"toggleTime,omitempty"` + + // Output only. Latest instant at which the autoclass terminal storage class + // was updated. + // +kcc:proto:field=google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time + TerminalStorageClassUpdateTime *string `json:"terminalStorageClassUpdateTime,omitempty"` +} + +// +kcc:proto=google.storage.v2.BucketAccessControl +type BucketAccessControlObservedState struct { + // Output only. The alternative entity format, if exists. For project + // entities, `project-{team}-{projectid}` format will be returned on response. + // +kcc:proto:field=google.storage.v2.BucketAccessControl.entity_alt + EntityAlt *string `json:"entityAlt,omitempty"` +} + +// +kcc:proto=google.storage.v2.ObjectAccessControl +type ObjectAccessControlObservedState struct { + // Output only. The alternative entity format, if exists. For project + // entities, `project-{team}-{projectid}` format will be returned on response. 
+ // +kcc:proto:field=google.storage.v2.ObjectAccessControl.entity_alt + EntityAlt *string `json:"entityAlt,omitempty"` +} diff --git a/pkg/controller/direct/register/register.go b/pkg/controller/direct/register/register.go index 9dce92703d..379b57ac0a 100644 --- a/pkg/controller/direct/register/register.go +++ b/pkg/controller/direct/register/register.go @@ -47,5 +47,6 @@ import ( _ "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/securesourcemanager" _ "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/spanner" _ "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/sql" + _ "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/storage" _ "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/workstations" ) diff --git a/pkg/controller/direct/storage/bucket_controller.go b/pkg/controller/direct/storage/bucket_controller.go new file mode 100644 index 0000000000..df4a089e14 --- /dev/null +++ b/pkg/controller/direct/storage/bucket_controller.go @@ -0,0 +1,276 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "context" + "fmt" + "reflect" + + refs "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" + krm "github.com/GoogleCloudPlatform/k8s-config-connector/apis/storage/v1alpha1" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/config" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/common" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/directbase" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct/registry" + + // TODO(contributor): Update the import with the google cloud client + gcp "cloud.google.com/go/storage/apiv1" + + // TODO(contributor): Update the import with the google cloud client api protobuf + storagepb "cloud.google.com/go/storage/v2/storagepb" + "google.golang.org/api/option" + "google.golang.org/protobuf/types/known/fieldmaskpb" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func init() { + registry.RegisterModel(krm.StorageBucketGVK, NewBucketModel) +} + +func NewBucketModel(ctx context.Context, config *config.ControllerConfig) (directbase.Model, error) { + return &modelBucket{config: *config}, nil +} + +var _ directbase.Model = &modelBucket{} + +type modelBucket struct { + config config.ControllerConfig +} + +func (m *modelBucket) client(ctx context.Context) (*gcp.Client, error) { + var opts []option.ClientOption + opts, err := m.config.RESTClientOptions() + if err != nil { + return nil, err + } + gcpClient, err := gcp.NewRESTClient(ctx, opts...) 
+ if err != nil { + return nil, fmt.Errorf("building Bucket client: %w", err) + } + return gcpClient, err +} + +func (m *modelBucket) AdapterForObject(ctx context.Context, reader client.Reader, u *unstructured.Unstructured) (directbase.Adapter, error) { + obj := &krm.StorageBucket{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, &obj); err != nil { + return nil, fmt.Errorf("error converting to %T: %w", obj, err) + } + + id, err := krm.NewBucketIdentity(ctx, reader, obj) + if err != nil { + return nil, err + } + + // Get storage GCP client + gcpClient, err := m.client(ctx) + if err != nil { + return nil, err + } + return &BucketAdapter{ + id: id, + gcpClient: gcpClient, + desired: obj, + }, nil +} + +func (m *modelBucket) AdapterForURL(ctx context.Context, url string) (directbase.Adapter, error) { + // TODO: Support URLs + return nil, nil +} + +type BucketAdapter struct { + id *krm.BucketIdentity + gcpClient *gcp.Client + desired *krm.StorageBucket + actual *storagepb.Bucket +} + +var _ directbase.Adapter = &BucketAdapter{} + +// Find retrieves the GCP resource. +// Return true means the object is found. This triggers Adapter `Update` call. +// Return false means the object is not found. This triggers Adapter `Create` call. +// Return a non-nil error requeues the requests. +func (a *BucketAdapter) Find(ctx context.Context) (bool, error) { + log := klog.FromContext(ctx) + log.V(2).Info("getting Bucket", "name", a.id) + + req := &storagepb.GetBucketRequest{Name: a.id} + bucketpb, err := a.gcpClient.GetBucket(ctx, req) + if err != nil { + if direct.IsNotFound(err) { + return false, nil + } + return false, fmt.Errorf("getting Bucket %q: %w", a.id, err) + } + + a.actual = bucketpb + return true, nil +} + +// Create creates the resource in GCP based on `spec` and update the Config Connector object `status` based on the GCP response. 
+func (a *BucketAdapter) Create(ctx context.Context, createOp *directbase.CreateOperation) error { + log := klog.FromContext(ctx) + log.V(2).Info("creating Bucket", "name", a.id) + mapCtx := &direct.MapContext{} + + desired := a.desired.DeepCopy() + resource := StorageBucketSpec_ToProto(mapCtx, &desired.Spec) + if mapCtx.Err() != nil { + return mapCtx.Err() + } + + // TODO(contributor): Complete the gcp "CREATE" or "INSERT" request. + req := &storagepb.CreateBucketRequest{ + Parent: a.id.Parent().String(), + Bucket: resource, + } + op, err := a.gcpClient.CreateBucket(ctx, req) + if err != nil { + return fmt.Errorf("creating Bucket %s: %w", a.id, err) + } + created, err := op.Wait(ctx) + if err != nil { + return fmt.Errorf("Bucket %s waiting creation: %w", a.id, err) + } + log.V(2).Info("successfully created Bucket", "name", a.id) + + status := &krm.StorageBucketStatus{} + status.ObservedState = StorageBucketObservedState_FromProto(mapCtx, created) + if mapCtx.Err() != nil { + return mapCtx.Err() + } + status.ExternalRef = &a.id.External + return createOp.UpdateStatus(ctx, status, nil) +} + +// Update updates the resource in GCP based on `spec` and update the Config Connector object `status` based on the GCP response. +func (a *BucketAdapter) Update(ctx context.Context, updateOp *directbase.UpdateOperation) error { + log := klog.FromContext(ctx) + log.V(2).Info("updating Bucket", "name", a.id) + mapCtx := &direct.MapContext{} + + desiredPb := StorageBucketSpec_ToProto(mapCtx, &a.desired.DeepCopy().Spec) + if mapCtx.Err() != nil { + return mapCtx.Err() + } + + paths := []string{} + // Option 1: This option is good for proto that has `field_mask` for output-only, immutable, required/optional. + // TODO(contributor): If choosing this option, remove the "Option 2" code. + { + var err error + paths, err = common.CompareProtoMessage(desiredPb, a.actual, common.BasicDiff) + if err != nil { + return err + } + } + + // Option 2: manually add all mutable fields. 
+ // TODO(contributor): If choosing this option, remove the "Option 1" code. + { + if !reflect.DeepEqual(a.desired.Spec.DisplayName, a.actual.DisplayName) { + paths = append(paths, "display_name") + } + } + + if len(paths) == 0 { + log.V(2).Info("no field needs update", "name", a.id.External) + status := &krm.StorageBucketStatus{} + status.ObservedState = StorageBucketObservedState_FromProto(mapCtx, a.actual) + if mapCtx.Err() != nil { + return mapCtx.Err() + } + return updateOp.UpdateStatus(ctx, status, nil) + } + updateMask := &fieldmaskpb.FieldMask{ + Paths: sets.List(paths)} + + // TODO(contributor): Complete the gcp "UPDATE" or "PATCH" request. + req := &storagepb.UpdateBucketRequest{ + Name: a.id.External, + UpdateMask: updateMask, + Bucket: desiredPb, + } + op, err := a.gcpClient.UpdateBucket(ctx, req) + if err != nil { + return fmt.Errorf("updating Bucket %s: %w", a.id.External, err) + } + updated, err := op.Wait(ctx) + if err != nil { + return fmt.Errorf("Bucket %s waiting update: %w", a.id.External, err) + } + log.V(2).Info("successfully updated Bucket", "name", a.id.External) + + status := &krm.StorageBucketStatus{} + status.ObservedState = StorageBucketObservedState_FromProto(mapCtx, updated) + if mapCtx.Err() != nil { + return mapCtx.Err() + } + return updateOp.UpdateStatus(ctx, status, nil) +} + +// Export maps the GCP object to a Config Connector resource `spec`. 
+func (a *BucketAdapter) Export(ctx context.Context) (*unstructured.Unstructured, error) { + if a.actual == nil { + return nil, fmt.Errorf("Find() not called") + } + u := &unstructured.Unstructured{} + + obj := &krm.StorageBucket{} + mapCtx := &direct.MapContext{} + obj.Spec = direct.ValueOf(StorageBucketSpec_FromProto(mapCtx, a.actual)) + if mapCtx.Err() != nil { + return nil, mapCtx.Err() + } + obj.Spec.ProjectRef = &refs.ProjectRef{External: a.id.Parent().ProjectID} + obj.Spec.Location = a.id.Parent().Location + uObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + + u.SetName(a.actual.Id) + u.SetGroupVersionKind(krm.StorageBucketGVK) + + u.Object = uObj + return u, nil +} + +// Delete the resource from GCP service when the corresponding Config Connector resource is deleted. +func (a *BucketAdapter) Delete(ctx context.Context, deleteOp *directbase.DeleteOperation) (bool, error) { + log := klog.FromContext(ctx) + log.V(2).Info("deleting Bucket", "name", a.id) + + req := &storagepb.DeleteBucketRequest{Name: a.id.String()} + op, err := a.gcpClient.DeleteBucket(ctx, req) + if err != nil { + return false, fmt.Errorf("deleting Bucket %s: %w", a.id, err) + } + log.V(2).Info("successfully deleted Bucket", "name", a.id) + + err = op.Wait(ctx) + if err != nil { + return false, fmt.Errorf("waiting delete Bucket %s: %w", a.id, err) + } + return true, nil +} diff --git a/pkg/controller/direct/storage/mapper.generated.go b/pkg/controller/direct/storage/mapper.generated.go new file mode 100644 index 0000000000..a5e4f69758 --- /dev/null +++ b/pkg/controller/direct/storage/mapper.generated.go @@ -0,0 +1,832 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + pb "cloud.google.com/go/storage/internal/apiv2/storagepb" + krm "github.com/GoogleCloudPlatform/k8s-config-connector/apis/storage/v1alpha1" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct" + refs "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" +) +func Bucket_FromProto(mapCtx *direct.MapContext, in *pb.Bucket) *krm.Bucket { + if in == nil { + return nil + } + out := &krm.Bucket{} + out.Name = direct.LazyPtr(in.GetName()) + // MISSING: BucketID + out.Etag = direct.LazyPtr(in.GetEtag()) + out.Project = direct.LazyPtr(in.GetProject()) + // MISSING: Metageneration + out.Location = direct.LazyPtr(in.GetLocation()) + // MISSING: LocationType + out.StorageClass = direct.LazyPtr(in.GetStorageClass()) + out.Rpo = direct.LazyPtr(in.GetRpo()) + out.Acl = direct.Slice_FromProto(mapCtx, in.Acl, BucketAccessControl_FromProto) + out.DefaultObjectAcl = direct.Slice_FromProto(mapCtx, in.DefaultObjectAcl, ObjectAccessControl_FromProto) + out.Lifecycle = Bucket_Lifecycle_FromProto(mapCtx, in.GetLifecycle()) + // MISSING: CreateTime + out.Cors = direct.Slice_FromProto(mapCtx, in.Cors, Bucket_Cors_FromProto) + // MISSING: UpdateTime + out.DefaultEventBasedHold = direct.LazyPtr(in.GetDefaultEventBasedHold()) + out.Labels = in.Labels + out.Website = Bucket_Website_FromProto(mapCtx, in.GetWebsite()) + out.Versioning = Bucket_Versioning_FromProto(mapCtx, in.GetVersioning()) + out.Logging = Bucket_Logging_FromProto(mapCtx, in.GetLogging()) + // MISSING: Owner + out.Encryption = 
Bucket_Encryption_FromProto(mapCtx, in.GetEncryption()) + out.Billing = Bucket_Billing_FromProto(mapCtx, in.GetBilling()) + out.RetentionPolicy = Bucket_RetentionPolicy_FromProto(mapCtx, in.GetRetentionPolicy()) + out.IamConfig = Bucket_IamConfig_FromProto(mapCtx, in.GetIamConfig()) + out.SatisfiesPzs = direct.LazyPtr(in.GetSatisfiesPzs()) + out.CustomPlacementConfig = Bucket_CustomPlacementConfig_FromProto(mapCtx, in.GetCustomPlacementConfig()) + out.Autoclass = Bucket_Autoclass_FromProto(mapCtx, in.GetAutoclass()) + out.HierarchicalNamespace = Bucket_HierarchicalNamespace_FromProto(mapCtx, in.GetHierarchicalNamespace()) + out.SoftDeletePolicy = Bucket_SoftDeletePolicy_FromProto(mapCtx, in.GetSoftDeletePolicy()) + return out +} +func Bucket_ToProto(mapCtx *direct.MapContext, in *krm.Bucket) *pb.Bucket { + if in == nil { + return nil + } + out := &pb.Bucket{} + out.Name = direct.ValueOf(in.Name) + // MISSING: BucketID + out.Etag = direct.ValueOf(in.Etag) + out.Project = direct.ValueOf(in.Project) + // MISSING: Metageneration + out.Location = direct.ValueOf(in.Location) + // MISSING: LocationType + out.StorageClass = direct.ValueOf(in.StorageClass) + out.Rpo = direct.ValueOf(in.Rpo) + out.Acl = direct.Slice_ToProto(mapCtx, in.Acl, BucketAccessControl_ToProto) + out.DefaultObjectAcl = direct.Slice_ToProto(mapCtx, in.DefaultObjectAcl, ObjectAccessControl_ToProto) + out.Lifecycle = Bucket_Lifecycle_ToProto(mapCtx, in.Lifecycle) + // MISSING: CreateTime + out.Cors = direct.Slice_ToProto(mapCtx, in.Cors, Bucket_Cors_ToProto) + // MISSING: UpdateTime + out.DefaultEventBasedHold = direct.ValueOf(in.DefaultEventBasedHold) + out.Labels = in.Labels + out.Website = Bucket_Website_ToProto(mapCtx, in.Website) + out.Versioning = Bucket_Versioning_ToProto(mapCtx, in.Versioning) + out.Logging = Bucket_Logging_ToProto(mapCtx, in.Logging) + // MISSING: Owner + out.Encryption = Bucket_Encryption_ToProto(mapCtx, in.Encryption) + out.Billing = Bucket_Billing_ToProto(mapCtx, in.Billing) 
+ out.RetentionPolicy = Bucket_RetentionPolicy_ToProto(mapCtx, in.RetentionPolicy) + out.IamConfig = Bucket_IamConfig_ToProto(mapCtx, in.IamConfig) + out.SatisfiesPzs = direct.ValueOf(in.SatisfiesPzs) + out.CustomPlacementConfig = Bucket_CustomPlacementConfig_ToProto(mapCtx, in.CustomPlacementConfig) + out.Autoclass = Bucket_Autoclass_ToProto(mapCtx, in.Autoclass) + out.HierarchicalNamespace = Bucket_HierarchicalNamespace_ToProto(mapCtx, in.HierarchicalNamespace) + out.SoftDeletePolicy = Bucket_SoftDeletePolicy_ToProto(mapCtx, in.SoftDeletePolicy) + return out +} +func BucketAccessControl_FromProto(mapCtx *direct.MapContext, in *pb.BucketAccessControl) *krm.BucketAccessControl { + if in == nil { + return nil + } + out := &krm.BucketAccessControl{} + out.Role = direct.LazyPtr(in.GetRole()) + out.ID = direct.LazyPtr(in.GetId()) + out.Entity = direct.LazyPtr(in.GetEntity()) + // MISSING: EntityAlt + out.EntityID = direct.LazyPtr(in.GetEntityId()) + out.Etag = direct.LazyPtr(in.GetEtag()) + out.Email = direct.LazyPtr(in.GetEmail()) + out.Domain = direct.LazyPtr(in.GetDomain()) + out.ProjectTeam = ProjectTeam_FromProto(mapCtx, in.GetProjectTeam()) + return out +} +func BucketAccessControl_ToProto(mapCtx *direct.MapContext, in *krm.BucketAccessControl) *pb.BucketAccessControl { + if in == nil { + return nil + } + out := &pb.BucketAccessControl{} + out.Role = direct.ValueOf(in.Role) + out.Id = direct.ValueOf(in.ID) + out.Entity = direct.ValueOf(in.Entity) + // MISSING: EntityAlt + out.EntityId = direct.ValueOf(in.EntityID) + out.Etag = direct.ValueOf(in.Etag) + out.Email = direct.ValueOf(in.Email) + out.Domain = direct.ValueOf(in.Domain) + out.ProjectTeam = ProjectTeam_ToProto(mapCtx, in.ProjectTeam) + return out +} +func BucketAccessControlObservedState_FromProto(mapCtx *direct.MapContext, in *pb.BucketAccessControl) *krm.BucketAccessControlObservedState { + if in == nil { + return nil + } + out := &krm.BucketAccessControlObservedState{} + // MISSING: Role + // MISSING: 
ID
	// MISSING: Entity
	out.EntityAlt = direct.LazyPtr(in.GetEntityAlt())
	// MISSING: EntityID
	// MISSING: Etag
	// MISSING: Email
	// MISSING: Domain
	// MISSING: ProjectTeam
	return out
}

// BucketAccessControlObservedState_ToProto maps the KRM observed state of a
// bucket ACL entry back onto pb.BucketAccessControl. Only EntityAlt is mapped;
// every other field is generator-marked MISSING.
func BucketAccessControlObservedState_ToProto(mapCtx *direct.MapContext, in *krm.BucketAccessControlObservedState) *pb.BucketAccessControl {
	if in == nil {
		return nil
	}
	out := &pb.BucketAccessControl{}
	// MISSING: Role
	// MISSING: ID
	// MISSING: Entity
	out.EntityAlt = direct.ValueOf(in.EntityAlt)
	// MISSING: EntityID
	// MISSING: Etag
	// MISSING: Email
	// MISSING: Domain
	// MISSING: ProjectTeam
	return out
}

// BucketObservedState_FromProto extracts the output-only (observed) fields of a
// pb.Bucket into krm.BucketObservedState. Fields marked MISSING are not mapped
// by the generator.
func BucketObservedState_FromProto(mapCtx *direct.MapContext, in *pb.Bucket) *krm.BucketObservedState {
	if in == nil {
		return nil
	}
	out := &krm.BucketObservedState{}
	// MISSING: Name
	out.BucketID = direct.LazyPtr(in.GetBucketId())
	// MISSING: Etag
	// MISSING: Project
	out.Metageneration = direct.LazyPtr(in.GetMetageneration())
	// MISSING: Location
	out.LocationType = direct.LazyPtr(in.GetLocationType())
	// MISSING: StorageClass
	// MISSING: Rpo
	out.Acl = direct.Slice_FromProto(mapCtx, in.Acl, BucketAccessControlObservedState_FromProto)
	out.DefaultObjectAcl = direct.Slice_FromProto(mapCtx, in.DefaultObjectAcl, ObjectAccessControlObservedState_FromProto)
	// MISSING: Lifecycle
	out.CreateTime = direct.StringTimestamp_FromProto(mapCtx, in.GetCreateTime())
	// MISSING: Cors
	out.UpdateTime = direct.StringTimestamp_FromProto(mapCtx, in.GetUpdateTime())
	// MISSING: DefaultEventBasedHold
	// MISSING: Labels
	// MISSING: Website
	// MISSING: Versioning
	// MISSING: Logging
	out.Owner = Owner_FromProto(mapCtx, in.GetOwner())
	// MISSING: Encryption
	// MISSING: Billing
	// MISSING: RetentionPolicy
	// MISSING: IamConfig
	// MISSING: SatisfiesPzs
	// MISSING: CustomPlacementConfig
	out.Autoclass = Bucket_AutoclassObservedState_FromProto(mapCtx, in.GetAutoclass())
	// MISSING: HierarchicalNamespace
	// MISSING: SoftDeletePolicy
	return out
}

// BucketObservedState_ToProto maps krm.BucketObservedState back onto pb.Bucket
// (inverse of BucketObservedState_FromProto; same MISSING fields unmapped).
func BucketObservedState_ToProto(mapCtx *direct.MapContext, in *krm.BucketObservedState) *pb.Bucket {
	if in == nil {
		return nil
	}
	out := &pb.Bucket{}
	// MISSING: Name
	out.BucketId = direct.ValueOf(in.BucketID)
	// MISSING: Etag
	// MISSING: Project
	out.Metageneration = direct.ValueOf(in.Metageneration)
	// MISSING: Location
	out.LocationType = direct.ValueOf(in.LocationType)
	// MISSING: StorageClass
	// MISSING: Rpo
	out.Acl = direct.Slice_ToProto(mapCtx, in.Acl, BucketAccessControlObservedState_ToProto)
	out.DefaultObjectAcl = direct.Slice_ToProto(mapCtx, in.DefaultObjectAcl, ObjectAccessControlObservedState_ToProto)
	// MISSING: Lifecycle
	out.CreateTime = direct.StringTimestamp_ToProto(mapCtx, in.CreateTime)
	// MISSING: Cors
	out.UpdateTime = direct.StringTimestamp_ToProto(mapCtx, in.UpdateTime)
	// MISSING: DefaultEventBasedHold
	// MISSING: Labels
	// MISSING: Website
	// MISSING: Versioning
	// MISSING: Logging
	out.Owner = Owner_ToProto(mapCtx, in.Owner)
	// MISSING: Encryption
	// MISSING: Billing
	// MISSING: RetentionPolicy
	// MISSING: IamConfig
	// MISSING: SatisfiesPzs
	// MISSING: CustomPlacementConfig
	out.Autoclass = Bucket_AutoclassObservedState_ToProto(mapCtx, in.Autoclass)
	// MISSING: HierarchicalNamespace
	// MISSING: SoftDeletePolicy
	return out
}

// Bucket_Autoclass_FromProto maps pb.Bucket_Autoclass to krm.Bucket_Autoclass
// (desired-state fields only; observed fields live in the ObservedState mapper).
func Bucket_Autoclass_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_Autoclass) *krm.Bucket_Autoclass {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_Autoclass{}
	out.Enabled = direct.LazyPtr(in.GetEnabled())
	// MISSING: ToggleTime
	// NOTE(review): copied as a pointer, so the KRM object aliases the proto
	// field rather than taking a copy — presumably the generator's convention
	// for optional fields; confirm against other mappers.
	out.TerminalStorageClass = in.TerminalStorageClass
	// MISSING: TerminalStorageClassUpdateTime
	return out
}

// Bucket_Autoclass_ToProto maps krm.Bucket_Autoclass back to pb.Bucket_Autoclass.
func Bucket_Autoclass_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_Autoclass) *pb.Bucket_Autoclass {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_Autoclass{}
	out.Enabled = direct.ValueOf(in.Enabled)
	// MISSING: ToggleTime
	out.TerminalStorageClass = in.TerminalStorageClass
	// MISSING: TerminalStorageClassUpdateTime
	return out
}

// Bucket_AutoclassObservedState_FromProto extracts the output-only autoclass
// fields (toggle / update timestamps) from pb.Bucket_Autoclass.
func Bucket_AutoclassObservedState_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_Autoclass) *krm.Bucket_AutoclassObservedState {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_AutoclassObservedState{}
	// MISSING: Enabled
	out.ToggleTime = direct.StringTimestamp_FromProto(mapCtx, in.GetToggleTime())
	// MISSING: TerminalStorageClass
	out.TerminalStorageClassUpdateTime = direct.StringTimestamp_FromProto(mapCtx, in.GetTerminalStorageClassUpdateTime())
	return out
}

// Bucket_AutoclassObservedState_ToProto maps the observed autoclass state back
// onto pb.Bucket_Autoclass.
// NOTE(review): ToggleTime is assigned directly while
// TerminalStorageClassUpdateTime is wrapped in an oneof-style wrapper struct —
// looks asymmetric; verify this matches the generated pb bindings.
func Bucket_AutoclassObservedState_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_AutoclassObservedState) *pb.Bucket_Autoclass {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_Autoclass{}
	// MISSING: Enabled
	out.ToggleTime = direct.StringTimestamp_ToProto(mapCtx, in.ToggleTime)
	// MISSING: TerminalStorageClass
	if oneof := direct.StringTimestamp_ToProto(mapCtx, in.TerminalStorageClassUpdateTime); oneof != nil {
		out.TerminalStorageClassUpdateTime = &pb.Bucket_Autoclass_TerminalStorageClassUpdateTime{TerminalStorageClassUpdateTime: oneof}
	}
	return out
}

// Bucket_Billing_FromProto maps pb.Bucket_Billing to krm.Bucket_Billing.
func Bucket_Billing_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_Billing) *krm.Bucket_Billing {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_Billing{}
	out.RequesterPays = direct.LazyPtr(in.GetRequesterPays())
	return out
}

// Bucket_Billing_ToProto maps krm.Bucket_Billing back to pb.Bucket_Billing.
func Bucket_Billing_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_Billing) *pb.Bucket_Billing {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_Billing{}
	out.RequesterPays = direct.ValueOf(in.RequesterPays)
	return out
}

// Bucket_Cors_FromProto maps pb.Bucket_Cors to krm.Bucket_Cors. The slice
// fields (Origin, Method, ResponseHeader) are copied by reference.
func Bucket_Cors_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_Cors) *krm.Bucket_Cors {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_Cors{}
	out.Origin = in.Origin
	out.Method = in.Method
	out.ResponseHeader = in.ResponseHeader
	out.MaxAgeSeconds = direct.LazyPtr(in.GetMaxAgeSeconds())
	return out
}

// Bucket_Cors_ToProto maps krm.Bucket_Cors back to pb.Bucket_Cors.
func
Bucket_Cors_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_Cors) *pb.Bucket_Cors {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_Cors{}
	out.Origin = in.Origin
	out.Method = in.Method
	out.ResponseHeader = in.ResponseHeader
	out.MaxAgeSeconds = direct.ValueOf(in.MaxAgeSeconds)
	return out
}

// Bucket_CustomPlacementConfig_FromProto maps the dual-region placement config
// from proto to KRM (DataLocations is copied by reference).
func Bucket_CustomPlacementConfig_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_CustomPlacementConfig) *krm.Bucket_CustomPlacementConfig {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_CustomPlacementConfig{}
	out.DataLocations = in.DataLocations
	return out
}

// Bucket_CustomPlacementConfig_ToProto maps the placement config back to proto.
func Bucket_CustomPlacementConfig_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_CustomPlacementConfig) *pb.Bucket_CustomPlacementConfig {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_CustomPlacementConfig{}
	out.DataLocations = in.DataLocations
	return out
}

// Bucket_Encryption_FromProto maps pb.Bucket_Encryption to krm.Bucket_Encryption.
// Note the casing difference: proto DefaultKmsKey vs KRM DefaultKMSKey.
func Bucket_Encryption_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_Encryption) *krm.Bucket_Encryption {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_Encryption{}
	out.DefaultKMSKey = direct.LazyPtr(in.GetDefaultKmsKey())
	return out
}

// Bucket_Encryption_ToProto maps krm.Bucket_Encryption back to proto.
func Bucket_Encryption_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_Encryption) *pb.Bucket_Encryption {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_Encryption{}
	out.DefaultKmsKey = direct.ValueOf(in.DefaultKMSKey)
	return out
}

// Bucket_HierarchicalNamespace_FromProto maps the hierarchical-namespace flag
// from proto to KRM.
func Bucket_HierarchicalNamespace_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_HierarchicalNamespace) *krm.Bucket_HierarchicalNamespace {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_HierarchicalNamespace{}
	out.Enabled = direct.LazyPtr(in.GetEnabled())
	return out
}

// Bucket_HierarchicalNamespace_ToProto maps the flag back to proto.
func Bucket_HierarchicalNamespace_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_HierarchicalNamespace) *pb.Bucket_HierarchicalNamespace {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_HierarchicalNamespace{}
	out.Enabled = direct.ValueOf(in.Enabled)
	return out
}

// Bucket_IamConfig_FromProto maps the bucket IAM configuration (uniform
// bucket-level access + public access prevention) from proto to KRM.
func Bucket_IamConfig_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_IamConfig) *krm.Bucket_IamConfig {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_IamConfig{}
	out.UniformBucketLevelAccess = Bucket_IamConfig_UniformBucketLevelAccess_FromProto(mapCtx, in.GetUniformBucketLevelAccess())
	out.PublicAccessPrevention = direct.LazyPtr(in.GetPublicAccessPrevention())
	return out
}

// Bucket_IamConfig_ToProto maps the IAM configuration back to proto.
func Bucket_IamConfig_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_IamConfig) *pb.Bucket_IamConfig {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_IamConfig{}
	out.UniformBucketLevelAccess = Bucket_IamConfig_UniformBucketLevelAccess_ToProto(mapCtx, in.UniformBucketLevelAccess)
	out.PublicAccessPrevention = direct.ValueOf(in.PublicAccessPrevention)
	return out
}

// Bucket_IamConfig_UniformBucketLevelAccess_FromProto maps the UBLA sub-message
// (enabled flag plus lock time) from proto to KRM.
func Bucket_IamConfig_UniformBucketLevelAccess_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_IamConfig_UniformBucketLevelAccess) *krm.Bucket_IamConfig_UniformBucketLevelAccess {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_IamConfig_UniformBucketLevelAccess{}
	out.Enabled = direct.LazyPtr(in.GetEnabled())
	out.LockTime = direct.StringTimestamp_FromProto(mapCtx, in.GetLockTime())
	return out
}

// Bucket_IamConfig_UniformBucketLevelAccess_ToProto maps the UBLA sub-message
// back to proto.
func Bucket_IamConfig_UniformBucketLevelAccess_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_IamConfig_UniformBucketLevelAccess) *pb.Bucket_IamConfig_UniformBucketLevelAccess {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_IamConfig_UniformBucketLevelAccess{}
	out.Enabled = direct.ValueOf(in.Enabled)
	out.LockTime = direct.StringTimestamp_ToProto(mapCtx, in.LockTime)
	return out
}

// Bucket_Lifecycle_FromProto maps the lifecycle rule list from proto to KRM.
func Bucket_Lifecycle_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_Lifecycle) *krm.Bucket_Lifecycle {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_Lifecycle{}
	out.Rule = direct.Slice_FromProto(mapCtx, in.Rule, Bucket_Lifecycle_Rule_FromProto)
	return out
}

// Bucket_Lifecycle_ToProto maps the lifecycle rule list back to proto.
func Bucket_Lifecycle_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_Lifecycle) *pb.Bucket_Lifecycle {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_Lifecycle{}
	out.Rule = direct.Slice_ToProto(mapCtx, in.Rule, Bucket_Lifecycle_Rule_ToProto)
	return out
}

// Bucket_Lifecycle_Rule_FromProto maps one lifecycle rule (action + condition)
// from proto to KRM.
func Bucket_Lifecycle_Rule_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_Lifecycle_Rule) *krm.Bucket_Lifecycle_Rule {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_Lifecycle_Rule{}
	out.Action = Bucket_Lifecycle_Rule_Action_FromProto(mapCtx, in.GetAction())
	out.Condition = Bucket_Lifecycle_Rule_Condition_FromProto(mapCtx, in.GetCondition())
	return out
}

// Bucket_Lifecycle_Rule_ToProto maps one lifecycle rule back to proto.
func Bucket_Lifecycle_Rule_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_Lifecycle_Rule) *pb.Bucket_Lifecycle_Rule {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_Lifecycle_Rule{}
	out.Action = Bucket_Lifecycle_Rule_Action_ToProto(mapCtx, in.Action)
	out.Condition = Bucket_Lifecycle_Rule_Condition_ToProto(mapCtx, in.Condition)
	return out
}

// Bucket_Lifecycle_Rule_Action_FromProto maps a rule action (type + target
// storage class) from proto to KRM.
func Bucket_Lifecycle_Rule_Action_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_Lifecycle_Rule_Action) *krm.Bucket_Lifecycle_Rule_Action {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_Lifecycle_Rule_Action{}
	out.Type = direct.LazyPtr(in.GetType())
	out.StorageClass = direct.LazyPtr(in.GetStorageClass())
	return out
}

// Bucket_Lifecycle_Rule_Action_ToProto maps a rule action back to proto.
func Bucket_Lifecycle_Rule_Action_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_Lifecycle_Rule_Action) *pb.Bucket_Lifecycle_Rule_Action {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_Lifecycle_Rule_Action{}
	out.Type = direct.ValueOf(in.Type)
	out.StorageClass = direct.ValueOf(in.StorageClass)
	return out
}

// Bucket_Lifecycle_Rule_Condition_FromProto maps a rule condition from proto to
// KRM. Scalar/optional fields are copied as pointers (aliasing the proto
// message); Date fields go through Date_FromProto.
func Bucket_Lifecycle_Rule_Condition_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_Lifecycle_Rule_Condition) *krm.Bucket_Lifecycle_Rule_Condition {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_Lifecycle_Rule_Condition{}
	out.AgeDays = in.AgeDays
	out.CreatedBefore = Date_FromProto(mapCtx, in.GetCreatedBefore())
	out.IsLive = in.IsLive
	out.NumNewerVersions = in.NumNewerVersions
	out.MatchesStorageClass = in.MatchesStorageClass
	out.DaysSinceCustomTime = in.DaysSinceCustomTime
	out.CustomTimeBefore = Date_FromProto(mapCtx, in.GetCustomTimeBefore())
	out.DaysSinceNoncurrentTime = in.DaysSinceNoncurrentTime
	out.NoncurrentTimeBefore = Date_FromProto(mapCtx, in.GetNoncurrentTimeBefore())
	out.MatchesPrefix = in.MatchesPrefix
	out.MatchesSuffix = in.MatchesSuffix
	return out
}

// Bucket_Lifecycle_Rule_Condition_ToProto maps a rule condition back to proto
// (inverse of the FromProto mapper above).
func Bucket_Lifecycle_Rule_Condition_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_Lifecycle_Rule_Condition) *pb.Bucket_Lifecycle_Rule_Condition {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_Lifecycle_Rule_Condition{}
	out.AgeDays = in.AgeDays
	out.CreatedBefore = Date_ToProto(mapCtx, in.CreatedBefore)
	out.IsLive = in.IsLive
	out.NumNewerVersions = in.NumNewerVersions
	out.MatchesStorageClass = in.MatchesStorageClass
	out.DaysSinceCustomTime = in.DaysSinceCustomTime
	out.CustomTimeBefore = Date_ToProto(mapCtx, in.CustomTimeBefore)
	out.DaysSinceNoncurrentTime = in.DaysSinceNoncurrentTime
	out.NoncurrentTimeBefore = Date_ToProto(mapCtx, in.NoncurrentTimeBefore)
	out.MatchesPrefix = in.MatchesPrefix
	out.MatchesSuffix = in.MatchesSuffix
	return out
}

// Bucket_Logging_FromProto maps access-logging settings from proto to KRM.
func Bucket_Logging_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_Logging) *krm.Bucket_Logging {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_Logging{}
	out.LogBucket = direct.LazyPtr(in.GetLogBucket())
	out.LogObjectPrefix = direct.LazyPtr(in.GetLogObjectPrefix())
	return out
}

// Bucket_Logging_ToProto maps access-logging settings back to proto.
func Bucket_Logging_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_Logging) *pb.Bucket_Logging {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_Logging{}
	out.LogBucket = direct.ValueOf(in.LogBucket)
	out.LogObjectPrefix = direct.ValueOf(in.LogObjectPrefix)
	return out
}

// Bucket_RetentionPolicy_FromProto maps the bucket retention policy (effective
// time, lock state, duration) from proto to KRM.
func Bucket_RetentionPolicy_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_RetentionPolicy) *krm.Bucket_RetentionPolicy {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_RetentionPolicy{}
	out.EffectiveTime = direct.StringTimestamp_FromProto(mapCtx, in.GetEffectiveTime())
	out.IsLocked = direct.LazyPtr(in.GetIsLocked())
	out.RetentionDuration = direct.StringDuration_FromProto(mapCtx, in.GetRetentionDuration())
	return out
}

// Bucket_RetentionPolicy_ToProto maps the retention policy back to proto.
// NOTE(review): assigns RetentionDuration/EffectiveTime directly, whereas
// Bucket_SoftDeletePolicy_ToProto below wraps the same-named fields in wrapper
// structs — verify both match their respective pb field types.
func Bucket_RetentionPolicy_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_RetentionPolicy) *pb.Bucket_RetentionPolicy {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_RetentionPolicy{}
	out.EffectiveTime = direct.StringTimestamp_ToProto(mapCtx, in.EffectiveTime)
	out.IsLocked = direct.ValueOf(in.IsLocked)
	out.RetentionDuration = direct.StringDuration_ToProto(mapCtx, in.RetentionDuration)
	return out
}

// Bucket_SoftDeletePolicy_FromProto maps the soft-delete policy from proto to KRM.
func Bucket_SoftDeletePolicy_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_SoftDeletePolicy) *krm.Bucket_SoftDeletePolicy {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_SoftDeletePolicy{}
	out.RetentionDuration = direct.StringDuration_FromProto(mapCtx, in.GetRetentionDuration())
	out.EffectiveTime = direct.StringTimestamp_FromProto(mapCtx, in.GetEffectiveTime())
	return out
}

// Bucket_SoftDeletePolicy_ToProto maps the soft-delete policy back to proto;
// both fields are set only when non-nil, via oneof-style wrapper structs.
func Bucket_SoftDeletePolicy_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_SoftDeletePolicy) *pb.Bucket_SoftDeletePolicy {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_SoftDeletePolicy{}
	if oneof := direct.StringDuration_ToProto(mapCtx, in.RetentionDuration); oneof != nil {
		out.RetentionDuration = &pb.Bucket_SoftDeletePolicy_RetentionDuration{RetentionDuration: oneof}
	}
	if oneof := direct.StringTimestamp_ToProto(mapCtx, in.EffectiveTime); oneof != nil {
		out.EffectiveTime = &pb.Bucket_SoftDeletePolicy_EffectiveTime{EffectiveTime: oneof}
	}
	return out
}

// Bucket_Versioning_FromProto maps the object-versioning flag from proto to KRM.
func Bucket_Versioning_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_Versioning) *krm.Bucket_Versioning {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_Versioning{}
	out.Enabled = direct.LazyPtr(in.GetEnabled())
	return out
}

// Bucket_Versioning_ToProto maps the object-versioning flag back to proto.
func Bucket_Versioning_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_Versioning) *pb.Bucket_Versioning {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_Versioning{}
	out.Enabled = direct.ValueOf(in.Enabled)
	return out
}

// Bucket_Website_FromProto maps static-website settings from proto to KRM.
func Bucket_Website_FromProto(mapCtx *direct.MapContext, in *pb.Bucket_Website) *krm.Bucket_Website {
	if in == nil {
		return nil
	}
	out := &krm.Bucket_Website{}
	out.MainPageSuffix = direct.LazyPtr(in.GetMainPageSuffix())
	out.NotFoundPage = direct.LazyPtr(in.GetNotFoundPage())
	return out
}

// Bucket_Website_ToProto maps static-website settings back to proto.
func Bucket_Website_ToProto(mapCtx *direct.MapContext, in *krm.Bucket_Website) *pb.Bucket_Website {
	if in == nil {
		return nil
	}
	out := &pb.Bucket_Website{}
	out.MainPageSuffix = direct.ValueOf(in.MainPageSuffix)
	out.NotFoundPage = direct.ValueOf(in.NotFoundPage)
	return out
}

// ObjectAccessControl_FromProto maps an object ACL entry from proto to KRM.
// EntityAlt is output-only and handled by the ObservedState mapper below.
func ObjectAccessControl_FromProto(mapCtx *direct.MapContext, in *pb.ObjectAccessControl) *krm.ObjectAccessControl {
	if in == nil {
		return nil
	}
	out := &krm.ObjectAccessControl{}
	out.Role = direct.LazyPtr(in.GetRole())
	out.ID = direct.LazyPtr(in.GetId())
	out.Entity = direct.LazyPtr(in.GetEntity())
	// MISSING: EntityAlt
	out.EntityID = direct.LazyPtr(in.GetEntityId())
	out.Etag = direct.LazyPtr(in.GetEtag())
	out.Email = direct.LazyPtr(in.GetEmail())
	out.Domain = direct.LazyPtr(in.GetDomain())
	out.ProjectTeam = ProjectTeam_FromProto(mapCtx, in.GetProjectTeam())
	return out
}

// ObjectAccessControl_ToProto maps an object ACL entry back to proto.
func ObjectAccessControl_ToProto(mapCtx *direct.MapContext, in *krm.ObjectAccessControl) *pb.ObjectAccessControl {
	if in == nil {
		return nil
	}
	out := &pb.ObjectAccessControl{}
	out.Role = direct.ValueOf(in.Role)
	out.Id = direct.ValueOf(in.ID)
	out.Entity = direct.ValueOf(in.Entity)
	// MISSING: EntityAlt
	out.EntityId = direct.ValueOf(in.EntityID)
	out.Etag = direct.ValueOf(in.Etag)
	out.Email = direct.ValueOf(in.Email)
	out.Domain = direct.ValueOf(in.Domain)
	out.ProjectTeam = ProjectTeam_ToProto(mapCtx, in.ProjectTeam)
	return out
}

// ObjectAccessControlObservedState_FromProto extracts the output-only EntityAlt
// field of an object ACL entry.
func ObjectAccessControlObservedState_FromProto(mapCtx *direct.MapContext, in *pb.ObjectAccessControl) *krm.ObjectAccessControlObservedState {
	if in == nil {
		return nil
	}
	out :=
&krm.ObjectAccessControlObservedState{}
	// MISSING: Role
	// MISSING: ID
	// MISSING: Entity
	out.EntityAlt = direct.LazyPtr(in.GetEntityAlt())
	// MISSING: EntityID
	// MISSING: Etag
	// MISSING: Email
	// MISSING: Domain
	// MISSING: ProjectTeam
	return out
}

// ObjectAccessControlObservedState_ToProto maps the observed object ACL state
// back to pb.ObjectAccessControl. Only EntityAlt is mapped.
func ObjectAccessControlObservedState_ToProto(mapCtx *direct.MapContext, in *krm.ObjectAccessControlObservedState) *pb.ObjectAccessControl {
	if in == nil {
		return nil
	}
	out := &pb.ObjectAccessControl{}
	// MISSING: Role
	// MISSING: ID
	// MISSING: Entity
	out.EntityAlt = direct.ValueOf(in.EntityAlt)
	// MISSING: EntityID
	// MISSING: Etag
	// MISSING: Email
	// MISSING: Domain
	// MISSING: ProjectTeam
	return out
}

// Owner_FromProto maps pb.Owner (entity + entity ID) to krm.Owner.
func Owner_FromProto(mapCtx *direct.MapContext, in *pb.Owner) *krm.Owner {
	if in == nil {
		return nil
	}
	out := &krm.Owner{}
	out.Entity = direct.LazyPtr(in.GetEntity())
	out.EntityID = direct.LazyPtr(in.GetEntityId())
	return out
}

// Owner_ToProto maps krm.Owner back to pb.Owner.
func Owner_ToProto(mapCtx *direct.MapContext, in *krm.Owner) *pb.Owner {
	if in == nil {
		return nil
	}
	out := &pb.Owner{}
	out.Entity = direct.ValueOf(in.Entity)
	out.EntityId = direct.ValueOf(in.EntityID)
	return out
}

// ProjectTeam_FromProto maps pb.ProjectTeam (project number + team) to KRM.
func ProjectTeam_FromProto(mapCtx *direct.MapContext, in *pb.ProjectTeam) *krm.ProjectTeam {
	if in == nil {
		return nil
	}
	out := &krm.ProjectTeam{}
	out.ProjectNumber = direct.LazyPtr(in.GetProjectNumber())
	out.Team = direct.LazyPtr(in.GetTeam())
	return out
}

// ProjectTeam_ToProto maps krm.ProjectTeam back to pb.ProjectTeam.
func ProjectTeam_ToProto(mapCtx *direct.MapContext, in *krm.ProjectTeam) *pb.ProjectTeam {
	if in == nil {
		return nil
	}
	out := &pb.ProjectTeam{}
	out.ProjectNumber = direct.ValueOf(in.ProjectNumber)
	out.Team = direct.ValueOf(in.Team)
	return out
}

// StorageBucketObservedState_FromProto is a generator stub: every field is
// still marked MISSING, so it currently maps nothing and returns an empty
// observed state (observed fields are handled by BucketObservedState_FromProto).
func StorageBucketObservedState_FromProto(mapCtx *direct.MapContext, in *pb.Bucket) *krm.StorageBucketObservedState {
	if in == nil {
		return nil
	}
	out := &krm.StorageBucketObservedState{}
	// MISSING: Name
	// MISSING: BucketID
	// MISSING: Etag
	// MISSING: Project
	// MISSING: Metageneration
	// MISSING: Location
	// MISSING: LocationType
	// MISSING: StorageClass
	// MISSING: Rpo
	// MISSING: Acl
	// MISSING: DefaultObjectAcl
	// MISSING: Lifecycle
	// MISSING: CreateTime
	// MISSING: Cors
	// MISSING: UpdateTime
	// MISSING: DefaultEventBasedHold
	// MISSING: Labels
	// MISSING: Website
	// MISSING: Versioning
	// MISSING: Logging
	// MISSING: Owner
	// MISSING: Encryption
	// MISSING: Billing
	// MISSING: RetentionPolicy
	// MISSING: IamConfig
	// MISSING: SatisfiesPzs
	// MISSING: CustomPlacementConfig
	// MISSING: Autoclass
	// MISSING: HierarchicalNamespace
	// MISSING: SoftDeletePolicy
	return out
}

// StorageBucketObservedState_ToProto is the matching stub for the reverse
// direction; it currently maps nothing and returns an empty pb.Bucket.
func StorageBucketObservedState_ToProto(mapCtx *direct.MapContext, in *krm.StorageBucketObservedState) *pb.Bucket {
	if in == nil {
		return nil
	}
	out := &pb.Bucket{}
	// MISSING: Name
	// MISSING: BucketID
	// MISSING: Etag
	// MISSING: Project
	// MISSING: Metageneration
	// MISSING: Location
	// MISSING: LocationType
	// MISSING: StorageClass
	// MISSING: Rpo
	// MISSING: Acl
	// MISSING: DefaultObjectAcl
	// MISSING: Lifecycle
	// MISSING: CreateTime
	// MISSING: Cors
	// MISSING: UpdateTime
	// MISSING: DefaultEventBasedHold
	// MISSING: Labels
	// MISSING: Website
	// MISSING: Versioning
	// MISSING: Logging
	// MISSING: Owner
	// MISSING: Encryption
	// MISSING: Billing
	// MISSING: RetentionPolicy
	// MISSING: IamConfig
	// MISSING: SatisfiesPzs
	// MISSING: CustomPlacementConfig
	// MISSING: Autoclass
	// MISSING: HierarchicalNamespace
	// MISSING: SoftDeletePolicy
	return out
}

// StorageBucketSpec_FromProto is a generator stub: every spec field is still
// marked MISSING, so it currently maps nothing.
func StorageBucketSpec_FromProto(mapCtx *direct.MapContext, in *pb.Bucket) *krm.StorageBucketSpec {
	if in == nil {
		return nil
	}
	out := &krm.StorageBucketSpec{}
	// MISSING: Name
	// MISSING: BucketID
	// MISSING: Etag
	// MISSING: Project
	// MISSING: Metageneration
	// MISSING: Location
	// MISSING: LocationType
	// MISSING: StorageClass
	// MISSING: Rpo
	// MISSING: Acl
	// MISSING: DefaultObjectAcl
	// MISSING: Lifecycle
	// MISSING: CreateTime
	// MISSING: Cors
	// MISSING: UpdateTime
	// MISSING: DefaultEventBasedHold
	// MISSING: Labels
	// MISSING: Website
	// MISSING: Versioning
	// MISSING: Logging
	// MISSING: Owner
	// MISSING: Encryption
	// MISSING: Billing
	// MISSING: RetentionPolicy
	// MISSING: IamConfig
	// MISSING: SatisfiesPzs
	// MISSING: CustomPlacementConfig
	// MISSING: Autoclass
	// MISSING: HierarchicalNamespace
	// MISSING: SoftDeletePolicy
	return out
}

// StorageBucketSpec_ToProto is the matching stub for the reverse direction; it
// currently maps nothing and returns an empty pb.Bucket.
func StorageBucketSpec_ToProto(mapCtx *direct.MapContext, in *krm.StorageBucketSpec) *pb.Bucket {
	if in == nil {
		return nil
	}
	out := &pb.Bucket{}
	// MISSING: Name
	// MISSING: BucketID
	// MISSING: Etag
	// MISSING: Project
	// MISSING: Metageneration
	// MISSING: Location
	// MISSING: LocationType
	// MISSING: StorageClass
	// MISSING: Rpo
	// MISSING: Acl
	// MISSING: DefaultObjectAcl
	// MISSING: Lifecycle
	// MISSING: CreateTime
	// MISSING: Cors
	// MISSING: UpdateTime
	// MISSING: DefaultEventBasedHold
	// MISSING: Labels
	// MISSING: Website
	// MISSING: Versioning
	// MISSING: Logging
	// MISSING: Owner
	// MISSING: Encryption
	// MISSING: Billing
	// MISSING: RetentionPolicy
	// MISSING: IamConfig
	// MISSING: SatisfiesPzs
	// MISSING: CustomPlacementConfig
	// MISSING: Autoclass
	// MISSING: HierarchicalNamespace
	// MISSING: SoftDeletePolicy
	return out
}