diff --git a/slice/api/v1alpha1/slice_types.go b/slice/api/v1alpha1/slice_types.go
index a09768513..65df27536 100644
--- a/slice/api/v1alpha1/slice_types.go
+++ b/slice/api/v1alpha1/slice_types.go
@@ -65,6 +65,7 @@ type SliceStatus struct {
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
+// +kubebuilder:resource:scope=Cluster
 // +kubebuilder:printcolumn:name="Type",type=string,JSONPath=`.spec.type`
 // +kubebuilder:printcolumn:name="Topology",type=string,JSONPath=`.spec.topology`
 // +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`
diff --git a/slice/config/crd/bases/slice.accelerator.gke.io_slices.yaml b/slice/config/crd/bases/slice.accelerator.gke.io_slices.yaml
index e07c95288..5f32552da 100644
--- a/slice/config/crd/bases/slice.accelerator.gke.io_slices.yaml
+++ b/slice/config/crd/bases/slice.accelerator.gke.io_slices.yaml
@@ -12,7 +12,7 @@ spec:
     listKind: SliceList
     plural: slices
     singular: slice
-  scope: Namespaced
+  scope: Cluster
   versions:
   - additionalPrinterColumns:
     - jsonPath: .spec.type
diff --git a/slice/internal/controller/indexer.go b/slice/internal/controller/indexer.go
index 5f8c58223..f43fd59c0 100644
--- a/slice/internal/controller/indexer.go
+++ b/slice/internal/controller/indexer.go
@@ -25,24 +25,50 @@ import (
 	kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
 
 	"tpu-slice-controller/api/v1alpha1"
+	"tpu-slice-controller/internal/core"
 	"tpu-slice-controller/internal/util/slices"
 )
 
 const (
-	OwnerReferenceUID = "metadata.ownerReferences.uid"
+	OwnerReferenceUID      = "metadata.ownerReferences.uid"
+	WorkloadNamespaceIndex = "workload.namespace"
+	WorkloadNameIndex      = "workload.name"
 )
 
 func indexOwnerReferenceUID(obj client.Object) []string {
 	return slices.Map(obj.GetOwnerReferences(), func(o *metav1.OwnerReference) string { return string(o.UID) })
 }
 
+func indexSliceByWorkloadNamespace(obj client.Object) []string {
+	if slice, ok := obj.(*v1alpha1.Slice); ok {
+		if ns, found := slice.GetAnnotations()[core.OwnerWorkloadNamespaceAnnotation]; found {
+			return []string{ns}
+		}
+	}
+	return nil
+}
+
+func indexSliceByWorkloadName(obj client.Object) []string {
+	if slice, ok := obj.(*v1alpha1.Slice); ok {
+		if name, found := slice.GetAnnotations()[core.OwnerWorkloadNameAnnotation]; found {
+			return []string{name}
+		}
+	}
+	return nil
+}
+
 // SetupIndexer configures the indexer to index specific fields for kueue.Workload and v1alpha1.Slice resources.
 func SetupIndexer(ctx context.Context, indexer client.FieldIndexer) error {
 	if err := indexer.IndexField(ctx, &kueue.Workload{}, OwnerReferenceUID, indexOwnerReferenceUID); err != nil {
 		return fmt.Errorf("setting index on ownerReferences.uid for Workload: %w", err)
 	}
-	if err := indexer.IndexField(ctx, &v1alpha1.Slice{}, OwnerReferenceUID, indexOwnerReferenceUID); err != nil {
-		return fmt.Errorf("setting index on ownerReferences.uid for Slice: %w", err)
+	// Since Slice is now cluster-scoped, it cannot have a controller owner reference to a namespaced Workload.
+	// We use annotations for linking Slices to Workloads.
+	if err := indexer.IndexField(ctx, &v1alpha1.Slice{}, WorkloadNamespaceIndex, indexSliceByWorkloadNamespace); err != nil {
+		return fmt.Errorf("setting index on workload namespace for Slice: %w", err)
+	}
+	if err := indexer.IndexField(ctx, &v1alpha1.Slice{}, WorkloadNameIndex, indexSliceByWorkloadName); err != nil {
+		return fmt.Errorf("setting index on workload name for Slice: %w", err)
 	}
 	return nil
 }
diff --git a/slice/internal/controller/workload_controller.go b/slice/internal/controller/workload_controller.go
index 557137ee4..91305a8a1 100644
--- a/slice/internal/controller/workload_controller.go
+++ b/slice/internal/controller/workload_controller.go
@@ -282,8 +282,10 @@ func (r *WorkloadReconciler) cleanupSlices(ctx context.Context, wl *kueue.Worklo
 func (r *WorkloadReconciler) findWorkloadSlices(ctx context.Context, wl *kueue.Workload) ([]v1alpha1.Slice, error) {
 	slices := &v1alpha1.SliceList{}
 	opts := []client.ListOption{
-		client.InNamespace(wl.Namespace),
-		client.MatchingFields{OwnerReferenceUID: string(wl.UID)},
+		client.MatchingFields{
+			WorkloadNamespaceIndex: wl.Namespace,
+			WorkloadNameIndex:      wl.Name,
+		},
 	}
 	if err := r.client.List(ctx, slices, opts...); err != nil {
 		return nil, err
@@ -463,7 +465,7 @@ func (r *WorkloadReconciler) syncSlices(
 			continue
 		}
 
-		sliceName := core.SliceName(wl.Name, psa.Name)
+		sliceName := core.SliceName(wl.Namespace, wl.Name, psa.Name)
 
 		if _, exist := slicesByName[sliceName]; exist {
 			// Slice already exists, nothing to do.
@@ -511,10 +513,9 @@ func (r *WorkloadReconciler) createSlice(ctx context.Context, wl *kueue.Workload
 	slice := core.SliceWithMetadata(wl, psa.Name)
 	log := ctrl.LoggerFrom(ctx).WithValues("slice", klog.KObj(slice))
 	log.V(3).Info("Creating Slice")
-
-	if err := controllerutil.SetControllerReference(wl, slice, r.client.Scheme()); err != nil {
-		return nil, err
-	}
+	// Since Slice is a cluster-scoped object and Workload is namespaced,
+	// we cannot set a controller owner reference. The Workload's namespace and name
+	// are stored as annotations on the Slice for lookup.
 	parseTopologyAssignmentIntoPartitionIds(slice, psa.TopologyAssignment, nodes)
 
 	ps := podset.FindPodSetByName(wl.Spec.PodSets, psa.Name)
@@ -522,7 +523,7 @@ func (r *WorkloadReconciler) createSlice(ctx context.Context, wl *kueue.Workload
 	slice.Spec.Topology = core.GetTPUTopology(ps.Template)
 
 	if err := r.client.Create(ctx, slice); err != nil {
-		msg := fmt.Sprintf("Error creating Slice %q: %v", client.ObjectKeyFromObject(slice), err)
+		msg := fmt.Sprintf("Error creating Slice %q: %v", slice.Name, err)
 		log.Error(err, msg)
 		r.record.Event(wl, corev1.EventTypeWarning, FailedCreateSliceEventType, api.TruncateEventMessage(msg))
 		ac.State = kueue.CheckStatePending
@@ -547,7 +548,7 @@ func (r *WorkloadReconciler) updateWorkloadAdmissionCheckStatus(ctx context.Cont
 func buildCreationEventMessage(slices []v1alpha1.Slice) string {
 	sliceNames := make([]string, len(slices))
 	for index, slice := range slices {
-		sliceNames[index] = fmt.Sprintf("%q", client.ObjectKeyFromObject(&slice))
+		sliceNames[index] = fmt.Sprintf("%q", slice.Name)
 	}
 	sort.Strings(sliceNames)
 	return fmt.Sprintf("The Slices %s have been created", strings.Join(sliceNames, ", "))
@@ -672,18 +673,20 @@ func (h *sliceHandler) handleEvent(ctx context.Context, obj client.Object, q wor
 
 	log := ctrl.LoggerFrom(ctx)
 
-	owner := metav1.GetControllerOf(slice)
-	if owner == nil {
-		log.V(3).Info("Owner not found")
+	workloadNamespace, nsFound := slice.Annotations[core.OwnerWorkloadNamespaceAnnotation]
+	workloadName, nameFound := slice.Annotations[core.OwnerWorkloadNameAnnotation]
+
+	if !nsFound || !nameFound {
+		log.V(3).Info("Slice is missing workload owner annotations, skipping event handling", "slice", klog.KObj(slice))
 		return
 	}
 
-	log.V(3).Info("Handle Slice event", "workload", klog.KRef(slice.Namespace, slice.Name))
+	log.V(3).Info("Handle Slice event", "workload", klog.KRef(workloadNamespace, workloadName))
 
 	req := reconcile.Request{
 		NamespacedName: types.NamespacedName{
-			Name:      owner.Name,
-			Namespace: slice.Namespace,
+			Name:      workloadName,
+			Namespace: workloadNamespace,
 		},
 	}
 
diff --git a/slice/internal/controller/workload_controller_test.go b/slice/internal/controller/workload_controller_test.go
index 40d3f8d20..a7624e650 100644
--- a/slice/internal/controller/workload_controller_test.go
+++ b/slice/internal/controller/workload_controller_test.go
@@ -59,9 +59,8 @@ var (
 )
 
 var (
-	jobSetGVK   = jobset.SchemeGroupVersion.WithKind("JobSet")
-	jobGVK      = batchv1.SchemeGroupVersion.WithKind("Job")
-	workloadGVK = kueue.SchemeGroupVersion.WithKind("Workload")
+	jobSetGVK = jobset.SchemeGroupVersion.WithKind("JobSet")
+	jobGVK    = batchv1.SchemeGroupVersion.WithKind("Job")
 )
 
 func TestWorkloadReconciler(t *testing.T) {
@@ -85,9 +84,9 @@ func TestWorkloadReconciler(t *testing.T) {
 		}
 	}
 
-	buildEventRecord := func(eventType, reason, message string) utiltesting.EventRecord {
+	buildEventRecord := func(namespace, eventType, reason, message string) utiltesting.EventRecord {
 		return utiltesting.EventRecord{
-			Key:       client.ObjectKey{Namespace: corev1.NamespaceDefault, Name: baseWorkloadName},
+			Key:       client.ObjectKey{Namespace: namespace, Name: baseWorkloadName},
 			EventType: eventType,
 			Reason:    reason,
 			Message:   message,
@@ -133,14 +132,15 @@ func TestWorkloadReconciler(t *testing.T) {
 	baseWorkloadWrapper := utiltesting.MakeWorkload(baseWorkloadName, corev1.NamespaceDefault).
 		UID(baseWorkloadName).
 		AdmissionCheck(buildAdmissionCheckState(kueue.CheckStatePending, ""))
-	baseSlice1Wrapper := utiltesting.MakeSliceWrapper(core.SliceName(baseWorkloadName, "ps1"), corev1.NamespaceDefault).
+	baseSlice1Wrapper := utiltesting.MakeSliceWrapper(core.SliceName(corev1.NamespaceDefault, baseWorkloadName, "ps1")).
 		Type(slice.TypeTpu7x).
 		Topology("4x4x12").
-		ControllerReference(workloadGVK, baseWorkloadName, baseWorkloadName).
+		OwnerWorkloadAnnotations(corev1.NamespaceDefault, baseWorkloadName).
 		PartitionIds("subblock1")
-	baseSlice2Wrapper := baseSlice1Wrapper.Clone().Name(core.SliceName(baseWorkloadName, "ps2")).
+	baseSlice2Wrapper := baseSlice1Wrapper.Clone().Name(core.SliceName(corev1.NamespaceDefault, baseWorkloadName, "ps2")).
 		Type(slice.TypeTpu7x).
 		Topology("4x4x12").
+		OwnerWorkloadAnnotations(corev1.NamespaceDefault, baseWorkloadName).
 		PartitionIds("subblock2")
 
 	worker1Node := utiltesting.MakeNode("worker1").Label("cloud.google.com/gke-tpu-slice-4x4x4-id", "subblock1")
@@ -737,8 +737,8 @@ func TestWorkloadReconciler(t *testing.T) {
 				*baseSlice2Wrapper.DeepCopy(),
 			},
 			wantEvents: []utiltesting.EventRecord{
-				buildEventRecord(corev1.EventTypeNormal, SlicesCreatedEventType,
-					`The Slices "default/workload-ps1", "default/workload-ps2" have been created`),
+				buildEventRecord(corev1.NamespaceDefault, corev1.EventTypeNormal, SlicesCreatedEventType,
+					`The Slices "default-workload-ps1", "default-workload-ps2" have been created`),
 			},
 		},
 		"should create Slices only for relevant PodSets (invalid pod template)": {
@@ -789,8 +789,8 @@ func TestWorkloadReconciler(t *testing.T) {
 				*baseSlice1Wrapper.DeepCopy(),
 			},
 			wantEvents: []utiltesting.EventRecord{
-				buildEventRecord(corev1.EventTypeNormal, SlicesCreatedEventType,
-					`The Slices "default/workload-ps1" have been created`),
+				buildEventRecord(corev1.NamespaceDefault, corev1.EventTypeNormal, SlicesCreatedEventType,
+					`The Slices "default-workload-ps1" have been created`),
 			},
 		},
 		"should create Slices only for relevant PodSets (invalid assignment)": {
@@ -837,8 +837,8 @@ func TestWorkloadReconciler(t *testing.T) {
 				*baseSlice1Wrapper.DeepCopy(),
 			},
 			wantEvents: []utiltesting.EventRecord{
-				buildEventRecord(corev1.EventTypeNormal, SlicesCreatedEventType,
-					`The Slices "default/workload-ps1" have been created`),
+				buildEventRecord(corev1.NamespaceDefault, corev1.EventTypeNormal, SlicesCreatedEventType,
+					`The Slices "default-workload-ps1" have been created`),
 			},
 		},
 		"should create missed Slices": {
@@ -869,8 +869,8 @@ func TestWorkloadReconciler(t *testing.T) {
 				*baseSlice2Wrapper.DeepCopy(),
 			},
 			wantEvents: []utiltesting.EventRecord{
-				buildEventRecord(corev1.EventTypeNormal, SlicesCreatedEventType,
-					`The Slices "default/workload-ps2" have been created`),
+				buildEventRecord(corev1.NamespaceDefault, corev1.EventTypeNormal, SlicesCreatedEventType,
+					`The Slices "default-workload-ps2" have been created`),
 			},
 		},
 		"parse TAS Assignment to populate PartitionIDs in Slice": {
@@ -900,8 +900,8 @@ func TestWorkloadReconciler(t *testing.T) {
 				*baseSlice2Wrapper.DeepCopy(),
 			},
 			wantEvents: []utiltesting.EventRecord{
-				buildEventRecord(corev1.EventTypeNormal, SlicesCreatedEventType,
-					`The Slices "default/workload-ps1", "default/workload-ps2" have been created`),
+				buildEventRecord(corev1.NamespaceDefault, corev1.EventTypeNormal, SlicesCreatedEventType,
+					`The Slices "default-workload-ps1", "default-workload-ps2" have been created`),
 			},
 		},
 		"parse TAS Assignment to populate NodeSelector in Slice (hostname)": {
@@ -965,8 +965,8 @@ func TestWorkloadReconciler(t *testing.T) {
 				*baseSlice2Wrapper.DeepCopy(),
 			},
 			wantEvents: []utiltesting.EventRecord{
-				buildEventRecord(corev1.EventTypeNormal, SlicesCreatedEventType,
-					`The Slices "default/workload-ps1", "default/workload-ps2" have been created`),
+				buildEventRecord(corev1.NamespaceDefault, corev1.EventTypeNormal, SlicesCreatedEventType,
+					`The Slices "default-workload-ps1", "default-workload-ps2" have been created`),
 			},
 		},
 		"error on Slice creation": {
@@ -994,13 +994,13 @@ func TestWorkloadReconciler(t *testing.T) {
 					ReserveQuota(baseAdmission, now).
 					ControllerReference(jobSetGVK, baseJobSetName, baseJobSetName).
 					Finalizers(SliceControllerName).
-					AdmissionCheck(buildAdmissionCheckState(kueue.CheckStatePending, `Error creating Slice "default/workload-ps1": test error`)).
+					AdmissionCheck(buildAdmissionCheckState(kueue.CheckStatePending, `Error creating Slice "default-workload-ps1": test error`)).
 					Obj(),
 			},
 			wantErr: errTest,
 			wantEvents: []utiltesting.EventRecord{
-				buildEventRecord(corev1.EventTypeWarning, FailedCreateSliceEventType,
-					`Error creating Slice "default/workload-ps1": test error`),
+				buildEventRecord(corev1.NamespaceDefault, corev1.EventTypeWarning, FailedCreateSliceEventType,
+					`Error creating Slice "default-workload-ps1": test error`),
 			},
 		},
 		"should update the Workload's AdmissionCheckState": {
@@ -1148,7 +1148,7 @@ func TestWorkloadReconciler(t *testing.T) {
 				*baseSlice2Wrapper.Clone().Active().Obj(),
 			},
 			wantEvents: []utiltesting.EventRecord{
-				buildEventRecord(corev1.EventTypeNormal, AdmissionCheckUpdatedEventType,
+				buildEventRecord(corev1.NamespaceDefault, corev1.EventTypeNormal, AdmissionCheckUpdatedEventType,
 					fmt.Sprintf(`Admission check %q updated state from "Pending" to "Ready"`, baseACName)),
 			},
 		},
@@ -1180,7 +1180,7 @@ func TestWorkloadReconciler(t *testing.T) {
 				*baseSlice1Wrapper.Clone().Active().Obj(),
 				*baseSlice2Wrapper.Clone().Degraded().Obj()},
 			wantEvents: []utiltesting.EventRecord{
-				buildEventRecord(corev1.EventTypeNormal, AdmissionCheckUpdatedEventType,
+				buildEventRecord(corev1.NamespaceDefault, corev1.EventTypeNormal, AdmissionCheckUpdatedEventType,
 					fmt.Sprintf(`Admission check %q updated state from "Pending" to "Ready"`, baseACName)),
 			},
 		},
@@ -1212,7 +1212,7 @@ func TestWorkloadReconciler(t *testing.T) {
 				*baseSlice1Wrapper.Clone().Active().Obj(),
 			},
 			wantEvents: []utiltesting.EventRecord{
-				buildEventRecord(corev1.EventTypeNormal, AdmissionCheckUpdatedEventType,
+				buildEventRecord(corev1.NamespaceDefault, corev1.EventTypeNormal, AdmissionCheckUpdatedEventType,
 					fmt.Sprintf(`Admission check %q updated state from "Pending" to "Retry"`, baseACName)),
 			},
 		},
@@ -1274,10 +1274,98 @@ func TestWorkloadReconciler(t *testing.T) {
 				*baseSlice2Wrapper.Clone().Active().Obj(),
 			},
 			wantEvents: []utiltesting.EventRecord{
-				buildEventRecord(corev1.EventTypeNormal, AdmissionCheckUpdatedEventType,
+				buildEventRecord(corev1.NamespaceDefault, corev1.EventTypeNormal, AdmissionCheckUpdatedEventType,
 					fmt.Sprintf(`Admission check %q updated state from "Pending" to "Ready"`, baseACName)),
 			},
 		},
+		"should create a slice for another workload with the same name but in a different namespace": {
+			request: types.NamespacedName{Name: baseWorkloadName, Namespace: "namespace2"},
+			objs: []client.Object{
+				worker1Node.DeepCopy(),
+				worker2Node.DeepCopy(),
+				baseAdmissionCheckWrapper.DeepCopy(),
+				utiltesting.MakeWorkload(baseWorkloadName, "namespace1").
+					UID(baseWorkloadName+"-ns1").
+					PodSets(basePodSets...).
+					ReserveQuota(baseAdmission, now).
+					ControllerReference(jobSetGVK, baseJobSetName, baseJobSetName).
+					Finalizers(SliceControllerName).
+					AdmissionCheck(buildAdmissionCheckState(kueue.CheckStateReady, `Slices are in states: 2 ACTIVE`)).
+					Obj(),
+				utiltesting.MakeSliceWrapper(core.SliceName("namespace1", baseWorkloadName, "ps1")).
+					Type(slice.TypeTpu7x).
+					Topology("4x4x12").
+					OwnerWorkloadAnnotations("namespace1", baseWorkloadName).
+					PartitionIds("subblock1").
+					Active().
+					Obj(),
+				utiltesting.MakeSliceWrapper(core.SliceName("namespace1", baseWorkloadName, "ps2")).
+					Type(slice.TypeTpu7x).
+					Topology("4x4x12").
+					OwnerWorkloadAnnotations("namespace1", baseWorkloadName).
+					PartitionIds("subblock2").
+					Active().
+					Obj(),
+				utiltesting.MakeWorkload(baseWorkloadName, "namespace2").
+					UID(baseWorkloadName+"-ns2").
+					PodSets(basePodSets...).
+					ReserveQuota(baseAdmission, now).
+					ControllerReference(jobSetGVK, baseJobSetName, baseJobSetName).
+					Finalizers(SliceControllerName).
+					AdmissionCheck(buildAdmissionCheckState(kueue.CheckStatePending, "")).
+					Obj(),
+			},
+			wantWorkloads: []kueue.Workload{
+				*utiltesting.MakeWorkload(baseWorkloadName, "namespace1").
+					UID(baseWorkloadName+"-ns1").
+					PodSets(basePodSets...).
+					ReserveQuota(baseAdmission, now).
+					ControllerReference(jobSetGVK, baseJobSetName, baseJobSetName).
+					Finalizers(SliceControllerName).
+					AdmissionCheck(buildAdmissionCheckState(kueue.CheckStateReady, `Slices are in states: 2 ACTIVE`)).
+					Obj(),
+				*utiltesting.MakeWorkload(baseWorkloadName, "namespace2").
+					UID(baseWorkloadName+"-ns2").
+					PodSets(basePodSets...).
+					ReserveQuota(baseAdmission, now).
+					ControllerReference(jobSetGVK, baseJobSetName, baseJobSetName).
+					Finalizers(SliceControllerName).
+					AdmissionCheck(buildAdmissionCheckState(kueue.CheckStatePending, `Slices are in states: 2 CREATED`)).
+					Obj(),
+			},
+			wantSlices: []slice.Slice{
+				*utiltesting.MakeSliceWrapper(core.SliceName("namespace1", baseWorkloadName, "ps1")).
+					Type(slice.TypeTpu7x).
+					Topology("4x4x12").
+					OwnerWorkloadAnnotations("namespace1", baseWorkloadName).
+					PartitionIds("subblock1").
+					Active().
+					Obj(),
+				*utiltesting.MakeSliceWrapper(core.SliceName("namespace1", baseWorkloadName, "ps2")).
+					Type(slice.TypeTpu7x).
+					Topology("4x4x12").
+					OwnerWorkloadAnnotations("namespace1", baseWorkloadName).
+					PartitionIds("subblock2").
+					Active().
+					Obj(),
+				*utiltesting.MakeSliceWrapper(core.SliceName("namespace2", baseWorkloadName, "ps1")).
+					Type(slice.TypeTpu7x).
+					Topology("4x4x12").
+					OwnerWorkloadAnnotations("namespace2", baseWorkloadName).
+					PartitionIds("subblock1").
+					Obj(),
+				*utiltesting.MakeSliceWrapper(core.SliceName("namespace2", baseWorkloadName, "ps2")).
+					Type(slice.TypeTpu7x).
+					Topology("4x4x12").
+					OwnerWorkloadAnnotations("namespace2", baseWorkloadName).
+					PartitionIds("subblock2").
+					Obj(),
+			},
+			wantEvents: []utiltesting.EventRecord{
+				buildEventRecord("namespace2", corev1.EventTypeNormal, SlicesCreatedEventType,
+					`The Slices "namespace2-workload-ps1", "namespace2-workload-ps2" have been created`),
+			},
+		},
 	}
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
@@ -1357,8 +1445,8 @@ func TestSliceHandlerHandleEvent(t *testing.T) {
 			obj: utiltesting.MakeWorkload(baseWlName, corev1.NamespaceDefault).Obj(),
 		},
 		"has a workload that should be handled": {
-			obj: utiltesting.MakeSliceWrapper(baseSliceName, corev1.NamespaceDefault).
-				ControllerReference(workloadGVK, baseWlName, baseWlName).
+			obj: utiltesting.MakeSliceWrapper(baseSliceName).
+				OwnerWorkloadAnnotations(corev1.NamespaceDefault, baseWlName).
 				Obj(),
 			want: []requestDuration{
 				{
diff --git a/slice/internal/core/constants.go b/slice/internal/core/constants.go
index a7dc55c4a..fa2b46f28 100644
--- a/slice/internal/core/constants.go
+++ b/slice/internal/core/constants.go
@@ -52,3 +52,8 @@ const (
 	// MMIGHealthStatusUnknown indicates the MMIG health is unknown.
 	MMIGHealthStatusUnknown MMIGHealthStatus = "UNKNOWN"
 )
+
+const (
+	OwnerWorkloadNamespaceAnnotation = "slice.accelerator.gke.io/owner-workload-namespace"
+	OwnerWorkloadNameAnnotation      = "slice.accelerator.gke.io/owner-workload-name"
+)
diff --git a/slice/internal/core/slice.go b/slice/internal/core/slice.go
index 2cb588125..7f8777014 100644
--- a/slice/internal/core/slice.go
+++ b/slice/internal/core/slice.go
@@ -38,14 +38,17 @@ func SliceKeyFromWorkload(wl *kueue.Workload, podSetName kueue.PodSetReference)
 func SliceWithMetadata(wl *kueue.Workload, podSetName kueue.PodSetReference) *v1alpha1.Slice {
 	return &v1alpha1.Slice{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      SliceName(wl.Name, podSetName),
-			Namespace: wl.Namespace,
+			Name: SliceName(wl.Namespace, wl.Name, podSetName),
+			Annotations: map[string]string{
+				OwnerWorkloadNamespaceAnnotation: wl.Namespace,
+				OwnerWorkloadNameAnnotation:      wl.Name,
+			},
 		},
 	}
 }
 
-func SliceName(workloadName string, podSetName kueue.PodSetReference) string {
-	return fmt.Sprintf("%s-%s", workloadName, podSetName)
+func SliceName(ns string, workloadName string, podSetName kueue.PodSetReference) string {
+	return fmt.Sprintf("%s-%s-%s", ns, workloadName, podSetName)
 }
 
 func isStale(slice *v1alpha1.Slice) bool {
diff --git a/slice/internal/util/testing/wrappers.go b/slice/internal/util/testing/wrappers.go
index 823afe4ee..68ebaa52a 100644
--- a/slice/internal/util/testing/wrappers.go
+++ b/slice/internal/util/testing/wrappers.go
@@ -250,12 +250,11 @@ type SliceWrapper struct {
 	v1alpha1.Slice
 }
 
-func MakeSliceWrapper(name, namespace string) *SliceWrapper {
+func MakeSliceWrapper(name string) *SliceWrapper {
 	return &SliceWrapper{
 		v1alpha1.Slice{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      name,
-				Namespace: namespace,
+				Name: name,
 			},
 		},
 	}
@@ -289,6 +288,15 @@ func (s *SliceWrapper) ControllerReference(gvk schema.GroupVersionKind, name, ui
 	return s
 }
 
+func (s *SliceWrapper) OwnerWorkloadAnnotations(ns, name string) *SliceWrapper {
+	if s.Annotations == nil {
+		s.Annotations = make(map[string]string)
+	}
+	s.Annotations[core.OwnerWorkloadNameAnnotation] = name
+	s.Annotations[core.OwnerWorkloadNamespaceAnnotation] = ns
+	return s
+}
+
 func (s *SliceWrapper) PartitionIds(ids ...string) *SliceWrapper {
 	s.Spec.PartitionIds = ids
 	return s
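With the controller reference gone, the annotation pair written by SliceWithMetadata plus the two field indexes registered in SetupIndexer are the only link between a Slice and its owning Workload. A minimal sketch of the resulting lookup pattern, mirroring findWorkloadSlices above (the helper name slicesForWorkload is hypothetical; like findWorkloadSlices, it assumes a controller-runtime version whose cached client supports listing with more than one MatchingFields key):

package controller

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"

	"tpu-slice-controller/api/v1alpha1"
)

// slicesForWorkload lists every Slice whose owner-workload annotations point
// at the given Workload. Both indexed fields must match, so Workloads with
// the same name in different namespaces resolve to disjoint Slice sets.
func slicesForWorkload(ctx context.Context, c client.Client, wl *kueue.Workload) ([]v1alpha1.Slice, error) {
	list := &v1alpha1.SliceList{}
	if err := c.List(ctx, list, client.MatchingFields{
		WorkloadNamespaceIndex: wl.Namespace, // backed by indexSliceByWorkloadNamespace
		WorkloadNameIndex:      wl.Name,      // backed by indexSliceByWorkloadName
	}); err != nil {
		return nil, err
	}
	return list.Items, nil
}

One design consequence worth noting: the Kubernetes garbage collector only follows ownerReferences, so dropping the controller reference also drops automatic cascade deletion of Slices when their Workload goes away. The SliceControllerName finalizer and the reconciler's cleanupSlices path remain the sole cleanup mechanism for annotation-linked Slices.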