Commit 5d35be5

fix: address an issue where agent might panic if CSA cannot be used and apply strategy is not fully configured (#117)
1 parent d437b2c commit 5d35be5

File tree

4 files changed: +443 -2 lines changed

pkg/controllers/workapplier/apply.go

Lines changed: 6 additions & 1 deletion

@@ -168,7 +168,12 @@ func (r *Reconciler) apply(
 		return r.serverSideApply(
 			ctx,
 			gvr, manifestObjCopy, inMemberClusterObj,
-			applyStrategy.ServerSideApplyConfig.ForceConflicts, isOptimisticLockEnabled, false,
+			// When falling back to SSA, always disable force apply ops (this is also the default
+			// behavior).
+			//
+			// Note that the work applier might still enable force apply ops if it finds that
+			// self-conflicts might occur.
+			false, isOptimisticLockEnabled, false,
 		)
 	case applyStrategy.Type == fleetv1beta1.ApplyStrategyTypeServerSideApply:
 		// The apply strategy dictates that server-side apply should be used.
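This one-line change is the actual fix for the panic in the commit title: the fallback path is taken when a client-side apply strategy hits an object CSA cannot handle, and on that path applyStrategy.ServerSideApplyConfig is typically nil because the user never configured the SSA section. Dereferencing .ForceConflicts on that nil pointer panicked the agent. A minimal, self-contained sketch of the failure mode (the type and field names below are simplified stand-ins for illustration, not the repo's actual definitions):

package main

import "fmt"

// Simplified stand-ins for the real apply strategy types.
type serverSideApplyConfig struct {
	ForceConflicts bool
}

type applyStrategy struct {
	Type                  string
	ServerSideApplyConfig *serverSideApplyConfig // only populated for SSA strategies
}

// fallbackForceConflicts mimics the fixed behavior: never touch the possibly
// nil SSA config; force apply ops are simply disabled on fallback.
func fallbackForceConflicts(_ *applyStrategy) bool {
	return false
}

func main() {
	// A client-side apply strategy whose SSA section was never configured.
	strategy := &applyStrategy{Type: "ClientSideApply"}

	// Old behavior (commented out): reading the config directly panics with a
	// nil pointer dereference when the strategy is not fully configured.
	// _ = strategy.ServerSideApplyConfig.ForceConflicts

	fmt.Println("force conflicts on CSA->SSA fallback:", fallbackForceConflicts(strategy))
}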

pkg/controllers/workapplier/controller_integration_test.go

Lines changed: 226 additions & 1 deletion

@@ -17,6 +17,8 @@ limitations under the License.
 package workapplier
 
 import (
+	"crypto/rand"
+	"encoding/base64"
 	"fmt"
 	"time"
 
@@ -5485,7 +5487,7 @@ var _ = Describe("report diff", func() {
 	})
 })
 
-var _ = Describe("switch apply strategies", func() {
+var _ = Describe("handling different apply strategies", func() {
 	Context("switch from report diff to CSA", Ordered, func() {
 		workName := fmt.Sprintf(workNameTemplate, utils.RandStr())
 		// The environment prepared by the envtest package does not support namespace
@@ -6495,4 +6497,227 @@ var _ = Describe("switch apply strategies", func() {
 			// deletion; consequently this test suite would not attempt to verify its deletion.
 		})
 	})
+
+	Context("falling back from CSA to SSA", Ordered, func() {
+		workName := fmt.Sprintf(workNameTemplate, utils.RandStr())
+		// The environment prepared by the envtest package does not support namespace
+		// deletion; each test case would use a new namespace.
+		nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr())
+
+		var appliedWorkOwnerRef *metav1.OwnerReference
+		var regularNS *corev1.Namespace
+		var oversizedCM *corev1.ConfigMap
+
+		BeforeAll(func() {
+			// Prepare a NS object.
+			regularNS = ns.DeepCopy()
+			regularNS.Name = nsName
+			regularNSJSON := marshalK8sObjJSON(regularNS)
+
+			// Prepare an oversized configMap object.
+
+			// Generate a large byte array.
+			//
+			// Kubernetes will reject configMaps larger than 1048576 bytes (~1 MB);
+			// and when an object's spec size exceeds 262144 bytes, KubeFleet will not
+			// be able to use client-side apply with the object as it cannot set
+			// a last applied configuration annotation of that size. Consequently,
+			// for this test case, it prepares a configMap object of 600000 bytes so
+			// that Kubernetes will accept it but CSA cannot use it, forcing the
+			// work applier to fall back to server-side apply.
+			randomBytes := make([]byte, 600000)
+			// Note that this method never returns an error and will always fill the given
+			// slice completely.
+			_, _ = rand.Read(randomBytes)
+			// Encode the random bytes to a base64 string.
+			randomBase64Str := base64.StdEncoding.EncodeToString(randomBytes)
+			oversizedCM = &corev1.ConfigMap{
+				TypeMeta: metav1.TypeMeta{
+					Kind:       "ConfigMap",
+					APIVersion: "v1",
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: nsName,
+					Name:      configMapName,
+				},
+				Data: map[string]string{
+					"randomBase64Str": randomBase64Str,
+				},
+			}
+			oversizedCMJSON := marshalK8sObjJSON(oversizedCM)
+
+			// Create a new Work object with all the manifest JSONs and proper apply strategy.
+			createWorkObject(workName, nil, regularNSJSON, oversizedCMJSON)
+		})
+
+		It("should add cleanup finalizer to the Work object", func() {
+			finalizerAddedActual := workFinalizerAddedActual(workName)
+			Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add cleanup finalizer to the Work object")
+		})
+
+		It("should prepare an AppliedWork object", func() {
+			appliedWorkCreatedActual := appliedWorkCreatedActual(workName)
+			Eventually(appliedWorkCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to prepare an AppliedWork object")
+
+			appliedWorkOwnerRef = prepareAppliedWorkOwnerRef(workName)
+		})
+
+		It("should apply the manifests", func() {
+			// Ensure that the NS object has been applied as expected.
+			regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef)
+			Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object")
+
+			Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object")
+
+			// Ensure that the oversized ConfigMap object has been applied as expected via SSA.
+			Eventually(func() error {
+				gotConfigMap := &corev1.ConfigMap{}
+				if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, gotConfigMap); err != nil {
+					return fmt.Errorf("failed to retrieve the ConfigMap object: %w", err)
+				}

+				wantConfigMap := oversizedCM.DeepCopy()
+				wantConfigMap.TypeMeta = metav1.TypeMeta{}
+				wantConfigMap.OwnerReferences = []metav1.OwnerReference{
+					*appliedWorkOwnerRef,
+				}
+
+				rebuiltConfigMap := &corev1.ConfigMap{
+					ObjectMeta: metav1.ObjectMeta{
+						Namespace:       gotConfigMap.Namespace,
+						Name:            gotConfigMap.Name,
+						OwnerReferences: gotConfigMap.OwnerReferences,
+					},
+					Data: gotConfigMap.Data,
+				}
+				if diff := cmp.Diff(rebuiltConfigMap, wantConfigMap); diff != "" {
+					return fmt.Errorf("configMap diff (-got +want):\n%s", diff)
+				}
+
+				// Perform additional checks to ensure that the work applier has fallen back
+				// from CSA to SSA.
+				lastAppliedConf, foundAnnotation := gotConfigMap.Annotations[fleetv1beta1.LastAppliedConfigAnnotation]
+				if foundAnnotation && len(lastAppliedConf) > 0 {
+					return fmt.Errorf("the configMap object has annotation %s (value: %s) present when SSA should be used", fleetv1beta1.LastAppliedConfigAnnotation, lastAppliedConf)
+				}
+
+				foundFieldMgr := false
+				fieldMgrs := gotConfigMap.GetManagedFields()
+				for _, fieldMgr := range fieldMgrs {
+					// For simplicity reasons, here the test case verifies only against the field
+					// manager name.
+					if fieldMgr.Manager == workFieldManagerName {
+						foundFieldMgr = true
+					}
+				}
+				if !foundFieldMgr {
+					return fmt.Errorf("the configMap object does not list the KubeFleet member agent as a field manager (%s) when SSA should be used", workFieldManagerName)
+				}
+
+				return nil
+			}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the oversized configMap object")
+
+			Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, oversizedCM)).To(Succeed(), "Failed to retrieve the ConfigMap object")
+		})
+
+		It("should update the Work object status", func() {
+			// Prepare the status information.
+			workConds := []metav1.Condition{
+				{
+					Type:   fleetv1beta1.WorkConditionTypeApplied,
+					Status: metav1.ConditionTrue,
+					Reason: WorkAllManifestsAppliedReason,
+				},
+				{
+					Type:   fleetv1beta1.WorkConditionTypeAvailable,
+					Status: metav1.ConditionTrue,
+					Reason: WorkAllManifestsAvailableReason,
+				},
+			}
+			manifestConds := []fleetv1beta1.ManifestCondition{
+				{
+					Identifier: fleetv1beta1.WorkResourceIdentifier{
+						Ordinal:  0,
+						Group:    "",
+						Version:  "v1",
+						Kind:     "Namespace",
+						Resource: "namespaces",
+						Name:     nsName,
+					},
+					Conditions: []metav1.Condition{
+						{
+							Type:               fleetv1beta1.WorkConditionTypeApplied,
+							Status:             metav1.ConditionTrue,
+							Reason:             string(ManifestProcessingApplyResultTypeApplied),
+							ObservedGeneration: 0,
+						},
+						{
+							Type:               fleetv1beta1.WorkConditionTypeAvailable,
+							Status:             metav1.ConditionTrue,
+							Reason:             string(ManifestProcessingAvailabilityResultTypeAvailable),
+							ObservedGeneration: 0,
+						},
+					},
+				},
+				{
+					Identifier: fleetv1beta1.WorkResourceIdentifier{
+						Ordinal:   1,
+						Group:     "",
+						Version:   "v1",
+						Kind:      "ConfigMap",
+						Resource:  "configmaps",
+						Name:      configMapName,
+						Namespace: nsName,
+					},
+					Conditions: []metav1.Condition{
+						{
+							Type:               fleetv1beta1.WorkConditionTypeApplied,
+							Status:             metav1.ConditionTrue,
+							Reason:             string(ManifestProcessingApplyResultTypeApplied),
+							ObservedGeneration: 0,
+						},
+						{
+							Type:               fleetv1beta1.WorkConditionTypeAvailable,
+							Status:             metav1.ConditionTrue,
+							Reason:             string(ManifestProcessingAvailabilityResultTypeAvailable),
+							ObservedGeneration: 0,
+						},
+					},
+				},
+			}
+
+			workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil)
+			Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status")
+		})
+
+		AfterAll(func() {
+			// Delete the Work object and related resources.
+			deleteWorkObject(workName)
+
+			// Ensure that all applied manifests have been removed.
+			appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName)
+			Eventually(appliedWorkRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object")
+
+			Eventually(func() error {
+				// Delete the ConfigMap object (if it is still around) and confirm
+				// that it is gone.
+				cm := &corev1.ConfigMap{
+					ObjectMeta: metav1.ObjectMeta{
+						Namespace: nsName,
+						Name:      configMapName,
+					},
+				}
+				if err := memberClient.Delete(ctx, cm); err != nil && !errors.IsNotFound(err) {
+					return fmt.Errorf("failed to delete the ConfigMap object: %w", err)
+				}
+
+				if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, cm); !errors.IsNotFound(err) {
+					return fmt.Errorf("the ConfigMap object still exists or an unexpected error occurred: %w", err)
+				}
+				return nil
+			}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the oversized configMap object")
+
+			// The environment prepared by the envtest package does not support namespace
+			// deletion; consequently this test suite would not attempt to verify its deletion.
+		})
+	})
 })
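The size thresholds this test relies on are worth spelling out: client-side apply records the object's full JSON in the kubectl.kubernetes.io/last-applied-configuration annotation, and Kubernetes caps the total annotation payload on an object at 262144 bytes (256 KiB), while a ConfigMap itself may grow to roughly 1 MiB. An object between the two limits is accepted by the API server yet unusable with CSA, which is exactly what forces the fallback. A hedged sketch of such a size check follows; the helper name and the exact comparison are illustrative, not the work applier's actual code:

package workapplier

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// totalAnnotationSizeLimitB mirrors the Kubernetes validation constant that
// caps the combined size of all annotations on an object (256 KiB).
const totalAnnotationSizeLimitB = 256 * (1 << 10) // 262144 bytes

// needsSSAFallback is an illustrative helper: if the manifest's JSON form
// cannot fit in the last-applied-configuration annotation, client-side apply
// is off the table and the applier must use server-side apply instead.
func needsSSAFallback(manifestObj *unstructured.Unstructured) (bool, error) {
	data, err := manifestObj.MarshalJSON()
	if err != nil {
		return false, err
	}
	return len(data) > totalAnnotationSizeLimitB, nil
}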

pkg/utils/defaulter/work_test.go

Lines changed: 20 additions & 0 deletions

@@ -85,6 +85,26 @@ func TestSetDefaultsWork(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "client-side apply",
+			work: placementv1beta1.Work{
+				Spec: placementv1beta1.WorkSpec{
+					ApplyStrategy: &placementv1beta1.ApplyStrategy{
+						Type: placementv1beta1.ApplyStrategyTypeClientSideApply,
+					},
+				},
+			},
+			want: placementv1beta1.Work{
+				Spec: placementv1beta1.WorkSpec{
+					ApplyStrategy: &placementv1beta1.ApplyStrategy{
+						Type:             placementv1beta1.ApplyStrategyTypeClientSideApply,
+						ComparisonOption: placementv1beta1.ComparisonOptionTypePartialComparison,
+						WhenToApply:      placementv1beta1.WhenToApplyTypeAlways,
+						WhenToTakeOver:   placementv1beta1.WhenToTakeOverTypeAlways,
+					},
+				},
+			},
+		},
 		{
 			name: "all fields are set",
 			work: placementv1beta1.Work{
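This new case pins down the defaulting behavior that pairs with the panic fix: for a client-side apply strategy, defaulting fills in ComparisonOption, WhenToApply, and WhenToTakeOver but leaves ServerSideApplyConfig nil, so downstream code must not assume the SSA section exists. A sketch of defaulting logic consistent with these expectations follows; the function name, body, and import path are assumptions, and the repo's actual defaulter in pkg/utils/defaulter may be organized differently:

package defaulter

import (
	// The module path here is an assumption; adjust to the repo's actual path.
	placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1"
)

// setDefaultsApplyStrategy sketches defaulting behavior consistent with the
// expectations encoded in the test case above.
func setDefaultsApplyStrategy(strategy *placementv1beta1.ApplyStrategy) {
	if strategy.ComparisonOption == "" {
		strategy.ComparisonOption = placementv1beta1.ComparisonOptionTypePartialComparison
	}
	if strategy.WhenToApply == "" {
		strategy.WhenToApply = placementv1beta1.WhenToApplyTypeAlways
	}
	if strategy.WhenToTakeOver == "" {
		strategy.WhenToTakeOver = placementv1beta1.WhenToTakeOverTypeAlways
	}
	// ServerSideApplyConfig is defaulted only for SSA-type strategies; a
	// client-side apply strategy keeps a nil ServerSideApplyConfig, which is
	// exactly why the fallback path in apply.go must not dereference it.
	if strategy.Type == placementv1beta1.ApplyStrategyTypeServerSideApply && strategy.ServerSideApplyConfig == nil {
		strategy.ServerSideApplyConfig = &placementv1beta1.ServerSideApplyConfig{}
	}
}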
