45 changes: 34 additions & 11 deletions k8s/gate/hugservice/hugservice.go
@@ -96,15 +96,7 @@ func (*ServiceReconciler) buildVirtualListenerServicePorts(virtualListeners map[
// reconcileServicePorts patches a single Service's port list if it differs from
// the desired state (reserved ports + VirtualListener ports).
func (r *ServiceReconciler) reconcileServicePorts(ctx context.Context, svc *corev1.Service, desiredVLPorts []corev1.ServicePort) {
// Collect reserved ports from the current service (stat, metrics, …).
var reservedPorts []corev1.ServicePort
for _, p := range svc.Spec.Ports {
if reservedPortNames[p.Name] {
reservedPorts = append(reservedPorts, p)
}
}

desiredPorts := append(reservedPorts, desiredVLPorts...)
desiredPorts := buildDesiredPorts(svc.Spec.Ports, desiredVLPorts)

if portsEqual(svc.Spec.Ports, desiredPorts) {
return
@@ -127,10 +119,41 @@ func (r *ServiceReconciler) reconcileServicePorts(ctx context.Context, svc *core
)
}

// buildDesiredPorts merges the current Service ports with the desired set of
// VirtualListener-derived ports: it keeps reserved ports as-is, appends the
// VL ports, and carries over the NodePort assigned on the existing Service
// whenever the port number matches, so that the allocation stays stable.
func buildDesiredPorts(existing, desiredVLPorts []corev1.ServicePort) []corev1.ServicePort {
existingNodePortByPort := make(map[int32]int32, len(existing))
var reservedPorts []corev1.ServicePort
for _, p := range existing {
if p.NodePort != 0 {
existingNodePortByPort[p.Port] = p.NodePort
}
if reservedPortNames[p.Name] {
reservedPorts = append(reservedPorts, p)
}
}

merged := make([]corev1.ServicePort, 0, len(reservedPorts)+len(desiredVLPorts))
merged = append(merged, reservedPorts...)
for _, dp := range desiredVLPorts {
if dp.NodePort == 0 {
if np, ok := existingNodePortByPort[dp.Port]; ok {
dp.NodePort = np
}
}
merged = append(merged, dp)
}
return merged
}

// portsEqual returns true if both slices describe the same set of ports
// (matched by name, port, and targetPort, order-independent).
// NodePort is intentionally excluded: it is assigned by Kubernetes and not
// managed by the reconciler.
// NodePort is intentionally excluded from the comparison: it is assigned by
// Kubernetes (or the user, via NodePort-typed Services) and carried over by
// buildDesiredPorts, so a reconcile that only differs by NodePort should not
// trigger a patch.
func portsEqual(a, b []corev1.ServicePort) bool {
if len(a) != len(b) {
return false
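The body of portsEqual is collapsed past the length check in the hunk above. For reviewers, a minimal sketch of the order-independent comparison its doc comment describes; the name portsEqualSketch and the key struct are illustrative only and not part of this PR, and the repository's actual implementation may differ.

package hugservice

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// portsEqualSketch compares two port lists as a multiset keyed by name, port,
// and targetPort, ignoring NodePort (sketch only, not the PR's code).
func portsEqualSketch(a, b []corev1.ServicePort) bool {
	if len(a) != len(b) {
		return false
	}
	type key struct {
		name       string
		port       int32
		targetPort intstr.IntOrString
	}
	counts := make(map[key]int, len(a))
	for _, p := range a {
		counts[key{p.Name, p.Port, p.TargetPort}]++
	}
	for _, p := range b {
		k := key{p.Name, p.Port, p.TargetPort}
		if counts[k] == 0 {
			return false
		}
		counts[k]--
	}
	return true
}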
58 changes: 58 additions & 0 deletions k8s/gate/hugservice/hugservice_test.go
@@ -14,6 +14,7 @@
package hugservice

import (
"reflect"
"testing"

corev1 "k8s.io/api/core/v1"
@@ -24,6 +25,18 @@ func port(name string, port, nodePort int32) corev1.ServicePort {
return corev1.ServicePort{Name: name, Port: port, NodePort: nodePort, TargetPort: intstr.FromInt32(port)}
}

// vlPort builds a ServicePort as emitted by buildVirtualListenerServicePorts
// (TCP, TargetPort==Port). nodePort=0 means "unset".
func vlPort(name string, p, nodePort int32) corev1.ServicePort {
return corev1.ServicePort{
Name: name,
Protocol: corev1.ProtocolTCP,
Port: p,
NodePort: nodePort,
TargetPort: intstr.FromInt32(p),
}
}

func TestPortsEqual(t *testing.T) {
tests := []struct {
name string
@@ -111,3 +124,48 @@ func TestPortsEqual(t *testing.T) {
})
}
}

// TestBuildDesiredPortsPreservesNodePort covers the two NodePort-preservation
// paths:
// - a VL port matching an existing VL port by name and number.
// - a VL port matching a pre-seeded entry only by port number.
// Additionally, a brand-new listener must come out with NodePort=0 so that
// Kubernetes allocates one.
func TestBuildDesiredPortsPreservesNodePort(t *testing.T) {
existing := []corev1.ServicePort{
port("stat", 31024, 31721), // reserved, kept verbatim
vlPort("tls-31443", 31443, 31364), // matches desired by name
port("kube-example", 32132, 32132), // matches desired by port number
}
desiredVL := []corev1.ServicePort{
vlPort("tls-31443", 31443, 0),
vlPort("tls-32132", 32132, 0),
vlPort("tls-40000", 40000, 0), // new listener, no existing match
}
want := []corev1.ServicePort{
port("stat", 31024, 31721),
vlPort("tls-31443", 31443, 31364),
vlPort("tls-32132", 32132, 32132),
vlPort("tls-40000", 40000, 0),
}

got := buildDesiredPorts(existing, desiredVL)
if !reflect.DeepEqual(got, want) {
t.Fatalf("buildDesiredPorts() mismatch\n got = %#v\n want = %#v", got, want)
}
}

// TestBuildDesiredPortsIsStable checks that, once the first reconcile has run
// and Kubernetes has filled any unset NodePorts, a second reconcile is a
// no-op. This is what prevents NodePorts from flipping on every event batch.
func TestBuildDesiredPortsIsStable(t *testing.T) {
desiredVL := []corev1.ServicePort{vlPort("tls-31443", 31443, 0)}
first := buildDesiredPorts([]corev1.ServicePort{port("stat", 31024, 31024)}, desiredVL)
for i := range first {
if first[i].NodePort == 0 {
first[i].NodePort = 30000 // simulate Kubernetes allocation
}
}
if second := buildDesiredPorts(first, desiredVL); !reflect.DeepEqual(first, second) {
t.Fatalf("reconcile is not stable\n first = %#v\n second = %#v", first, second)
}
}