[Aider] Phase 0

2025-05-10 18:18:58 -04:00
parent 2f0debf608
commit 1ae06781d6
14 changed files with 4438 additions and 55 deletions


@@ -1,19 +1,16 @@
 package config
 import (
-	"io/ioutil"
+	"os"
 	"path/filepath"
 	"strings"
 	"testing"
-	"git.dws.rip/dubey/kat/api/v1alpha1"
-	"google.golang.org/protobuf/types/known/timestamppb"
+	pb "git.dws.rip/dubey/kat/api/v1alpha1"
 )
 func createTestClusterKatFile(t *testing.T, content string) string {
 	t.Helper()
-	tmpFile, err := ioutil.TempFile(t.TempDir(), "cluster.*.kat")
+	tmpFile, err := os.CreateTemp(t.TempDir(), "cluster.*.kat")
 	if err != nil {
 		t.Fatalf("Failed to create temp file: %v", err)
 	}
@@ -144,8 +141,8 @@ spec:
 }
 func TestSetClusterConfigDefaults(t *testing.T) {
-	config := &v1alpha1.ClusterConfiguration{
-		Spec: &v1alpha1.ClusterConfigurationSpec{},
+	config := &pb.ClusterConfiguration{
+		Spec: &pb.ClusterConfigurationSpec{},
 	}
 	SetClusterConfigDefaults(config)
@@ -184,8 +181,8 @@ func TestSetClusterConfigDefaults(t *testing.T) {
 	}
 	// Test NodeLossTimeoutSeconds derivation
-	configWithTick := &v1alpha1.ClusterConfiguration{
-		Spec: &v1alpha1.ClusterConfigurationSpec{AgentTickSeconds: 10},
+	configWithTick := &pb.ClusterConfiguration{
+		Spec: &pb.ClusterConfigurationSpec{AgentTickSeconds: 10},
 	}
 	SetClusterConfigDefaults(configWithTick)
 	if configWithTick.Spec.NodeLossTimeoutSeconds != 40 { // 10 * 4
@@ -194,48 +191,53 @@ func TestSetClusterConfigDefaults(t *testing.T) {
 }
 func TestValidateClusterConfiguration_InvalidValues(t *testing.T) {
-	baseValidSpec := func() *v1alpha1.ClusterConfigurationSpec {
-		return &v1alpha1.ClusterConfigurationSpec{
-			ClusterCidr: "10.0.0.0/16",
-			ServiceCidr: "10.1.0.0/16",
-			NodeSubnetBits: 8,
-			ClusterDomain: "test.local",
-			AgentPort: 10250,
-			ApiPort: 10251,
-			EtcdPeerPort: 2380,
-			EtcdClientPort: 2379,
-			VolumeBasePath: "/var/lib/kat/volumes",
-			BackupPath: "/var/lib/kat/backups",
-			BackupIntervalMinutes: 30,
-			AgentTickSeconds: 15,
-			NodeLossTimeoutSeconds:60,
+	baseValidSpec := func() *pb.ClusterConfigurationSpec {
+		return &pb.ClusterConfigurationSpec{
+			ClusterCidr:            "10.0.0.0/16",
+			ServiceCidr:            "10.1.0.0/16",
+			NodeSubnetBits:         8,
+			ClusterDomain:          "test.local",
+			AgentPort:              10250,
+			ApiPort:                10251,
+			EtcdPeerPort:           2380,
+			EtcdClientPort:         2379,
+			VolumeBasePath:         "/var/lib/kat/volumes",
+			BackupPath:             "/var/lib/kat/backups",
+			BackupIntervalMinutes:  30,
+			AgentTickSeconds:       15,
+			NodeLossTimeoutSeconds: 60,
 		}
 	}
-	baseValidMetadata := func() *v1alpha1.ObjectMeta {
-		return &v1alpha1.ObjectMeta{Name: "test"}
+	baseValidMetadata := func() *pb.ObjectMeta {
+		return &pb.ObjectMeta{Name: "test"}
 	}
 	tests := []struct {
 		name    string
-		mutator func(cfg *v1alpha1.ClusterConfiguration)
+		mutator func(cfg *pb.ClusterConfiguration)
 		wantErr string
 	}{
-		{"invalid clusterCIDR", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.ClusterCidr = "invalid" }, "invalid spec.clusterCIDR"},
-		{"invalid serviceCIDR", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.ServiceCidr = "invalid" }, "invalid spec.serviceCIDR"},
-		{"invalid agentPort low", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.AgentPort = 0 }, "invalid port for agentPort"},
-		{"invalid agentPort high", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.AgentPort = 70000 }, "invalid port for agentPort"},
-		{"port conflict", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.ApiPort = cfg.Spec.AgentPort }, "port conflict"},
-		{"invalid nodeSubnetBits low", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.NodeSubnetBits = 0 }, "invalid spec.nodeSubnetBits"},
-		{"invalid nodeSubnetBits high", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.NodeSubnetBits = 32 }, "invalid spec.nodeSubnetBits"},
-		{"invalid nodeSubnetBits vs clusterCIDR", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.ClusterCidr = "10.0.0.0/28"; cfg.Spec.NodeSubnetBits = 8 }, "results in an invalid subnet size"},
-		{"invalid agentTickSeconds", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.AgentTickSeconds = 0 }, "agentTickSeconds must be positive"},
-		{"invalid nodeLossTimeoutSeconds", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.NodeLossTimeoutSeconds = 0 }, "nodeLossTimeoutSeconds must be positive"},
-		{"nodeLoss < agentTick", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.NodeLossTimeoutSeconds = cfg.Spec.AgentTickSeconds - 1 }, "nodeLossTimeoutSeconds must be greater"},
+		{"invalid clusterCIDR", func(cfg *pb.ClusterConfiguration) { cfg.Spec.ClusterCidr = "invalid" }, "invalid spec.clusterCIDR"},
+		{"invalid serviceCIDR", func(cfg *pb.ClusterConfiguration) { cfg.Spec.ServiceCidr = "invalid" }, "invalid spec.serviceCIDR"},
+		{"invalid agentPort low", func(cfg *pb.ClusterConfiguration) { cfg.Spec.AgentPort = 0 }, "invalid port for agentPort"},
+		{"invalid agentPort high", func(cfg *pb.ClusterConfiguration) { cfg.Spec.AgentPort = 70000 }, "invalid port for agentPort"},
+		{"port conflict", func(cfg *pb.ClusterConfiguration) { cfg.Spec.ApiPort = cfg.Spec.AgentPort }, "port conflict"},
+		{"invalid nodeSubnetBits low", func(cfg *pb.ClusterConfiguration) { cfg.Spec.NodeSubnetBits = 0 }, "invalid spec.nodeSubnetBits"},
+		{"invalid nodeSubnetBits high", func(cfg *pb.ClusterConfiguration) { cfg.Spec.NodeSubnetBits = 32 }, "invalid spec.nodeSubnetBits"},
+		{"invalid nodeSubnetBits vs clusterCIDR", func(cfg *pb.ClusterConfiguration) {
+			cfg.Spec.ClusterCidr = "10.0.0.0/28"
+			cfg.Spec.NodeSubnetBits = 8
+		}, "results in an invalid subnet size"},
+		{"invalid agentTickSeconds", func(cfg *pb.ClusterConfiguration) { cfg.Spec.AgentTickSeconds = 0 }, "agentTickSeconds must be positive"},
+		{"invalid nodeLossTimeoutSeconds", func(cfg *pb.ClusterConfiguration) { cfg.Spec.NodeLossTimeoutSeconds = 0 }, "nodeLossTimeoutSeconds must be positive"},
+		{"nodeLoss < agentTick", func(cfg *pb.ClusterConfiguration) {
+			cfg.Spec.NodeLossTimeoutSeconds = cfg.Spec.AgentTickSeconds - 1
+		}, "nodeLossTimeoutSeconds must be greater"},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			config := &v1alpha1.ClusterConfiguration{Metadata: baseValidMetadata(), Spec: baseValidSpec()}
+			config := &pb.ClusterConfiguration{Metadata: baseValidMetadata(), Spec: baseValidSpec()}
 			tt.mutator(config)
 			err := ValidateClusterConfiguration(config)
 			if err == nil {