feat: Add unit tests for cluster config parsing and tarball utility
internal/config/parse_test.go (new file, 332 lines)
@@ -0,0 +1,332 @@
package config

import (
	"os"
	"strings"
	"testing"

	"git.dws.rip/dubey/kat/api/v1alpha1"
)

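// createTestClusterKatFile writes the given YAML content to a temporary
// *.kat file under t.TempDir() and returns its path; the file is removed
// automatically when the test finishes.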
func createTestClusterKatFile(t *testing.T, content string) string {
	t.Helper()
	tmpFile, err := os.CreateTemp(t.TempDir(), "cluster.*.kat")
	if err != nil {
		t.Fatalf("Failed to create temp file: %v", err)
	}
	if _, err := tmpFile.WriteString(content); err != nil {
		tmpFile.Close()
		t.Fatalf("Failed to write to temp file: %v", err)
	}
	if err := tmpFile.Close(); err != nil {
		t.Fatalf("Failed to close temp file: %v", err)
	}
	return tmpFile.Name()
}

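// TestParseClusterConfiguration_Valid checks that a well-formed cluster.kat
// file is parsed, that explicitly set fields are preserved, and that
// unspecified fields (e.g. clusterDomain) receive the package defaults.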
func TestParseClusterConfiguration_Valid(t *testing.T) {
	yamlContent := `
apiVersion: kat.dws.rip/v1alpha1
kind: ClusterConfiguration
metadata:
  name: test-cluster
spec:
  clusterCIDR: "10.0.0.0/16"
  serviceCIDR: "10.1.0.0/16"
  nodeSubnetBits: 8 # /24 for nodes
  apiPort: 8080 # Non-default
`
	filePath := createTestClusterKatFile(t, yamlContent)

	config, err := ParseClusterConfiguration(filePath)
	if err != nil {
		t.Fatalf("ParseClusterConfiguration() error = %v, wantErr %v", err, false)
	}

	if config.Metadata.Name != "test-cluster" {
		t.Errorf("Expected metadata.name 'test-cluster', got '%s'", config.Metadata.Name)
	}
	if config.Spec.ClusterCidr != "10.0.0.0/16" {
		t.Errorf("Expected spec.clusterCIDR '10.0.0.0/16', got '%s'", config.Spec.ClusterCidr)
	}
	if config.Spec.ApiPort != 8080 {
		t.Errorf("Expected spec.apiPort 8080, got %d", config.Spec.ApiPort)
	}
	// Check a default value
	if config.Spec.ClusterDomain != DefaultClusterDomain {
		t.Errorf("Expected default spec.clusterDomain '%s', got '%s'", DefaultClusterDomain, config.Spec.ClusterDomain)
	}
	if config.Spec.NodeSubnetBits != 8 {
		t.Errorf("Expected spec.nodeSubnetBits 8, got %d", config.Spec.NodeSubnetBits)
	}
}

func TestParseClusterConfiguration_FileNotFound(t *testing.T) {
	_, err := ParseClusterConfiguration("nonexistent.kat")
	if err == nil {
		t.Fatalf("ParseClusterConfiguration() with non-existent file did not return an error")
	}
	if !strings.Contains(err.Error(), "file not found") {
		t.Errorf("Expected 'file not found' error, got: %v", err)
	}
}

func TestParseClusterConfiguration_InvalidYAML(t *testing.T) {
	filePath := createTestClusterKatFile(t, "this: is: not: valid: yaml")
	_, err := ParseClusterConfiguration(filePath)
	if err == nil {
		t.Fatalf("ParseClusterConfiguration() with invalid YAML did not return an error")
	}
	if !strings.Contains(err.Error(), "unmarshal YAML") {
		t.Errorf("Expected 'unmarshal YAML' error, got: %v", err)
	}
}

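// TestParseClusterConfiguration_MissingRequiredFields is table-driven: each
// case supplies a config that omits or misuses one required field and asserts
// that the parser's error mentions that field.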
func TestParseClusterConfiguration_MissingRequiredFields(t *testing.T) {
	tests := []struct {
		name    string
		content string
		wantErr string
	}{
		{
			name: "missing metadata name",
			content: `
apiVersion: kat.dws.rip/v1alpha1
kind: ClusterConfiguration
spec:
  clusterCIDR: "10.0.0.0/16"
  serviceCIDR: "10.1.0.0/16"
`,
			wantErr: "metadata.name is required",
		},
		{
			name: "missing clusterCIDR",
			content: `
apiVersion: kat.dws.rip/v1alpha1
kind: ClusterConfiguration
metadata:
  name: test-cluster
spec:
  serviceCIDR: "10.1.0.0/16"
`,
			wantErr: "spec.clusterCIDR is required",
		},
		{
			name: "invalid kind",
			content: `
apiVersion: kat.dws.rip/v1alpha1
kind: WrongKind
metadata:
  name: test-cluster
spec:
  clusterCIDR: "10.0.0.0/16"
  serviceCIDR: "10.1.0.0/16"
`,
			wantErr: "invalid kind",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			filePath := createTestClusterKatFile(t, tt.content)
			_, err := ParseClusterConfiguration(filePath)
			if err == nil {
				t.Fatalf("ParseClusterConfiguration() did not return an error for %s", tt.name)
			}
			if !strings.Contains(err.Error(), tt.wantErr) {
				t.Errorf("Expected error containing '%s', got: %v", tt.wantErr, err)
			}
		})
	}
}

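// TestSetClusterConfigDefaults verifies that an empty spec is filled with the
// package's Default* constants, and that NodeLossTimeoutSeconds is derived
// from AgentTickSeconds (the test expects a factor of 4) when only the tick
// is provided.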
func TestSetClusterConfigDefaults(t *testing.T) {
	config := &v1alpha1.ClusterConfiguration{
		Spec: &v1alpha1.ClusterConfigurationSpec{},
	}
	SetClusterConfigDefaults(config)

	if config.Spec.ClusterDomain != DefaultClusterDomain {
		t.Errorf("DefaultClusterDomain: got %s, want %s", config.Spec.ClusterDomain, DefaultClusterDomain)
	}
	if config.Spec.ApiPort != DefaultApiPort {
		t.Errorf("DefaultApiPort: got %d, want %d", config.Spec.ApiPort, DefaultApiPort)
	}
	if config.Spec.AgentPort != DefaultAgentPort {
		t.Errorf("DefaultAgentPort: got %d, want %d", config.Spec.AgentPort, DefaultAgentPort)
	}
	if config.Spec.EtcdClientPort != DefaultEtcdClientPort {
		t.Errorf("DefaultEtcdClientPort: got %d, want %d", config.Spec.EtcdClientPort, DefaultEtcdClientPort)
	}
	if config.Spec.EtcdPeerPort != DefaultEtcdPeerPort {
		t.Errorf("DefaultEtcdPeerPort: got %d, want %d", config.Spec.EtcdPeerPort, DefaultEtcdPeerPort)
	}
	if config.Spec.VolumeBasePath != DefaultVolumeBasePath {
		t.Errorf("DefaultVolumeBasePath: got %s, want %s", config.Spec.VolumeBasePath, DefaultVolumeBasePath)
	}
	if config.Spec.BackupPath != DefaultBackupPath {
		t.Errorf("DefaultBackupPath: got %s, want %s", config.Spec.BackupPath, DefaultBackupPath)
	}
	if config.Spec.BackupIntervalMinutes != DefaultBackupIntervalMins {
		t.Errorf("DefaultBackupIntervalMins: got %d, want %d", config.Spec.BackupIntervalMinutes, DefaultBackupIntervalMins)
	}
	if config.Spec.AgentTickSeconds != DefaultAgentTickSeconds {
		t.Errorf("DefaultAgentTickSeconds: got %d, want %d", config.Spec.AgentTickSeconds, DefaultAgentTickSeconds)
	}
	if config.Spec.NodeLossTimeoutSeconds != DefaultNodeLossTimeoutSec {
		t.Errorf("DefaultNodeLossTimeoutSec: got %d, want %d", config.Spec.NodeLossTimeoutSeconds, DefaultNodeLossTimeoutSec)
	}
	if config.Spec.NodeSubnetBits != DefaultNodeSubnetBits {
		t.Errorf("DefaultNodeSubnetBits: got %d, want %d", config.Spec.NodeSubnetBits, DefaultNodeSubnetBits)
	}

	// Test NodeLossTimeoutSeconds derivation
	configWithTick := &v1alpha1.ClusterConfiguration{
		Spec: &v1alpha1.ClusterConfigurationSpec{AgentTickSeconds: 10},
	}
	SetClusterConfigDefaults(configWithTick)
	if configWithTick.Spec.NodeLossTimeoutSeconds != 40 { // 10 * 4
		t.Errorf("Derived NodeLossTimeoutSeconds: got %d, want %d", configWithTick.Spec.NodeLossTimeoutSeconds, 40)
	}
}

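// TestValidateClusterConfiguration_InvalidValues starts each case from a
// known-good config, applies a single mutation, and asserts that validation
// fails with an error containing the expected substring.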
func TestValidateClusterConfiguration_InvalidValues(t *testing.T) {
	baseValidSpec := func() *v1alpha1.ClusterConfigurationSpec {
		return &v1alpha1.ClusterConfigurationSpec{
			ClusterCidr: "10.0.0.0/16",
			ServiceCidr: "10.1.0.0/16",
			NodeSubnetBits: 8,
			ClusterDomain: "test.local",
			AgentPort: 10250,
			ApiPort: 10251,
			EtcdPeerPort: 2380,
			EtcdClientPort: 2379,
			VolumeBasePath: "/var/lib/kat/volumes",
			BackupPath: "/var/lib/kat/backups",
			BackupIntervalMinutes: 30,
			AgentTickSeconds: 15,
			NodeLossTimeoutSeconds: 60,
		}
	}
	baseValidMetadata := func() *v1alpha1.ObjectMeta {
		return &v1alpha1.ObjectMeta{Name: "test"}
	}

	tests := []struct {
		name    string
		mutator func(cfg *v1alpha1.ClusterConfiguration)
		wantErr string
	}{
		{"invalid clusterCIDR", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.ClusterCidr = "invalid" }, "invalid spec.clusterCIDR"},
		{"invalid serviceCIDR", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.ServiceCidr = "invalid" }, "invalid spec.serviceCIDR"},
		{"invalid agentPort low", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.AgentPort = 0 }, "invalid port for agentPort"},
		{"invalid agentPort high", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.AgentPort = 70000 }, "invalid port for agentPort"},
		{"port conflict", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.ApiPort = cfg.Spec.AgentPort }, "port conflict"},
		{"invalid nodeSubnetBits low", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.NodeSubnetBits = 0 }, "invalid spec.nodeSubnetBits"},
		{"invalid nodeSubnetBits high", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.NodeSubnetBits = 32 }, "invalid spec.nodeSubnetBits"},
		{"invalid nodeSubnetBits vs clusterCIDR", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.ClusterCidr = "10.0.0.0/28"; cfg.Spec.NodeSubnetBits = 8 }, "results in an invalid subnet size"},
		{"invalid agentTickSeconds", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.AgentTickSeconds = 0 }, "agentTickSeconds must be positive"},
		{"invalid nodeLossTimeoutSeconds", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.NodeLossTimeoutSeconds = 0 }, "nodeLossTimeoutSeconds must be positive"},
		{"nodeLoss < agentTick", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.NodeLossTimeoutSeconds = cfg.Spec.AgentTickSeconds - 1 }, "nodeLossTimeoutSeconds must be greater"},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config := &v1alpha1.ClusterConfiguration{Metadata: baseValidMetadata(), Spec: baseValidSpec()}
			tt.mutator(config)
			err := ValidateClusterConfiguration(config)
			if err == nil {
				t.Fatalf("ValidateClusterConfiguration() did not return an error for %s", tt.name)
			}
			if !strings.Contains(err.Error(), tt.wantErr) {
				t.Errorf("Expected error containing '%s', got: %v", tt.wantErr, err)
			}
		})
	}
}

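// TestParseQuadletDirectory_ValidSimple exercises ParseQuadletDirectory with an
// in-memory map of file name to raw *.kat bytes and checks that the Workload
// and VirtualLoadBalancer quadlets are both parsed and share the workload name.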
func TestParseQuadletDirectory_ValidSimple(t *testing.T) {
	files := map[string][]byte{
		"workload.kat": []byte(`
apiVersion: kat.dws.rip/v1alpha1
kind: Workload
metadata:
  name: test-workload
spec:
  type: SERVICE
  source:
    image: "nginx:latest"
`),
		"vlb.kat": []byte(`
apiVersion: kat.dws.rip/v1alpha1
kind: VirtualLoadBalancer
metadata:
  name: test-workload # Assumed to match workload name
spec:
  ports:
    - containerPort: 80
`),
	}

	parsed, err := ParseQuadletDirectory(files)
	if err != nil {
		t.Fatalf("ParseQuadletDirectory() error = %v", err)
	}
	if parsed.Workload == nil {
		t.Fatal("Parsed Workload is nil")
	}
	if parsed.Workload.Metadata.Name != "test-workload" {
		t.Errorf("Expected Workload name 'test-workload', got '%s'", parsed.Workload.Metadata.Name)
	}
	if parsed.VirtualLoadBalancer == nil {
		t.Fatal("Parsed VirtualLoadBalancer is nil")
	}
	if parsed.VirtualLoadBalancer.Metadata.Name != "test-workload" {
		t.Errorf("Expected VLB name 'test-workload', got '%s'", parsed.VirtualLoadBalancer.Metadata.Name)
	}
}

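// The following two tests cover failure modes of ParseQuadletDirectory: a
// directory without any Workload definition, and one with more than one.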
func TestParseQuadletDirectory_MissingWorkload(t *testing.T) {
	files := map[string][]byte{
		"vlb.kat": []byte(`kind: VirtualLoadBalancer`),
	}
	_, err := ParseQuadletDirectory(files)
	if err == nil {
		t.Fatal("ParseQuadletDirectory() with missing workload.kat did not return an error")
	}
	if !strings.Contains(err.Error(), "required Workload definition (workload.kat) not found") {
		t.Errorf("Expected 'required Workload' error, got: %v", err)
	}
}

func TestParseQuadletDirectory_MultipleWorkloads(t *testing.T) {
	files := map[string][]byte{
		"workload1.kat": []byte(`
apiVersion: kat.dws.rip/v1alpha1
kind: Workload
metadata:
  name: wl1
spec:
  type: SERVICE
  source: {image: "img1"}`),
		"workload2.kat": []byte(`
apiVersion: kat.dws.rip/v1alpha1
kind: Workload
metadata:
  name: wl2
spec:
  type: SERVICE
  source: {image: "img2"}`),
	}

	_, err := ParseQuadletDirectory(files)
	if err == nil {
		t.Fatal("ParseQuadletDirectory() with multiple workload.kat did not return an error")
	}
	if !strings.Contains(err.Error(), "multiple Workload definitions found") {
		t.Errorf("Expected 'multiple Workload' error, got: %v", err)
	}
}