[Aider] Phase 0
This commit is contained in:
323
internal/config/parse.go
Normal file
323
internal/config/parse.go
Normal file
@ -0,0 +1,323 @@
|
||||
// File: internal/config/parse.go
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
pb "git.dws.rip/dubey/kat/api/v1alpha1" // Adjust to your actual go module path
|
||||
"gopkg.in/yaml.v3" // Add to go.mod: go get gopkg.in/yaml.v3
|
||||
)
|
||||
|
||||
var _ = yaml.Unmarshal // NOTE(review): redundant — yaml.Unmarshal is called throughout this file, so this blank reference is unnecessary; consider removing.
|
||||
|
||||
// ParseClusterConfiguration reads, unmarshals, and validates a cluster.kat file.
|
||||
func ParseClusterConfiguration(filePath string) (*pb.ClusterConfiguration, error) {
|
||||
if _, err := os.Stat(filePath); os.IsNotExist(err) {
|
||||
return nil, fmt.Errorf("cluster configuration file not found: %s", filePath)
|
||||
}
|
||||
|
||||
yamlFile, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read cluster configuration file %s: %w", filePath, err)
|
||||
}
|
||||
|
||||
var config pb.ClusterConfiguration
|
||||
// We expect the YAML to have top-level keys like 'apiVersion', 'kind', 'metadata', 'spec'
|
||||
// but our proto is just the ClusterConfiguration message.
|
||||
// So, we'll unmarshal into a temporary map to extract the 'spec' and 'metadata'.
|
||||
var rawConfigMap map[string]interface{}
|
||||
if err = yaml.Unmarshal(yamlFile, &rawConfigMap); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal YAML from %s: %w", filePath, err)
|
||||
}
|
||||
|
||||
// Quick check for kind
|
||||
kind, ok := rawConfigMap["kind"].(string)
|
||||
if !ok || kind != "ClusterConfiguration" {
|
||||
return nil, fmt.Errorf("invalid kind in %s: expected ClusterConfiguration, got %v", filePath, rawConfigMap["kind"])
|
||||
}
|
||||
|
||||
metadataMap, ok := rawConfigMap["metadata"].(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("metadata section not found or invalid in %s", filePath)
|
||||
}
|
||||
metadataBytes, err := yaml.Marshal(metadataMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to re-marshal metadata: %w", err)
|
||||
}
|
||||
if err = yaml.Unmarshal(metadataBytes, &config.Metadata); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal metadata into proto: %w", err)
|
||||
}
|
||||
|
||||
specMap, ok := rawConfigMap["spec"].(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("spec section not found or invalid in %s", filePath)
|
||||
}
|
||||
specBytes, err := yaml.Marshal(specMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to re-marshal spec: %w", err)
|
||||
}
|
||||
if err = yaml.Unmarshal(specBytes, &config.Spec); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal spec into proto: %w", err)
|
||||
}
|
||||
|
||||
SetClusterConfigDefaults(&config)
|
||||
|
||||
if err := ValidateClusterConfiguration(&config); err != nil {
|
||||
return nil, fmt.Errorf("invalid cluster configuration in %s: %w", filePath, err)
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
// SetClusterConfigDefaults applies default values to the ClusterConfiguration spec.
|
||||
func SetClusterConfigDefaults(config *pb.ClusterConfiguration) {
|
||||
if config.Spec == nil {
|
||||
config.Spec = &pb.ClusterConfigurationSpec{}
|
||||
}
|
||||
s := config.Spec
|
||||
|
||||
if s.ClusterDomain == "" {
|
||||
s.ClusterDomain = DefaultClusterDomain
|
||||
}
|
||||
if s.AgentPort == 0 {
|
||||
s.AgentPort = DefaultAgentPort
|
||||
}
|
||||
if s.ApiPort == 0 {
|
||||
s.ApiPort = DefaultApiPort
|
||||
}
|
||||
if s.EtcdPeerPort == 0 {
|
||||
s.EtcdPeerPort = DefaultEtcdPeerPort
|
||||
}
|
||||
if s.EtcdClientPort == 0 {
|
||||
s.EtcdClientPort = DefaultEtcdClientPort
|
||||
}
|
||||
if s.VolumeBasePath == "" {
|
||||
s.VolumeBasePath = DefaultVolumeBasePath
|
||||
}
|
||||
if s.BackupPath == "" {
|
||||
s.BackupPath = DefaultBackupPath
|
||||
}
|
||||
if s.BackupIntervalMinutes == 0 {
|
||||
s.BackupIntervalMinutes = DefaultBackupIntervalMins
|
||||
}
|
||||
if s.AgentTickSeconds == 0 {
|
||||
s.AgentTickSeconds = DefaultAgentTickSeconds
|
||||
}
|
||||
if s.NodeLossTimeoutSeconds == 0 {
|
||||
s.NodeLossTimeoutSeconds = DefaultNodeLossTimeoutSec
|
||||
if s.AgentTickSeconds > 0 { // If agent tick is set, derive from it
|
||||
s.NodeLossTimeoutSeconds = s.AgentTickSeconds * 4 // Example: 4 ticks
|
||||
}
|
||||
}
|
||||
if s.NodeSubnetBits == 0 {
|
||||
s.NodeSubnetBits = DefaultNodeSubnetBits
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateClusterConfiguration performs basic validation on the ClusterConfiguration.
|
||||
func ValidateClusterConfiguration(config *pb.ClusterConfiguration) error {
|
||||
if config.Metadata == nil || config.Metadata.Name == "" {
|
||||
return fmt.Errorf("metadata.name is required")
|
||||
}
|
||||
if config.Spec == nil {
|
||||
return fmt.Errorf("spec is required")
|
||||
}
|
||||
|
||||
s := config.Spec
|
||||
if s.ClusterCidr == "" {
|
||||
return fmt.Errorf("spec.clusterCIDR is required")
|
||||
}
|
||||
if _, _, err := net.ParseCIDR(s.ClusterCidr); err != nil {
|
||||
return fmt.Errorf("invalid spec.clusterCIDR %s: %w", s.ClusterCidr, err)
|
||||
}
|
||||
|
||||
if s.ServiceCidr == "" {
|
||||
return fmt.Errorf("spec.serviceCIDR is required")
|
||||
}
|
||||
if _, _, err := net.ParseCIDR(s.ServiceCidr); err != nil {
|
||||
return fmt.Errorf("invalid spec.serviceCIDR %s: %w", s.ServiceCidr, err)
|
||||
}
|
||||
|
||||
// Validate ports
|
||||
ports := []struct {
|
||||
name string
|
||||
port int32
|
||||
}{
|
||||
{"agentPort", s.AgentPort}, {"apiPort", s.ApiPort},
|
||||
{"etcdPeerPort", s.EtcdPeerPort}, {"etcdClientPort", s.EtcdClientPort},
|
||||
}
|
||||
for _, p := range ports {
|
||||
if p.port <= 0 || p.port > 65535 {
|
||||
return fmt.Errorf("invalid port for %s: %d. Must be between 1 and 65535", p.name, p.port)
|
||||
}
|
||||
}
|
||||
// Check for port conflicts among configured ports
|
||||
portSet := make(map[int32]string)
|
||||
for _, p := range ports {
|
||||
if existing, found := portSet[p.port]; found {
|
||||
return fmt.Errorf("port conflict: %s (%d) and %s (%d) use the same port", p.name, p.port, existing, p.port)
|
||||
}
|
||||
portSet[p.port] = p.name
|
||||
}
|
||||
|
||||
if s.NodeSubnetBits <= 0 || s.NodeSubnetBits >= 32 {
|
||||
return fmt.Errorf("invalid spec.nodeSubnetBits: %d. Must be > 0 and < 32", s.NodeSubnetBits)
|
||||
}
|
||||
// Validate nodeSubnetBits against clusterCIDR prefix length
|
||||
_, clusterNet, _ := net.ParseCIDR(s.ClusterCidr)
|
||||
clusterPrefixLen, _ := clusterNet.Mask.Size()
|
||||
if int(s.NodeSubnetBits) <= clusterPrefixLen {
|
||||
// This logic might be too simple. NodeSubnetBits is the number of *additional* bits for the subnet *within* the cluster prefix.
|
||||
// So, the resulting node subnet prefix length would be clusterPrefixLen + s.NodeSubnetBits.
|
||||
// This must be less than 32 (or 31 for usable IPs).
|
||||
// The RFC states: "Default 7 (yielding /23 subnets if clusterCIDR=/16)"
|
||||
// So if clusterCIDR is /16, node subnet is / (16+7) = /23. This is valid.
|
||||
// A node subnet prefix length must be > clusterPrefixLen and < 32.
|
||||
if (clusterPrefixLen + int(s.NodeSubnetBits)) >= 32 {
|
||||
return fmt.Errorf("spec.nodeSubnetBits (%d) combined with clusterCIDR prefix (%d) results in an invalid subnet size (>= /32)", s.NodeSubnetBits, clusterPrefixLen)
|
||||
}
|
||||
} else {
|
||||
// This case seems unlikely if nodeSubnetBits is the number of bits for the node part.
|
||||
// Let's assume nodeSubnetBits is the number of bits *after* the cluster prefix that define the node subnet.
|
||||
// e.g. cluster 10.0.0.0/8, nodeSubnetBits=8 -> node subnets are /16.
|
||||
}
|
||||
|
||||
if s.BackupIntervalMinutes < 0 { // 0 could mean disabled, but RFC implies positive
|
||||
return fmt.Errorf("spec.backupIntervalMinutes must be non-negative")
|
||||
}
|
||||
if s.AgentTickSeconds <= 0 {
|
||||
return fmt.Errorf("spec.agentTickSeconds must be positive")
|
||||
}
|
||||
if s.NodeLossTimeoutSeconds <= 0 {
|
||||
return fmt.Errorf("spec.nodeLossTimeoutSeconds must be positive")
|
||||
}
|
||||
if s.NodeLossTimeoutSeconds < s.AgentTickSeconds {
|
||||
return fmt.Errorf("spec.nodeLossTimeoutSeconds must be greater than or equal to spec.agentTickSeconds")
|
||||
}
|
||||
|
||||
// volumeBasePath and backupPath should be absolute paths, but validation can be tricky
|
||||
// For now, just check if they are non-empty if specified, defaults handle empty.
|
||||
// A more robust check would be filepath.IsAbs()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParsedQuadletFiles holds the structured data from a Quadlet directory.
// Each pointer field is non-nil only when the corresponding definition was
// present in the bundle; ParseQuadletDirectory requires Workload to exist.
type ParsedQuadletFiles struct {
	Workload            *pb.Workload
	VirtualLoadBalancer *pb.VirtualLoadBalancer
	JobDefinition       *pb.JobDefinition
	BuildDefinition     *pb.BuildDefinition
	// Namespace is typically a cluster-level resource, not part of a workload quadlet bundle.
	// If it were, it would be: Namespace *pb.Namespace

	// RawFiles stores the original file contents, keyed by file name, for
	// potential future use (e.g. annotations, original source).
	RawFiles map[string][]byte
}
|
||||
|
||||
// ParseQuadletFile unmarshals a single Quadlet file content based on its kind.
|
||||
// It returns the specific protobuf message.
|
||||
func ParseQuadletFile(fileName string, content []byte) (interface{}, error) {
|
||||
var base struct {
|
||||
ApiVersion string `yaml:"apiVersion"`
|
||||
Kind string `yaml:"kind"`
|
||||
}
|
||||
if err := yaml.Unmarshal(content, &base); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal base YAML from %s to determine kind: %w", fileName, err)
|
||||
}
|
||||
|
||||
// TODO: Check apiVersion, e.g., base.ApiVersion == "kat.dws.rip/v1alpha1"
|
||||
|
||||
var resource interface{}
|
||||
var err error
|
||||
|
||||
switch base.Kind {
|
||||
case "Workload":
|
||||
var wl pb.Workload
|
||||
// Similar to ClusterConfiguration, need to unmarshal metadata and spec separately
|
||||
// from a temporary map if the proto doesn't match the full YAML structure directly.
|
||||
// For simplicity in Phase 0, assuming direct unmarshal works if YAML matches proto structure.
|
||||
// If YAML has apiVersion/kind/metadata/spec at top level, then:
|
||||
var raw map[string]interface{}
|
||||
if err = yaml.Unmarshal(content, &raw); err == nil {
|
||||
if meta, ok := raw["metadata"]; ok {
|
||||
metaBytes, _ := yaml.Marshal(meta)
|
||||
yaml.Unmarshal(metaBytes, &wl.Metadata)
|
||||
}
|
||||
if spec, ok := raw["spec"]; ok {
|
||||
specBytes, _ := yaml.Marshal(spec)
|
||||
yaml.Unmarshal(specBytes, &wl.Spec)
|
||||
}
|
||||
}
|
||||
resource = &wl
|
||||
case "VirtualLoadBalancer":
|
||||
var vlb pb.VirtualLoadBalancer
|
||||
var raw map[string]interface{}
|
||||
if err = yaml.Unmarshal(content, &raw); err == nil {
|
||||
if meta, ok := raw["metadata"]; ok {
|
||||
metaBytes, _ := yaml.Marshal(meta)
|
||||
yaml.Unmarshal(metaBytes, &vlb.Metadata)
|
||||
}
|
||||
if spec, ok := raw["spec"]; ok {
|
||||
specBytes, _ := yaml.Marshal(spec)
|
||||
yaml.Unmarshal(specBytes, &vlb.Spec)
|
||||
}
|
||||
}
|
||||
resource = &vlb
|
||||
// Add cases for JobDefinition, BuildDefinition as they are defined
|
||||
case "JobDefinition":
|
||||
var jd pb.JobDefinition
|
||||
// ... unmarshal logic ...
|
||||
resource = &jd
|
||||
case "BuildDefinition":
|
||||
var bd pb.BuildDefinition
|
||||
// ... unmarshal logic ...
|
||||
resource = &bd
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown Kind '%s' in file %s", base.Kind, fileName)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal YAML for Kind '%s' from %s: %w", base.Kind, fileName, err)
|
||||
}
|
||||
// TODO: Add basic validation for each parsed type (e.g., required fields like metadata.name)
|
||||
return resource, nil
|
||||
}
|
||||
|
||||
// ParseQuadletDirectory processes a map of file contents (from UntarQuadlets).
|
||||
func ParseQuadletDirectory(files map[string][]byte) (*ParsedQuadletFiles, error) {
|
||||
parsed := &ParsedQuadletFiles{RawFiles: files}
|
||||
|
||||
for fileName, content := range files {
|
||||
obj, err := ParseQuadletFile(fileName, content)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing quadlet file %s: %w", fileName, err)
|
||||
}
|
||||
switch v := obj.(type) {
|
||||
case *pb.Workload:
|
||||
if parsed.Workload != nil {
|
||||
return nil, fmt.Errorf("multiple Workload definitions found")
|
||||
}
|
||||
parsed.Workload = v
|
||||
case *pb.VirtualLoadBalancer:
|
||||
if parsed.VirtualLoadBalancer != nil {
|
||||
return nil, fmt.Errorf("multiple VirtualLoadBalancer definitions found")
|
||||
}
|
||||
parsed.VirtualLoadBalancer = v
|
||||
// Add cases for JobDefinition, BuildDefinition
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Perform cross-Quadlet file validation (e.g., workload.kat must exist)
|
||||
if parsed.Workload == nil {
|
||||
return nil, fmt.Errorf("required Workload definition (workload.kat) not found in Quadlet bundle")
|
||||
}
|
||||
if parsed.Workload.Metadata == nil || parsed.Workload.Metadata.Name == "" {
|
||||
return nil, fmt.Errorf("workload.kat must have metadata.name defined")
|
||||
}
|
||||
|
||||
return parsed, nil
|
||||
}
|
@ -1,19 +1,16 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"git.dws.rip/dubey/kat/api/v1alpha1"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
pb "git.dws.rip/dubey/kat/api/v1alpha1"
|
||||
)
|
||||
|
||||
func createTestClusterKatFile(t *testing.T, content string) string {
|
||||
t.Helper()
|
||||
tmpFile, err := ioutil.TempFile(t.TempDir(), "cluster.*.kat")
|
||||
tmpFile, err := os.CreateTemp(t.TempDir(), "cluster.*.kat")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temp file: %v", err)
|
||||
}
|
||||
@ -144,8 +141,8 @@ spec:
|
||||
}
|
||||
|
||||
func TestSetClusterConfigDefaults(t *testing.T) {
|
||||
config := &v1alpha1.ClusterConfiguration{
|
||||
Spec: &v1alpha1.ClusterConfigurationSpec{},
|
||||
config := &pb.ClusterConfiguration{
|
||||
Spec: &pb.ClusterConfigurationSpec{},
|
||||
}
|
||||
SetClusterConfigDefaults(config)
|
||||
|
||||
@ -184,8 +181,8 @@ func TestSetClusterConfigDefaults(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test NodeLossTimeoutSeconds derivation
|
||||
configWithTick := &v1alpha1.ClusterConfiguration{
|
||||
Spec: &v1alpha1.ClusterConfigurationSpec{AgentTickSeconds: 10},
|
||||
configWithTick := &pb.ClusterConfiguration{
|
||||
Spec: &pb.ClusterConfigurationSpec{AgentTickSeconds: 10},
|
||||
}
|
||||
SetClusterConfigDefaults(configWithTick)
|
||||
if configWithTick.Spec.NodeLossTimeoutSeconds != 40 { // 10 * 4
|
||||
@ -194,48 +191,53 @@ func TestSetClusterConfigDefaults(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestValidateClusterConfiguration_InvalidValues(t *testing.T) {
|
||||
baseValidSpec := func() *v1alpha1.ClusterConfigurationSpec {
|
||||
return &v1alpha1.ClusterConfigurationSpec{
|
||||
ClusterCidr: "10.0.0.0/16",
|
||||
ServiceCidr: "10.1.0.0/16",
|
||||
NodeSubnetBits: 8,
|
||||
ClusterDomain: "test.local",
|
||||
AgentPort: 10250,
|
||||
ApiPort: 10251,
|
||||
EtcdPeerPort: 2380,
|
||||
EtcdClientPort: 2379,
|
||||
VolumeBasePath: "/var/lib/kat/volumes",
|
||||
BackupPath: "/var/lib/kat/backups",
|
||||
BackupIntervalMinutes: 30,
|
||||
AgentTickSeconds: 15,
|
||||
NodeLossTimeoutSeconds:60,
|
||||
baseValidSpec := func() *pb.ClusterConfigurationSpec {
|
||||
return &pb.ClusterConfigurationSpec{
|
||||
ClusterCidr: "10.0.0.0/16",
|
||||
ServiceCidr: "10.1.0.0/16",
|
||||
NodeSubnetBits: 8,
|
||||
ClusterDomain: "test.local",
|
||||
AgentPort: 10250,
|
||||
ApiPort: 10251,
|
||||
EtcdPeerPort: 2380,
|
||||
EtcdClientPort: 2379,
|
||||
VolumeBasePath: "/var/lib/kat/volumes",
|
||||
BackupPath: "/var/lib/kat/backups",
|
||||
BackupIntervalMinutes: 30,
|
||||
AgentTickSeconds: 15,
|
||||
NodeLossTimeoutSeconds: 60,
|
||||
}
|
||||
}
|
||||
baseValidMetadata := func() *v1alpha1.ObjectMeta {
|
||||
return &v1alpha1.ObjectMeta{Name: "test"}
|
||||
baseValidMetadata := func() *pb.ObjectMeta {
|
||||
return &pb.ObjectMeta{Name: "test"}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
mutator func(cfg *v1alpha1.ClusterConfiguration)
|
||||
mutator func(cfg *pb.ClusterConfiguration)
|
||||
wantErr string
|
||||
}{
|
||||
{"invalid clusterCIDR", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.ClusterCidr = "invalid" }, "invalid spec.clusterCIDR"},
|
||||
{"invalid serviceCIDR", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.ServiceCidr = "invalid" }, "invalid spec.serviceCIDR"},
|
||||
{"invalid agentPort low", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.AgentPort = 0 }, "invalid port for agentPort"},
|
||||
{"invalid agentPort high", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.AgentPort = 70000 }, "invalid port for agentPort"},
|
||||
{"port conflict", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.ApiPort = cfg.Spec.AgentPort }, "port conflict"},
|
||||
{"invalid nodeSubnetBits low", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.NodeSubnetBits = 0 }, "invalid spec.nodeSubnetBits"},
|
||||
{"invalid nodeSubnetBits high", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.NodeSubnetBits = 32 }, "invalid spec.nodeSubnetBits"},
|
||||
{"invalid nodeSubnetBits vs clusterCIDR", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.ClusterCidr = "10.0.0.0/28"; cfg.Spec.NodeSubnetBits = 8 }, "results in an invalid subnet size"},
|
||||
{"invalid agentTickSeconds", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.AgentTickSeconds = 0 }, "agentTickSeconds must be positive"},
|
||||
{"invalid nodeLossTimeoutSeconds", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.NodeLossTimeoutSeconds = 0 }, "nodeLossTimeoutSeconds must be positive"},
|
||||
{"nodeLoss < agentTick", func(cfg *v1alpha1.ClusterConfiguration) { cfg.Spec.NodeLossTimeoutSeconds = cfg.Spec.AgentTickSeconds - 1 }, "nodeLossTimeoutSeconds must be greater"},
|
||||
{"invalid clusterCIDR", func(cfg *pb.ClusterConfiguration) { cfg.Spec.ClusterCidr = "invalid" }, "invalid spec.clusterCIDR"},
|
||||
{"invalid serviceCIDR", func(cfg *pb.ClusterConfiguration) { cfg.Spec.ServiceCidr = "invalid" }, "invalid spec.serviceCIDR"},
|
||||
{"invalid agentPort low", func(cfg *pb.ClusterConfiguration) { cfg.Spec.AgentPort = 0 }, "invalid port for agentPort"},
|
||||
{"invalid agentPort high", func(cfg *pb.ClusterConfiguration) { cfg.Spec.AgentPort = 70000 }, "invalid port for agentPort"},
|
||||
{"port conflict", func(cfg *pb.ClusterConfiguration) { cfg.Spec.ApiPort = cfg.Spec.AgentPort }, "port conflict"},
|
||||
{"invalid nodeSubnetBits low", func(cfg *pb.ClusterConfiguration) { cfg.Spec.NodeSubnetBits = 0 }, "invalid spec.nodeSubnetBits"},
|
||||
{"invalid nodeSubnetBits high", func(cfg *pb.ClusterConfiguration) { cfg.Spec.NodeSubnetBits = 32 }, "invalid spec.nodeSubnetBits"},
|
||||
{"invalid nodeSubnetBits vs clusterCIDR", func(cfg *pb.ClusterConfiguration) {
|
||||
cfg.Spec.ClusterCidr = "10.0.0.0/28"
|
||||
cfg.Spec.NodeSubnetBits = 8
|
||||
}, "results in an invalid subnet size"},
|
||||
{"invalid agentTickSeconds", func(cfg *pb.ClusterConfiguration) { cfg.Spec.AgentTickSeconds = 0 }, "agentTickSeconds must be positive"},
|
||||
{"invalid nodeLossTimeoutSeconds", func(cfg *pb.ClusterConfiguration) { cfg.Spec.NodeLossTimeoutSeconds = 0 }, "nodeLossTimeoutSeconds must be positive"},
|
||||
{"nodeLoss < agentTick", func(cfg *pb.ClusterConfiguration) {
|
||||
cfg.Spec.NodeLossTimeoutSeconds = cfg.Spec.AgentTickSeconds - 1
|
||||
}, "nodeLossTimeoutSeconds must be greater"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
config := &v1alpha1.ClusterConfiguration{Metadata: baseValidMetadata(), Spec: baseValidSpec()}
|
||||
config := &pb.ClusterConfiguration{Metadata: baseValidMetadata(), Spec: baseValidSpec()}
|
||||
tt.mutator(config)
|
||||
err := ValidateClusterConfiguration(config)
|
||||
if err == nil {
|
||||
|
23
internal/config/types.go
Normal file
23
internal/config/types.go
Normal file
@ -0,0 +1,23 @@
|
||||
// File: internal/config/types.go
|
||||
package config
|
||||
|
||||
// For Phase 0, we will primarily use the generated protobuf types
|
||||
// (e.g., *v1alpha1.ClusterConfiguration) directly.
|
||||
// This file can hold auxiliary types or constants related to config parsing if needed later.
|
||||
|
||||
const (
	// DefaultClusterDomain is the DNS suffix for in-cluster names.
	DefaultClusterDomain = "kat.cluster.local"
	// Default listen ports for the agent/API servers and embedded etcd.
	DefaultAgentPort      = 9116
	DefaultApiPort        = 9115
	DefaultEtcdPeerPort   = 2380
	DefaultEtcdClientPort = 2379
	// Default on-disk locations for workload volumes and state backups.
	DefaultVolumeBasePath = "/var/lib/kat/volumes"
	DefaultBackupPath     = "/var/lib/kat/backups"
	// DefaultBackupIntervalMins is the backup period in minutes.
	DefaultBackupIntervalMins = 30
	// DefaultAgentTickSeconds is the agent heartbeat period.
	DefaultAgentTickSeconds = 15
	// DefaultNodeLossTimeoutSec is the fallback node-loss timeout. Note that
	// SetClusterConfigDefaults normally derives the timeout as
	// AgentTickSeconds * 4, so this constant only applies when the tick is
	// not positive.
	DefaultNodeLossTimeoutSec = 60
	// DefaultNodeSubnetBits is the number of additional prefix bits carved
	// out of the cluster CIDR for each node subnet: a /16 clusterCIDR yields
	// /23 node subnets (16 + 7), i.e. 2^(32-23) = 512 addresses per node.
	DefaultNodeSubnetBits = 7
)
|
Reference in New Issue
Block a user